file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
image_processor_2.0.py | _color = (255,0,0)
possible_target_color = (0,255,0)
#used to judge whether a polygon side is near vertical or near horizontal, for filtering out shapes that don't match expected target characteristics
vert_threshold = math.tan(math.radians(90-20))
horiz_threshold = math.tan(math.radians(20))
#used to look for only horizontal or vertical rectangles of an aspect ratio that matches the targets.
#currently open wide to find both horizontal and vertical targets
max_target_aspect_ratio = 10 # 1.0 # top target is expected to be 24.5 in x 4 in.
min_target_aspect_ratio = 0.1 #0.01# 3# 0.5
angle_to_robot = 0 #camera's 0 bearing to robot's 0 bearing
camera_offset_position = 0
morph_close_iterations = 9
angle_to_shooter = 0 #camera's 0 bearing to shooter's 0 bearing
camera_color_intensity = 0 #value subject to change
camera_saturation = 0 #value subject to change
camera_contrast = 0 #value subject to change
camera_color_hue = 0 #value subject to change
camera_brightness = 20 #value subject to change
camera_gain = 0 #value subject to change
camera_exposure = 20
robot_heading = 0.0 #input from SmartDashboard if enabled, else hard coded here.
x_resolution = 640 #needs to match the camera.
y_resolution = 480
#theta = math.radians(49.165) #half of field of view of the camera
# field_of_view_degrees = 53.0 horizontal field of view
field_of_view_degrees = 26.4382 # vertical field of view
theta = math.radians(field_of_view_degrees/2.0) #half of field of view of the camera, in radians to work with math.tan function.
# real_target_width = 24.5 #inches #24 * 0.0254 #1 inch / 0.254 meters target is 24 inches wide
real_target_height = 28.5 #using these constants and may not be correct for current robot configuration.
angle_to_shooter = 0
#not currently using these constants and may not be correct for current robot configuration.
# target_min_width = 20
# target_max_width = 200
# degrees_horiz_field_of_view = 47.0
# degrees_vert_field_of_view = 480.0/640*degrees_horiz_field_of_view
# inches_camera_height = 54.0
# inches_top_target_height = 98 + 2 + 98
# degrees_camera_pitch = 21.0
# degrees_sighting_offset = -1.55
def __init__(self, img_path):
self.img_path = img_path
self.layout_result_windows(self.h,self.s,self.v)
self.vc = VideoCapture(0)
SmartDashboard.PutNumber(angle_to_robot_title, self.angle_to_robot)
SmartDashboard.PutNumber(camera_offset_position_title, self.camera_offset_position)
SmartDashboard.PutNumber(morph_close_iterations_title, self.morph_close_iterations)
SmartDashboard.PutNumber(angle_to_shooter_title, self.angle_to_shooter)
SmartDashboard.PutNumber(camera_color_intensity_title, self.camera_color_intensity)
SmartDashboard.PutNumber(camera_exposure_title, self.camera_exposure)
SmartDashboard.PutNumber(camera_saturation.title, self.saturation)
SmartDashboard.PutNumber(camera_contrast_title, self.contrast)
SmartDashboard.PutNumber(camera_color_hue_title, self.camera_color_hue)
SmartDashboard.PutNumber(camera_brihtness_title, self.camera_brightness)
def video_feed(self):
while True:
if self.img is not None:
|
if self.img_path is None:
rval, self.img = self.vc.read() #might set to None
else:
self.img = imread(self.img_path)
def process(self):
if enable_dashboard:
self.camera_saturation = int(SmartDashboard.GetNumber(camera_saturation_title)
self.angle_to_robot = int(SmartDashboard.GetNumber(angle_to_robot_title)
self.camera_offset_postion = int(SmartDashboard.GetNumber(camera_offset_position_title)
self.morph_close_iterations = int(SmartDashboard.GetNumber(morph_close_iterations_title)
self.angle_to_shooter = int(SmartDashboard.GetNumber(angle_to_shooter_title)
self.camera_color_intensity = int(SmartDashboard.GetNumber(camera_color_intensity_title)
self.camera_contrast = int(SmartDashboard.GetNumber(camera_contrast_title)
self.camera_color_hue = int(SmartDashboard.GetNumber(camera_color_hue_title)
self.camera_brightness = int(SmartDashboard.GetNumber(camera_brightness_title)
self.camera_exposure = int(SmartDashboard.GetNumber(camera_exposure_title)
self.camera_gain = int(SmartDashboard.GetNumber(camera_gain_title)
if self.img_path is None:
commands.getoutput(" yavta --set-control '0x009a0901 1' /dev/video0")
#print(commands.getoutput(" yavta --get-control '0x009a0901' /dev/video0") )
commands.getoutput("yavta --set-control '0x009a0902 %s' /dev/video0" % self.camera_exposure)
#print(commands.getoutput(" yavta --get-control '0x009a0902' /dev/video0"))
drawing = np.zeros(self.img.shape, dtype=np.uint8)
self.hsv = cvtColor(self.img, cv.CV_BGR2HSV)
self.h, self.s, self.v = split(self.hsv)
self.h_clipped = self.threshold_in_range(self.h, self.hue_thresh-self.hue_delta, self.hue_thresh+self.hue_delta)
self.s_clipped = self.threshold_in_range(self.s, self.sat_thresh-self.sat_delta, self.sat_thresh+self.sat_delta)
self.v_clipped = self.threshold_in_range(self.v, self.val_thresh-self.val_delta, self.val_thresh+self.val_delta)
if show_windows:
h_scaled = resize(self.h_clipped, window_size)
s_scaled = resize(self.s_clipped, window_size)
v_scaled = resize(self.v_clipped, window_size)
imshow(self.h_title, h_scaled)
imshow(self.s_title, s_scaled)
imshow(self.v_title, v_scaled)
self.find_targets()
if waitKey(self.video_pause) == ord('q'):
exit(1)
def layout_result_windows(self, h, s, v):
if show_windows:
pos_x, pos_y = 500,500
# imshow(self.img_path, self.img)
h_scaled = resize(h, window_size)
s_scaled = resize(s, window_size)
v_scaled = resize(v, window_size)
combined_scaled = resize(self.combined, window_size)
img_scaled = resize(self.img, window_size)
imshow(self.h_title , h_scaled)
imshow(self.s_title , s_scaled)
imshow(self.v_title , v_scaled)
imshow(self.combined_title, combined_scaled)
imshow(self.targets_title , img_scaled)
#moveWindow(self.h_title, pos_x*1, pos_y*0);
#moveWindow(self.s_title, pos_x*0, pos_y*1);
#moveWindow(self.v_title, pos_x*1, pos_y*1);
#moveWindow(self.combined_title, pos_x*2, pos_y*0);
#moveWindow(self.targets_title, pos_x*2, pos_y*1);
#these seem to be placed alphabetically....
# createTrackbar( "Hue High Threshold:", self.source_title, self.hue_high_thresh, self.max_thresh, self.update_hue_high_threshold);
# createTrackbar( "Hue Low Threshold:", self.source_title, self.hue_low_thresh, self.max_thresh, self.update_hue_low_threshold);
# createTrackbar( "Sat High Threshold:", self.source_title, self.sat_high_thresh, self.max_thresh, self.update_sat_high_threshold);
# createTrackbar( "Sat Low Threshold:", self.source_title, self.sat_low_thresh, self.max_thresh, self.update_sat_low_threshold);
# createTrackbar( "Val High Threshold:", self.source_title, self.val_high_thresh, self.max_thresh, self.update_val_high_threshold);
# createTrackbar( "Val Low Threshold:", self.source_title, self.val_low_thresh, self.max_thresh, self.update_val_low_threshold);
def update_hue_threshold(self, thresh):
delta = 15
self.h_clipped = self.threshold_in_range(self.h, thresh-delta, thresh+delta)
imshow(self.h_title, self.h_clipped)
self.find_targets()
def update_sat_threshold(self, thresh):
| self.process() | conditional_block |
image_processor_2.0.py |
# degrees_camera_pitch = 21.0
# degrees_sighting_offset = -1.55
def __init__(self, img_path):
self.img_path = img_path
self.layout_result_windows(self.h,self.s,self.v)
self.vc = VideoCapture(0)
SmartDashboard.PutNumber(angle_to_robot_title, self.angle_to_robot)
SmartDashboard.PutNumber(camera_offset_position_title, self.camera_offset_position)
SmartDashboard.PutNumber(morph_close_iterations_title, self.morph_close_iterations)
SmartDashboard.PutNumber(angle_to_shooter_title, self.angle_to_shooter)
SmartDashboard.PutNumber(camera_color_intensity_title, self.camera_color_intensity)
SmartDashboard.PutNumber(camera_exposure_title, self.camera_exposure)
SmartDashboard.PutNumber(camera_saturation.title, self.saturation)
SmartDashboard.PutNumber(camera_contrast_title, self.contrast)
SmartDashboard.PutNumber(camera_color_hue_title, self.camera_color_hue)
SmartDashboard.PutNumber(camera_brihtness_title, self.camera_brightness)
def video_feed(self):
while True:
if self.img is not None:
self.process()
if self.img_path is None:
rval, self.img = self.vc.read() #might set to None
else:
self.img = imread(self.img_path)
def process(self):
if enable_dashboard:
self.camera_saturation = int(SmartDashboard.GetNumber(camera_saturation_title)
self.angle_to_robot = int(SmartDashboard.GetNumber(angle_to_robot_title)
self.camera_offset_postion = int(SmartDashboard.GetNumber(camera_offset_position_title)
self.morph_close_iterations = int(SmartDashboard.GetNumber(morph_close_iterations_title)
self.angle_to_shooter = int(SmartDashboard.GetNumber(angle_to_shooter_title)
self.camera_color_intensity = int(SmartDashboard.GetNumber(camera_color_intensity_title)
self.camera_contrast = int(SmartDashboard.GetNumber(camera_contrast_title)
self.camera_color_hue = int(SmartDashboard.GetNumber(camera_color_hue_title)
self.camera_brightness = int(SmartDashboard.GetNumber(camera_brightness_title)
self.camera_exposure = int(SmartDashboard.GetNumber(camera_exposure_title)
self.camera_gain = int(SmartDashboard.GetNumber(camera_gain_title)
if self.img_path is None:
commands.getoutput(" yavta --set-control '0x009a0901 1' /dev/video0")
#print(commands.getoutput(" yavta --get-control '0x009a0901' /dev/video0") )
commands.getoutput("yavta --set-control '0x009a0902 %s' /dev/video0" % self.camera_exposure)
#print(commands.getoutput(" yavta --get-control '0x009a0902' /dev/video0"))
drawing = np.zeros(self.img.shape, dtype=np.uint8)
self.hsv = cvtColor(self.img, cv.CV_BGR2HSV)
self.h, self.s, self.v = split(self.hsv)
self.h_clipped = self.threshold_in_range(self.h, self.hue_thresh-self.hue_delta, self.hue_thresh+self.hue_delta)
self.s_clipped = self.threshold_in_range(self.s, self.sat_thresh-self.sat_delta, self.sat_thresh+self.sat_delta)
self.v_clipped = self.threshold_in_range(self.v, self.val_thresh-self.val_delta, self.val_thresh+self.val_delta)
if show_windows:
h_scaled = resize(self.h_clipped, window_size)
s_scaled = resize(self.s_clipped, window_size)
v_scaled = resize(self.v_clipped, window_size)
imshow(self.h_title, h_scaled)
imshow(self.s_title, s_scaled)
imshow(self.v_title, v_scaled)
self.find_targets()
if waitKey(self.video_pause) == ord('q'):
exit(1)
def layout_result_windows(self, h, s, v):
if show_windows:
pos_x, pos_y = 500,500
# imshow(self.img_path, self.img)
h_scaled = resize(h, window_size)
s_scaled = resize(s, window_size)
v_scaled = resize(v, window_size)
combined_scaled = resize(self.combined, window_size)
img_scaled = resize(self.img, window_size)
imshow(self.h_title , h_scaled)
imshow(self.s_title , s_scaled)
imshow(self.v_title , v_scaled)
imshow(self.combined_title, combined_scaled)
imshow(self.targets_title , img_scaled)
#moveWindow(self.h_title, pos_x*1, pos_y*0);
#moveWindow(self.s_title, pos_x*0, pos_y*1);
#moveWindow(self.v_title, pos_x*1, pos_y*1);
#moveWindow(self.combined_title, pos_x*2, pos_y*0);
#moveWindow(self.targets_title, pos_x*2, pos_y*1);
#these seem to be placed alphabetically....
# createTrackbar( "Hue High Threshold:", self.source_title, self.hue_high_thresh, self.max_thresh, self.update_hue_high_threshold);
# createTrackbar( "Hue Low Threshold:", self.source_title, self.hue_low_thresh, self.max_thresh, self.update_hue_low_threshold);
# createTrackbar( "Sat High Threshold:", self.source_title, self.sat_high_thresh, self.max_thresh, self.update_sat_high_threshold);
# createTrackbar( "Sat Low Threshold:", self.source_title, self.sat_low_thresh, self.max_thresh, self.update_sat_low_threshold);
# createTrackbar( "Val High Threshold:", self.source_title, self.val_high_thresh, self.max_thresh, self.update_val_high_threshold);
# createTrackbar( "Val Low Threshold:", self.source_title, self.val_low_thresh, self.max_thresh, self.update_val_low_threshold);
def update_hue_threshold(self, thresh):
delta = 15
self.h_clipped = self.threshold_in_range(self.h, thresh-delta, thresh+delta)
imshow(self.h_title, self.h_clipped)
self.find_targets()
def update_sat_threshold(self, thresh):
delta = 25
self.s_clipped = self.threshold_in_range(self.s, thresh-delta, thresh+delta)
imshow(self.s_title, self.s_clipped)
self.find_targets()
def update_val_threshold(self, thresh):
delta = 100
self.v_clipped = self.threshold_in_range(self.v, thresh-delta, thresh+delta)
imshow(self.v_title, self.v_clipped)
self.find_targets()
def threshold_in_range(self, img, low, high):
unused, above = threshold(img, low, self.max_thresh, THRESH_BINARY)
unused, below = threshold(img, high, self.max_thresh, THRESH_BINARY_INV)
return bitwise_and(above, below)
def find_targets(self):
#combine all the masks together to get their overlapping regions.
if True:
self.reset_targeting()
self.combined = bitwise_and(self.h_clipped, bitwise_and(self.s_clipped, self.v_clipped))
#comment above line and uncomment next line to ignore hue channel til we sort out red light hue matching around zero.
#self.combined = bitwise_and(self.s_clipped, self.v_clipped)
self.combined = morphologyEx(src=self.combined, op=MORPH_CLOSE, kernel=self.kernel, iterations=self.morph_close_iterations)
if show_windows:
combined_scaled = resize(self.combined, window_size)
imshow(self.combined_title, combined_scaled )
self.contoured = self.combined.copy()
contours, heirarchy = findContours(self.contoured, RETR_LIST, CHAIN_APPROX_TC89_KCOS)
#print("number of contours found = "+str(len(contours)))
#contours = [convexHull(c.astype(np.float32),clockwise=True,returnPoints=True) for c in contours]
#
polygon_tuples = self.contours_to_polygon_tuples(contours)
polygons = [self.unpack_polygon(t) for t in polygon_tuples]
for polygon_tuple in polygon_tuples:
self.mark_correct_shape_and_orientation(polygon_tuple)
if self.selected_target is not None:
self.draw_target(self.lowest_found_so_far_x, self.lowest_found_so_far, self.selected_target_color)
drawContours(self.drawing, contours, -1, self.selected_target_color, thickness=10)
# drawContours(self.drawing, [self.unpack_polygon(self.selected_target).astype(np.int32)], -1, self.selected_target_color, thickness=10)
self.aim()
if show_windows:
drawing_scaled = resize(self.drawing, window_size)
imshow(self.targets_title, drawing_scaled)
if enable_dashboard:
SmartDashboard.PutNumber("Potential Targets:", len(polygons))
print("Potential Targets:", len(polygons))
def aim(self):
if enable_dashboard:
self.robot_heading = SmartDashboard.GetNumber(robot_heading_title)
polygon, x, y, w, h = self.selected_target
self.target_bearing = self.get_bearing(x + w/2.0)
self.target_range = self.get_range(x, y, w, h) | random_line_split | ||
App.js | are good with the object
this.props.dispatch(markerSelect(filterKeplerObject));
this.setState({filterKeplerObject}, () => this.addEventListeners());
});
}
};
getConfigSheetSummaryData = selectedSheet => {
// get sheet information this.state.selectedSheet should be syncronized with settings
// can possibly remove the || in the sheetName part
const sheetName = selectedSheet;
const sheetObject = tableauExt.dashboardContent.dashboard.worksheets.find(
worksheet => worksheet.name === sheetName
);
if (!sheetObject) {
return;
}
// clean up event listeners (taken from tableau example)
this.removeEventListeners();
if (TableauSettings.ShouldUse) {
this.setState({
isLoading: true
});
} else {
this.setState({isLoading: true});
tableauExt.settings.set('isLoading', true);
tableauExt.settings.saveAsync().then(() => {
this.setState({
tableauSettings: tableauExt.settings.getAll()
});
});
}
//working here on pulling out summmary data
//may want to limit to a single row when getting column names
sheetObject.getSummaryDataAsync(options).then(t => {
log('in getData().getSummaryDataAsync', t, this.state);
const newDataState = dataTableToKepler(t);
if (TableauSettings.ShouldUse) {
this.setState({
...newDataState,
selectedSheet: sheetName,
isLoading: false,
isMissingData: false
});
} else {
log(
'%c getConfigSheetSummaryData TableauSettings.ShouldUse false',
'color: purple'
);
this.setState({isLoading: false});
tableauExt.settings.set('isLoading', false);
tableauExt.settings.saveAsync().then(() => {
this.setState({
...newDataState,
isLoading: false,
tableauSettings: tableauExt.settings.getAll()
});
});
}
this.addEventListeners();
});
};
clearSheet() {
log('triggered erase');
if (TableauSettings.ShouldUse) {
TableauSettings.eraseAndSave(['isLoading', 'configuration'], settings => {
this.setState({
tableauSettings: settings,
configuration: false,
isSplash: true
});
});
} else {
// erase all the settings, there has got be a better way.
tableauExt.settings.erase('isLoading');
tableauExt.settings.erase('configuration');
// save async the erased settings
// wip - should be able to get rid of state as this is all captured in tableu settings (written to state)
tableauExt.settings.saveAsync().then(() => {
this.setState({
tableauSettings: tableauExt.settings.getAll(),
configuration: false,
isSplash: true
});
});
}
}
clearSplash = () => {
this.setState({
isSplash: false
});
};
configure = () => {
this.clearSheet();
const popUpUrl = window.location.href + '#true';
const popUpOptions = {
height: 625,
width: 720
};
tableauExt.ui
.displayDialogAsync(popUpUrl, '', popUpOptions)
.then(closePayload => {
log('configuring', closePayload, tableauExt.settings.getAll());
if (closePayload === 'false') {
this.setState({
isSplash: false,
isConfig: false,
tableauSettings: tableauExt.settings.getAll()
});
}
})
.catch(error => {
// One expected error condition is when the popup is closed by the user (meaning the user
// clicks the 'X' in the top right of the dialog). This can be checked for like so:
switch (error.errorCode) {
case window.tableau.ErrorCodes.DialogClosedByUser:
log('closed by user');
break;
default:
console.error(error.message);
}
});
};
componentWillUnmount() {
window.removeEventListener('resize', this.resize, true);
}
resize = () => {
this.setState({
width: window.innerWidth,
height: window.innerHeight
});
};
componentDidMount() {
window.addEventListener('resize', this.resize, true);
this.resize();
tableauExt.initializeAsync({configure: this.configure}).then(() => {
// console.log('tableau config', configJson);
// default tableau settings on initial entry into the extension
// we know if we haven't done anything yet when tableauSettings state = []
log('did mount', tableauExt.settings.get('mapboxAPIKey'));
if (tableauExt.settings.get('mapboxAPIKey') === '') {
log(
'defaultSettings triggered',
defaultSettings.length,
defaultSettings
);
defaultSettings.defaultKeys.map((defaultSetting, index) => {
log(
'defaultSetting',
index,
defaultSetting,
defaultSettings.defaults[defaultSetting]
);
this.configCallBack(
defaultSetting,
defaultSettings.defaults[defaultSetting]
);
});
}
// this is where the majority of the code is going to go for this extension I think
log('will mount', tableauExt.settings.getAll());
//get sheetNames and dashboard name from workbook
const dashboardName = tableauExt.dashboardContent.dashboard.name;
const sheetNames = tableauExt.dashboardContent.dashboard.worksheets.map(
worksheet => worksheet.name
);
log('checking field in getAll()', tableauExt.settings.getAll());
// add event listeners (this includes an initial removal)
this.addEventListeners();
// Initialize the current saved settings global
TableauSettings.init();
// default to uber's Kepler key that they requested if user does not enter
this.setState({
tableauKey: MAPBOX_ACCESS_TOKEN,
isLoading: false,
height: window.innerHeight,
width: window.innerWidth,
sheetNames,
dashboardName,
demoType: tableauExt.settings.get('ConfigType') || 'violin',
tableauSettings: tableauExt.settings.getAll()
});
if (
this.state.tableauSettings.configuration &&
this.state.tableauSettings.configuration === 'true'
) {
this.setState({
isSplash: false,
isConfig: false
});
}
});
}
componentWillUpdate(nextProps, nextState) {
if (tableauExt.settings) {
// get selectedSheet from Settings
// hardcoding this for now because I know i have two possibilities
const selectedSheet = tableauExt.settings.get('ConfigSheet');
if (
selectedSheet &&
this.state.tableauSettings.ConfigSheet !==
nextState.tableauSettings.ConfigSheet
) {
this.getConfigSheetSummaryData(selectedSheet);
}
}
}
render() {
// short cut this cause we use it ALOT
const tableauSettingsState = this.state.tableauSettings;
// loading screen jsx
let isLoading = false;
if (
!this.state.isSplash &&
!this.state.isConfig &&
(this.state.isLoading || this.state.isMissingData)
) {
isLoading = true;
}
// config screen jsx
if (this.state.isConfig) {
const stepNames = ['Select Sheet', 'Customize Kepler.gl'];
if (this.state.stepIndex === 1) {
// Placeholder sheet names. TODO: Bind to worksheet data
return (
<React.Fragment>
<Stepper stepIndex={this.state.stepIndex} steps={stepNames} />
<PickSheets
sheetNames={this.state.sheetNames}
configCallBack={this.configCallBack}
field={'ConfigSheet'}
ConfigSheet={tableauSettingsState.ConfigSheet || ''}
/>
<StepButtons
onNextClick={this.onNextStep}
onPrevClick={this.onPrevStep}
stepIndex={this.state.stepIndex}
maxStepCount={stepNames.length}
nextText={
this.state.stepIndex !== stepNames.length ? 'Next' : 'Save'
}
backText="Back"
/>
</React.Fragment>
);
}
if (this.state.stepIndex === 2) {
return (
<React.Fragment>
<Stepper stepIndex={this.state.stepIndex} steps={stepNames} />
<CustomizeViolin
handleChange={this.handleChange}
customCallBack={this.customCallBack}
field={'configuration'}
tableauSettings={tableauSettingsState}
configSheetColumns={this.state.ConfigSheetStringColumns || []}
/>
<StepButtons
onNextClick={this.onNextStep}
onPrevClick={this.onPrevStep}
stepIndex={this.state.stepIndex}
maxStepCount={stepNames.length}
nextText={
this.state.stepIndex !== stepNames.length ? 'Next' : 'Save'
}
backText="Back"
/>
</React.Fragment>
);
}
}
// splash screen jsx
if (this.state.isSplash) | {
log(`%c this.state.isSplash=true}`, 'color: purple');
return (
<div className="splashScreen" style={{padding: 5}}>
<SplashScreen
configure={this.configure}
title="Kepler.gl within Tableau"
desc="Leverage the brilliance of Kepler.gl functionality, directly within Tableau!"
ctaText="Configure"
poweredBy={
<React.Fragment>
<p className="info">
For information on how to use this extension check out the{' '}
<a
href="https://github.com/uber/kepler.gl-tableau/tree/feat/docs/docs"
target="_blank"
rel="noopener noreferrer"
>
user guide | conditional_block | |
App.js | .updateAndSave',
'background: red; color: white'
);
TableauSettings.updateAndSave(kv, settings => {
this.setState({
tableauSettings: settings
});
});
} else {
tableauExt.settings.set(event.target.name, event.target.value);
tableauExt.settings.saveAsync().then(() => {
this.setState({
tableauSettings: tableauExt.settings.getAll()
});
});
}
};
configCallBack = (field, columnName) => {
if (TableauSettings.ShouldUse) {
log(
'%c configCallBack=======TableauSettings.updateAndSave',
'background: red; color: white'
);
TableauSettings.updateAndSave(
{
// ['is' + field]: true,
[field]: columnName
},
settings => {
this.setState({
// ['is' + field]: true,
tableauSettings: settings
});
}
);
} else {
tableauExt.settings.set(field, columnName);
tableauExt.settings.saveAsync().then(() => {
this.setState({
tableauSettings: tableauExt.settings.getAll()
});
});
}
};
eraseCallBack = field => {
log('triggered erase', field);
if (TableauSettings.ShouldUse) {
log(
'%c eraseCallBack=======TableauSettings.eraseAndSave',
'background: red; color: white'
);
TableauSettings.eraseAndSave([field], settings => {
this.setState({
tableauSettings: settings
});
});
} else {
// erase all the settings, there has got be a better way.
tableauExt.settings.erase(field);
// save async the erased settings
// wip - should be able to get rid of state as this is all captured in tableu settings (written to state)
tableauExt.settings.saveAsync().then(() => {
this.setState({
tableauSettings: tableauExt.settings.getAll()
});
});
}
};
customCallBack = confSetting => {
log('in custom call back', confSetting);
if (TableauSettings.ShouldUse) {
log(
'%c customCallBack=======TableauSettings.updateAndSave',
'background: red; color: white'
);
TableauSettings.updateAndSave(
{
[confSetting]: true
},
settings => {
this.setState({
[confSetting]: true,
tableauSettings: settings
});
if (confSetting === 'configuration') {
tableauExt.ui.closeDialog('false');
}
}
);
} else {
tableauExt.settings.set(confSetting, true);
tableauExt.settings.saveAsync().then(() => {
this.setState({
[confSetting]: true,
tableauSettings: tableauExt.settings.getAll()
});
if (confSetting === 'configuration') {
tableauExt.ui.closeDialog('false');
}
});
}
};
// needs to be updated to handle if more than one data set is selected
// find all sheets in array and then call get summary, for now hardcoding
filterChanged = e => {
const selectedSheet = tableauExt.settings.get('ConfigSheet');
if (selectedSheet && selectedSheet === e.worksheet.name) {
log(
'%c ==============App filter has changed',
'background: red; color: white'
);
this.getConfigSheetSummaryData(selectedSheet);
}
};
marksSelected = e => {
if (this.state.tableauSettings.keplerFilterField) {
if (this.applyingMouseActions) {
return;
}
log(
'%c ==============App Marker selected',
'background: red; color: white'
);
// remove event listeners
this.removeEventListeners();
// get selected marks and pass to kepler via state object
e.getMarksAsync().then(marks => {
const {keplerFilterField} = this.state.tableauSettings;
// loop through marks table and adjust the class for opacity
const marksDataTable = marks.data[0];
const col_indexes = {};
const keplerFields = [];
// write column names to array
for (let k = 0; k < marksDataTable.columns.length; k++) {
col_indexes[marksDataTable.columns[k].fieldName] = k;
keplerFields.push(columnToKeplerField(marksDataTable.columns[k], k));
}
const keplerData = dataToKeplerRow(marksDataTable.data, keplerFields);
const filterKeplerObject = {
field: keplerFilterField,
values: keplerData.map(
childD => childD[col_indexes[keplerFilterField]]
)
};
// @shan you can remove this console once you are good with the object
this.props.dispatch(markerSelect(filterKeplerObject));
this.setState({filterKeplerObject}, () => this.addEventListeners());
});
}
};
getConfigSheetSummaryData = selectedSheet => {
// get sheet information this.state.selectedSheet should be syncronized with settings
// can possibly remove the || in the sheetName part
const sheetName = selectedSheet;
const sheetObject = tableauExt.dashboardContent.dashboard.worksheets.find(
worksheet => worksheet.name === sheetName
);
if (!sheetObject) {
return;
}
// clean up event listeners (taken from tableau example)
this.removeEventListeners();
if (TableauSettings.ShouldUse) {
this.setState({
isLoading: true
});
} else {
this.setState({isLoading: true});
tableauExt.settings.set('isLoading', true);
tableauExt.settings.saveAsync().then(() => {
this.setState({
tableauSettings: tableauExt.settings.getAll()
});
});
}
//working here on pulling out summmary data
//may want to limit to a single row when getting column names
sheetObject.getSummaryDataAsync(options).then(t => {
log('in getData().getSummaryDataAsync', t, this.state);
const newDataState = dataTableToKepler(t);
if (TableauSettings.ShouldUse) {
this.setState({
...newDataState,
selectedSheet: sheetName,
isLoading: false,
isMissingData: false
});
} else {
log(
'%c getConfigSheetSummaryData TableauSettings.ShouldUse false',
'color: purple'
);
this.setState({isLoading: false});
tableauExt.settings.set('isLoading', false);
tableauExt.settings.saveAsync().then(() => {
this.setState({
...newDataState,
isLoading: false,
tableauSettings: tableauExt.settings.getAll()
});
});
}
this.addEventListeners();
});
};
clearSheet() {
log('triggered erase');
if (TableauSettings.ShouldUse) {
TableauSettings.eraseAndSave(['isLoading', 'configuration'], settings => {
this.setState({
tableauSettings: settings,
configuration: false,
isSplash: true
});
});
} else {
// erase all the settings, there has got be a better way.
tableauExt.settings.erase('isLoading');
tableauExt.settings.erase('configuration');
// save async the erased settings
// wip - should be able to get rid of state as this is all captured in tableu settings (written to state)
tableauExt.settings.saveAsync().then(() => {
this.setState({
tableauSettings: tableauExt.settings.getAll(),
configuration: false,
isSplash: true
});
});
}
}
clearSplash = () => {
this.setState({
isSplash: false
});
};
configure = () => {
this.clearSheet();
const popUpUrl = window.location.href + '#true';
const popUpOptions = {
height: 625,
width: 720
};
tableauExt.ui
.displayDialogAsync(popUpUrl, '', popUpOptions)
.then(closePayload => {
log('configuring', closePayload, tableauExt.settings.getAll());
if (closePayload === 'false') {
this.setState({
isSplash: false,
isConfig: false,
tableauSettings: tableauExt.settings.getAll()
});
}
})
.catch(error => {
// One expected error condition is when the popup is closed by the user (meaning the user
// clicks the 'X' in the top right of the dialog). This can be checked for like so:
switch (error.errorCode) {
case window.tableau.ErrorCodes.DialogClosedByUser:
log('closed by user');
break;
default:
console.error(error.message);
}
});
};
componentWillUnmount() {
window.removeEventListener('resize', this.resize, true);
}
resize = () => {
this.setState({
width: window.innerWidth,
height: window.innerHeight
});
};
componentDidMount() | {
window.addEventListener('resize', this.resize, true);
this.resize();
tableauExt.initializeAsync({configure: this.configure}).then(() => {
// console.log('tableau config', configJson);
// default tableau settings on initial entry into the extension
// we know if we haven't done anything yet when tableauSettings state = []
log('did mount', tableauExt.settings.get('mapboxAPIKey'));
if (tableauExt.settings.get('mapboxAPIKey') === '') {
log(
'defaultSettings triggered',
defaultSettings.length,
defaultSettings
);
defaultSettings.defaultKeys.map((defaultSetting, index) => {
log(
'defaultSetting',
index,
defaultSetting, | identifier_body | |
App.js | .state.stepIndex === 2) {
this.customCallBack('configuration');
} else {
this.setState((previousState, currentProps) => {
return {stepIndex: previousState.stepIndex + 1};
});
}
};
onPrevStep = () => {
this.setState((previousState, currentProps) => {
return {stepIndex: previousState.stepIndex - 1};
});
};
clickCallBack = d => {
const {clickField, clickAction} = this.state.tableauSettings;
log(
'%c in on click callback',
'background: brown'
// d,
// findColumnIndexByFieldName(this.state, clickField),
// clickAction
);
this.applyMouseActionsToSheets(d, clickAction, clickField);
};
hoverCallBack = d => {
const {hoverField, hoverAction} = this.state.tableauSettings;
log(
'%c in on hover callback',
'background: OLIVE'
// d,
// findColumnIndexByFieldName(this.state, hoverField),
// hoverAction
);
this.applyMouseActionsToSheets(d, hoverAction, hoverField);
// go through each worksheet and select marks
};
applyMouseActionsToSheets = (d, action, fieldName) => {
if (this.applyingMouseActions) {
return;
}
const {ConfigSheet} = this.state.tableauSettings;
const toHighlight =
action === 'Highlight' && (fieldName || 'None') !== 'None';
const toFilter = action === 'Filter' && (fieldName || 'None') !== 'None';
// if no action should be taken
if (!toHighlight && !toFilter) {
return;
}
// remove EventListeners before apply any async actions
this.removeEventListeners();
this.applyingMouseActions = true;
let tasks = [];
if (d) {
// select marks or filter
const fieldIdx = findColumnIndexByFieldName(this.state, fieldName);
const fieldValues =
typeof d[0] === 'object'
? d.map(childD => childD[fieldIdx])
: [d[fieldIdx]];
const actionToApply = toHighlight
? selectMarksByField
: applyFilterByField;
tasks = actionToApply(fieldName, fieldValues, ConfigSheet);
} else {
// clear marks or filer
const actionToApply = toHighlight
? clearMarksByField
: clearFilterByField;
tasks = actionToApply(fieldName, ConfigSheet);
}
Promise.all(tasks).then(() => {
// all selection should be completed
// Add event listeners back
this.addEventListeners();
this.applyingMouseActions = false;
});
};
demoChange = event => {
this.setState({demoType: event.target.value});
log('in demo change', event.target.value, this.state.demoType);
};
handleChange = event => {
log('event', event);
if (TableauSettings.ShouldUse) {
// create a single k/v pair
const kv = {};
kv[event.target.name] = event.target.value;
// update the settings
log(
'%c handleChange=======TableauSettings.updateAndSave',
'background: red; color: white'
);
TableauSettings.updateAndSave(kv, settings => {
this.setState({
tableauSettings: settings
});
});
} else {
tableauExt.settings.set(event.target.name, event.target.value);
tableauExt.settings.saveAsync().then(() => {
this.setState({
tableauSettings: tableauExt.settings.getAll()
});
});
}
};
configCallBack = (field, columnName) => {
if (TableauSettings.ShouldUse) {
log(
'%c configCallBack=======TableauSettings.updateAndSave',
'background: red; color: white'
);
TableauSettings.updateAndSave(
{
// ['is' + field]: true,
[field]: columnName
},
settings => {
this.setState({
// ['is' + field]: true,
tableauSettings: settings
});
}
);
} else {
tableauExt.settings.set(field, columnName);
tableauExt.settings.saveAsync().then(() => {
this.setState({
tableauSettings: tableauExt.settings.getAll()
});
});
}
};
eraseCallBack = field => {
log('triggered erase', field);
if (TableauSettings.ShouldUse) {
log(
'%c eraseCallBack=======TableauSettings.eraseAndSave',
'background: red; color: white'
);
TableauSettings.eraseAndSave([field], settings => {
this.setState({
tableauSettings: settings
});
});
} else {
// erase all the settings, there has got be a better way.
tableauExt.settings.erase(field);
// save async the erased settings
// wip - should be able to get rid of state as this is all captured in tableu settings (written to state)
tableauExt.settings.saveAsync().then(() => {
this.setState({
tableauSettings: tableauExt.settings.getAll()
});
});
}
};
customCallBack = confSetting => {
log('in custom call back', confSetting);
if (TableauSettings.ShouldUse) {
log(
'%c customCallBack=======TableauSettings.updateAndSave',
'background: red; color: white'
);
TableauSettings.updateAndSave(
{
[confSetting]: true
},
settings => {
this.setState({
[confSetting]: true,
tableauSettings: settings
});
if (confSetting === 'configuration') {
tableauExt.ui.closeDialog('false');
}
}
);
} else {
tableauExt.settings.set(confSetting, true);
tableauExt.settings.saveAsync().then(() => {
this.setState({
[confSetting]: true,
tableauSettings: tableauExt.settings.getAll()
});
if (confSetting === 'configuration') {
tableauExt.ui.closeDialog('false');
}
});
}
};
  // TODO: handle the case where more than one data set is selected — find all
  // matching sheets in the array and call getSummary for each; for now the
  // config sheet is hardcoded to a single selection.
filterChanged = e => {
const selectedSheet = tableauExt.settings.get('ConfigSheet');
if (selectedSheet && selectedSheet === e.worksheet.name) {
log(
'%c ==============App filter has changed',
'background: red; color: white'
);
this.getConfigSheetSummaryData(selectedSheet);
}
};
marksSelected = e => {
if (this.state.tableauSettings.keplerFilterField) {
if (this.applyingMouseActions) {
return;
}
log(
'%c ==============App Marker selected',
'background: red; color: white'
);
// remove event listeners
this.removeEventListeners();
// get selected marks and pass to kepler via state object
e.getMarksAsync().then(marks => {
const {keplerFilterField} = this.state.tableauSettings;
// loop through marks table and adjust the class for opacity
const marksDataTable = marks.data[0];
const col_indexes = {};
const keplerFields = [];
// write column names to array
for (let k = 0; k < marksDataTable.columns.length; k++) {
col_indexes[marksDataTable.columns[k].fieldName] = k;
keplerFields.push(columnToKeplerField(marksDataTable.columns[k], k));
}
const keplerData = dataToKeplerRow(marksDataTable.data, keplerFields);
const filterKeplerObject = {
field: keplerFilterField,
values: keplerData.map(
childD => childD[col_indexes[keplerFilterField]]
)
};
// @shan you can remove this console once you are good with the object
this.props.dispatch(markerSelect(filterKeplerObject));
this.setState({filterKeplerObject}, () => this.addEventListeners());
});
}
};
getConfigSheetSummaryData = selectedSheet => {
// get sheet information this.state.selectedSheet should be syncronized with settings
// can possibly remove the || in the sheetName part
const sheetName = selectedSheet;
const sheetObject = tableauExt.dashboardContent.dashboard.worksheets.find(
worksheet => worksheet.name === sheetName
);
if (!sheetObject) {
return;
}
// clean up event listeners (taken from tableau example)
this.removeEventListeners();
if (TableauSettings.ShouldUse) {
this.setState({
isLoading: true
});
} else {
this.setState({isLoading: true});
tableauExt.settings.set('isLoading', true);
tableauExt.settings.saveAsync().then(() => {
this.setState({
tableauSettings: tableauExt.settings.getAll()
});
});
}
//working here on pulling out summmary data
//may want to limit to a single row when getting column names
sheetObject.getSummaryDataAsync(options).then(t => {
log('in getData().getSummaryDataAsync', t, this.state);
const newDataState = dataTableToKepler(t);
if (TableauSettings.ShouldUse) {
this.setState({
...newDataState,
selectedSheet: sheetName,
isLoading: false,
isMissingData: false
}); |
} else {
log(
'%c getConfigSheetSummaryData TableauSettings.ShouldUse false', | random_line_split | |
App.js | )
tableauExt.settings.saveAsync().then(() => {
this.setState({
tableauSettings: tableauExt.settings.getAll()
});
});
}
};
customCallBack = confSetting => {
log('in custom call back', confSetting);
if (TableauSettings.ShouldUse) {
log(
'%c customCallBack=======TableauSettings.updateAndSave',
'background: red; color: white'
);
TableauSettings.updateAndSave(
{
[confSetting]: true
},
settings => {
this.setState({
[confSetting]: true,
tableauSettings: settings
});
if (confSetting === 'configuration') {
tableauExt.ui.closeDialog('false');
}
}
);
} else {
tableauExt.settings.set(confSetting, true);
tableauExt.settings.saveAsync().then(() => {
this.setState({
[confSetting]: true,
tableauSettings: tableauExt.settings.getAll()
});
if (confSetting === 'configuration') {
tableauExt.ui.closeDialog('false');
}
});
}
};
  // TODO: handle the case where more than one data set is selected — find all
  // matching sheets in the array and call getSummary for each; for now the
  // config sheet is hardcoded to a single selection.
filterChanged = e => {
const selectedSheet = tableauExt.settings.get('ConfigSheet');
if (selectedSheet && selectedSheet === e.worksheet.name) {
log(
'%c ==============App filter has changed',
'background: red; color: white'
);
this.getConfigSheetSummaryData(selectedSheet);
}
};
marksSelected = e => {
if (this.state.tableauSettings.keplerFilterField) {
if (this.applyingMouseActions) {
return;
}
log(
'%c ==============App Marker selected',
'background: red; color: white'
);
// remove event listeners
this.removeEventListeners();
// get selected marks and pass to kepler via state object
e.getMarksAsync().then(marks => {
const {keplerFilterField} = this.state.tableauSettings;
// loop through marks table and adjust the class for opacity
const marksDataTable = marks.data[0];
const col_indexes = {};
const keplerFields = [];
// write column names to array
for (let k = 0; k < marksDataTable.columns.length; k++) {
col_indexes[marksDataTable.columns[k].fieldName] = k;
keplerFields.push(columnToKeplerField(marksDataTable.columns[k], k));
}
const keplerData = dataToKeplerRow(marksDataTable.data, keplerFields);
const filterKeplerObject = {
field: keplerFilterField,
values: keplerData.map(
childD => childD[col_indexes[keplerFilterField]]
)
};
// @shan you can remove this console once you are good with the object
this.props.dispatch(markerSelect(filterKeplerObject));
this.setState({filterKeplerObject}, () => this.addEventListeners());
});
}
};
getConfigSheetSummaryData = selectedSheet => {
// get sheet information this.state.selectedSheet should be syncronized with settings
// can possibly remove the || in the sheetName part
const sheetName = selectedSheet;
const sheetObject = tableauExt.dashboardContent.dashboard.worksheets.find(
worksheet => worksheet.name === sheetName
);
if (!sheetObject) {
return;
}
// clean up event listeners (taken from tableau example)
this.removeEventListeners();
if (TableauSettings.ShouldUse) {
this.setState({
isLoading: true
});
} else {
this.setState({isLoading: true});
tableauExt.settings.set('isLoading', true);
tableauExt.settings.saveAsync().then(() => {
this.setState({
tableauSettings: tableauExt.settings.getAll()
});
});
}
//working here on pulling out summmary data
//may want to limit to a single row when getting column names
sheetObject.getSummaryDataAsync(options).then(t => {
log('in getData().getSummaryDataAsync', t, this.state);
const newDataState = dataTableToKepler(t);
if (TableauSettings.ShouldUse) {
this.setState({
...newDataState,
selectedSheet: sheetName,
isLoading: false,
isMissingData: false
});
} else {
log(
'%c getConfigSheetSummaryData TableauSettings.ShouldUse false',
'color: purple'
);
this.setState({isLoading: false});
tableauExt.settings.set('isLoading', false);
tableauExt.settings.saveAsync().then(() => {
this.setState({
...newDataState,
isLoading: false,
tableauSettings: tableauExt.settings.getAll()
});
});
}
this.addEventListeners();
});
};
clearSheet() {
log('triggered erase');
if (TableauSettings.ShouldUse) {
TableauSettings.eraseAndSave(['isLoading', 'configuration'], settings => {
this.setState({
tableauSettings: settings,
configuration: false,
isSplash: true
});
});
} else {
// erase all the settings, there has got be a better way.
tableauExt.settings.erase('isLoading');
tableauExt.settings.erase('configuration');
// save async the erased settings
// wip - should be able to get rid of state as this is all captured in tableu settings (written to state)
tableauExt.settings.saveAsync().then(() => {
this.setState({
tableauSettings: tableauExt.settings.getAll(),
configuration: false,
isSplash: true
});
});
}
}
clearSplash = () => {
this.setState({
isSplash: false
});
};
configure = () => {
this.clearSheet();
const popUpUrl = window.location.href + '#true';
const popUpOptions = {
height: 625,
width: 720
};
tableauExt.ui
.displayDialogAsync(popUpUrl, '', popUpOptions)
.then(closePayload => {
log('configuring', closePayload, tableauExt.settings.getAll());
if (closePayload === 'false') {
this.setState({
isSplash: false,
isConfig: false,
tableauSettings: tableauExt.settings.getAll()
});
}
})
.catch(error => {
// One expected error condition is when the popup is closed by the user (meaning the user
// clicks the 'X' in the top right of the dialog). This can be checked for like so:
switch (error.errorCode) {
case window.tableau.ErrorCodes.DialogClosedByUser:
log('closed by user');
break;
default:
console.error(error.message);
}
});
};
  // React lifecycle: drop the capture-phase resize listener added in
  // componentDidMount so the handler cannot fire on an unmounted component.
  componentWillUnmount() {
    window.removeEventListener('resize', this.resize, true);
  }
resize = () => {
this.setState({
width: window.innerWidth,
height: window.innerHeight
});
};
componentDidMount() {
window.addEventListener('resize', this.resize, true);
this.resize();
tableauExt.initializeAsync({configure: this.configure}).then(() => {
// console.log('tableau config', configJson);
// default tableau settings on initial entry into the extension
// we know if we haven't done anything yet when tableauSettings state = []
log('did mount', tableauExt.settings.get('mapboxAPIKey'));
if (tableauExt.settings.get('mapboxAPIKey') === '') {
log(
'defaultSettings triggered',
defaultSettings.length,
defaultSettings
);
defaultSettings.defaultKeys.map((defaultSetting, index) => {
log(
'defaultSetting',
index,
defaultSetting,
defaultSettings.defaults[defaultSetting]
);
this.configCallBack(
defaultSetting,
defaultSettings.defaults[defaultSetting]
);
});
}
// this is where the majority of the code is going to go for this extension I think
log('will mount', tableauExt.settings.getAll());
//get sheetNames and dashboard name from workbook
const dashboardName = tableauExt.dashboardContent.dashboard.name;
const sheetNames = tableauExt.dashboardContent.dashboard.worksheets.map(
worksheet => worksheet.name
);
log('checking field in getAll()', tableauExt.settings.getAll());
// add event listeners (this includes an initial removal)
this.addEventListeners();
// Initialize the current saved settings global
TableauSettings.init();
// default to uber's Kepler key that they requested if user does not enter
this.setState({
tableauKey: MAPBOX_ACCESS_TOKEN,
isLoading: false,
height: window.innerHeight,
width: window.innerWidth,
sheetNames,
dashboardName,
demoType: tableauExt.settings.get('ConfigType') || 'violin',
tableauSettings: tableauExt.settings.getAll()
});
if (
this.state.tableauSettings.configuration &&
this.state.tableauSettings.configuration === 'true'
) {
this.setState({
isSplash: false,
isConfig: false
});
}
});
}
componentWillUpdate(nextProps, nextState) {
if (tableauExt.settings) {
// get selectedSheet from Settings
// hardcoding this for now because I know i have two possibilities
const selectedSheet = tableauExt.settings.get('ConfigSheet');
if (
selectedSheet &&
this.state.tableauSettings.ConfigSheet !==
nextState.tableauSettings.ConfigSheet
) {
this.getConfigSheetSummaryData(selectedSheet);
}
}
}
| render | identifier_name | |
hubtype-service.js | (o, minLen) { if (!o) return; if (typeof o === "string") return _arrayLikeToArray(o, minLen); var n = Object.prototype.toString.call(o).slice(8, -1); if (n === "Object" && o.constructor) n = o.constructor.name; if (n === "Map" || n === "Set") return Array.from(o); if (n === "Arguments" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return _arrayLikeToArray(o, minLen); }
function _arrayLikeToArray(arr, len) { if (len == null || len > arr.length) len = arr.length; for (var i = 0, arr2 = new Array(len); i < len; i++) { arr2[i] = arr[i]; } return arr2; }
function ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); if (enumerableOnly) symbols = symbols.filter(function (sym) { return Object.getOwnPropertyDescriptor(object, sym).enumerable; }); keys.push.apply(keys, symbols); } return keys; }
function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) | return target; }
// Pusher app key and Hubtype API base URL: taken from webpack-injected env
// vars when defined, otherwise falling back to the public defaults below.
var _WEBCHAT_PUSHER_KEY_ = (0, _utils.getWebpackEnvVar)( // eslint-disable-next-line no-undef
typeof WEBCHAT_PUSHER_KEY !== 'undefined' && WEBCHAT_PUSHER_KEY, 'WEBCHAT_PUSHER_KEY', '434ca667c8e6cb3f641c');
var _HUBTYPE_API_URL_ = (0, _utils.getWebpackEnvVar)( // eslint-disable-next-line no-undef
typeof HUBTYPE_API_URL !== 'undefined' && HUBTYPE_API_URL, 'HUBTYPE_API_URL', 'https://api.hubtype.com');
var ACTIVITY_TIMEOUT = 20 * 1000; // https://pusher.com/docs/channels/using_channels/connection#activitytimeout-integer-
var PONG_TIMEOUT = 5 * 1000; // https://pusher.com/docs/channels/using_channels/connection#pongtimeout-integer-
var HubtypeService = /*#__PURE__*/function () {
function HubtypeService(_ref) {
var appId = _ref.appId,
user = _ref.user,
lastMessageId = _ref.lastMessageId,
lastMessageUpdateDate = _ref.lastMessageUpdateDate,
onEvent = _ref.onEvent,
unsentInputs = _ref.unsentInputs,
server = _ref.server;
(0, _classCallCheck2["default"])(this, HubtypeService);
this.appId = appId;
this.user = user || {};
this.lastMessageId = lastMessageId;
this.lastMessageUpdateDate = lastMessageUpdateDate;
this.onEvent = onEvent;
this.unsentInputs = unsentInputs;
this.server = server;
if (user.id && (lastMessageId || lastMessageUpdateDate)) {
this.init();
}
}
(0, _createClass2["default"])(HubtypeService, [{
key: "resolveServerConfig",
value: function resolveServerConfig() {
if (!this.server) {
return {
activityTimeout: ACTIVITY_TIMEOUT,
pongTimeout: PONG_TIMEOUT
};
}
return {
activityTimeout: this.server.activityTimeout || ACTIVITY_TIMEOUT,
pongTimeout: this.server.pongTimeout || PONG_TIMEOUT
};
}
}, {
key: "updateAuthHeaders",
value: function updateAuthHeaders() {
if (this.pusher) {
this.pusher.config.auth.headers = _objectSpread(_objectSpread({}, this.pusher.config.auth.headers), this.constructHeaders());
}
}
}, {
key: "init",
value: function init(user, lastMessageId, lastMessageUpdateDate) {
var _this = this;
if (user) this.user = user;
if (lastMessageId) this.lastMessageId = lastMessageId;
if (lastMessageUpdateDate) this.lastMessageUpdateDate = lastMessageUpdateDate;
if (this.pusher || !this.user.id || !this.appId) return null;
this.pusher = new _pusherJs["default"](_WEBCHAT_PUSHER_KEY_, _objectSpread({
cluster: 'eu',
authEndpoint: "".concat(_HUBTYPE_API_URL_, "/v1/provider_accounts/webhooks/webchat/").concat(this.appId, "/auth/"),
forceTLS: true,
auth: {
headers: this.constructHeaders()
}
}, this.resolveServerConfig()));
this.channel = this.pusher.subscribe(this.pusherChannel);
var connectionPromise = new Promise(function (resolve, reject) {
var cleanAndReject = function cleanAndReject(msg) {
// eslint-disable-next-line @typescript-eslint/no-use-before-define
clearTimeout(connectTimeout);
_this.destroyPusher();
reject(msg);
};
var connectTimeout = setTimeout(function () {
return cleanAndReject('Connection Timeout');
}, 10000);
_this.channel.bind('pusher:subscription_succeeded', function () {
// Once subscribed, we know that authentication has been done: https://pusher.com/docs/channels/server_api/authenticating-users
_this.onConnectionRegained();
clearTimeout(connectTimeout);
resolve();
});
_this.channel.bind('botonic_response', function (data) {
return _this.onPusherEvent(data);
});
_this.channel.bind('update_message_info', function (data) {
return _this.onPusherEvent(data);
});
_this.pusher.connection.bind('error', function (event) {
if (event.type == 'WebSocketError') _this.handleConnectionChange(false);else {
var errorMsg = event.error && event.error.data ? event.error.data.code || event.error.data.message : 'Connection error';
cleanAndReject("Pusher error (".concat(errorMsg, ")"));
}
});
});
this.pusher.connection.bind('state_change', function (states) {
if (states.current === 'connecting') _this.updateAuthHeaders();
if (states.current === 'connected') _this.handleConnectionChange(true);
if (states.current === 'unavailable') _this.handleConnectionChange(false);
});
return connectionPromise;
}
}, {
key: "constructHeaders",
value: function constructHeaders() {
var headers = {};
if (this.user && this.user.id) headers['X-BOTONIC-USER-ID'] = this.user.id;
if (this.lastMessageId) headers['X-BOTONIC-LAST-MESSAGE-ID'] = this.lastMessageId;
if (this.lastMessageUpdateDate) headers['X-BOTONIC-LAST-MESSAGE-UPDATE-DATE'] = this.lastMessageUpdateDate;
return headers;
}
}, {
key: "handleConnectionChange",
value: function handleConnectionChange(online) {
this.onPusherEvent({
action: 'connectionChange',
online: online
});
}
}, {
key: "onPusherEvent",
value: function onPusherEvent(event) {
if (this.onEvent && typeof this.onEvent === 'function') this.onEvent(event);
}
}, {
key: "pusherChannel",
get: function get() {
return "private-encrypted-".concat(this.appId, "-").concat(this.user.id);
}
}, {
key: "handleSentInput",
value: function handleSentInput(message) {
this.onEvent({
action: 'update_message_info',
message: {
id: message.id,
ack: 1
}
});
}
}, {
key: "handleUnsentInput",
value: function handleUnsentInput(message) {
this.onEvent({
action: 'update_message_info',
message: {
id: message.id,
ack: 0,
unsentInput: message
}
});
}
}, {
key: "postMessage",
value: function () {
var _postMessage = (0, _asyncToGenerator2["default"])( /*#__PURE__*/_regenerator["default"].mark(function _callee(user, message) {
return _regenerator["default"].wrap(function _callee$(_context) {
while (1) {
switch (_context.prev = _context.next) {
case 0:
_context.prev = 0;
_context.next = 3;
return this.init(user);
case 3:
_context.next = 5;
return _axios["default"].post("".concat(_HUBTYPE_API_URL_, "/v1/provider_accounts/webhooks/webchat/").concat(this.appId, "/"), {
sender: this.user,
message: message
}, {
validateStatus: function validateStatus(status) {
return status === 2 | { var source = arguments[i] != null ? arguments[i] : {}; if (i % 2) { ownKeys(Object(source), true).forEach(function (key) { (0, _defineProperty2["default"])(target, key, source[key]); }); } else if (Object.getOwnPropertyDescriptors) { Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)); } else { ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } } | conditional_block |
hubtype-service.js | onEvent = _ref.onEvent,
unsentInputs = _ref.unsentInputs,
server = _ref.server;
(0, _classCallCheck2["default"])(this, HubtypeService);
this.appId = appId;
this.user = user || {};
this.lastMessageId = lastMessageId;
this.lastMessageUpdateDate = lastMessageUpdateDate;
this.onEvent = onEvent;
this.unsentInputs = unsentInputs;
this.server = server;
if (user.id && (lastMessageId || lastMessageUpdateDate)) {
this.init();
}
}
(0, _createClass2["default"])(HubtypeService, [{
key: "resolveServerConfig",
value: function resolveServerConfig() {
if (!this.server) {
return {
activityTimeout: ACTIVITY_TIMEOUT,
pongTimeout: PONG_TIMEOUT
};
}
return {
activityTimeout: this.server.activityTimeout || ACTIVITY_TIMEOUT,
pongTimeout: this.server.pongTimeout || PONG_TIMEOUT
};
}
}, {
key: "updateAuthHeaders",
value: function updateAuthHeaders() {
if (this.pusher) {
this.pusher.config.auth.headers = _objectSpread(_objectSpread({}, this.pusher.config.auth.headers), this.constructHeaders());
}
}
}, {
key: "init",
value: function init(user, lastMessageId, lastMessageUpdateDate) {
var _this = this;
if (user) this.user = user;
if (lastMessageId) this.lastMessageId = lastMessageId;
if (lastMessageUpdateDate) this.lastMessageUpdateDate = lastMessageUpdateDate;
if (this.pusher || !this.user.id || !this.appId) return null;
this.pusher = new _pusherJs["default"](_WEBCHAT_PUSHER_KEY_, _objectSpread({
cluster: 'eu',
authEndpoint: "".concat(_HUBTYPE_API_URL_, "/v1/provider_accounts/webhooks/webchat/").concat(this.appId, "/auth/"),
forceTLS: true,
auth: {
headers: this.constructHeaders()
}
}, this.resolveServerConfig()));
this.channel = this.pusher.subscribe(this.pusherChannel);
var connectionPromise = new Promise(function (resolve, reject) {
var cleanAndReject = function cleanAndReject(msg) {
// eslint-disable-next-line @typescript-eslint/no-use-before-define
clearTimeout(connectTimeout);
_this.destroyPusher();
reject(msg);
};
var connectTimeout = setTimeout(function () {
return cleanAndReject('Connection Timeout');
}, 10000);
_this.channel.bind('pusher:subscription_succeeded', function () {
// Once subscribed, we know that authentication has been done: https://pusher.com/docs/channels/server_api/authenticating-users
_this.onConnectionRegained();
clearTimeout(connectTimeout);
resolve();
});
_this.channel.bind('botonic_response', function (data) {
return _this.onPusherEvent(data);
});
_this.channel.bind('update_message_info', function (data) {
return _this.onPusherEvent(data);
});
_this.pusher.connection.bind('error', function (event) {
if (event.type == 'WebSocketError') _this.handleConnectionChange(false);else {
var errorMsg = event.error && event.error.data ? event.error.data.code || event.error.data.message : 'Connection error';
cleanAndReject("Pusher error (".concat(errorMsg, ")"));
}
});
});
this.pusher.connection.bind('state_change', function (states) {
if (states.current === 'connecting') _this.updateAuthHeaders();
if (states.current === 'connected') _this.handleConnectionChange(true);
if (states.current === 'unavailable') _this.handleConnectionChange(false);
});
return connectionPromise;
}
}, {
key: "constructHeaders",
value: function constructHeaders() {
var headers = {};
if (this.user && this.user.id) headers['X-BOTONIC-USER-ID'] = this.user.id;
if (this.lastMessageId) headers['X-BOTONIC-LAST-MESSAGE-ID'] = this.lastMessageId;
if (this.lastMessageUpdateDate) headers['X-BOTONIC-LAST-MESSAGE-UPDATE-DATE'] = this.lastMessageUpdateDate;
return headers;
}
}, {
key: "handleConnectionChange",
value: function handleConnectionChange(online) {
this.onPusherEvent({
action: 'connectionChange',
online: online
});
}
}, {
key: "onPusherEvent",
value: function onPusherEvent(event) {
if (this.onEvent && typeof this.onEvent === 'function') this.onEvent(event);
}
}, {
key: "pusherChannel",
get: function get() {
return "private-encrypted-".concat(this.appId, "-").concat(this.user.id);
}
}, {
key: "handleSentInput",
value: function handleSentInput(message) {
this.onEvent({
action: 'update_message_info',
message: {
id: message.id,
ack: 1
}
});
}
}, {
key: "handleUnsentInput",
value: function handleUnsentInput(message) {
this.onEvent({
action: 'update_message_info',
message: {
id: message.id,
ack: 0,
unsentInput: message
}
});
}
}, {
key: "postMessage",
value: function () {
var _postMessage = (0, _asyncToGenerator2["default"])( /*#__PURE__*/_regenerator["default"].mark(function _callee(user, message) {
return _regenerator["default"].wrap(function _callee$(_context) {
while (1) {
switch (_context.prev = _context.next) {
case 0:
_context.prev = 0;
_context.next = 3;
return this.init(user);
case 3:
_context.next = 5;
return _axios["default"].post("".concat(_HUBTYPE_API_URL_, "/v1/provider_accounts/webhooks/webchat/").concat(this.appId, "/"), {
sender: this.user,
message: message
}, {
validateStatus: function validateStatus(status) {
return status === 200;
}
});
case 5:
this.handleSentInput(message);
_context.next = 11;
break;
case 8:
_context.prev = 8;
_context.t0 = _context["catch"](0);
this.handleUnsentInput(message);
case 11:
case "end":
return _context.stop();
}
}
}, _callee, this, [[0, 8]]);
}));
function postMessage(_x, _x2) {
return _postMessage.apply(this, arguments);
}
return postMessage;
}()
}, {
key: "destroyPusher",
value: function destroyPusher() {
if (!this.pusher) return;
this.pusher.disconnect();
this.pusher.unsubscribe(this.pusherChannel);
this.pusher.unbind_all();
this.pusher.channels = {};
this.pusher = null;
}
}, {
key: "onConnectionRegained",
value: function () {
var _onConnectionRegained = (0, _asyncToGenerator2["default"])( /*#__PURE__*/_regenerator["default"].mark(function _callee2() {
return _regenerator["default"].wrap(function _callee2$(_context2) {
while (1) {
switch (_context2.prev = _context2.next) {
case 0:
_context2.next = 2;
return this.resendUnsentInputs();
case 2:
case "end":
return _context2.stop();
}
}
}, _callee2, this);
}));
function onConnectionRegained() {
return _onConnectionRegained.apply(this, arguments);
}
return onConnectionRegained;
}()
}, {
key: "resendUnsentInputs",
value: function () {
var _resendUnsentInputs = (0, _asyncToGenerator2["default"])( /*#__PURE__*/_regenerator["default"].mark(function _callee3() {
var _iterator, _step, message;
return _regenerator["default"].wrap(function _callee3$(_context3) {
while (1) {
switch (_context3.prev = _context3.next) {
case 0:
_iterator = _createForOfIteratorHelper(this.unsentInputs());
_context3.prev = 1;
_iterator.s();
case 3:
if ((_step = _iterator.n()).done) {
_context3.next = 11;
break;
}
message = _step.value;
_context3.t0 = message.unsentInput;
if (!_context3.t0) {
_context3.next = 9;
break;
}
_context3.next = 9;
return this.postMessage(this.user, message.unsentInput);
case 9:
_context3.next = 3;
break;
case 11:
_context3.next = 16;
break;
| case 13: | random_line_split | |
hubtype-service.js | "])(HubtypeService, [{
key: "resolveServerConfig",
value: function resolveServerConfig() {
if (!this.server) {
return {
activityTimeout: ACTIVITY_TIMEOUT,
pongTimeout: PONG_TIMEOUT
};
}
return {
activityTimeout: this.server.activityTimeout || ACTIVITY_TIMEOUT,
pongTimeout: this.server.pongTimeout || PONG_TIMEOUT
};
}
}, {
key: "updateAuthHeaders",
value: function updateAuthHeaders() {
if (this.pusher) {
this.pusher.config.auth.headers = _objectSpread(_objectSpread({}, this.pusher.config.auth.headers), this.constructHeaders());
}
}
}, {
key: "init",
value: function init(user, lastMessageId, lastMessageUpdateDate) {
var _this = this;
if (user) this.user = user;
if (lastMessageId) this.lastMessageId = lastMessageId;
if (lastMessageUpdateDate) this.lastMessageUpdateDate = lastMessageUpdateDate;
if (this.pusher || !this.user.id || !this.appId) return null;
this.pusher = new _pusherJs["default"](_WEBCHAT_PUSHER_KEY_, _objectSpread({
cluster: 'eu',
authEndpoint: "".concat(_HUBTYPE_API_URL_, "/v1/provider_accounts/webhooks/webchat/").concat(this.appId, "/auth/"),
forceTLS: true,
auth: {
headers: this.constructHeaders()
}
}, this.resolveServerConfig()));
this.channel = this.pusher.subscribe(this.pusherChannel);
var connectionPromise = new Promise(function (resolve, reject) {
var cleanAndReject = function cleanAndReject(msg) {
// eslint-disable-next-line @typescript-eslint/no-use-before-define
clearTimeout(connectTimeout);
_this.destroyPusher();
reject(msg);
};
var connectTimeout = setTimeout(function () {
return cleanAndReject('Connection Timeout');
}, 10000);
_this.channel.bind('pusher:subscription_succeeded', function () {
// Once subscribed, we know that authentication has been done: https://pusher.com/docs/channels/server_api/authenticating-users
_this.onConnectionRegained();
clearTimeout(connectTimeout);
resolve();
});
_this.channel.bind('botonic_response', function (data) {
return _this.onPusherEvent(data);
});
_this.channel.bind('update_message_info', function (data) {
return _this.onPusherEvent(data);
});
_this.pusher.connection.bind('error', function (event) {
if (event.type == 'WebSocketError') _this.handleConnectionChange(false);else {
var errorMsg = event.error && event.error.data ? event.error.data.code || event.error.data.message : 'Connection error';
cleanAndReject("Pusher error (".concat(errorMsg, ")"));
}
});
});
this.pusher.connection.bind('state_change', function (states) {
if (states.current === 'connecting') _this.updateAuthHeaders();
if (states.current === 'connected') _this.handleConnectionChange(true);
if (states.current === 'unavailable') _this.handleConnectionChange(false);
});
return connectionPromise;
}
}, {
key: "constructHeaders",
value: function constructHeaders() {
var headers = {};
if (this.user && this.user.id) headers['X-BOTONIC-USER-ID'] = this.user.id;
if (this.lastMessageId) headers['X-BOTONIC-LAST-MESSAGE-ID'] = this.lastMessageId;
if (this.lastMessageUpdateDate) headers['X-BOTONIC-LAST-MESSAGE-UPDATE-DATE'] = this.lastMessageUpdateDate;
return headers;
}
}, {
key: "handleConnectionChange",
value: function handleConnectionChange(online) {
this.onPusherEvent({
action: 'connectionChange',
online: online
});
}
}, {
key: "onPusherEvent",
value: function onPusherEvent(event) {
if (this.onEvent && typeof this.onEvent === 'function') this.onEvent(event);
}
}, {
key: "pusherChannel",
get: function get() {
return "private-encrypted-".concat(this.appId, "-").concat(this.user.id);
}
}, {
key: "handleSentInput",
value: function handleSentInput(message) {
this.onEvent({
action: 'update_message_info',
message: {
id: message.id,
ack: 1
}
});
}
}, {
key: "handleUnsentInput",
value: function handleUnsentInput(message) {
this.onEvent({
action: 'update_message_info',
message: {
id: message.id,
ack: 0,
unsentInput: message
}
});
}
}, {
key: "postMessage",
value: function () {
var _postMessage = (0, _asyncToGenerator2["default"])( /*#__PURE__*/_regenerator["default"].mark(function _callee(user, message) {
return _regenerator["default"].wrap(function _callee$(_context) {
while (1) {
switch (_context.prev = _context.next) {
case 0:
_context.prev = 0;
_context.next = 3;
return this.init(user);
case 3:
_context.next = 5;
return _axios["default"].post("".concat(_HUBTYPE_API_URL_, "/v1/provider_accounts/webhooks/webchat/").concat(this.appId, "/"), {
sender: this.user,
message: message
}, {
validateStatus: function validateStatus(status) {
return status === 200;
}
});
case 5:
this.handleSentInput(message);
_context.next = 11;
break;
case 8:
_context.prev = 8;
_context.t0 = _context["catch"](0);
this.handleUnsentInput(message);
case 11:
case "end":
return _context.stop();
}
}
}, _callee, this, [[0, 8]]);
}));
function postMessage(_x, _x2) {
return _postMessage.apply(this, arguments);
}
return postMessage;
}()
}, {
key: "destroyPusher",
value: function destroyPusher() {
if (!this.pusher) return;
this.pusher.disconnect();
this.pusher.unsubscribe(this.pusherChannel);
this.pusher.unbind_all();
this.pusher.channels = {};
this.pusher = null;
}
}, {
key: "onConnectionRegained",
value: function () {
var _onConnectionRegained = (0, _asyncToGenerator2["default"])( /*#__PURE__*/_regenerator["default"].mark(function _callee2() {
return _regenerator["default"].wrap(function _callee2$(_context2) {
while (1) {
switch (_context2.prev = _context2.next) {
case 0:
_context2.next = 2;
return this.resendUnsentInputs();
case 2:
case "end":
return _context2.stop();
}
}
}, _callee2, this);
}));
function onConnectionRegained() {
return _onConnectionRegained.apply(this, arguments);
}
return onConnectionRegained;
}()
}, {
key: "resendUnsentInputs",
value: function () {
var _resendUnsentInputs = (0, _asyncToGenerator2["default"])( /*#__PURE__*/_regenerator["default"].mark(function _callee3() {
var _iterator, _step, message;
return _regenerator["default"].wrap(function _callee3$(_context3) {
while (1) {
switch (_context3.prev = _context3.next) {
case 0:
_iterator = _createForOfIteratorHelper(this.unsentInputs());
_context3.prev = 1;
_iterator.s();
case 3:
if ((_step = _iterator.n()).done) {
_context3.next = 11;
break;
}
message = _step.value;
_context3.t0 = message.unsentInput;
if (!_context3.t0) {
_context3.next = 9;
break;
}
_context3.next = 9;
return this.postMessage(this.user, message.unsentInput);
case 9:
_context3.next = 3;
break;
case 11:
_context3.next = 16;
break;
case 13:
_context3.prev = 13;
_context3.t1 = _context3["catch"](1);
_iterator.e(_context3.t1);
case 16:
_context3.prev = 16;
_iterator.f();
return _context3.finish(16);
case 19:
case "end":
return _context3.stop();
}
}
}, _callee3, this, [[1, 13, 16, 19]]);
}));
function resendUnsentInputs() | {
return _resendUnsentInputs.apply(this, arguments);
} | identifier_body | |
hubtype-service.js | this.init();
}
}
(0, _createClass2["default"])(HubtypeService, [{
key: "resolveServerConfig",
value: function resolveServerConfig() {
if (!this.server) {
return {
activityTimeout: ACTIVITY_TIMEOUT,
pongTimeout: PONG_TIMEOUT
};
}
return {
activityTimeout: this.server.activityTimeout || ACTIVITY_TIMEOUT,
pongTimeout: this.server.pongTimeout || PONG_TIMEOUT
};
}
}, {
key: "updateAuthHeaders",
value: function updateAuthHeaders() {
if (this.pusher) {
this.pusher.config.auth.headers = _objectSpread(_objectSpread({}, this.pusher.config.auth.headers), this.constructHeaders());
}
}
}, {
key: "init",
value: function init(user, lastMessageId, lastMessageUpdateDate) {
var _this = this;
if (user) this.user = user;
if (lastMessageId) this.lastMessageId = lastMessageId;
if (lastMessageUpdateDate) this.lastMessageUpdateDate = lastMessageUpdateDate;
if (this.pusher || !this.user.id || !this.appId) return null;
this.pusher = new _pusherJs["default"](_WEBCHAT_PUSHER_KEY_, _objectSpread({
cluster: 'eu',
authEndpoint: "".concat(_HUBTYPE_API_URL_, "/v1/provider_accounts/webhooks/webchat/").concat(this.appId, "/auth/"),
forceTLS: true,
auth: {
headers: this.constructHeaders()
}
}, this.resolveServerConfig()));
this.channel = this.pusher.subscribe(this.pusherChannel);
var connectionPromise = new Promise(function (resolve, reject) {
var cleanAndReject = function cleanAndReject(msg) {
// eslint-disable-next-line @typescript-eslint/no-use-before-define
clearTimeout(connectTimeout);
_this.destroyPusher();
reject(msg);
};
var connectTimeout = setTimeout(function () {
return cleanAndReject('Connection Timeout');
}, 10000);
_this.channel.bind('pusher:subscription_succeeded', function () {
// Once subscribed, we know that authentication has been done: https://pusher.com/docs/channels/server_api/authenticating-users
_this.onConnectionRegained();
clearTimeout(connectTimeout);
resolve();
});
_this.channel.bind('botonic_response', function (data) {
return _this.onPusherEvent(data);
});
_this.channel.bind('update_message_info', function (data) {
return _this.onPusherEvent(data);
});
_this.pusher.connection.bind('error', function (event) {
if (event.type == 'WebSocketError') _this.handleConnectionChange(false);else {
var errorMsg = event.error && event.error.data ? event.error.data.code || event.error.data.message : 'Connection error';
cleanAndReject("Pusher error (".concat(errorMsg, ")"));
}
});
});
this.pusher.connection.bind('state_change', function (states) {
if (states.current === 'connecting') _this.updateAuthHeaders();
if (states.current === 'connected') _this.handleConnectionChange(true);
if (states.current === 'unavailable') _this.handleConnectionChange(false);
});
return connectionPromise;
}
}, {
key: "constructHeaders",
value: function constructHeaders() {
var headers = {};
if (this.user && this.user.id) headers['X-BOTONIC-USER-ID'] = this.user.id;
if (this.lastMessageId) headers['X-BOTONIC-LAST-MESSAGE-ID'] = this.lastMessageId;
if (this.lastMessageUpdateDate) headers['X-BOTONIC-LAST-MESSAGE-UPDATE-DATE'] = this.lastMessageUpdateDate;
return headers;
}
}, {
key: "handleConnectionChange",
value: function handleConnectionChange(online) {
this.onPusherEvent({
action: 'connectionChange',
online: online
});
}
}, {
key: "onPusherEvent",
value: function onPusherEvent(event) {
if (this.onEvent && typeof this.onEvent === 'function') this.onEvent(event);
}
}, {
key: "pusherChannel",
get: function get() {
return "private-encrypted-".concat(this.appId, "-").concat(this.user.id);
}
}, {
key: "handleSentInput",
value: function handleSentInput(message) {
this.onEvent({
action: 'update_message_info',
message: {
id: message.id,
ack: 1
}
});
}
}, {
key: "handleUnsentInput",
value: function handleUnsentInput(message) {
this.onEvent({
action: 'update_message_info',
message: {
id: message.id,
ack: 0,
unsentInput: message
}
});
}
}, {
key: "postMessage",
value: function () {
var _postMessage = (0, _asyncToGenerator2["default"])( /*#__PURE__*/_regenerator["default"].mark(function _callee(user, message) {
return _regenerator["default"].wrap(function _callee$(_context) {
while (1) {
switch (_context.prev = _context.next) {
case 0:
_context.prev = 0;
_context.next = 3;
return this.init(user);
case 3:
_context.next = 5;
return _axios["default"].post("".concat(_HUBTYPE_API_URL_, "/v1/provider_accounts/webhooks/webchat/").concat(this.appId, "/"), {
sender: this.user,
message: message
}, {
validateStatus: function validateStatus(status) {
return status === 200;
}
});
case 5:
this.handleSentInput(message);
_context.next = 11;
break;
case 8:
_context.prev = 8;
_context.t0 = _context["catch"](0);
this.handleUnsentInput(message);
case 11:
case "end":
return _context.stop();
}
}
}, _callee, this, [[0, 8]]);
}));
function postMessage(_x, _x2) {
return _postMessage.apply(this, arguments);
}
return postMessage;
}()
}, {
key: "destroyPusher",
value: function destroyPusher() {
if (!this.pusher) return;
this.pusher.disconnect();
this.pusher.unsubscribe(this.pusherChannel);
this.pusher.unbind_all();
this.pusher.channels = {};
this.pusher = null;
}
}, {
key: "onConnectionRegained",
value: function () {
var _onConnectionRegained = (0, _asyncToGenerator2["default"])( /*#__PURE__*/_regenerator["default"].mark(function _callee2() {
return _regenerator["default"].wrap(function _callee2$(_context2) {
while (1) {
switch (_context2.prev = _context2.next) {
case 0:
_context2.next = 2;
return this.resendUnsentInputs();
case 2:
case "end":
return _context2.stop();
}
}
}, _callee2, this);
}));
function onConnectionRegained() {
return _onConnectionRegained.apply(this, arguments);
}
return onConnectionRegained;
}()
}, {
key: "resendUnsentInputs",
value: function () {
var _resendUnsentInputs = (0, _asyncToGenerator2["default"])( /*#__PURE__*/_regenerator["default"].mark(function _callee3() {
var _iterator, _step, message;
return _regenerator["default"].wrap(function _callee3$(_context3) {
while (1) {
switch (_context3.prev = _context3.next) {
case 0:
_iterator = _createForOfIteratorHelper(this.unsentInputs());
_context3.prev = 1;
_iterator.s();
case 3:
if ((_step = _iterator.n()).done) {
_context3.next = 11;
break;
}
message = _step.value;
_context3.t0 = message.unsentInput;
if (!_context3.t0) {
_context3.next = 9;
break;
}
_context3.next = 9;
return this.postMessage(this.user, message.unsentInput);
case 9:
_context3.next = 3;
break;
case 11:
_context3.next = 16;
break;
case 13:
_context3.prev = 13;
_context3.t1 = _context3["catch"](1);
_iterator.e(_context3.t1);
case 16:
_context3.prev = 16;
_iterator.f();
return _context3.finish(16);
case 19:
case "end":
return _context3.stop();
}
}
}, _callee3, this, [[1, 13, 16, 19]]);
}));
function | resendUnsentInputs | identifier_name | |
main_test_xgb.py | = np.cos(adrien_data['Ang'].flatten()) # cosinus of angular direction
data['sin'] = np.sin(adrien_data['Ang'].flatten()) # sinus of angular direction
# Firing data
for i in xrange(adrien_data['Pos'].shape[1]): data['Pos'+'.'+str(i)] = adrien_data['Pos'][:,i].astype('float')
for i in xrange(adrien_data['ADn'].shape[1]): data['ADn'+'.'+str(i)] = adrien_data['ADn'][:,i].astype('float')
#RANDOM DATA
for i in xrange(5):
data['rand'+str(i)] = np.random.uniform(0, 2*np.pi, len(adrien_data['Ang']))
#######################################################################
# FONCTIONS DEFINITIONS
#######################################################################
def extract_tree_threshold(trees):
n = len(trees.get_dump())
thr = {}
for t in xrange(n):
gv = xgb.to_graphviz(trees, num_trees=t)
body = gv.body
for i in xrange(len(body)):
for l in body[i].split('"'):
if 'f' in l and '<' in l:
tmp = l.split("<")
if thr.has_key(tmp[0]):
thr[tmp[0]].append(float(tmp[1]))
else:
thr[tmp[0]] = [float(tmp[1])]
for k in thr.iterkeys():
thr[k] = np.sort(np.array(thr[k]))
return thr
def tuning_curve(x, f, nb_bins):
bins = np.linspace(x.min(), x.max()+1e-8, nb_bins+1)
index = np.digitize(x, bins).flatten()
tcurve = np.array([np.mean(f[index == i]) for i in xrange(1, nb_bins+1)])
x = bins[0:-1] + (bins[1]-bins[0])/2.
return (x, tcurve)
def test_features(features, targets, learners = ['glm_pyglmnet', 'nn', 'xgb_run', 'ens']):
X = data[features].values
Y = np.vstack(data[targets].values)
Models = {method:{'PR2':[],'Yt_hat':[]} for method in learners}
learners_ = list(learners)
# print learners_
for i in xrange(Y.shape[1]):
y = Y[:,i]
# TODO : make sure that 'ens' is the last learner
for method in learners_:
print('Running '+method+'...')
print 'targets ', targets[i]
Yt_hat, PR2 = fit_cv(X, y, algorithm = method, n_cv=8, verbose=1)
Models[method]['Yt_hat'].append(Yt_hat)
Models[method]['PR2'].append(PR2)
for m in Models.iterkeys():
Models[m]['Yt_hat'] = np.array(Models[m]['Yt_hat'])
Models[m]['PR2'] = np.array(Models[m]['PR2'])
return Models
#####################################################################
# COMBINATIONS DEFINITION
#####################################################################
combination = {
14: {
'features' : ['rand0', 'rand1', 'ang', 'rand2', 'rand3'],
# 'targets' : [i for i in list(data) if i.split(".")[0] in ['Pos', 'ADn']],
'targets' : ['ADn.0'],
},
}
#####################################################################
# LEARNING XGB
#####################################################################
bsts = {i:{} for i in combination.iterkeys()} # to keep the boosted tree
params = {'objective': "count:poisson", #for poisson output
'eval_metric': "logloss", #loglikelihood loss
'seed': 2925, #for reproducibility
'silent': 0,
'learning_rate': 0.1,
'min_child_weight': 2, 'n_estimators': 1,
'subsample': 0.6, 'max_depth': 5, 'gamma': 0.5,
'tree_method':'exact'}
num_round = 600
X = data[['rand0', 'rand1', 'ang', 'rand2', 'rand3']].values
Y = data['ADn.0']
dtrain = xgb.DMatrix(np.vstack(X), label = np.vstack(Y))
bst = xgb.train(params, dtrain, num_round)
a = extract_tree_threshold(bst)
sys.exit()
# methods = ['xgb_run']
# for k in np.sort(combination.keys()):
# features = combination[k]['features']
# targets = combination[k]['targets']
# results = test_features(features, targets, methods)
# sys.exit()
##################################################################### | # TUNING CURVE
#####################################################################
X = data['ang'].values
Yall = data[[i for i in list(data) if i.split(".")[0] in ['Pos', 'ADn']]].values
tuningc = {targets[i]:tuning_curve(X, Yall[:,i], nb_bins = 100) for i in xrange(Yall.shape[1])}
sys.exit()
#####################################################################
# EXTRACT TREE STRUCTURE
#####################################################################
thresholds = {}
for i in bsts.iterkeys():
thresholds[i] = {}
for j in bsts[i].iterkeys():
thresholds[i][j] = extract_tree_threshold(bsts[i][j])
#####################################################################
# plot 11 (2.1)
#####################################################################
order = ['Pos.8', 'Pos.9', 'Pos.10', 'ADn.9', 'ADn.10', 'ADn.11']
rcParams.update({ 'backend':'pdf',
'savefig.pad_inches':0.1,
'font.size':8 })
figure(figsize = (12,15))
for k, i in zip(order, xrange(1,7)):
subplot(3,2,i)
[axvline(j, alpha = 0.1, color = 'grey') for j in thresholds[11][k]['f0']]
plot(tuningc[k][0], tuningc[k][1])
title(k)
xlim(0, 2*np.pi)
xlabel('Angle (rad)')
ylabel('f')
simpleaxis(gca())
subplots_adjust(hspace = 0.3, wspace = 0.3)
savefig(combination[11]['figure'], dpi = 900, bbox_inches = 'tight')
#####################################################################
# plot 12 (2.2)
#####################################################################
figure(figsize = (12,14))
for k, i in zip(order, xrange(1,7)):
subplot(3,2,i)
[axvline(j, alpha = 0.1, color = 'grey') for j in thresholds[12][k]['f0']]
[axhline(j, alpha = 0.1, color = 'grey') for j in thresholds[12][k]['f1']]
plot(data['x'].values, data['y'].values, '-', alpha = 0.3)
xlabel('x')
ylabel('y')
title(k)
simpleaxis(gca())
subplots_adjust(hspace = 0.3, wspace = 0.3)
savefig(combination[12]['figure'], dpi = 900, bbox_inches = 'tight')
#####################################################################
# plot 13 (2.3)
#####################################################################
trans = {'f0':'Ang','f1':'x','f2':'y'}
figure(figsize = (12,17))
for k, i in zip(order, xrange(1,18,3)):
subplot(6,3,i)
count = np.array([len(thresholds[13][k][f]) for f in thresholds[13][k].keys()])
name = np.array([trans[f] for f in thresholds[13][k].keys()])
bar(left = np.arange(len(count)), height = count, tick_label = name, align = 'center', facecolor = 'grey')
ylabel('Number of split')
simpleaxis(gca())
subplot(6,3,i+1)
[axvline(j, alpha = 0.1, color = 'grey') for j in thresholds[13][k]['f0']]
plot(tuningc[k][0], tuningc[k][1])
title(k)
xlim(0, 2*np.pi)
xlabel('Angle (rad)')
ylabel('f')
simpleaxis(gca())
subplot(6,3,i+2)
[axvline(j, alpha = 0.1, color = 'grey') for j in thresholds[13][k]['f1']]
[axhline(j, alpha = 0.1, color = 'grey') for j in thresholds[13][k]['f2']]
plot(data['x'].values, data['y'].values, '-', alpha = 0.5)
xlabel('x')
ylabel('y')
title(k)
simpleaxis(gca())
subplots_adjust(hspace = 0.3, wspace = 0.3)
savefig(combination[13]['figure'], dpi = 900, bbox_inches = 'tight')
#####################################################################
# plot 14 (2.4)
################################################################ | random_line_split | |
main_test_xgb.py | = np.cos(adrien_data['Ang'].flatten()) # cosinus of angular direction
data['sin'] = np.sin(adrien_data['Ang'].flatten()) # sinus of angular direction
# Firing data
for i in xrange(adrien_data['Pos'].shape[1]): data['Pos'+'.'+str(i)] = adrien_data['Pos'][:,i].astype('float')
for i in xrange(adrien_data['ADn'].shape[1]): data['ADn'+'.'+str(i)] = adrien_data['ADn'][:,i].astype('float')
#RANDOM DATA
for i in xrange(5):
data['rand'+str(i)] = np.random.uniform(0, 2*np.pi, len(adrien_data['Ang']))
#######################################################################
# FONCTIONS DEFINITIONS
#######################################################################
def extract_tree_threshold(trees):
n = len(trees.get_dump())
thr = {}
for t in xrange(n):
gv = xgb.to_graphviz(trees, num_trees=t)
body = gv.body
for i in xrange(len(body)):
for l in body[i].split('"'):
if 'f' in l and '<' in l:
tmp = l.split("<")
if thr.has_key(tmp[0]):
thr[tmp[0]].append(float(tmp[1]))
else:
thr[tmp[0]] = [float(tmp[1])]
for k in thr.iterkeys():
thr[k] = np.sort(np.array(thr[k]))
return thr
def tuning_curve(x, f, nb_bins):
bins = np.linspace(x.min(), x.max()+1e-8, nb_bins+1)
index = np.digitize(x, bins).flatten()
tcurve = np.array([np.mean(f[index == i]) for i in xrange(1, nb_bins+1)])
x = bins[0:-1] + (bins[1]-bins[0])/2.
return (x, tcurve)
def | (features, targets, learners = ['glm_pyglmnet', 'nn', 'xgb_run', 'ens']):
X = data[features].values
Y = np.vstack(data[targets].values)
Models = {method:{'PR2':[],'Yt_hat':[]} for method in learners}
learners_ = list(learners)
# print learners_
for i in xrange(Y.shape[1]):
y = Y[:,i]
# TODO : make sure that 'ens' is the last learner
for method in learners_:
print('Running '+method+'...')
print 'targets ', targets[i]
Yt_hat, PR2 = fit_cv(X, y, algorithm = method, n_cv=8, verbose=1)
Models[method]['Yt_hat'].append(Yt_hat)
Models[method]['PR2'].append(PR2)
for m in Models.iterkeys():
Models[m]['Yt_hat'] = np.array(Models[m]['Yt_hat'])
Models[m]['PR2'] = np.array(Models[m]['PR2'])
return Models
#####################################################################
# COMBINATIONS DEFINITION
#####################################################################
combination = {
14: {
'features' : ['rand0', 'rand1', 'ang', 'rand2', 'rand3'],
# 'targets' : [i for i in list(data) if i.split(".")[0] in ['Pos', 'ADn']],
'targets' : ['ADn.0'],
},
}
#####################################################################
# LEARNING XGB
#####################################################################
bsts = {i:{} for i in combination.iterkeys()} # to keep the boosted tree
params = {'objective': "count:poisson", #for poisson output
'eval_metric': "logloss", #loglikelihood loss
'seed': 2925, #for reproducibility
'silent': 0,
'learning_rate': 0.1,
'min_child_weight': 2, 'n_estimators': 1,
'subsample': 0.6, 'max_depth': 5, 'gamma': 0.5,
'tree_method':'exact'}
num_round = 600
X = data[['rand0', 'rand1', 'ang', 'rand2', 'rand3']].values
Y = data['ADn.0']
dtrain = xgb.DMatrix(np.vstack(X), label = np.vstack(Y))
bst = xgb.train(params, dtrain, num_round)
a = extract_tree_threshold(bst)
sys.exit()
# methods = ['xgb_run']
# for k in np.sort(combination.keys()):
# features = combination[k]['features']
# targets = combination[k]['targets']
# results = test_features(features, targets, methods)
# sys.exit()
#####################################################################
# TUNING CURVE
#####################################################################
X = data['ang'].values
Yall = data[[i for i in list(data) if i.split(".")[0] in ['Pos', 'ADn']]].values
tuningc = {targets[i]:tuning_curve(X, Yall[:,i], nb_bins = 100) for i in xrange(Yall.shape[1])}
sys.exit()
#####################################################################
# EXTRACT TREE STRUCTURE
#####################################################################
thresholds = {}
for i in bsts.iterkeys():
thresholds[i] = {}
for j in bsts[i].iterkeys():
thresholds[i][j] = extract_tree_threshold(bsts[i][j])
#####################################################################
# plot 11 (2.1)
#####################################################################
order = ['Pos.8', 'Pos.9', 'Pos.10', 'ADn.9', 'ADn.10', 'ADn.11']
rcParams.update({ 'backend':'pdf',
'savefig.pad_inches':0.1,
'font.size':8 })
figure(figsize = (12,15))
for k, i in zip(order, xrange(1,7)):
subplot(3,2,i)
[axvline(j, alpha = 0.1, color = 'grey') for j in thresholds[11][k]['f0']]
plot(tuningc[k][0], tuningc[k][1])
title(k)
xlim(0, 2*np.pi)
xlabel('Angle (rad)')
ylabel('f')
simpleaxis(gca())
subplots_adjust(hspace = 0.3, wspace = 0.3)
savefig(combination[11]['figure'], dpi = 900, bbox_inches = 'tight')
#####################################################################
# plot 12 (2.2)
#####################################################################
figure(figsize = (12,14))
for k, i in zip(order, xrange(1,7)):
subplot(3,2,i)
[axvline(j, alpha = 0.1, color = 'grey') for j in thresholds[12][k]['f0']]
[axhline(j, alpha = 0.1, color = 'grey') for j in thresholds[12][k]['f1']]
plot(data['x'].values, data['y'].values, '-', alpha = 0.3)
xlabel('x')
ylabel('y')
title(k)
simpleaxis(gca())
subplots_adjust(hspace = 0.3, wspace = 0.3)
savefig(combination[12]['figure'], dpi = 900, bbox_inches = 'tight')
#####################################################################
# plot 13 (2.3)
#####################################################################
trans = {'f0':'Ang','f1':'x','f2':'y'}
figure(figsize = (12,17))
for k, i in zip(order, xrange(1,18,3)):
subplot(6,3,i)
count = np.array([len(thresholds[13][k][f]) for f in thresholds[13][k].keys()])
name = np.array([trans[f] for f in thresholds[13][k].keys()])
bar(left = np.arange(len(count)), height = count, tick_label = name, align = 'center', facecolor = 'grey')
ylabel('Number of split')
simpleaxis(gca())
subplot(6,3,i+1)
[axvline(j, alpha = 0.1, color = 'grey') for j in thresholds[13][k]['f0']]
plot(tuningc[k][0], tuningc[k][1])
title(k)
xlim(0, 2*np.pi)
xlabel('Angle (rad)')
ylabel('f')
simpleaxis(gca())
subplot(6,3,i+2)
[axvline(j, alpha = 0.1, color = 'grey') for j in thresholds[13][k]['f1']]
[axhline(j, alpha = 0.1, color = 'grey') for j in thresholds[13][k]['f2']]
plot(data['x'].values, data['y'].values, '-', alpha = 0.5)
xlabel('x')
ylabel('y')
title(k)
simpleaxis(gca())
subplots_adjust(hspace = 0.3, wspace = 0.3)
savefig(combination[13]['figure'], dpi = 900, bbox_inches = 'tight')
#####################################################################
# plot 14 (2.4)
| test_features | identifier_name |
main_test_xgb.py | = np.cos(adrien_data['Ang'].flatten()) # cosinus of angular direction
data['sin'] = np.sin(adrien_data['Ang'].flatten()) # sinus of angular direction
# Firing data
for i in xrange(adrien_data['Pos'].shape[1]): data['Pos'+'.'+str(i)] = adrien_data['Pos'][:,i].astype('float')
for i in xrange(adrien_data['ADn'].shape[1]): data['ADn'+'.'+str(i)] = adrien_data['ADn'][:,i].astype('float')
#RANDOM DATA
for i in xrange(5):
data['rand'+str(i)] = np.random.uniform(0, 2*np.pi, len(adrien_data['Ang']))
#######################################################################
# FONCTIONS DEFINITIONS
#######################################################################
def extract_tree_threshold(trees):
|
def tuning_curve(x, f, nb_bins):
bins = np.linspace(x.min(), x.max()+1e-8, nb_bins+1)
index = np.digitize(x, bins).flatten()
tcurve = np.array([np.mean(f[index == i]) for i in xrange(1, nb_bins+1)])
x = bins[0:-1] + (bins[1]-bins[0])/2.
return (x, tcurve)
def test_features(features, targets, learners = ['glm_pyglmnet', 'nn', 'xgb_run', 'ens']):
X = data[features].values
Y = np.vstack(data[targets].values)
Models = {method:{'PR2':[],'Yt_hat':[]} for method in learners}
learners_ = list(learners)
# print learners_
for i in xrange(Y.shape[1]):
y = Y[:,i]
# TODO : make sure that 'ens' is the last learner
for method in learners_:
print('Running '+method+'...')
print 'targets ', targets[i]
Yt_hat, PR2 = fit_cv(X, y, algorithm = method, n_cv=8, verbose=1)
Models[method]['Yt_hat'].append(Yt_hat)
Models[method]['PR2'].append(PR2)
for m in Models.iterkeys():
Models[m]['Yt_hat'] = np.array(Models[m]['Yt_hat'])
Models[m]['PR2'] = np.array(Models[m]['PR2'])
return Models
#####################################################################
# COMBINATIONS DEFINITION
#####################################################################
combination = {
14: {
'features' : ['rand0', 'rand1', 'ang', 'rand2', 'rand3'],
# 'targets' : [i for i in list(data) if i.split(".")[0] in ['Pos', 'ADn']],
'targets' : ['ADn.0'],
},
}
#####################################################################
# LEARNING XGB
#####################################################################
bsts = {i:{} for i in combination.iterkeys()} # to keep the boosted tree
params = {'objective': "count:poisson", #for poisson output
'eval_metric': "logloss", #loglikelihood loss
'seed': 2925, #for reproducibility
'silent': 0,
'learning_rate': 0.1,
'min_child_weight': 2, 'n_estimators': 1,
'subsample': 0.6, 'max_depth': 5, 'gamma': 0.5,
'tree_method':'exact'}
num_round = 600
X = data[['rand0', 'rand1', 'ang', 'rand2', 'rand3']].values
Y = data['ADn.0']
dtrain = xgb.DMatrix(np.vstack(X), label = np.vstack(Y))
bst = xgb.train(params, dtrain, num_round)
a = extract_tree_threshold(bst)
sys.exit()
# methods = ['xgb_run']
# for k in np.sort(combination.keys()):
# features = combination[k]['features']
# targets = combination[k]['targets']
# results = test_features(features, targets, methods)
# sys.exit()
#####################################################################
# TUNING CURVE
#####################################################################
X = data['ang'].values
Yall = data[[i for i in list(data) if i.split(".")[0] in ['Pos', 'ADn']]].values
tuningc = {targets[i]:tuning_curve(X, Yall[:,i], nb_bins = 100) for i in xrange(Yall.shape[1])}
sys.exit()
#####################################################################
# EXTRACT TREE STRUCTURE
#####################################################################
thresholds = {}
for i in bsts.iterkeys():
thresholds[i] = {}
for j in bsts[i].iterkeys():
thresholds[i][j] = extract_tree_threshold(bsts[i][j])
#####################################################################
# plot 11 (2.1)
#####################################################################
order = ['Pos.8', 'Pos.9', 'Pos.10', 'ADn.9', 'ADn.10', 'ADn.11']
rcParams.update({ 'backend':'pdf',
'savefig.pad_inches':0.1,
'font.size':8 })
figure(figsize = (12,15))
for k, i in zip(order, xrange(1,7)):
subplot(3,2,i)
[axvline(j, alpha = 0.1, color = 'grey') for j in thresholds[11][k]['f0']]
plot(tuningc[k][0], tuningc[k][1])
title(k)
xlim(0, 2*np.pi)
xlabel('Angle (rad)')
ylabel('f')
simpleaxis(gca())
subplots_adjust(hspace = 0.3, wspace = 0.3)
savefig(combination[11]['figure'], dpi = 900, bbox_inches = 'tight')
#####################################################################
# plot 12 (2.2)
#####################################################################
figure(figsize = (12,14))
for k, i in zip(order, xrange(1,7)):
subplot(3,2,i)
[axvline(j, alpha = 0.1, color = 'grey') for j in thresholds[12][k]['f0']]
[axhline(j, alpha = 0.1, color = 'grey') for j in thresholds[12][k]['f1']]
plot(data['x'].values, data['y'].values, '-', alpha = 0.3)
xlabel('x')
ylabel('y')
title(k)
simpleaxis(gca())
subplots_adjust(hspace = 0.3, wspace = 0.3)
savefig(combination[12]['figure'], dpi = 900, bbox_inches = 'tight')
#####################################################################
# plot 13 (2.3)
#####################################################################
trans = {'f0':'Ang','f1':'x','f2':'y'}
figure(figsize = (12,17))
for k, i in zip(order, xrange(1,18,3)):
subplot(6,3,i)
count = np.array([len(thresholds[13][k][f]) for f in thresholds[13][k].keys()])
name = np.array([trans[f] for f in thresholds[13][k].keys()])
bar(left = np.arange(len(count)), height = count, tick_label = name, align = 'center', facecolor = 'grey')
ylabel('Number of split')
simpleaxis(gca())
subplot(6,3,i+1)
[axvline(j, alpha = 0.1, color = 'grey') for j in thresholds[13][k]['f0']]
plot(tuningc[k][0], tuningc[k][1])
title(k)
xlim(0, 2*np.pi)
xlabel('Angle (rad)')
ylabel('f')
simpleaxis(gca())
subplot(6,3,i+2)
[axvline(j, alpha = 0.1, color = 'grey') for j in thresholds[13][k]['f1']]
[axhline(j, alpha = 0.1, color = 'grey') for j in thresholds[13][k]['f2']]
plot(data['x'].values, data['y'].values, '-', alpha = 0.5)
xlabel('x')
ylabel('y')
title(k)
simpleaxis(gca())
subplots_adjust(hspace = 0.3, wspace = 0.3)
savefig(combination[13]['figure'], dpi = 900, bbox_inches = 'tight')
#####################################################################
# plot 14 (2.4)
| n = len(trees.get_dump())
thr = {}
for t in xrange(n):
gv = xgb.to_graphviz(trees, num_trees=t)
body = gv.body
for i in xrange(len(body)):
for l in body[i].split('"'):
if 'f' in l and '<' in l:
tmp = l.split("<")
if thr.has_key(tmp[0]):
thr[tmp[0]].append(float(tmp[1]))
else:
thr[tmp[0]] = [float(tmp[1])]
for k in thr.iterkeys():
thr[k] = np.sort(np.array(thr[k]))
return thr | identifier_body |
main_test_xgb.py | '] = np.cos(adrien_data['Ang'].flatten()) # cosinus of angular direction
data['sin'] = np.sin(adrien_data['Ang'].flatten()) # sinus of angular direction
# Firing data
for i in xrange(adrien_data['Pos'].shape[1]): data['Pos'+'.'+str(i)] = adrien_data['Pos'][:,i].astype('float')
for i in xrange(adrien_data['ADn'].shape[1]): data['ADn'+'.'+str(i)] = adrien_data['ADn'][:,i].astype('float')
#RANDOM DATA
for i in xrange(5):
data['rand'+str(i)] = np.random.uniform(0, 2*np.pi, len(adrien_data['Ang']))
#######################################################################
# FONCTIONS DEFINITIONS
#######################################################################
def extract_tree_threshold(trees):
n = len(trees.get_dump())
thr = {}
for t in xrange(n):
gv = xgb.to_graphviz(trees, num_trees=t)
body = gv.body
for i in xrange(len(body)):
for l in body[i].split('"'):
if 'f' in l and '<' in l:
tmp = l.split("<")
if thr.has_key(tmp[0]):
thr[tmp[0]].append(float(tmp[1]))
else:
thr[tmp[0]] = [float(tmp[1])]
for k in thr.iterkeys():
thr[k] = np.sort(np.array(thr[k]))
return thr
def tuning_curve(x, f, nb_bins):
bins = np.linspace(x.min(), x.max()+1e-8, nb_bins+1)
index = np.digitize(x, bins).flatten()
tcurve = np.array([np.mean(f[index == i]) for i in xrange(1, nb_bins+1)])
x = bins[0:-1] + (bins[1]-bins[0])/2.
return (x, tcurve)
def test_features(features, targets, learners = ['glm_pyglmnet', 'nn', 'xgb_run', 'ens']):
X = data[features].values
Y = np.vstack(data[targets].values)
Models = {method:{'PR2':[],'Yt_hat':[]} for method in learners}
learners_ = list(learners)
# print learners_
for i in xrange(Y.shape[1]):
y = Y[:,i]
# TODO : make sure that 'ens' is the last learner
for method in learners_:
print('Running '+method+'...')
print 'targets ', targets[i]
Yt_hat, PR2 = fit_cv(X, y, algorithm = method, n_cv=8, verbose=1)
Models[method]['Yt_hat'].append(Yt_hat)
Models[method]['PR2'].append(PR2)
for m in Models.iterkeys():
Models[m]['Yt_hat'] = np.array(Models[m]['Yt_hat'])
Models[m]['PR2'] = np.array(Models[m]['PR2'])
return Models
#####################################################################
# COMBINATIONS DEFINITION
#####################################################################
combination = {
14: {
'features' : ['rand0', 'rand1', 'ang', 'rand2', 'rand3'],
# 'targets' : [i for i in list(data) if i.split(".")[0] in ['Pos', 'ADn']],
'targets' : ['ADn.0'],
},
}
#####################################################################
# LEARNING XGB
#####################################################################
bsts = {i:{} for i in combination.iterkeys()} # to keep the boosted tree
params = {'objective': "count:poisson", #for poisson output
'eval_metric': "logloss", #loglikelihood loss
'seed': 2925, #for reproducibility
'silent': 0,
'learning_rate': 0.1,
'min_child_weight': 2, 'n_estimators': 1,
'subsample': 0.6, 'max_depth': 5, 'gamma': 0.5,
'tree_method':'exact'}
num_round = 600
X = data[['rand0', 'rand1', 'ang', 'rand2', 'rand3']].values
Y = data['ADn.0']
dtrain = xgb.DMatrix(np.vstack(X), label = np.vstack(Y))
bst = xgb.train(params, dtrain, num_round)
a = extract_tree_threshold(bst)
sys.exit()
# methods = ['xgb_run']
# for k in np.sort(combination.keys()):
# features = combination[k]['features']
# targets = combination[k]['targets']
# results = test_features(features, targets, methods)
# sys.exit()
#####################################################################
# TUNING CURVE
#####################################################################
X = data['ang'].values
Yall = data[[i for i in list(data) if i.split(".")[0] in ['Pos', 'ADn']]].values
tuningc = {targets[i]:tuning_curve(X, Yall[:,i], nb_bins = 100) for i in xrange(Yall.shape[1])}
sys.exit()
#####################################################################
# EXTRACT TREE STRUCTURE
#####################################################################
thresholds = {}
for i in bsts.iterkeys():
thresholds[i] = {}
for j in bsts[i].iterkeys():
|
#####################################################################
# plot 11 (2.1)
#####################################################################
order = ['Pos.8', 'Pos.9', 'Pos.10', 'ADn.9', 'ADn.10', 'ADn.11']
rcParams.update({ 'backend':'pdf',
'savefig.pad_inches':0.1,
'font.size':8 })
figure(figsize = (12,15))
for k, i in zip(order, xrange(1,7)):
subplot(3,2,i)
[axvline(j, alpha = 0.1, color = 'grey') for j in thresholds[11][k]['f0']]
plot(tuningc[k][0], tuningc[k][1])
title(k)
xlim(0, 2*np.pi)
xlabel('Angle (rad)')
ylabel('f')
simpleaxis(gca())
subplots_adjust(hspace = 0.3, wspace = 0.3)
savefig(combination[11]['figure'], dpi = 900, bbox_inches = 'tight')
#####################################################################
# plot 12 (2.2)
#####################################################################
figure(figsize = (12,14))
for k, i in zip(order, xrange(1,7)):
subplot(3,2,i)
[axvline(j, alpha = 0.1, color = 'grey') for j in thresholds[12][k]['f0']]
[axhline(j, alpha = 0.1, color = 'grey') for j in thresholds[12][k]['f1']]
plot(data['x'].values, data['y'].values, '-', alpha = 0.3)
xlabel('x')
ylabel('y')
title(k)
simpleaxis(gca())
subplots_adjust(hspace = 0.3, wspace = 0.3)
savefig(combination[12]['figure'], dpi = 900, bbox_inches = 'tight')
#####################################################################
# plot 13 (2.3)
#####################################################################
trans = {'f0':'Ang','f1':'x','f2':'y'}
figure(figsize = (12,17))
for k, i in zip(order, xrange(1,18,3)):
subplot(6,3,i)
count = np.array([len(thresholds[13][k][f]) for f in thresholds[13][k].keys()])
name = np.array([trans[f] for f in thresholds[13][k].keys()])
bar(left = np.arange(len(count)), height = count, tick_label = name, align = 'center', facecolor = 'grey')
ylabel('Number of split')
simpleaxis(gca())
subplot(6,3,i+1)
[axvline(j, alpha = 0.1, color = 'grey') for j in thresholds[13][k]['f0']]
plot(tuningc[k][0], tuningc[k][1])
title(k)
xlim(0, 2*np.pi)
xlabel('Angle (rad)')
ylabel('f')
simpleaxis(gca())
subplot(6,3,i+2)
[axvline(j, alpha = 0.1, color = 'grey') for j in thresholds[13][k]['f1']]
[axhline(j, alpha = 0.1, color = 'grey') for j in thresholds[13][k]['f2']]
plot(data['x'].values, data['y'].values, '-', alpha = 0.5)
xlabel('x')
ylabel('y')
title(k)
simpleaxis(gca())
subplots_adjust(hspace = 0.3, wspace = 0.3)
savefig(combination[13]['figure'], dpi = 900, bbox_inches = 'tight')
#####################################################################
# plot 14 (2.4)
################################################################ | thresholds[i][j] = extract_tree_threshold(bsts[i][j]) | conditional_block |
tls.go | "cr", the Kubernetes Service "Service", and the CertConfig "config".
//
// GenerateCert creates and manages TLS key and cert and CA with the following:
// CA creation and management:
// - If CA is not given:
// - A unique CA is generated for the CR.
// - CA's key is packaged into a Secret as shown below.
// - CA's cert is packaged in a ConfigMap as shown below.
// - The CA Secret and ConfigMap are created on the k8s cluster in the CR's namespace before
// returned to the user. The CertGenerator manages the CA Secret and ConfigMap to ensure it's
// unqiue per CR.
// - If CA is given:
// - CA's key is packaged into a Secret as shown below.
// - CA's cert is packaged in a ConfigMap as shown below.
// - The CA Secret and ConfigMap are returned but not created in the K8s cluster in the CR's
// namespace. The CertGenerator doesn't manage the CA because the user controls the lifecycle
// of the CA.
//
// TLS Key and Cert Creation and Management:
// - A unique TLS cert and key pair is generated per CR + CertConfig.CertName.
// - The CA is used to generate and sign the TLS cert.
// - The signing process uses the passed in "service" to set the Subject Alternative Names(SAN)
// for the certificate. We assume that the deployed applications are typically communicated
// with via a Kubernetes Service. The SAN is set to the FQDN of the service
// `<service-name>.<service-namespace>.svc.cluster.local`.
// - Once TLS key and cert are created, they are packaged into a secret as shown below.
// - Finally, the secret are created on the k8s cluster in the CR's namespace before returned to
// the user. The CertGenerator manages this secret to ensure that it is unique per CR +
// CertConfig.CertName.
//
// TLS encryption key and cert Secret format:
// kind: Secret
// apiVersion: v1
// metadata:
// name: <cr-kind>-<cr-name>-<CertConfig.CertName>
// namespace: <cr-namespace>
// data:
// tls.crt: ...
// tls.key: ...
//
// CA Certificate ConfigMap format:
// kind: ConfigMap
// apiVersion: v1
// metadata:
// name: <cr-kind>-<cr-name>-ca
// namespace: <cr-namespace>
// data:
// ca.crt: ...
//
// CA Key Secret format:
// kind: Secret
// apiVersion: v1
// metadata:
// name: <cr-kind>-<cr-name>-ca
// namespace: <cr-namespace>
// data:
// ca.key: ..
GenerateCert(cr runtime.Object, service *v1.Service, config *CertConfig) (*v1.Secret, *v1.ConfigMap, *v1.Secret, error)
}
const (
// TLSPrivateCAKeyKey is the key for the private CA key field.
TLSPrivateCAKeyKey = "ca.key"
// TLSCertKey is the key for tls CA certificates.
TLSCACertKey = "ca.crt"
)
// NewSDKCertGenerator constructs a new CertGenerator given the kubeClient.
func NewSDKCertGenerator(kubeClient kubernetes.Interface) CertGenerator {
return &SDKCertGenerator{KubeClient: kubeClient}
}
type SDKCertGenerator struct {
KubeClient kubernetes.Interface
}
// GenerateCert returns a secret containing the TLS encryption key and cert,
// a ConfigMap containing the CA Certificate and a Secret containing the CA key or it
// returns a error incase something goes wrong.
func (scg *SDKCertGenerator) GenerateCert(cr runtime.Object, service *v1.Service, config *CertConfig) (*v1.Secret, *v1.ConfigMap, *v1.Secret, error) {
if err := verifyConfig(config); err != nil {
return nil, nil, nil, err
}
k, n, ns, err := toKindNameNamespace(cr)
if err != nil {
return nil, nil, nil, err
}
appSecretName := ToAppSecretName(k, n, config.CertName)
appSecret, err := getAppSecretInCluster(scg.KubeClient, appSecretName, ns)
if err != nil {
return nil, nil, nil, err
}
caSecretAndConfigMapName := ToCASecretAndConfigMapName(k, n)
var (
caSecret *v1.Secret
caConfigMap *v1.ConfigMap
)
caSecret, caConfigMap, err = getCASecretAndConfigMapInCluster(scg.KubeClient, caSecretAndConfigMapName, ns)
if err != nil {
return nil, nil, nil, err
}
if config.CAKey != "" && config.CACert != "" {
// custom CA provided by the user.
customCAKeyData, err := ioutil.ReadFile(config.CAKey)
if err != nil {
return nil, nil, nil, fmt.Errorf("error reading CA Key from the given file name: %v", err)
}
customCACertData, err := ioutil.ReadFile(config.CACert)
if err != nil {
return nil, nil, nil, fmt.Errorf("error reading CA Cert from the given file name: %v", err)
}
customCAKey, err := parsePEMEncodedPrivateKey(customCAKeyData)
if err != nil {
return nil, nil, nil, fmt.Errorf("error parsing CA Key from the given file name: %v", err)
}
customCACert, err := parsePEMEncodedCert(customCACertData)
if err != nil {
return nil, nil, nil, fmt.Errorf("error parsing CA Cert from the given file name: %v", err)
}
caSecret, caConfigMap = toCASecretAndConfigmap(customCAKey, customCACert, caSecretAndConfigMapName)
} else if config.CAKey != "" || config.CACert != "" {
// if only one of the custom CA Key or Cert is provided
return nil, nil, nil, ErrCAKeyAndCACertReq
}
hasAppSecret := appSecret != nil
hasCASecretAndConfigMap := caSecret != nil && caConfigMap != nil
switch {
case hasAppSecret && hasCASecretAndConfigMap:
return appSecret, caConfigMap, caSecret, nil
case hasAppSecret && !hasCASecretAndConfigMap:
return nil, nil, nil, ErrCANotFound
case !hasAppSecret && hasCASecretAndConfigMap:
// Note: if a custom CA is passed in my the user it takes preference over an already
// generated CA secret and CA configmap that might exist in the cluster
caKey, err := parsePEMEncodedPrivateKey(caSecret.Data[TLSPrivateCAKeyKey])
if err != nil {
return nil, nil, nil, err
}
caCert, err := parsePEMEncodedCert([]byte(caConfigMap.Data[TLSCACertKey]))
if err != nil {
return nil, nil, nil, err
}
key, err := newPrivateKey()
if err != nil {
return nil, nil, nil, err
}
cert, err := newSignedCertificate(config, service, key, caCert, caKey)
if err != nil {
return nil, nil, nil, err
}
appSecret, err := scg.KubeClient.CoreV1().Secrets(ns).Create(toTLSSecret(key, cert, appSecretName))
if err != nil {
return nil, nil, nil, err
}
return appSecret, caConfigMap, caSecret, nil
case !hasAppSecret && !hasCASecretAndConfigMap:
// If no custom CAKey and CACert are provided we have to generate them
caKey, err := newPrivateKey()
if err != nil {
return nil, nil, nil, err
}
caCert, err := newSelfSignedCACertificate(caKey)
if err != nil {
return nil, nil, nil, err
}
caSecret, caConfigMap := toCASecretAndConfigmap(caKey, caCert, caSecretAndConfigMapName)
caSecret, err = scg.KubeClient.CoreV1().Secrets(ns).Create(caSecret)
if err != nil {
return nil, nil, nil, err
}
caConfigMap, err = scg.KubeClient.CoreV1().ConfigMaps(ns).Create(caConfigMap)
if err != nil {
return nil, nil, nil, err
}
key, err := newPrivateKey()
if err != nil {
return nil, nil, nil, err
}
cert, err := newSignedCertificate(config, service, key, caCert, caKey)
if err != nil | {
return nil, nil, nil, err
} | conditional_block | |
tls.go | CA's cert is packaged in a ConfigMap as shown below.
// - The CA Secret and ConfigMap are created on the k8s cluster in the CR's namespace before
// returned to the user. The CertGenerator manages the CA Secret and ConfigMap to ensure it's
// unqiue per CR.
// - If CA is given:
// - CA's key is packaged into a Secret as shown below.
// - CA's cert is packaged in a ConfigMap as shown below.
// - The CA Secret and ConfigMap are returned but not created in the K8s cluster in the CR's
// namespace. The CertGenerator doesn't manage the CA because the user controls the lifecycle
// of the CA.
//
// TLS Key and Cert Creation and Management:
// - A unique TLS cert and key pair is generated per CR + CertConfig.CertName.
// - The CA is used to generate and sign the TLS cert.
// - The signing process uses the passed in "service" to set the Subject Alternative Names(SAN)
// for the certificate. We assume that the deployed applications are typically communicated
// with via a Kubernetes Service. The SAN is set to the FQDN of the service
// `<service-name>.<service-namespace>.svc.cluster.local`.
// - Once TLS key and cert are created, they are packaged into a secret as shown below.
// - Finally, the secret are created on the k8s cluster in the CR's namespace before returned to
// the user. The CertGenerator manages this secret to ensure that it is unique per CR +
// CertConfig.CertName.
//
// TLS encryption key and cert Secret format:
// kind: Secret
// apiVersion: v1
// metadata:
// name: <cr-kind>-<cr-name>-<CertConfig.CertName>
// namespace: <cr-namespace>
// data:
// tls.crt: ...
// tls.key: ...
//
// CA Certificate ConfigMap format:
// kind: ConfigMap
// apiVersion: v1
// metadata:
// name: <cr-kind>-<cr-name>-ca
// namespace: <cr-namespace>
// data:
// ca.crt: ...
//
// CA Key Secret format:
// kind: Secret
// apiVersion: v1
// metadata:
// name: <cr-kind>-<cr-name>-ca
// namespace: <cr-namespace>
// data:
// ca.key: ..
GenerateCert(cr runtime.Object, service *v1.Service, config *CertConfig) (*v1.Secret, *v1.ConfigMap, *v1.Secret, error)
}
const (
// TLSPrivateCAKeyKey is the key for the private CA key field.
TLSPrivateCAKeyKey = "ca.key"
// TLSCertKey is the key for tls CA certificates.
TLSCACertKey = "ca.crt"
)
// NewSDKCertGenerator constructs a new CertGenerator given the kubeClient.
func NewSDKCertGenerator(kubeClient kubernetes.Interface) CertGenerator {
return &SDKCertGenerator{KubeClient: kubeClient}
}
type SDKCertGenerator struct {
KubeClient kubernetes.Interface
}
// GenerateCert returns a secret containing the TLS encryption key and cert,
// a ConfigMap containing the CA Certificate and a Secret containing the CA key or it
// returns a error incase something goes wrong.
func (scg *SDKCertGenerator) GenerateCert(cr runtime.Object, service *v1.Service, config *CertConfig) (*v1.Secret, *v1.ConfigMap, *v1.Secret, error) {
if err := verifyConfig(config); err != nil {
return nil, nil, nil, err
}
k, n, ns, err := toKindNameNamespace(cr)
if err != nil {
return nil, nil, nil, err
}
appSecretName := ToAppSecretName(k, n, config.CertName)
appSecret, err := getAppSecretInCluster(scg.KubeClient, appSecretName, ns)
if err != nil {
return nil, nil, nil, err
}
caSecretAndConfigMapName := ToCASecretAndConfigMapName(k, n)
var (
caSecret *v1.Secret
caConfigMap *v1.ConfigMap
)
caSecret, caConfigMap, err = getCASecretAndConfigMapInCluster(scg.KubeClient, caSecretAndConfigMapName, ns)
if err != nil {
return nil, nil, nil, err
}
if config.CAKey != "" && config.CACert != "" {
// custom CA provided by the user.
customCAKeyData, err := ioutil.ReadFile(config.CAKey)
if err != nil {
return nil, nil, nil, fmt.Errorf("error reading CA Key from the given file name: %v", err)
}
customCACertData, err := ioutil.ReadFile(config.CACert)
if err != nil {
return nil, nil, nil, fmt.Errorf("error reading CA Cert from the given file name: %v", err)
}
customCAKey, err := parsePEMEncodedPrivateKey(customCAKeyData)
if err != nil {
return nil, nil, nil, fmt.Errorf("error parsing CA Key from the given file name: %v", err)
}
customCACert, err := parsePEMEncodedCert(customCACertData)
if err != nil {
return nil, nil, nil, fmt.Errorf("error parsing CA Cert from the given file name: %v", err)
}
caSecret, caConfigMap = toCASecretAndConfigmap(customCAKey, customCACert, caSecretAndConfigMapName)
} else if config.CAKey != "" || config.CACert != "" {
// if only one of the custom CA Key or Cert is provided
return nil, nil, nil, ErrCAKeyAndCACertReq
}
hasAppSecret := appSecret != nil
hasCASecretAndConfigMap := caSecret != nil && caConfigMap != nil
switch {
case hasAppSecret && hasCASecretAndConfigMap:
return appSecret, caConfigMap, caSecret, nil
case hasAppSecret && !hasCASecretAndConfigMap:
return nil, nil, nil, ErrCANotFound
case !hasAppSecret && hasCASecretAndConfigMap:
// Note: if a custom CA is passed in my the user it takes preference over an already
// generated CA secret and CA configmap that might exist in the cluster
caKey, err := parsePEMEncodedPrivateKey(caSecret.Data[TLSPrivateCAKeyKey])
if err != nil {
return nil, nil, nil, err
}
caCert, err := parsePEMEncodedCert([]byte(caConfigMap.Data[TLSCACertKey]))
if err != nil {
return nil, nil, nil, err
}
key, err := newPrivateKey()
if err != nil {
return nil, nil, nil, err
}
cert, err := newSignedCertificate(config, service, key, caCert, caKey)
if err != nil {
return nil, nil, nil, err
}
appSecret, err := scg.KubeClient.CoreV1().Secrets(ns).Create(toTLSSecret(key, cert, appSecretName))
if err != nil {
return nil, nil, nil, err
}
return appSecret, caConfigMap, caSecret, nil
case !hasAppSecret && !hasCASecretAndConfigMap:
// If no custom CAKey and CACert are provided we have to generate them
caKey, err := newPrivateKey()
if err != nil {
return nil, nil, nil, err
}
caCert, err := newSelfSignedCACertificate(caKey)
if err != nil {
return nil, nil, nil, err
}
caSecret, caConfigMap := toCASecretAndConfigmap(caKey, caCert, caSecretAndConfigMapName)
caSecret, err = scg.KubeClient.CoreV1().Secrets(ns).Create(caSecret)
if err != nil {
return nil, nil, nil, err
}
caConfigMap, err = scg.KubeClient.CoreV1().ConfigMaps(ns).Create(caConfigMap)
if err != nil {
return nil, nil, nil, err
}
key, err := newPrivateKey()
if err != nil {
return nil, nil, nil, err
}
cert, err := newSignedCertificate(config, service, key, caCert, caKey)
if err != nil {
return nil, nil, nil, err
}
appSecret, err := scg.KubeClient.CoreV1().Secrets(ns).Create(toTLSSecret(key, cert, appSecretName))
if err != nil {
return nil, nil, nil, err
}
return appSecret, caConfigMap, caSecret, nil
default:
return nil, nil, nil, ErrInternal
}
}
func | verifyConfig | identifier_name | |
tls.go |
import (
"crypto/rsa"
"crypto/x509"
"errors"
"fmt"
"io/ioutil"
"strings"
"k8s.io/api/core/v1"
apiErrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
)
// CertType defines the type of the cert.
type CertType int
const (
// ClientAndServingCert defines both client and serving cert.
ClientAndServingCert CertType = iota
// ServingCert defines a serving cert.
ServingCert
// ClientCert defines a client cert.
ClientCert
)
// CertConfig configures how to generate the Cert.
type CertConfig struct {
// CertName is the name of the cert.
CertName string
// Optional CertType. Serving, client or both; defaults to both.
CertType CertType
// Optional CommonName is the common name of the cert; defaults to "".
CommonName string
// Optional Organization is Organization of the cert; defaults to "".
Organization []string
// Optional CA Key, if user wants to provide custom CA key via a file path.
CAKey string
// Optional CA Certificate, if user wants to provide custom CA cert via file path.
CACert string
// TODO: consider to add passed in SAN fields.
}
// CertGenerator is an operator specific TLS tool that generates TLS assets for the deploying a user's application.
type CertGenerator interface {
// GenerateCert generates a secret containing TLS encryption key and cert, a Secret
// containing the CA key, and a ConfigMap containing the CA Certificate given the Custom
// Resource(CR) "cr", the Kubernetes Service "Service", and the CertConfig "config".
//
// GenerateCert creates and manages TLS key and cert and CA with the following:
// CA creation and management:
// - If CA is not given:
// - A unique CA is generated for the CR.
// - CA's key is packaged into a Secret as shown below.
// - CA's cert is packaged in a ConfigMap as shown below.
// - The CA Secret and ConfigMap are created on the k8s cluster in the CR's namespace before
// returned to the user. The CertGenerator manages the CA Secret and ConfigMap to ensure it's
// unqiue per CR.
// - If CA is given:
// - CA's key is packaged into a Secret as shown below.
// - CA's cert is packaged in a ConfigMap as shown below.
// - The CA Secret and ConfigMap are returned but not created in the K8s cluster in the CR's
// namespace. The CertGenerator doesn't manage the CA because the user controls the lifecycle
// of the CA.
//
// TLS Key and Cert Creation and Management:
// - A unique TLS cert and key pair is generated per CR + CertConfig.CertName.
// - The CA is used to generate and sign the TLS cert.
// - The signing process uses the passed in "service" to set the Subject Alternative Names(SAN)
// for the certificate. We assume that the deployed applications are typically communicated
// with via a Kubernetes Service. The SAN is set to the FQDN of the service
// `<service-name>.<service-namespace>.svc.cluster.local`.
// - Once TLS key and cert are created, they are packaged into a secret as shown below.
// - Finally, the secret are created on the k8s cluster in the CR's namespace before returned to
// the user. The CertGenerator manages this secret to ensure that it is unique per CR +
// CertConfig.CertName.
//
// TLS encryption key and cert Secret format:
// kind: Secret
// apiVersion: v1
// metadata:
// name: <cr-kind>-<cr-name>-<CertConfig.CertName>
// namespace: <cr-namespace>
// data:
// tls.crt: ...
// tls.key: ...
//
// CA Certificate ConfigMap format:
// kind: ConfigMap
// apiVersion: v1
// metadata:
// name: <cr-kind>-<cr-name>-ca
// namespace: <cr-namespace>
// data:
// ca.crt: ...
//
// CA Key Secret format:
// kind: Secret
// apiVersion: v1
// metadata:
// name: <cr-kind>-<cr-name>-ca
// namespace: <cr-namespace>
// data:
// ca.key: ..
GenerateCert(cr runtime.Object, service *v1.Service, config *CertConfig) (*v1.Secret, *v1.ConfigMap, *v1.Secret, error)
}
const (
// TLSPrivateCAKeyKey is the key for the private CA key field.
TLSPrivateCAKeyKey = "ca.key"
// TLSCertKey is the key for tls CA certificates.
TLSCACertKey = "ca.crt"
)
// NewSDKCertGenerator constructs a new CertGenerator given the kubeClient.
func NewSDKCertGenerator(kubeClient kubernetes.Interface) CertGenerator {
return &SDKCertGenerator{KubeClient: kubeClient}
}
type SDKCertGenerator struct {
KubeClient kubernetes.Interface
}
// GenerateCert returns a secret containing the TLS encryption key and cert,
// a ConfigMap containing the CA Certificate and a Secret containing the CA key or it
// returns a error incase something goes wrong.
func (scg *SDKCertGenerator) GenerateCert(cr runtime.Object, service *v1.Service, config *CertConfig) (*v1.Secret, *v1.ConfigMap, *v1.Secret, error) {
if err := verifyConfig(config); err != nil {
return nil, nil, nil, err
}
k, n, ns, err := toKindNameNamespace(cr)
if err != nil {
return nil, nil, nil, err
}
appSecretName := ToAppSecretName(k, n, config.CertName)
appSecret, err := getAppSecretInCluster(scg.KubeClient, appSecretName, ns)
if err != nil {
return nil, nil, nil, err
}
caSecretAndConfigMapName := ToCASecretAndConfigMapName(k, n)
var (
caSecret *v1.Secret
caConfigMap *v1.ConfigMap
)
caSecret, caConfigMap, err = getCASecretAndConfigMapInCluster(scg.KubeClient, caSecretAndConfigMapName, ns)
if err != nil {
return nil, nil, nil, err
}
if config.CAKey != "" && config.CACert != "" {
// custom CA provided by the user.
customCAKeyData, err := ioutil.ReadFile(config.CAKey)
if err != nil {
return nil, nil, nil, fmt.Errorf("error reading CA Key from the given file name: %v", err)
}
customCACertData, err := ioutil.ReadFile(config.CACert)
if err != nil {
return nil, nil, nil, fmt.Errorf("error reading CA Cert from the given file name: %v", err)
}
customCAKey, err := parsePEMEncodedPrivateKey(customCAKeyData)
if err != nil {
return nil, nil, nil, fmt.Errorf("error parsing CA Key from the given file name: %v", err)
}
customCACert, err := parsePEMEncodedCert(customCACertData)
if err != nil {
return nil, nil, nil, fmt.Errorf("error parsing CA Cert from the given file name: %v", err)
}
caSecret, caConfigMap = toCASecretAndConfigmap(customCAKey, customCACert, caSecretAndConfigMapName)
} else if config.CAKey != "" || config.CACert != "" {
// if only one of the custom CA Key or Cert is provided
return nil, nil, nil, ErrCAKeyAndCACertReq
}
hasAppSecret := appSecret != nil
hasCASecretAndConfigMap := caSecret != nil && caConfigMap != nil
switch {
case hasAppSecret && hasCASecretAndConfigMap:
return appSecret, caConfigMap, caSecret, nil
case hasAppSecret && !hasCASecretAndConfigMap:
return nil, nil, nil, ErrCANotFound
case !hasAppSecret && hasCASecretAndConfigMap:
// Note: if a custom CA is passed in my the user it takes preference over an already
// generated CA secret and CA configmap that might exist in the cluster
caKey, err := parsePEMEncodedPrivateKey(caSecret.Data[TLSPrivateCAKeyKey])
if err != nil {
return nil, nil, nil, err
}
caCert, err := parsePEMEncodedCert([]byte(caConfigMap.Data[TLSCACertKey]))
if err != nil {
return nil, nil, nil | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tlsutil | random_line_split | |
tls.go | used to generate and sign the TLS cert.
// - The signing process uses the passed in "service" to set the Subject Alternative Names(SAN)
// for the certificate. We assume that the deployed applications are typically communicated
// with via a Kubernetes Service. The SAN is set to the FQDN of the service
// `<service-name>.<service-namespace>.svc.cluster.local`.
// - Once TLS key and cert are created, they are packaged into a secret as shown below.
// - Finally, the secret are created on the k8s cluster in the CR's namespace before returned to
// the user. The CertGenerator manages this secret to ensure that it is unique per CR +
// CertConfig.CertName.
//
// TLS encryption key and cert Secret format:
// kind: Secret
// apiVersion: v1
// metadata:
// name: <cr-kind>-<cr-name>-<CertConfig.CertName>
// namespace: <cr-namespace>
// data:
// tls.crt: ...
// tls.key: ...
//
// CA Certificate ConfigMap format:
// kind: ConfigMap
// apiVersion: v1
// metadata:
// name: <cr-kind>-<cr-name>-ca
// namespace: <cr-namespace>
// data:
// ca.crt: ...
//
// CA Key Secret format:
// kind: Secret
// apiVersion: v1
// metadata:
// name: <cr-kind>-<cr-name>-ca
// namespace: <cr-namespace>
// data:
// ca.key: ..
GenerateCert(cr runtime.Object, service *v1.Service, config *CertConfig) (*v1.Secret, *v1.ConfigMap, *v1.Secret, error)
}
const (
// TLSPrivateCAKeyKey is the key for the private CA key field.
TLSPrivateCAKeyKey = "ca.key"
// TLSCertKey is the key for tls CA certificates.
TLSCACertKey = "ca.crt"
)
// NewSDKCertGenerator constructs a new CertGenerator given the kubeClient.
func NewSDKCertGenerator(kubeClient kubernetes.Interface) CertGenerator {
return &SDKCertGenerator{KubeClient: kubeClient}
}
type SDKCertGenerator struct {
KubeClient kubernetes.Interface
}
// GenerateCert returns a secret containing the TLS encryption key and cert,
// a ConfigMap containing the CA Certificate and a Secret containing the CA key or it
// returns a error incase something goes wrong.
func (scg *SDKCertGenerator) GenerateCert(cr runtime.Object, service *v1.Service, config *CertConfig) (*v1.Secret, *v1.ConfigMap, *v1.Secret, error) {
if err := verifyConfig(config); err != nil {
return nil, nil, nil, err
}
k, n, ns, err := toKindNameNamespace(cr)
if err != nil {
return nil, nil, nil, err
}
appSecretName := ToAppSecretName(k, n, config.CertName)
appSecret, err := getAppSecretInCluster(scg.KubeClient, appSecretName, ns)
if err != nil {
return nil, nil, nil, err
}
caSecretAndConfigMapName := ToCASecretAndConfigMapName(k, n)
var (
caSecret *v1.Secret
caConfigMap *v1.ConfigMap
)
caSecret, caConfigMap, err = getCASecretAndConfigMapInCluster(scg.KubeClient, caSecretAndConfigMapName, ns)
if err != nil {
return nil, nil, nil, err
}
if config.CAKey != "" && config.CACert != "" {
// custom CA provided by the user.
customCAKeyData, err := ioutil.ReadFile(config.CAKey)
if err != nil {
return nil, nil, nil, fmt.Errorf("error reading CA Key from the given file name: %v", err)
}
customCACertData, err := ioutil.ReadFile(config.CACert)
if err != nil {
return nil, nil, nil, fmt.Errorf("error reading CA Cert from the given file name: %v", err)
}
customCAKey, err := parsePEMEncodedPrivateKey(customCAKeyData)
if err != nil {
return nil, nil, nil, fmt.Errorf("error parsing CA Key from the given file name: %v", err)
}
customCACert, err := parsePEMEncodedCert(customCACertData)
if err != nil {
return nil, nil, nil, fmt.Errorf("error parsing CA Cert from the given file name: %v", err)
}
caSecret, caConfigMap = toCASecretAndConfigmap(customCAKey, customCACert, caSecretAndConfigMapName)
} else if config.CAKey != "" || config.CACert != "" {
// if only one of the custom CA Key or Cert is provided
return nil, nil, nil, ErrCAKeyAndCACertReq
}
hasAppSecret := appSecret != nil
hasCASecretAndConfigMap := caSecret != nil && caConfigMap != nil
switch {
case hasAppSecret && hasCASecretAndConfigMap:
return appSecret, caConfigMap, caSecret, nil
case hasAppSecret && !hasCASecretAndConfigMap:
return nil, nil, nil, ErrCANotFound
case !hasAppSecret && hasCASecretAndConfigMap:
// Note: if a custom CA is passed in my the user it takes preference over an already
// generated CA secret and CA configmap that might exist in the cluster
caKey, err := parsePEMEncodedPrivateKey(caSecret.Data[TLSPrivateCAKeyKey])
if err != nil {
return nil, nil, nil, err
}
caCert, err := parsePEMEncodedCert([]byte(caConfigMap.Data[TLSCACertKey]))
if err != nil {
return nil, nil, nil, err
}
key, err := newPrivateKey()
if err != nil {
return nil, nil, nil, err
}
cert, err := newSignedCertificate(config, service, key, caCert, caKey)
if err != nil {
return nil, nil, nil, err
}
appSecret, err := scg.KubeClient.CoreV1().Secrets(ns).Create(toTLSSecret(key, cert, appSecretName))
if err != nil {
return nil, nil, nil, err
}
return appSecret, caConfigMap, caSecret, nil
case !hasAppSecret && !hasCASecretAndConfigMap:
// If no custom CAKey and CACert are provided we have to generate them
caKey, err := newPrivateKey()
if err != nil {
return nil, nil, nil, err
}
caCert, err := newSelfSignedCACertificate(caKey)
if err != nil {
return nil, nil, nil, err
}
caSecret, caConfigMap := toCASecretAndConfigmap(caKey, caCert, caSecretAndConfigMapName)
caSecret, err = scg.KubeClient.CoreV1().Secrets(ns).Create(caSecret)
if err != nil {
return nil, nil, nil, err
}
caConfigMap, err = scg.KubeClient.CoreV1().ConfigMaps(ns).Create(caConfigMap)
if err != nil {
return nil, nil, nil, err
}
key, err := newPrivateKey()
if err != nil {
return nil, nil, nil, err
}
cert, err := newSignedCertificate(config, service, key, caCert, caKey)
if err != nil {
return nil, nil, nil, err
}
appSecret, err := scg.KubeClient.CoreV1().Secrets(ns).Create(toTLSSecret(key, cert, appSecretName))
if err != nil {
return nil, nil, nil, err
}
return appSecret, caConfigMap, caSecret, nil
default:
return nil, nil, nil, ErrInternal
}
}
func verifyConfig(config *CertConfig) error {
if config == nil {
return errors.New("nil CertConfig not allowed")
}
if config.CertName == "" {
return errors.New("empty CertConfig.CertName not allowed")
}
return nil
}
func ToAppSecretName(kind, name, certName string) string {
return strings.ToLower(kind) + "-" + name + "-" + certName
}
func ToCASecretAndConfigMapName(kind, name string) string {
return strings.ToLower(kind) + "-" + name + "-ca"
}
func getAppSecretInCluster(kubeClient kubernetes.Interface, name, namespace string) (*v1.Secret, error) | {
se, err := kubeClient.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
if err != nil && !apiErrors.IsNotFound(err) {
return nil, err
}
if apiErrors.IsNotFound(err) {
return nil, nil
}
return se, nil
} | identifier_body | |
universal.rs | Right
pub const VPRE: u8 = 22; // VOWEL_PRE / VOWEL_PRE_ABOVE / VOWEL_PRE_ABOVE_POST / VOWEL_PRE_POST
pub const VMABV: u8 = 37; // VOWEL_MOD_ABOVE
pub const VMBLW: u8 = 38; // VOWEL_MOD_BELOW
pub const VMPST: u8 = 39; // VOWEL_MOD_POST
pub const VMPRE: u8 = 23; // VOWEL_MOD_PRE
pub const SMABV: u8 = 41; // SYM_MOD_ABOVE
pub const SMBLW: u8 = 42; // SYM_MOD_BELOW
pub const FMABV: u8 = 45; // CONS_FINAL_MOD UIPC = Top
pub const FMBLW: u8 = 46; // CONS_FINAL_MOD UIPC = Bottom
pub const FMPST: u8 = 47; // CONS_FINAL_MOD UIPC = Not_Applicable
}
// These features are applied all at once, before reordering.
const BASIC_FEATURES: &[Tag] = &[
feature::RAKAR_FORMS,
feature::ABOVE_BASE_FORMS,
feature::BELOW_BASE_FORMS,
feature::HALF_FORMS,
feature::POST_BASE_FORMS,
feature::VATTU_VARIANTS,
feature::CONJUNCT_FORMS,
];
const TOPOGRAPHICAL_FEATURES: &[Tag] = &[
feature::ISOLATED_FORMS,
feature::INITIAL_FORMS,
feature::MEDIAL_FORMS_1,
feature::TERMINAL_FORMS_1,
];
// Same order as use_topographical_features.
#[derive(Clone, Copy, PartialEq)]
enum JoiningForm {
Isolated = 0,
Initial,
Medial,
Terminal,
}
// These features are applied all at once, after reordering and clearing syllables.
const OTHER_FEATURES: &[Tag] = &[
feature::ABOVE_BASE_SUBSTITUTIONS,
feature::BELOW_BASE_SUBSTITUTIONS,
feature::HALANT_FORMS,
feature::PRE_BASE_SUBSTITUTIONS,
feature::POST_BASE_SUBSTITUTIONS,
];
impl GlyphInfo {
pub(crate) fn use_category(&self) -> Category {
let v: &[u8; 4] = bytemuck::cast_ref(&self.var2);
v[2]
}
fn set_use_category(&mut self, c: Category) {
let v: &mut [u8; 4] = bytemuck::cast_mut(&mut self.var2);
v[2] = c;
}
fn is_halant_use(&self) -> bool {
matches!(self.use_category(), category::H | category::HVM) && !self.is_ligated()
}
}
struct UniversalShapePlan {
rphf_mask: Mask,
arabic_plan: Option<ArabicShapePlan>,
}
impl UniversalShapePlan {
fn new(plan: &ShapePlan) -> UniversalShapePlan {
let mut arabic_plan = None;
if plan.script.map_or(false, has_arabic_joining) {
arabic_plan = Some(super::arabic::ArabicShapePlan::new(plan));
}
UniversalShapePlan {
rphf_mask: plan.ot_map.one_mask(feature::REPH_FORMS),
arabic_plan,
}
}
}
fn collect_features(planner: &mut ShapePlanner) {
// Do this before any lookups have been applied.
planner.ot_map.add_gsub_pause(Some(setup_syllables));
// Default glyph pre-processing group
planner.ot_map.enable_feature(feature::LOCALIZED_FORMS, FeatureFlags::empty(), 1);
planner.ot_map.enable_feature(feature::GLYPH_COMPOSITION_DECOMPOSITION, FeatureFlags::empty(), 1);
planner.ot_map.enable_feature(feature::NUKTA_FORMS, FeatureFlags::empty(), 1);
planner.ot_map.enable_feature(feature::AKHANDS, FeatureFlags::MANUAL_ZWJ, 1);
// Reordering group
planner.ot_map.add_gsub_pause(Some(crate::ot::clear_substitution_flags));
planner.ot_map.add_feature(feature::REPH_FORMS, FeatureFlags::MANUAL_ZWJ, 1);
planner.ot_map.add_gsub_pause(Some(record_rphf));
planner.ot_map.add_gsub_pause(Some(crate::ot::clear_substitution_flags));
planner.ot_map.enable_feature(feature::PRE_BASE_FORMS, FeatureFlags::MANUAL_ZWJ, 1);
planner.ot_map.add_gsub_pause(Some(record_pref));
// Orthographic unit shaping group
for feature in BASIC_FEATURES {
planner.ot_map.enable_feature(*feature, FeatureFlags::MANUAL_ZWJ, 1);
}
planner.ot_map.add_gsub_pause(Some(reorder));
planner.ot_map.add_gsub_pause(Some(crate::ot::clear_syllables));
// Topographical features
for feature in TOPOGRAPHICAL_FEATURES {
planner.ot_map.add_feature(*feature, FeatureFlags::empty(), 1);
}
planner.ot_map.add_gsub_pause(None);
// Standard typographic presentation
for feature in OTHER_FEATURES {
planner.ot_map.enable_feature(*feature, FeatureFlags::empty(), 1);
}
}
fn setup_syllables(plan: &ShapePlan, _: &Face, buffer: &mut Buffer) {
super::universal_machine::find_syllables(buffer);
foreach_syllable!(buffer, start, end, {
buffer.unsafe_to_break(start, end);
});
setup_rphf_mask(plan, buffer);
setup_topographical_masks(plan, buffer);
}
fn setup_rphf_mask(plan: &ShapePlan, buffer: &mut Buffer) {
let universal_plan = plan.data::<UniversalShapePlan>();
let mask = universal_plan.rphf_mask;
if mask == 0 {
return;
}
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
let limit = if buffer.info[start].use_category() == category::R {
1
} else {
core::cmp::min(3, end - start)
};
for i in start..start+limit {
buffer.info[i].mask |= mask;
}
start = end;
end = buffer.next_syllable(start);
}
}
fn setup_topographical_masks(plan: &ShapePlan, buffer: &mut Buffer) {
use super::universal_machine::SyllableType;
let mut masks = [0; 4];
let mut all_masks = 0;
for i in 0..4 {
masks[i] = plan.ot_map.one_mask(TOPOGRAPHICAL_FEATURES[i]);
if masks[i] == plan.ot_map.global_mask() {
masks[i] = 0;
}
all_masks |= masks[i];
}
if all_masks == 0 {
return;
}
let other_masks = !all_masks;
let mut last_start = 0;
let mut last_form = None;
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
let syllable = buffer.info[start].syllable() & 0x0F;
if syllable == SyllableType::IndependentCluster as u8 ||
syllable == SyllableType::SymbolCluster as u8 ||
syllable == SyllableType::NonCluster as u8
{
last_form = None;
} else {
let join = last_form == Some(JoiningForm::Terminal) || last_form == Some(JoiningForm::Isolated);
if join {
// Fixup previous syllable's form.
let form = if last_form == Some(JoiningForm::Terminal) {
JoiningForm::Medial
} else {
JoiningForm::Initial
};
for i in last_start..start {
buffer.info[i].mask = (buffer.info[i].mask & other_masks) | masks[form as usize];
}
}
// Form for this syllable.
let form = if join { JoiningForm::Terminal } else { JoiningForm::Isolated };
last_form = Some(form);
for i in start..end {
buffer.info[i].mask = (buffer.info[i].mask & other_masks) | masks[form as usize];
}
}
last_start = start;
start = end;
end = buffer.next_syllable(start);
}
}
fn record_rphf(plan: &ShapePlan, _: &Face, buffer: &mut Buffer) | {
let universal_plan = plan.data::<UniversalShapePlan>();
let mask = universal_plan.rphf_mask;
if mask == 0 {
return;
}
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
// Mark a substituted repha as USE_R.
for i in start..end {
if buffer.info[i].mask & mask == 0 {
break;
}
if buffer.info[i].is_substituted() {
buffer.info[i].set_use_category(category::R);
break; | identifier_body | |
universal.rs | &[u8; 4] = bytemuck::cast_ref(&self.var2);
v[2]
}
fn set_use_category(&mut self, c: Category) {
let v: &mut [u8; 4] = bytemuck::cast_mut(&mut self.var2);
v[2] = c;
}
fn is_halant_use(&self) -> bool {
matches!(self.use_category(), category::H | category::HVM) && !self.is_ligated()
}
}
struct UniversalShapePlan {
rphf_mask: Mask,
arabic_plan: Option<ArabicShapePlan>,
}
impl UniversalShapePlan {
fn new(plan: &ShapePlan) -> UniversalShapePlan {
let mut arabic_plan = None;
if plan.script.map_or(false, has_arabic_joining) {
arabic_plan = Some(super::arabic::ArabicShapePlan::new(plan));
}
UniversalShapePlan {
rphf_mask: plan.ot_map.one_mask(feature::REPH_FORMS),
arabic_plan,
}
}
}
fn collect_features(planner: &mut ShapePlanner) {
// Do this before any lookups have been applied.
planner.ot_map.add_gsub_pause(Some(setup_syllables));
// Default glyph pre-processing group
planner.ot_map.enable_feature(feature::LOCALIZED_FORMS, FeatureFlags::empty(), 1);
planner.ot_map.enable_feature(feature::GLYPH_COMPOSITION_DECOMPOSITION, FeatureFlags::empty(), 1);
planner.ot_map.enable_feature(feature::NUKTA_FORMS, FeatureFlags::empty(), 1);
planner.ot_map.enable_feature(feature::AKHANDS, FeatureFlags::MANUAL_ZWJ, 1);
// Reordering group
planner.ot_map.add_gsub_pause(Some(crate::ot::clear_substitution_flags));
planner.ot_map.add_feature(feature::REPH_FORMS, FeatureFlags::MANUAL_ZWJ, 1);
planner.ot_map.add_gsub_pause(Some(record_rphf));
planner.ot_map.add_gsub_pause(Some(crate::ot::clear_substitution_flags));
planner.ot_map.enable_feature(feature::PRE_BASE_FORMS, FeatureFlags::MANUAL_ZWJ, 1);
planner.ot_map.add_gsub_pause(Some(record_pref));
// Orthographic unit shaping group
for feature in BASIC_FEATURES {
planner.ot_map.enable_feature(*feature, FeatureFlags::MANUAL_ZWJ, 1);
}
planner.ot_map.add_gsub_pause(Some(reorder));
planner.ot_map.add_gsub_pause(Some(crate::ot::clear_syllables));
// Topographical features
for feature in TOPOGRAPHICAL_FEATURES {
planner.ot_map.add_feature(*feature, FeatureFlags::empty(), 1);
}
planner.ot_map.add_gsub_pause(None);
// Standard typographic presentation
for feature in OTHER_FEATURES {
planner.ot_map.enable_feature(*feature, FeatureFlags::empty(), 1);
}
}
fn setup_syllables(plan: &ShapePlan, _: &Face, buffer: &mut Buffer) {
super::universal_machine::find_syllables(buffer);
foreach_syllable!(buffer, start, end, {
buffer.unsafe_to_break(start, end);
});
setup_rphf_mask(plan, buffer);
setup_topographical_masks(plan, buffer);
}
fn setup_rphf_mask(plan: &ShapePlan, buffer: &mut Buffer) {
let universal_plan = plan.data::<UniversalShapePlan>();
let mask = universal_plan.rphf_mask;
if mask == 0 {
return;
}
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
let limit = if buffer.info[start].use_category() == category::R {
1
} else {
core::cmp::min(3, end - start)
};
for i in start..start+limit {
buffer.info[i].mask |= mask;
}
start = end;
end = buffer.next_syllable(start);
}
}
fn setup_topographical_masks(plan: &ShapePlan, buffer: &mut Buffer) {
use super::universal_machine::SyllableType;
let mut masks = [0; 4];
let mut all_masks = 0;
for i in 0..4 {
masks[i] = plan.ot_map.one_mask(TOPOGRAPHICAL_FEATURES[i]);
if masks[i] == plan.ot_map.global_mask() {
masks[i] = 0;
}
all_masks |= masks[i];
}
if all_masks == 0 {
return;
}
let other_masks = !all_masks;
let mut last_start = 0;
let mut last_form = None;
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
let syllable = buffer.info[start].syllable() & 0x0F;
if syllable == SyllableType::IndependentCluster as u8 ||
syllable == SyllableType::SymbolCluster as u8 ||
syllable == SyllableType::NonCluster as u8
{
last_form = None;
} else {
let join = last_form == Some(JoiningForm::Terminal) || last_form == Some(JoiningForm::Isolated);
if join {
// Fixup previous syllable's form.
let form = if last_form == Some(JoiningForm::Terminal) {
JoiningForm::Medial
} else {
JoiningForm::Initial
};
for i in last_start..start {
buffer.info[i].mask = (buffer.info[i].mask & other_masks) | masks[form as usize];
}
}
// Form for this syllable.
let form = if join { JoiningForm::Terminal } else { JoiningForm::Isolated };
last_form = Some(form);
for i in start..end {
buffer.info[i].mask = (buffer.info[i].mask & other_masks) | masks[form as usize];
}
}
last_start = start;
start = end;
end = buffer.next_syllable(start);
}
}
fn record_rphf(plan: &ShapePlan, _: &Face, buffer: &mut Buffer) {
let universal_plan = plan.data::<UniversalShapePlan>();
let mask = universal_plan.rphf_mask;
if mask == 0 {
return;
}
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
// Mark a substituted repha as USE_R.
for i in start..end {
if buffer.info[i].mask & mask == 0 {
break;
}
if buffer.info[i].is_substituted() {
buffer.info[i].set_use_category(category::R);
break;
}
}
start = end;
end = buffer.next_syllable(start);
}
}
fn reorder(_: &ShapePlan, face: &Face, buffer: &mut Buffer) {
insert_dotted_circles(face, buffer);
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
reorder_syllable(start, end, buffer);
start = end;
end = buffer.next_syllable(start);
}
}
fn insert_dotted_circles(face: &Face, buffer: &mut Buffer) {
use super::universal_machine::SyllableType;
if buffer.flags.contains(BufferFlags::DO_NOT_INSERT_DOTTED_CIRCLE) {
return;
}
// Note: This loop is extra overhead, but should not be measurable.
// TODO Use a buffer scratch flag to remove the loop.
let has_broken_syllables = buffer.info_slice().iter()
.any(|info| info.syllable() & 0x0F == SyllableType::BrokenCluster as u8);
if !has_broken_syllables {
return;
}
let dottedcircle_glyph = match face.glyph_index(0x25CC) {
Some(g) => g.0 as u32,
None => return,
};
let mut dottedcircle = GlyphInfo {
glyph_id: dottedcircle_glyph,
..GlyphInfo::default()
};
dottedcircle.set_use_category(super::universal_table::get_category(0x25CC));
buffer.clear_output();
buffer.idx = 0;
let mut last_syllable = 0;
while buffer.idx < buffer.len {
let syllable = buffer.cur(0).syllable();
let syllable_type = syllable & 0x0F;
if last_syllable != syllable && syllable_type == SyllableType::BrokenCluster as u8 {
last_syllable = syllable;
let mut ginfo = dottedcircle;
ginfo.cluster = buffer.cur(0).cluster;
ginfo.mask = buffer.cur(0).mask;
ginfo.set_syllable(buffer.cur(0).syllable());
// Insert dottedcircle after possible Repha.
while buffer.idx < buffer.len &&
last_syllable == buffer.cur(0).syllable() &&
buffer.cur(0).use_category() == category::R
{
buffer.next_glyph();
}
buffer.output_info(ginfo); | } else { | random_line_split | |
universal.rs | _POST
pub const VMPRE: u8 = 23; // VOWEL_MOD_PRE
pub const SMABV: u8 = 41; // SYM_MOD_ABOVE
pub const SMBLW: u8 = 42; // SYM_MOD_BELOW
pub const FMABV: u8 = 45; // CONS_FINAL_MOD UIPC = Top
pub const FMBLW: u8 = 46; // CONS_FINAL_MOD UIPC = Bottom
pub const FMPST: u8 = 47; // CONS_FINAL_MOD UIPC = Not_Applicable
}
// These features are applied all at once, before reordering.
const BASIC_FEATURES: &[Tag] = &[
feature::RAKAR_FORMS,
feature::ABOVE_BASE_FORMS,
feature::BELOW_BASE_FORMS,
feature::HALF_FORMS,
feature::POST_BASE_FORMS,
feature::VATTU_VARIANTS,
feature::CONJUNCT_FORMS,
];
const TOPOGRAPHICAL_FEATURES: &[Tag] = &[
feature::ISOLATED_FORMS,
feature::INITIAL_FORMS,
feature::MEDIAL_FORMS_1,
feature::TERMINAL_FORMS_1,
];
// Same order as use_topographical_features.
#[derive(Clone, Copy, PartialEq)]
enum JoiningForm {
Isolated = 0,
Initial,
Medial,
Terminal,
}
// These features are applied all at once, after reordering and clearing syllables.
const OTHER_FEATURES: &[Tag] = &[
feature::ABOVE_BASE_SUBSTITUTIONS,
feature::BELOW_BASE_SUBSTITUTIONS,
feature::HALANT_FORMS,
feature::PRE_BASE_SUBSTITUTIONS,
feature::POST_BASE_SUBSTITUTIONS,
];
impl GlyphInfo {
pub(crate) fn use_category(&self) -> Category {
let v: &[u8; 4] = bytemuck::cast_ref(&self.var2);
v[2]
}
fn set_use_category(&mut self, c: Category) {
let v: &mut [u8; 4] = bytemuck::cast_mut(&mut self.var2);
v[2] = c;
}
fn is_halant_use(&self) -> bool {
matches!(self.use_category(), category::H | category::HVM) && !self.is_ligated()
}
}
struct UniversalShapePlan {
rphf_mask: Mask,
arabic_plan: Option<ArabicShapePlan>,
}
impl UniversalShapePlan {
fn new(plan: &ShapePlan) -> UniversalShapePlan {
let mut arabic_plan = None;
if plan.script.map_or(false, has_arabic_joining) {
arabic_plan = Some(super::arabic::ArabicShapePlan::new(plan));
}
UniversalShapePlan {
rphf_mask: plan.ot_map.one_mask(feature::REPH_FORMS),
arabic_plan,
}
}
}
fn collect_features(planner: &mut ShapePlanner) {
// Do this before any lookups have been applied.
planner.ot_map.add_gsub_pause(Some(setup_syllables));
// Default glyph pre-processing group
planner.ot_map.enable_feature(feature::LOCALIZED_FORMS, FeatureFlags::empty(), 1);
planner.ot_map.enable_feature(feature::GLYPH_COMPOSITION_DECOMPOSITION, FeatureFlags::empty(), 1);
planner.ot_map.enable_feature(feature::NUKTA_FORMS, FeatureFlags::empty(), 1);
planner.ot_map.enable_feature(feature::AKHANDS, FeatureFlags::MANUAL_ZWJ, 1);
// Reordering group
planner.ot_map.add_gsub_pause(Some(crate::ot::clear_substitution_flags));
planner.ot_map.add_feature(feature::REPH_FORMS, FeatureFlags::MANUAL_ZWJ, 1);
planner.ot_map.add_gsub_pause(Some(record_rphf));
planner.ot_map.add_gsub_pause(Some(crate::ot::clear_substitution_flags));
planner.ot_map.enable_feature(feature::PRE_BASE_FORMS, FeatureFlags::MANUAL_ZWJ, 1);
planner.ot_map.add_gsub_pause(Some(record_pref));
// Orthographic unit shaping group
for feature in BASIC_FEATURES {
planner.ot_map.enable_feature(*feature, FeatureFlags::MANUAL_ZWJ, 1);
}
planner.ot_map.add_gsub_pause(Some(reorder));
planner.ot_map.add_gsub_pause(Some(crate::ot::clear_syllables));
// Topographical features
for feature in TOPOGRAPHICAL_FEATURES {
planner.ot_map.add_feature(*feature, FeatureFlags::empty(), 1);
}
planner.ot_map.add_gsub_pause(None);
// Standard typographic presentation
for feature in OTHER_FEATURES {
planner.ot_map.enable_feature(*feature, FeatureFlags::empty(), 1);
}
}
fn setup_syllables(plan: &ShapePlan, _: &Face, buffer: &mut Buffer) {
super::universal_machine::find_syllables(buffer);
foreach_syllable!(buffer, start, end, {
buffer.unsafe_to_break(start, end);
});
setup_rphf_mask(plan, buffer);
setup_topographical_masks(plan, buffer);
}
fn setup_rphf_mask(plan: &ShapePlan, buffer: &mut Buffer) {
let universal_plan = plan.data::<UniversalShapePlan>();
let mask = universal_plan.rphf_mask;
if mask == 0 {
return;
}
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
let limit = if buffer.info[start].use_category() == category::R {
1
} else {
core::cmp::min(3, end - start)
};
for i in start..start+limit {
buffer.info[i].mask |= mask;
}
start = end;
end = buffer.next_syllable(start);
}
}
fn setup_topographical_masks(plan: &ShapePlan, buffer: &mut Buffer) {
use super::universal_machine::SyllableType;
let mut masks = [0; 4];
let mut all_masks = 0;
for i in 0..4 {
masks[i] = plan.ot_map.one_mask(TOPOGRAPHICAL_FEATURES[i]);
if masks[i] == plan.ot_map.global_mask() {
masks[i] = 0;
}
all_masks |= masks[i];
}
if all_masks == 0 {
return;
}
let other_masks = !all_masks;
let mut last_start = 0;
let mut last_form = None;
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
let syllable = buffer.info[start].syllable() & 0x0F;
if syllable == SyllableType::IndependentCluster as u8 ||
syllable == SyllableType::SymbolCluster as u8 ||
syllable == SyllableType::NonCluster as u8
{
last_form = None;
} else {
let join = last_form == Some(JoiningForm::Terminal) || last_form == Some(JoiningForm::Isolated);
if join {
// Fixup previous syllable's form.
let form = if last_form == Some(JoiningForm::Terminal) {
JoiningForm::Medial
} else {
JoiningForm::Initial
};
for i in last_start..start {
buffer.info[i].mask = (buffer.info[i].mask & other_masks) | masks[form as usize];
}
}
// Form for this syllable.
let form = if join { JoiningForm::Terminal } else { JoiningForm::Isolated };
last_form = Some(form);
for i in start..end {
buffer.info[i].mask = (buffer.info[i].mask & other_masks) | masks[form as usize];
}
}
last_start = start;
start = end;
end = buffer.next_syllable(start);
}
}
fn record_rphf(plan: &ShapePlan, _: &Face, buffer: &mut Buffer) {
let universal_plan = plan.data::<UniversalShapePlan>();
let mask = universal_plan.rphf_mask;
if mask == 0 {
return;
}
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
// Mark a substituted repha as USE_R.
for i in start..end {
if buffer.info[i].mask & mask == 0 {
break;
}
if buffer.info[i].is_substituted() {
buffer.info[i].set_use_category(category::R);
break;
}
}
start = end;
end = buffer.next_syllable(start);
}
}
fn reorder(_: &ShapePlan, face: &Face, buffer: &mut Buffer) {
insert_dotted_circles(face, buffer);
let mut start = 0;
let mut end = buffer.next_syllable(0);
while start < buffer.len {
reorder_syllable(start, end, buffer);
start = end;
end = buffer.next_syllable(start);
}
}
fn | insert_dotted_circles | identifier_name | |
api_op_CreateScan.go | ns, c.addOperationCreateScanMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateScanOutput)
out.ResultMetadata = metadata
return out, nil
}
type CreateScanInput struct {
// The identifier for an input resource used to create a scan.
//
// This member is required.
ResourceId types.ResourceId
// The unique name that CodeGuru Security uses to track revisions across multiple
// scans of the same resource. Only allowed for a STANDARD scan type. If not
// specified, it will be auto generated.
//
// This member is required.
ScanName *string
// The type of analysis you want CodeGuru Security to perform in the scan, either
// Security or All . The Security type only generates findings related to
// security. The All type generates both security findings and quality findings.
// Defaults to Security type if missing.
AnalysisType types.AnalysisType
// The idempotency token for the request. Amazon CodeGuru Security uses this value
// to prevent the accidental creation of duplicate scans if there are failures and
// retries.
ClientToken *string
// The type of scan, either Standard or Express . Defaults to Standard type if
// missing. Express scans run on limited resources and use a limited set of
// detectors to analyze your code in near-real time. Standard scans have standard
// resource limits and use the full set of detectors to analyze your code.
ScanType types.ScanType
// An array of key-value pairs used to tag a scan. A tag is a custom attribute
// label with two parts:
// - A tag key. For example, CostCenter , Environment , or Secret . Tag keys are
// case sensitive.
// - An optional tag value field. For example, 111122223333 , Production , or a
// team name. Omitting the tag value is the same as using an empty string. Tag
// values are case sensitive.
Tags map[string]string
noSmithyDocumentSerde
}
type CreateScanOutput struct {
// The identifier for the resource object that contains resources that were
// scanned.
//
// This member is required.
ResourceId types.ResourceId
// UUID that identifies the individual scan run.
//
// This member is required.
RunId *string
// The name of the scan.
//
// This member is required.
ScanName *string
// The current state of the scan. Returns either InProgress , Successful , or
// Failed .
//
// This member is required.
ScanState types.ScanState
// The ARN for the scan name.
ScanNameArn *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCreateScanMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateScan{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateScan{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addCreateScanResolveEndpointMiddleware(stack, options); err != nil |
if err = addIdempotencyToken_opCreateScanMiddleware(stack, options); err != nil {
return err
}
if err = addOpCreateScanValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateScan(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
type idempotencyToken_initializeOpCreateScan struct {
tokenProvider IdempotencyTokenProvider
}
func (*idempotencyToken_initializeOpCreateScan) ID() string {
return "OperationIdempotencyTokenAutoFill"
}
func (m *idempotencyToken_initializeOpCreateScan) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
if m.tokenProvider == nil {
return next.HandleInitialize(ctx, in)
}
input, ok := in.Parameters.(*CreateScanInput)
if !ok {
return out, metadata, fmt.Errorf("expected middleware input to be of type *CreateScanInput ")
}
if input.ClientToken == nil {
t, err := m.tokenProvider.GetIdempotencyToken()
if err != nil {
return out, metadata, err
}
input.ClientToken = &t
}
return next.HandleInitialize(ctx, in)
}
func addIdempotencyToken_opCreateScanMiddleware(stack *middleware.Stack, cfg Options) error {
return stack.Initialize.Add(&idempotencyToken_initializeOpCreateScan{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before)
}
func newServiceMetadataMiddleware_opCreateScan(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "codeguru-security",
OperationName: "CreateScan",
}
}
type opCreateScanResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opCreateScanResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opCreateScanResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "codeguru-security"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, | {
return err
} | conditional_block |
api_op_CreateScan.go | ns, c.addOperationCreateScanMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateScanOutput)
out.ResultMetadata = metadata
return out, nil
}
type CreateScanInput struct {
// The identifier for an input resource used to create a scan.
//
// This member is required.
ResourceId types.ResourceId
// The unique name that CodeGuru Security uses to track revisions across multiple
// scans of the same resource. Only allowed for a STANDARD scan type. If not
// specified, it will be auto generated.
//
// This member is required.
ScanName *string
// The type of analysis you want CodeGuru Security to perform in the scan, either
// Security or All . The Security type only generates findings related to
// security. The All type generates both security findings and quality findings.
// Defaults to Security type if missing.
AnalysisType types.AnalysisType
// The idempotency token for the request. Amazon CodeGuru Security uses this value
// to prevent the accidental creation of duplicate scans if there are failures and
// retries.
ClientToken *string
// The type of scan, either Standard or Express . Defaults to Standard type if
// missing. Express scans run on limited resources and use a limited set of
// detectors to analyze your code in near-real time. Standard scans have standard
// resource limits and use the full set of detectors to analyze your code.
ScanType types.ScanType
// An array of key-value pairs used to tag a scan. A tag is a custom attribute
// label with two parts:
// - A tag key. For example, CostCenter , Environment , or Secret . Tag keys are
// case sensitive.
// - An optional tag value field. For example, 111122223333 , Production , or a
// team name. Omitting the tag value is the same as using an empty string. Tag
// values are case sensitive.
Tags map[string]string
noSmithyDocumentSerde
}
type CreateScanOutput struct {
// The identifier for the resource object that contains resources that were
// scanned.
//
// This member is required.
ResourceId types.ResourceId
// UUID that identifies the individual scan run.
//
// This member is required.
RunId *string
// The name of the scan.
//
// This member is required.
ScanName *string
// The current state of the scan. Returns either InProgress , Successful , or
// Failed .
//
// This member is required.
ScanState types.ScanState
// The ARN for the scan name.
ScanNameArn *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) | (stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateScan{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateScan{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addCreateScanResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addIdempotencyToken_opCreateScanMiddleware(stack, options); err != nil {
return err
}
if err = addOpCreateScanValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateScan(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
type idempotencyToken_initializeOpCreateScan struct {
tokenProvider IdempotencyTokenProvider
}
func (*idempotencyToken_initializeOpCreateScan) ID() string {
return "OperationIdempotencyTokenAutoFill"
}
func (m *idempotencyToken_initializeOpCreateScan) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
if m.tokenProvider == nil {
return next.HandleInitialize(ctx, in)
}
input, ok := in.Parameters.(*CreateScanInput)
if !ok {
return out, metadata, fmt.Errorf("expected middleware input to be of type *CreateScanInput ")
}
if input.ClientToken == nil {
t, err := m.tokenProvider.GetIdempotencyToken()
if err != nil {
return out, metadata, err
}
input.ClientToken = &t
}
return next.HandleInitialize(ctx, in)
}
func addIdempotencyToken_opCreateScanMiddleware(stack *middleware.Stack, cfg Options) error {
return stack.Initialize.Add(&idempotencyToken_initializeOpCreateScan{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before)
}
func newServiceMetadataMiddleware_opCreateScan(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "codeguru-security",
OperationName: "CreateScan",
}
}
type opCreateScanResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opCreateScanResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opCreateScanResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "codeguru-security"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ | addOperationCreateScanMiddlewares | identifier_name |
api_op_CreateScan.go | Fns, c.addOperationCreateScanMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateScanOutput)
out.ResultMetadata = metadata
return out, nil
}
type CreateScanInput struct {
// The identifier for an input resource used to create a scan.
//
// This member is required.
ResourceId types.ResourceId
// The unique name that CodeGuru Security uses to track revisions across multiple
// scans of the same resource. Only allowed for a STANDARD scan type. If not
// specified, it will be auto generated.
//
// This member is required.
ScanName *string |
// The type of analysis you want CodeGuru Security to perform in the scan, either
// Security or All . The Security type only generates findings related to
// security. The All type generates both security findings and quality findings.
// Defaults to Security type if missing.
AnalysisType types.AnalysisType
// The idempotency token for the request. Amazon CodeGuru Security uses this value
// to prevent the accidental creation of duplicate scans if there are failures and
// retries.
ClientToken *string
// The type of scan, either Standard or Express . Defaults to Standard type if
// missing. Express scans run on limited resources and use a limited set of
// detectors to analyze your code in near-real time. Standard scans have standard
// resource limits and use the full set of detectors to analyze your code.
ScanType types.ScanType
// An array of key-value pairs used to tag a scan. A tag is a custom attribute
// label with two parts:
// - A tag key. For example, CostCenter , Environment , or Secret . Tag keys are
// case sensitive.
// - An optional tag value field. For example, 111122223333 , Production , or a
// team name. Omitting the tag value is the same as using an empty string. Tag
// values are case sensitive.
Tags map[string]string
noSmithyDocumentSerde
}
type CreateScanOutput struct {
// The identifier for the resource object that contains resources that were
// scanned.
//
// This member is required.
ResourceId types.ResourceId
// UUID that identifies the individual scan run.
//
// This member is required.
RunId *string
// The name of the scan.
//
// This member is required.
ScanName *string
// The current state of the scan. Returns either InProgress , Successful , or
// Failed .
//
// This member is required.
ScanState types.ScanState
// The ARN for the scan name.
ScanNameArn *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCreateScanMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateScan{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateScan{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addCreateScanResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addIdempotencyToken_opCreateScanMiddleware(stack, options); err != nil {
return err
}
if err = addOpCreateScanValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateScan(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
type idempotencyToken_initializeOpCreateScan struct {
tokenProvider IdempotencyTokenProvider
}
func (*idempotencyToken_initializeOpCreateScan) ID() string {
return "OperationIdempotencyTokenAutoFill"
}
func (m *idempotencyToken_initializeOpCreateScan) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
if m.tokenProvider == nil {
return next.HandleInitialize(ctx, in)
}
input, ok := in.Parameters.(*CreateScanInput)
if !ok {
return out, metadata, fmt.Errorf("expected middleware input to be of type *CreateScanInput ")
}
if input.ClientToken == nil {
t, err := m.tokenProvider.GetIdempotencyToken()
if err != nil {
return out, metadata, err
}
input.ClientToken = &t
}
return next.HandleInitialize(ctx, in)
}
func addIdempotencyToken_opCreateScanMiddleware(stack *middleware.Stack, cfg Options) error {
return stack.Initialize.Add(&idempotencyToken_initializeOpCreateScan{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before)
}
func newServiceMetadataMiddleware_opCreateScan(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "codeguru-security",
OperationName: "CreateScan",
}
}
type opCreateScanResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opCreateScanResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opCreateScanResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "codeguru-security"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ | random_line_split | |
api_op_CreateScan.go | ScanName *string
// The current state of the scan. Returns either InProgress , Successful , or
// Failed .
//
// This member is required.
ScanState types.ScanState
// The ARN for the scan name.
ScanNameArn *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCreateScanMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateScan{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateScan{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addCreateScanResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addIdempotencyToken_opCreateScanMiddleware(stack, options); err != nil {
return err
}
if err = addOpCreateScanValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateScan(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
type idempotencyToken_initializeOpCreateScan struct {
tokenProvider IdempotencyTokenProvider
}
func (*idempotencyToken_initializeOpCreateScan) ID() string {
return "OperationIdempotencyTokenAutoFill"
}
func (m *idempotencyToken_initializeOpCreateScan) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
if m.tokenProvider == nil {
return next.HandleInitialize(ctx, in)
}
input, ok := in.Parameters.(*CreateScanInput)
if !ok {
return out, metadata, fmt.Errorf("expected middleware input to be of type *CreateScanInput ")
}
if input.ClientToken == nil {
t, err := m.tokenProvider.GetIdempotencyToken()
if err != nil {
return out, metadata, err
}
input.ClientToken = &t
}
return next.HandleInitialize(ctx, in)
}
func addIdempotencyToken_opCreateScanMiddleware(stack *middleware.Stack, cfg Options) error {
return stack.Initialize.Add(&idempotencyToken_initializeOpCreateScan{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before)
}
func newServiceMetadataMiddleware_opCreateScan(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "codeguru-security",
OperationName: "CreateScan",
}
}
type opCreateScanResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opCreateScanResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opCreateScanResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "codeguru-security"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "codeguru-security"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("codeguru-security")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
func addCreateScanResolveEndpointMiddleware(stack *middleware.Stack, options Options) error | {
return stack.Serialize.Insert(&opCreateScanResolveEndpointMiddleware{
EndpointResolver: options.EndpointResolverV2,
BuiltInResolver: &builtInResolver{
Region: options.Region,
UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
UseFIPS: options.EndpointOptions.UseFIPSEndpoint,
Endpoint: options.BaseEndpoint,
},
}, "ResolveEndpoint", middleware.After)
} | identifier_body | |
stocker.py | l.get('%s/%s' % (exchange, ticker))
except Exception as e:
print('Error Retrieving Data.')
print(e)
return
# Set the index to a column called Date
stock = stock.reset_index(level=0)
# Columns required for prophet
stock['ds'] = stock['Date']
if ('Adj. Close' not in stock.columns):
stock['Adj. Close'] = stock['Close']
stock['Adj. Open'] = stock['Open']
stock['y'] = stock['Adj. Close']
stock['Daily Change'] = stock['Adj. Close'] - stock['Adj. Open']
# Data assigned as class attribute
self.stock = stock.copy()
# Minimum and maximum date in range
self.min_date = min(stock['Date'])
self.max_date = max(stock['Date'])
| # Find max and min prices and dates on which they occurred
self.max_price = np.max(self.stock['y'])
self.min_price = np.min(self.stock['y'])
self.min_price_date = self.stock[self.stock['y'] == self.min_price]['Date']
self.min_price_date = self.min_price_date[self.min_price_date.index[0]]
self.max_price_date = self.stock[self.stock['y'] == self.max_price]['Date']
self.max_price_date = self.max_price_date[self.max_price_date.index[0]]
# The starting price (starting with the opening price)
self.starting_price = float(self.stock.loc[0, 'Adj. Open'])
# The most recent price
self.most_recent_price = float(self.stock.loc[self.stock.index[-1], 'y'])
# Whether or not to round dates
self.round_dates = True
# Number of years of data to train on
self.training_years = 3
# Prophet parameters
# Default prior from library
self.changepoint_prior_scale = 0.05
self.weekly_seasonality = False
self.daily_seasonality = False
self.monthly_seasonality = True
self.yearly_seasonality = True
self.changepoints = None
"""
Make sure start and end dates are in the range and can be
converted to pandas datetimes. Returns dates in the correct format
"""
def handle_dates(self, start_date, end_date):
# Default start and end date are the beginning and end of data
if start_date is None:
start_date = self.min_date
if end_date is None:
end_date = self.max_date
try:
# Convert to pandas datetime for indexing dataframe
start_date = pd.to_datetime(start_date)
end_date = pd.to_datetime(end_date)
except Exception as e:
print('Enter valid pandas date format.')
print(e)
return
valid_start = False
valid_end = False
# User will continue to enter dates until valid dates are met
while (not valid_start) & (not valid_end):
valid_end = True
valid_start = True
if end_date < start_date:
print('End Date must be later than start date.')
start_date = pd.to_datetime(input('Enter a new start date: '))
end_date= pd.to_datetime(input('Enter a new end date: '))
valid_end = False
valid_start = False
else:
if end_date > self.max_date:
print('End Date exceeds data range')
end_date= pd.to_datetime(input('Enter a new end date: '))
valid_end = False
if start_date < self.min_date:
print('Start Date is before date range')
start_date = pd.to_datetime(input('Enter a new start date: '))
valid_start = False
return start_date, end_date
"""
Return the dataframe trimmed to the specified range.
"""
def make_df(self, start_date, end_date, df=None):
# Default is to use the object stock data
if not df:
df = self.stock.copy()
start_date, end_date = self.handle_dates(start_date, end_date)
# keep track of whether the start and end dates are in the data
start_in = True
end_in = True
# If user wants to round dates (default behavior)
if self.round_dates:
# Record if start and end date are in df
if (start_date not in list(df['Date'])):
start_in = False
if (end_date not in list(df['Date'])):
end_in = False
# If both are not in dataframe, round both
if (not end_in) & (not start_in):
trim_df = df[(df['Date'] >= start_date) &
(df['Date'] <= end_date)]
else:
# If both are in dataframe, round neither
if (end_in) & (start_in):
trim_df = df[(df['Date'] >= start_date) &
(df['Date'] <= end_date)]
else:
# If only start is missing, round start
if (not start_in):
trim_df = df[(df['Date'] > start_date) &
(df['Date'] <= end_date)]
# If only end is imssing round end
elif (not end_in):
trim_df = df[(df['Date'] >= start_date) &
(df['Date'] < end_date)]
else:
valid_start = False
valid_end = False
while (not valid_start) & (not valid_end):
start_date, end_date = self.handle_dates(start_date, end_date)
# No round dates, if either data not in, print message and return
if (start_date in list(df['Date'])):
valid_start = True
if (end_date in list(df['Date'])):
valid_end = True
# Check to make sure dates are in the data
if (start_date not in list(df['Date'])):
print('Start Date not in data (either out of range or not a trading day.)')
start_date = pd.to_datetime(input(prompt='Enter a new start date: '))
elif (end_date not in list(df['Date'])):
print('End Date not in data (either out of range or not a trading day.)')
end_date = pd.to_datetime(input(prompt='Enter a new end date: ') )
# Dates are not rounded
trim_df = df[(df['Date'] >= start_date) &
(df['Date'] <= end_date.date)]
return trim_df
# Basic Historical Plots and Basic Statistics
def get_stats(self, start_date=None, end_date=None, stats=['Adj. Close']):
if start_date is None:
start_date = self.min_date
if end_date is None:
end_date = self.max_date
stock_plot = self.make_df(start_date, end_date)
ans = []
for i, stat in enumerate(stats):
#stat_min = min(stock_plot[stat])
#stat_max = max(stock_plot[stat])
#stat_avg = np.mean(stock_plot[stat])
#date_stat_min = stock_plot[stock_plot[stat] == stat_min]['Date']
#date_stat_min = date_stat_min[date_stat_min.index[0]]
#date_stat_max = stock_plot[stock_plot[stat] == stat_max]['Date']
#date_stat_max = date_stat_max[date_stat_max.index[0]]
#print('Maximum {} = {:.2f} on {}.'.format(stat, stat_max, date_stat_max))
#print('Minimum {} = {:.2f} on {}.'.format(stat, stat_min, date_stat_min))
#print('Current {} = {:.2f} on {}.\n'.format(stat, self.stock.loc[self.stock.index[-1], stat], self.max_date))
ans.append(self.stock.loc[self.stock.index[-1], stat])
return ans
# Method to linearly interpolate prices on the weekends
def resample(self, dataframe):
# Change the index and resample at daily level
dataframe = dataframe.set_index('ds')
dataframe = dataframe.resample('D')
# Reset the index and interpolate nan values
dataframe = dataframe.reset_index(level=0)
dataframe = dataframe.interpolate()
return dataframe
# Remove weekends from a dataframe
def remove_weekends(self, dataframe):
# Reset index to use ix
dataframe = dataframe.reset_index(drop=True)
weekends = []
# Find all of the weekends
for i, date in enumerate(dataframe['ds']):
if (date.weekday()) == 5 | (date.weekday() == 6):
weekends.append(i)
# Drop the weekends
dataframe = dataframe.drop(weekends, axis=0)
return dataframe
# Calculate and plot profit from buying and holding shares for specified date range
def buy_and_hold(self, start_date=None, end_date=None, nshares=1):
start_date, end_date = self.handle_dates(start_date, end_date)
# Find starting and ending price of stock
start_price = float(self.stock[self.stock['Date'] == start_date]['Adj. Open'])
end_price = float(self.stock.tail(1)['Adj. Close'])
# Make a profit dataframe and calculate profit column
profits = self.make_df(start_date, end_date)
profits['hold_profit'] = nshares * (profits['Adj. Close'] - start_price)
# Total profit
total_hold_profit = nshares | random_line_split | |
stocker.py | l.get('%s/%s' % (exchange, ticker))
except Exception as e:
print('Error Retrieving Data.')
print(e)
return
# Set the index to a column called Date
stock = stock.reset_index(level=0)
# Columns required for prophet
stock['ds'] = stock['Date']
if ('Adj. Close' not in stock.columns):
stock['Adj. Close'] = stock['Close']
stock['Adj. Open'] = stock['Open']
stock['y'] = stock['Adj. Close']
stock['Daily Change'] = stock['Adj. Close'] - stock['Adj. Open']
# Data assigned as class attribute
self.stock = stock.copy()
# Minimum and maximum date in range
self.min_date = min(stock['Date'])
self.max_date = max(stock['Date'])
# Find max and min prices and dates on which they occurred
self.max_price = np.max(self.stock['y'])
self.min_price = np.min(self.stock['y'])
self.min_price_date = self.stock[self.stock['y'] == self.min_price]['Date']
self.min_price_date = self.min_price_date[self.min_price_date.index[0]]
self.max_price_date = self.stock[self.stock['y'] == self.max_price]['Date']
self.max_price_date = self.max_price_date[self.max_price_date.index[0]]
# The starting price (starting with the opening price)
self.starting_price = float(self.stock.loc[0, 'Adj. Open'])
# The most recent price
self.most_recent_price = float(self.stock.loc[self.stock.index[-1], 'y'])
# Whether or not to round dates
self.round_dates = True
# Number of years of data to train on
self.training_years = 3
# Prophet parameters
# Default prior from library
self.changepoint_prior_scale = 0.05
self.weekly_seasonality = False
self.daily_seasonality = False
self.monthly_seasonality = True
self.yearly_seasonality = True
self.changepoints = None
"""
Make sure start and end dates are in the range and can be
converted to pandas datetimes. Returns dates in the correct format
"""
def handle_dates(self, start_date, end_date):
# Default start and end date are the beginning and end of data
if start_date is None:
start_date = self.min_date
if end_date is None:
end_date = self.max_date
try:
# Convert to pandas datetime for indexing dataframe
start_date = pd.to_datetime(start_date)
end_date = pd.to_datetime(end_date)
except Exception as e:
print('Enter valid pandas date format.')
print(e)
return
valid_start = False
valid_end = False
# User will continue to enter dates until valid dates are met
while (not valid_start) & (not valid_end):
valid_end = True
valid_start = True
if end_date < start_date:
print('End Date must be later than start date.')
start_date = pd.to_datetime(input('Enter a new start date: '))
end_date= pd.to_datetime(input('Enter a new end date: '))
valid_end = False
valid_start = False
else:
if end_date > self.max_date:
print('End Date exceeds data range')
end_date= pd.to_datetime(input('Enter a new end date: '))
valid_end = False
if start_date < self.min_date:
print('Start Date is before date range')
start_date = pd.to_datetime(input('Enter a new start date: '))
valid_start = False
return start_date, end_date
"""
Return the dataframe trimmed to the specified range.
"""
def make_df(self, start_date, end_date, df=None):
# Default is to use the object stock data
if not df:
df = self.stock.copy()
start_date, end_date = self.handle_dates(start_date, end_date)
# keep track of whether the start and end dates are in the data
start_in = True
end_in = True
# If user wants to round dates (default behavior)
if self.round_dates:
# Record if start and end date are in df
if (start_date not in list(df['Date'])):
start_in = False
if (end_date not in list(df['Date'])):
end_in = False
# If both are not in dataframe, round both
if (not end_in) & (not start_in):
trim_df = df[(df['Date'] >= start_date) &
(df['Date'] <= end_date)]
else:
# If both are in dataframe, round neither
|
else:
valid_start = False
valid_end = False
while (not valid_start) & (not valid_end):
start_date, end_date = self.handle_dates(start_date, end_date)
# No round dates, if either data not in, print message and return
if (start_date in list(df['Date'])):
valid_start = True
if (end_date in list(df['Date'])):
valid_end = True
# Check to make sure dates are in the data
if (start_date not in list(df['Date'])):
print('Start Date not in data (either out of range or not a trading day.)')
start_date = pd.to_datetime(input(prompt='Enter a new start date: '))
elif (end_date not in list(df['Date'])):
print('End Date not in data (either out of range or not a trading day.)')
end_date = pd.to_datetime(input(prompt='Enter a new end date: ') )
# Dates are not rounded
trim_df = df[(df['Date'] >= start_date) &
(df['Date'] <= end_date.date)]
return trim_df
# Basic Historical Plots and Basic Statistics
def get_stats(self, start_date=None, end_date=None, stats=['Adj. Close']):
if start_date is None:
start_date = self.min_date
if end_date is None:
end_date = self.max_date
stock_plot = self.make_df(start_date, end_date)
ans = []
for i, stat in enumerate(stats):
#stat_min = min(stock_plot[stat])
#stat_max = max(stock_plot[stat])
#stat_avg = np.mean(stock_plot[stat])
#date_stat_min = stock_plot[stock_plot[stat] == stat_min]['Date']
#date_stat_min = date_stat_min[date_stat_min.index[0]]
#date_stat_max = stock_plot[stock_plot[stat] == stat_max]['Date']
#date_stat_max = date_stat_max[date_stat_max.index[0]]
#print('Maximum {} = {:.2f} on {}.'.format(stat, stat_max, date_stat_max))
#print('Minimum {} = {:.2f} on {}.'.format(stat, stat_min, date_stat_min))
#print('Current {} = {:.2f} on {}.\n'.format(stat, self.stock.loc[self.stock.index[-1], stat], self.max_date))
ans.append(self.stock.loc[self.stock.index[-1], stat])
return ans
# Method to linearly interpolate prices on the weekends
def resample(self, dataframe):
# Change the index and resample at daily level
dataframe = dataframe.set_index('ds')
dataframe = dataframe.resample('D')
# Reset the index and interpolate nan values
dataframe = dataframe.reset_index(level=0)
dataframe = dataframe.interpolate()
return dataframe
# Remove weekends from a dataframe
def remove_weekends(self, dataframe):
# Reset index to use ix
dataframe = dataframe.reset_index(drop=True)
weekends = []
# Find all of the weekends
for i, date in enumerate(dataframe['ds']):
if (date.weekday()) == 5 | (date.weekday() == 6):
weekends.append(i)
# Drop the weekends
dataframe = dataframe.drop(weekends, axis=0)
return dataframe
# Calculate and plot profit from buying and holding shares for specified date range
def buy_and_hold(self, start_date=None, end_date=None, nshares=1):
start_date, end_date = self.handle_dates(start_date, end_date)
# Find starting and ending price of stock
start_price = float(self.stock[self.stock['Date'] == start_date]['Adj. Open'])
end_price = float(self.stock.tail(1)['Adj. Close'])
# Make a profit dataframe and calculate profit column
profits = self.make_df(start_date, end_date)
profits['hold_profit'] = nshares * (profits['Adj. Close'] - start_price)
# Total profit
total_hold_profit = nshares | if (end_in) & (start_in):
trim_df = df[(df['Date'] >= start_date) &
(df['Date'] <= end_date)]
else:
# If only start is missing, round start
if (not start_in):
trim_df = df[(df['Date'] > start_date) &
(df['Date'] <= end_date)]
# If only end is imssing round end
elif (not end_in):
trim_df = df[(df['Date'] >= start_date) &
(df['Date'] < end_date)] | conditional_block |
stocker.py | (self, ticker, exchange='WIKI'):
# Enforce capitalization
ticker = ticker.upper()
# Symbol is used for labeling plots
self.symbol = ticker
# Use Personal Api Key
quandl.ApiConfig.api_key = 'U-m-xTvejNiPHWNa8SzH'
# Retrieval the financial data
try:
stock = quandl.get('%s/%s' % (exchange, ticker))
except Exception as e:
print('Error Retrieving Data.')
print(e)
return
# Set the index to a column called Date
stock = stock.reset_index(level=0)
# Columns required for prophet
stock['ds'] = stock['Date']
if ('Adj. Close' not in stock.columns):
stock['Adj. Close'] = stock['Close']
stock['Adj. Open'] = stock['Open']
stock['y'] = stock['Adj. Close']
stock['Daily Change'] = stock['Adj. Close'] - stock['Adj. Open']
# Data assigned as class attribute
self.stock = stock.copy()
# Minimum and maximum date in range
self.min_date = min(stock['Date'])
self.max_date = max(stock['Date'])
# Find max and min prices and dates on which they occurred
self.max_price = np.max(self.stock['y'])
self.min_price = np.min(self.stock['y'])
self.min_price_date = self.stock[self.stock['y'] == self.min_price]['Date']
self.min_price_date = self.min_price_date[self.min_price_date.index[0]]
self.max_price_date = self.stock[self.stock['y'] == self.max_price]['Date']
self.max_price_date = self.max_price_date[self.max_price_date.index[0]]
# The starting price (starting with the opening price)
self.starting_price = float(self.stock.loc[0, 'Adj. Open'])
# The most recent price
self.most_recent_price = float(self.stock.loc[self.stock.index[-1], 'y'])
# Whether or not to round dates
self.round_dates = True
# Number of years of data to train on
self.training_years = 3
# Prophet parameters
# Default prior from library
self.changepoint_prior_scale = 0.05
self.weekly_seasonality = False
self.daily_seasonality = False
self.monthly_seasonality = True
self.yearly_seasonality = True
self.changepoints = None
"""
Make sure start and end dates are in the range and can be
converted to pandas datetimes. Returns dates in the correct format
"""
def handle_dates(self, start_date, end_date):
# Default start and end date are the beginning and end of data
if start_date is None:
start_date = self.min_date
if end_date is None:
end_date = self.max_date
try:
# Convert to pandas datetime for indexing dataframe
start_date = pd.to_datetime(start_date)
end_date = pd.to_datetime(end_date)
except Exception as e:
print('Enter valid pandas date format.')
print(e)
return
valid_start = False
valid_end = False
# User will continue to enter dates until valid dates are met
while (not valid_start) & (not valid_end):
valid_end = True
valid_start = True
if end_date < start_date:
print('End Date must be later than start date.')
start_date = pd.to_datetime(input('Enter a new start date: '))
end_date= pd.to_datetime(input('Enter a new end date: '))
valid_end = False
valid_start = False
else:
if end_date > self.max_date:
print('End Date exceeds data range')
end_date= pd.to_datetime(input('Enter a new end date: '))
valid_end = False
if start_date < self.min_date:
print('Start Date is before date range')
start_date = pd.to_datetime(input('Enter a new start date: '))
valid_start = False
return start_date, end_date
"""
Return the dataframe trimmed to the specified range.
"""
def make_df(self, start_date, end_date, df=None):
# Default is to use the object stock data
if not df:
df = self.stock.copy()
start_date, end_date = self.handle_dates(start_date, end_date)
# keep track of whether the start and end dates are in the data
start_in = True
end_in = True
# If user wants to round dates (default behavior)
if self.round_dates:
# Record if start and end date are in df
if (start_date not in list(df['Date'])):
start_in = False
if (end_date not in list(df['Date'])):
end_in = False
# If both are not in dataframe, round both
if (not end_in) & (not start_in):
trim_df = df[(df['Date'] >= start_date) &
(df['Date'] <= end_date)]
else:
# If both are in dataframe, round neither
if (end_in) & (start_in):
trim_df = df[(df['Date'] >= start_date) &
(df['Date'] <= end_date)]
else:
# If only start is missing, round start
if (not start_in):
trim_df = df[(df['Date'] > start_date) &
(df['Date'] <= end_date)]
# If only end is imssing round end
elif (not end_in):
trim_df = df[(df['Date'] >= start_date) &
(df['Date'] < end_date)]
else:
valid_start = False
valid_end = False
while (not valid_start) & (not valid_end):
start_date, end_date = self.handle_dates(start_date, end_date)
# No round dates, if either data not in, print message and return
if (start_date in list(df['Date'])):
valid_start = True
if (end_date in list(df['Date'])):
valid_end = True
# Check to make sure dates are in the data
if (start_date not in list(df['Date'])):
print('Start Date not in data (either out of range or not a trading day.)')
start_date = pd.to_datetime(input(prompt='Enter a new start date: '))
elif (end_date not in list(df['Date'])):
print('End Date not in data (either out of range or not a trading day.)')
end_date = pd.to_datetime(input(prompt='Enter a new end date: ') )
# Dates are not rounded
trim_df = df[(df['Date'] >= start_date) &
(df['Date'] <= end_date.date)]
return trim_df
# Basic Historical Plots and Basic Statistics
def get_stats(self, start_date=None, end_date=None, stats=['Adj. Close']):
if start_date is None:
start_date = self.min_date
if end_date is None:
end_date = self.max_date
stock_plot = self.make_df(start_date, end_date)
ans = []
for i, stat in enumerate(stats):
#stat_min = min(stock_plot[stat])
#stat_max = max(stock_plot[stat])
#stat_avg = np.mean(stock_plot[stat])
#date_stat_min = stock_plot[stock_plot[stat] == stat_min]['Date']
#date_stat_min = date_stat_min[date_stat_min.index[0]]
#date_stat_max = stock_plot[stock_plot[stat] == stat_max]['Date']
#date_stat_max = date_stat_max[date_stat_max.index[0]]
#print('Maximum {} = {:.2f} on {}.'.format(stat, stat_max, date_stat_max))
#print('Minimum {} = {:.2f} on {}.'.format(stat, stat_min, date_stat_min))
#print('Current {} = {:.2f} on {}.\n'.format(stat, self.stock.loc[self.stock.index[-1], stat], self.max_date))
ans.append(self.stock.loc[self.stock.index[-1], stat])
return ans
# Method to linearly interpolate prices on the weekends
def resample(self, dataframe):
# Change the index and resample at daily level
dataframe = dataframe.set_index('ds')
dataframe = dataframe.resample('D')
# Reset the index and interpolate nan values
dataframe = dataframe.reset_index(level=0)
dataframe = dataframe.interpolate()
return dataframe
# Remove weekends from a dataframe
def remove_weekends(self, dataframe):
# Reset index to use ix
dataframe = dataframe.reset_index(drop=True)
weekends = []
# Find all of the weekends
for i, date in enumerate(dataframe['ds']):
if (date.weekday()) == 5 | (date.weekday() == 6):
weekends.append(i)
# Drop the weekends
dataframe = dataframe.drop(weekends, axis=0)
return dataframe
# Calculate and plot profit from buying and holding shares for specified date range
def buy_and_hold(self, start_date=None, end_date=None, nshares=1):
start_date, end_date = self.handle_dates(start_date, end_date)
# Find starting and ending price of stock
start_price = | __init__ | identifier_name | |
stocker.py | .get('%s/%s' % (exchange, ticker))
except Exception as e:
print('Error Retrieving Data.')
print(e)
return
# Set the index to a column called Date
stock = stock.reset_index(level=0)
# Columns required for prophet
stock['ds'] = stock['Date']
if ('Adj. Close' not in stock.columns):
stock['Adj. Close'] = stock['Close']
stock['Adj. Open'] = stock['Open']
stock['y'] = stock['Adj. Close']
stock['Daily Change'] = stock['Adj. Close'] - stock['Adj. Open']
# Data assigned as class attribute
self.stock = stock.copy()
# Minimum and maximum date in range
self.min_date = min(stock['Date'])
self.max_date = max(stock['Date'])
# Find max and min prices and dates on which they occurred
self.max_price = np.max(self.stock['y'])
self.min_price = np.min(self.stock['y'])
self.min_price_date = self.stock[self.stock['y'] == self.min_price]['Date']
self.min_price_date = self.min_price_date[self.min_price_date.index[0]]
self.max_price_date = self.stock[self.stock['y'] == self.max_price]['Date']
self.max_price_date = self.max_price_date[self.max_price_date.index[0]]
# The starting price (starting with the opening price)
self.starting_price = float(self.stock.loc[0, 'Adj. Open'])
# The most recent price
self.most_recent_price = float(self.stock.loc[self.stock.index[-1], 'y'])
# Whether or not to round dates
self.round_dates = True
# Number of years of data to train on
self.training_years = 3
# Prophet parameters
# Default prior from library
self.changepoint_prior_scale = 0.05
self.weekly_seasonality = False
self.daily_seasonality = False
self.monthly_seasonality = True
self.yearly_seasonality = True
self.changepoints = None
"""
Make sure start and end dates are in the range and can be
converted to pandas datetimes. Returns dates in the correct format
"""
def handle_dates(self, start_date, end_date):
# Default start and end date are the beginning and end of data
if start_date is None:
start_date = self.min_date
if end_date is None:
end_date = self.max_date
try:
# Convert to pandas datetime for indexing dataframe
start_date = pd.to_datetime(start_date)
end_date = pd.to_datetime(end_date)
except Exception as e:
print('Enter valid pandas date format.')
print(e)
return
valid_start = False
valid_end = False
# User will continue to enter dates until valid dates are met
while (not valid_start) & (not valid_end):
valid_end = True
valid_start = True
if end_date < start_date:
print('End Date must be later than start date.')
start_date = pd.to_datetime(input('Enter a new start date: '))
end_date= pd.to_datetime(input('Enter a new end date: '))
valid_end = False
valid_start = False
else:
if end_date > self.max_date:
print('End Date exceeds data range')
end_date= pd.to_datetime(input('Enter a new end date: '))
valid_end = False
if start_date < self.min_date:
print('Start Date is before date range')
start_date = pd.to_datetime(input('Enter a new start date: '))
valid_start = False
return start_date, end_date
"""
Return the dataframe trimmed to the specified range.
"""
def make_df(self, start_date, end_date, df=None):
# Default is to use the object stock data
if not df:
df = self.stock.copy()
start_date, end_date = self.handle_dates(start_date, end_date)
# keep track of whether the start and end dates are in the data
start_in = True
end_in = True
# If user wants to round dates (default behavior)
if self.round_dates:
# Record if start and end date are in df
if (start_date not in list(df['Date'])):
start_in = False
if (end_date not in list(df['Date'])):
end_in = False
# If both are not in dataframe, round both
if (not end_in) & (not start_in):
trim_df = df[(df['Date'] >= start_date) &
(df['Date'] <= end_date)]
else:
# If both are in dataframe, round neither
if (end_in) & (start_in):
trim_df = df[(df['Date'] >= start_date) &
(df['Date'] <= end_date)]
else:
# If only start is missing, round start
if (not start_in):
trim_df = df[(df['Date'] > start_date) &
(df['Date'] <= end_date)]
# If only end is imssing round end
elif (not end_in):
trim_df = df[(df['Date'] >= start_date) &
(df['Date'] < end_date)]
else:
valid_start = False
valid_end = False
while (not valid_start) & (not valid_end):
start_date, end_date = self.handle_dates(start_date, end_date)
# No round dates, if either data not in, print message and return
if (start_date in list(df['Date'])):
valid_start = True
if (end_date in list(df['Date'])):
valid_end = True
# Check to make sure dates are in the data
if (start_date not in list(df['Date'])):
print('Start Date not in data (either out of range or not a trading day.)')
start_date = pd.to_datetime(input(prompt='Enter a new start date: '))
elif (end_date not in list(df['Date'])):
print('End Date not in data (either out of range or not a trading day.)')
end_date = pd.to_datetime(input(prompt='Enter a new end date: ') )
# Dates are not rounded
trim_df = df[(df['Date'] >= start_date) &
(df['Date'] <= end_date.date)]
return trim_df
# Basic Historical Plots and Basic Statistics
def get_stats(self, start_date=None, end_date=None, stats=['Adj. Close']):
if start_date is None:
start_date = self.min_date
if end_date is None:
end_date = self.max_date
stock_plot = self.make_df(start_date, end_date)
ans = []
for i, stat in enumerate(stats):
#stat_min = min(stock_plot[stat])
#stat_max = max(stock_plot[stat])
#stat_avg = np.mean(stock_plot[stat])
#date_stat_min = stock_plot[stock_plot[stat] == stat_min]['Date']
#date_stat_min = date_stat_min[date_stat_min.index[0]]
#date_stat_max = stock_plot[stock_plot[stat] == stat_max]['Date']
#date_stat_max = date_stat_max[date_stat_max.index[0]]
#print('Maximum {} = {:.2f} on {}.'.format(stat, stat_max, date_stat_max))
#print('Minimum {} = {:.2f} on {}.'.format(stat, stat_min, date_stat_min))
#print('Current {} = {:.2f} on {}.\n'.format(stat, self.stock.loc[self.stock.index[-1], stat], self.max_date))
ans.append(self.stock.loc[self.stock.index[-1], stat])
return ans
# Method to linearly interpolate prices on the weekends
def resample(self, dataframe):
# Change the index and resample at daily level
dataframe = dataframe.set_index('ds')
dataframe = dataframe.resample('D')
# Reset the index and interpolate nan values
dataframe = dataframe.reset_index(level=0)
dataframe = dataframe.interpolate()
return dataframe
# Remove weekends from a dataframe
def remove_weekends(self, dataframe):
# Reset index to use ix
|
# Calculate and plot profit from buying and holding shares for specified date range
def buy_and_hold(self, start_date=None, end_date=None, nshares=1):
start_date, end_date = self.handle_dates(start_date, end_date)
# Find starting and ending price of stock
start_price = float(self.stock[self.stock['Date'] == start_date]['Adj. Open'])
end_price = float(self.stock.tail(1)['Adj. Close'])
# Make a profit dataframe and calculate profit column
profits = self.make_df(start_date, end_date)
profits['hold_profit'] = nshares * (profits['Adj. Close'] - start_price)
# Total profit
total_hold_profit = nshares | dataframe = dataframe.reset_index(drop=True)
weekends = []
# Find all of the weekends
for i, date in enumerate(dataframe['ds']):
if (date.weekday()) == 5 | (date.weekday() == 6):
weekends.append(i)
# Drop the weekends
dataframe = dataframe.drop(weekends, axis=0)
return dataframe | identifier_body |
main.go | .Flags() | log.Lshortfile)
flag.Parse()
var r RiceLa
if err := r.run(); err != nil {
log.Fatalf("%+v", err)
}
}
type ClimateState struct {
InsideTemp float64 `json:"inside_temp"`
OutsideTemp float64 `json:"outside_temp"`
DriverTempSetting float64 `json:"driver_temp_setting"`
PassengerTempSetting float64 `json:"passenger_temp_setting"`
LeftTempDirection float64 `json:"left_temp_direction"`
RightTempDirection float64 `json:"right_temp_direction"`
IsAutoConditioningOn bool `json:"is_auto_conditioning_on"`
IsFrontDefrosterOn interface{} `json:"is_front_defroster_on"`
IsRearDefrosterOn bool `json:"is_rear_defroster_on"`
FanStatus interface{} `json:"fan_status"`
IsClimateOn bool `json:"is_climate_on"`
MinAvailTemp float64 `json:"min_avail_temp"`
MaxAvailTemp float64 `json:"max_avail_temp"`
SeatHeaterLeft int `json:"seat_heater_left"`
SeatHeaterRight int `json:"seat_heater_right"`
SeatHeaterRearLeft int `json:"seat_heater_rear_left"`
SeatHeaterRearRight int `json:"seat_heater_rear_right"`
SeatHeaterRearCenter int `json:"seat_heater_rear_center"` | SeatHeaterRearLeftBack int `json:"seat_heater_rear_left_back"`
SmartPreconditioning bool `json:"smart_preconditioning"`
}
type VehicleData struct {
UserID int64 `json:"user_id"`
VehicleID int64 `json:"vehicle_id"`
VIN string `json:"vin"`
State string `json:"online"`
ChargeState tesla.ChargeState `json:"charge_state"`
VehicleState tesla.VehicleState `json:"vehicle_state"`
ClimateState ClimateState `json:"climate_state"`
DriveState tesla.DriveState `json:"drive_state"`
}
type VehicleDataResponse struct {
Response VehicleData `json:"response"`
}
func (r *RiceLa) getVehicleData(ctx context.Context, v *tesla.Vehicle) (*VehicleData, error) {
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
log.Printf("Polling %s: %v", v.DisplayName, v.ID)
req, err := http.NewRequestWithContext(ctx, "GET", tesla.BaseURL+"/vehicles/"+strconv.FormatInt(v.ID, 10)+"/vehicle_data", nil)
if err != nil {
return nil, err
}
req.Header.Set("Authorization", "Bearer "+r.client.Token.AccessToken)
req.Header.Set("Accept", "application/json")
req.Header.Set("Content-Type", "application/json")
res, err := r.client.HTTP.Do(req)
if err != nil {
return nil, err
}
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
return nil, errors.Errorf("%s: %s", res.Status, body)
}
out := map[string]interface{}{}
if err := json.Unmarshal(body, &out); err != nil {
return nil, err
}
spew.Dump(out)
count := r.processCounter("tesla", out["response"])
log.Printf("updated %d counters", count)
var resp VehicleDataResponse
if err := json.Unmarshal(body, &resp); err != nil {
return nil, errors.Wrapf(err, "unmarshalling vehicle_data")
}
return &resp.Response, nil
}
var counterStrs = map[string]float64{
"--": -1,
"NONE": -1,
"PRESENT": 1,
"ENGAGED": 1,
"DISENGAGED": 0,
"LATCHED": 1,
"NOMINAL": 1,
"FAULT": 0,
"ERROR": 0,
"DRIVE": 2,
"PARKED": 1,
"REVERSE": 3,
"NEUTRAL": 4,
"On": 1,
"Off": 0,
"Stopped": 0,
"IDLE": 0,
"ACTIVE": 1,
"on": 1,
"off": 0,
"yes": 1,
"no": 0,
"": -1,
}
func (r *RiceLa) processCounter(key string, v interface{}) int {
switch v := v.(type) {
case map[string]interface{}:
count := 0
for k, v := range v {
key := key + ":" + k
count += r.processCounter(key, v)
}
return count
case float64:
r.setCounter(key, v)
return 1
case int:
r.setCounter(key, float64(v))
return 1
case int64:
r.setCounter(key, float64(v))
return 1
case int32:
r.setCounter(key, float64(v))
return 1
case float32:
r.setCounter(key, float64(v))
return 1
case bool:
if v {
r.setCounter(key, 1)
} else {
r.setCounter(key, 0)
}
return 1
case string:
f, err := strconv.ParseFloat(v, 64)
if err == nil {
r.setCounter(key, f)
return 1
}
f, ok := counterStrs[v]
if ok {
r.setCounter(key, f)
return 1
}
return 0
default:
if v == nil {
r.setCounter(key, 0)
return 1
}
return 0
}
}
func (r *RiceLa) setCounter(key string, v float64) {
r.mu.Lock()
defer r.mu.Unlock()
g, ok := r.mu.gauges[key]
if !ok {
g = promauto.NewGauge(prometheus.GaugeOpts{Name: key})
r.mu.gauges[key] = g
}
g.Set(v)
}
type RiceLa struct {
client *tesla.Client
chargepoint *chargepoint.Client
mu struct {
sync.Mutex
charging bool
gauges map[string]prometheus.Gauge
}
}
func pollTime(data VehicleData) time.Duration {
if !data.VehicleState.Locked && (data.DriveState.ShiftState == nil || data.DriveState.ShiftState == "P" || data.DriveState.ShiftState == "R") && !data.ChargeState.ChargePortDoorOpen {
return *activePollTime
}
if data.DriveState.ShiftState == "D" || data.DriveState.ShiftState == "R" || data.DriveState.ShiftState == "N" || data.ClimateState.IsClimateOn {
return *drivePollTime
}
return *standbyPollTime
}
func (r *RiceLa) startNearbyCharging(ctx context.Context, data tesla.DriveState) error {
log.Println("starting charging")
latlng := s2.LatLngFromDegrees(data.Latitude, data.Longitude)
for _, charger := range knownChargers {
if charger.DistanceInMeters(latlng) < 20 {
return charger.Start(ctx, r)
}
}
return nil
}
func (r *RiceLa) stopCharging(ctx context.Context) error {
log.Println("stop charging")
userStatus, err := r.chargepoint.UserStatus(ctx)
log.Printf("Charge Point user status %+v", userStatus)
if err != nil {
return err
}
for _, station := range userStatus.Charging.Stations {
if err := r.chargepoint.StopSession(ctx, userStatus.Charging.SessionID, station.DeviceID); err != nil {
return err
}
}
return nil
}
func (r *RiceLa) setCharging(charging bool) {
r.mu.Lock()
defer r.mu.Unlock()
r.mu.charging = charging
}
func (r *RiceLa) charging() bool {
r.mu.Lock()
defer r.mu.Unlock()
return r.mu.charging
}
func (r *RiceLa) monitorVehicle(ctx context.Context, v *tesla.Vehicle) error {
var data, prevData *VehicleData
for {
b := backoff.NewExponentialBackOff()
b.MaxElapsedTime = 1 * time.Minute
if err := backoff.Retry(func() error {
var err error
data, err = r.getVehicleData(ctx, v)
if err != nil {
log.Printf("got error polling (likely retrying) %+v", err)
}
return err
}, b); err != nil {
return err
}
pilotCurrent, _ := | SeatHeaterRearRightBack int `json:"seat_heater_rear_right_back"` | random_line_split |
main.go | .Flags() | log.Lshortfile)
flag.Parse()
var r RiceLa
if err := r.run(); err != nil {
log.Fatalf("%+v", err)
}
}
type ClimateState struct {
InsideTemp float64 `json:"inside_temp"`
OutsideTemp float64 `json:"outside_temp"`
DriverTempSetting float64 `json:"driver_temp_setting"`
PassengerTempSetting float64 `json:"passenger_temp_setting"`
LeftTempDirection float64 `json:"left_temp_direction"`
RightTempDirection float64 `json:"right_temp_direction"`
IsAutoConditioningOn bool `json:"is_auto_conditioning_on"`
IsFrontDefrosterOn interface{} `json:"is_front_defroster_on"`
IsRearDefrosterOn bool `json:"is_rear_defroster_on"`
FanStatus interface{} `json:"fan_status"`
IsClimateOn bool `json:"is_climate_on"`
MinAvailTemp float64 `json:"min_avail_temp"`
MaxAvailTemp float64 `json:"max_avail_temp"`
SeatHeaterLeft int `json:"seat_heater_left"`
SeatHeaterRight int `json:"seat_heater_right"`
SeatHeaterRearLeft int `json:"seat_heater_rear_left"`
SeatHeaterRearRight int `json:"seat_heater_rear_right"`
SeatHeaterRearCenter int `json:"seat_heater_rear_center"`
SeatHeaterRearRightBack int `json:"seat_heater_rear_right_back"`
SeatHeaterRearLeftBack int `json:"seat_heater_rear_left_back"`
SmartPreconditioning bool `json:"smart_preconditioning"`
}
type VehicleData struct {
UserID int64 `json:"user_id"`
VehicleID int64 `json:"vehicle_id"`
VIN string `json:"vin"`
State string `json:"online"`
ChargeState tesla.ChargeState `json:"charge_state"`
VehicleState tesla.VehicleState `json:"vehicle_state"`
ClimateState ClimateState `json:"climate_state"`
DriveState tesla.DriveState `json:"drive_state"`
}
type VehicleDataResponse struct {
Response VehicleData `json:"response"`
}
func (r *RiceLa) getVehicleData(ctx context.Context, v *tesla.Vehicle) (*VehicleData, error) {
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
log.Printf("Polling %s: %v", v.DisplayName, v.ID)
req, err := http.NewRequestWithContext(ctx, "GET", tesla.BaseURL+"/vehicles/"+strconv.FormatInt(v.ID, 10)+"/vehicle_data", nil)
if err != nil {
return nil, err
}
req.Header.Set("Authorization", "Bearer "+r.client.Token.AccessToken)
req.Header.Set("Accept", "application/json")
req.Header.Set("Content-Type", "application/json")
res, err := r.client.HTTP.Do(req)
if err != nil {
return nil, err
}
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
return nil, errors.Errorf("%s: %s", res.Status, body)
}
out := map[string]interface{}{}
if err := json.Unmarshal(body, &out); err != nil {
return nil, err
}
spew.Dump(out)
count := r.processCounter("tesla", out["response"])
log.Printf("updated %d counters", count)
var resp VehicleDataResponse
if err := json.Unmarshal(body, &resp); err != nil {
return nil, errors.Wrapf(err, "unmarshalling vehicle_data")
}
return &resp.Response, nil
}
var counterStrs = map[string]float64{
"--": -1,
"NONE": -1,
"PRESENT": 1,
"ENGAGED": 1,
"DISENGAGED": 0,
"LATCHED": 1,
"NOMINAL": 1,
"FAULT": 0,
"ERROR": 0,
"DRIVE": 2,
"PARKED": 1,
"REVERSE": 3,
"NEUTRAL": 4,
"On": 1,
"Off": 0,
"Stopped": 0,
"IDLE": 0,
"ACTIVE": 1,
"on": 1,
"off": 0,
"yes": 1,
"no": 0,
"": -1,
}
func (r *RiceLa) processCounter(key string, v interface{}) int {
switch v := v.(type) {
case map[string]interface{}:
count := 0
for k, v := range v {
key := key + ":" + k
count += r.processCounter(key, v)
}
return count
case float64:
r.setCounter(key, v)
return 1
case int:
r.setCounter(key, float64(v))
return 1
case int64:
r.setCounter(key, float64(v))
return 1
case int32:
r.setCounter(key, float64(v))
return 1
case float32:
r.setCounter(key, float64(v))
return 1
case bool:
if v {
r.setCounter(key, 1)
} else {
r.setCounter(key, 0)
}
return 1
case string:
f, err := strconv.ParseFloat(v, 64)
if err == nil {
r.setCounter(key, f)
return 1
}
f, ok := counterStrs[v]
if ok {
r.setCounter(key, f)
return 1
}
return 0
default:
if v == nil {
r.setCounter(key, 0)
return 1
}
return 0
}
}
func (r *RiceLa) setCounter(key string, v float64) {
r.mu.Lock()
defer r.mu.Unlock()
g, ok := r.mu.gauges[key]
if !ok {
g = promauto.NewGauge(prometheus.GaugeOpts{Name: key})
r.mu.gauges[key] = g
}
g.Set(v)
}
type RiceLa struct {
client *tesla.Client
chargepoint *chargepoint.Client
mu struct {
sync.Mutex
charging bool
gauges map[string]prometheus.Gauge
}
}
func pollTime(data VehicleData) time.Duration {
if !data.VehicleState.Locked && (data.DriveState.ShiftState == nil || data.DriveState.ShiftState == "P" || data.DriveState.ShiftState == "R") && !data.ChargeState.ChargePortDoorOpen {
return *activePollTime
}
if data.DriveState.ShiftState == "D" || data.DriveState.ShiftState == "R" || data.DriveState.ShiftState == "N" || data.ClimateState.IsClimateOn {
return *drivePollTime
}
return *standbyPollTime
}
func (r *RiceLa) startNearbyCharging(ctx context.Context, data tesla.DriveState) error {
log.Println("starting charging")
latlng := s2.LatLngFromDegrees(data.Latitude, data.Longitude)
for _, charger := range knownChargers {
if charger.DistanceInMeters(latlng) < 20 {
return charger.Start(ctx, r)
}
}
return nil
}
func (r *RiceLa) stopCharging(ctx context.Context) error {
log.Println("stop charging")
userStatus, err := r.chargepoint.UserStatus(ctx)
log.Printf("Charge Point user status %+v", userStatus)
if err != nil {
return err
}
for _, station := range userStatus.Charging.Stations {
if err := r.chargepoint.StopSession(ctx, userStatus.Charging.SessionID, station.DeviceID); err != nil {
return err
}
}
return nil
}
func (r *RiceLa) setCharging(charging bool) |
func (r *RiceLa) charging() bool {
r.mu.Lock()
defer r.mu.Unlock()
return r.mu.charging
}
func (r *RiceLa) monitorVehicle(ctx context.Context, v *tesla.Vehicle) error {
var data, prevData *VehicleData
for {
b := backoff.NewExponentialBackOff()
b.MaxElapsedTime = 1 * time.Minute
if err := backoff.Retry(func() error {
var err error
data, err = r.getVehicleData(ctx, v)
if err != nil {
log.Printf("got error polling (likely retrying) %+v", err)
}
return err
}, b); err != nil {
return err
}
pilotCurrent, | {
r.mu.Lock()
defer r.mu.Unlock()
r.mu.charging = charging
} | identifier_body |
main.go | .Flags() | log.Lshortfile)
flag.Parse()
var r RiceLa
if err := r.run(); err != nil {
log.Fatalf("%+v", err)
}
}
type ClimateState struct {
InsideTemp float64 `json:"inside_temp"`
OutsideTemp float64 `json:"outside_temp"`
DriverTempSetting float64 `json:"driver_temp_setting"`
PassengerTempSetting float64 `json:"passenger_temp_setting"`
LeftTempDirection float64 `json:"left_temp_direction"`
RightTempDirection float64 `json:"right_temp_direction"`
IsAutoConditioningOn bool `json:"is_auto_conditioning_on"`
IsFrontDefrosterOn interface{} `json:"is_front_defroster_on"`
IsRearDefrosterOn bool `json:"is_rear_defroster_on"`
FanStatus interface{} `json:"fan_status"`
IsClimateOn bool `json:"is_climate_on"`
MinAvailTemp float64 `json:"min_avail_temp"`
MaxAvailTemp float64 `json:"max_avail_temp"`
SeatHeaterLeft int `json:"seat_heater_left"`
SeatHeaterRight int `json:"seat_heater_right"`
SeatHeaterRearLeft int `json:"seat_heater_rear_left"`
SeatHeaterRearRight int `json:"seat_heater_rear_right"`
SeatHeaterRearCenter int `json:"seat_heater_rear_center"`
SeatHeaterRearRightBack int `json:"seat_heater_rear_right_back"`
SeatHeaterRearLeftBack int `json:"seat_heater_rear_left_back"`
SmartPreconditioning bool `json:"smart_preconditioning"`
}
type VehicleData struct {
UserID int64 `json:"user_id"`
VehicleID int64 `json:"vehicle_id"`
VIN string `json:"vin"`
State string `json:"online"`
ChargeState tesla.ChargeState `json:"charge_state"`
VehicleState tesla.VehicleState `json:"vehicle_state"`
ClimateState ClimateState `json:"climate_state"`
DriveState tesla.DriveState `json:"drive_state"`
}
type VehicleDataResponse struct {
Response VehicleData `json:"response"`
}
func (r *RiceLa) getVehicleData(ctx context.Context, v *tesla.Vehicle) (*VehicleData, error) {
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
log.Printf("Polling %s: %v", v.DisplayName, v.ID)
req, err := http.NewRequestWithContext(ctx, "GET", tesla.BaseURL+"/vehicles/"+strconv.FormatInt(v.ID, 10)+"/vehicle_data", nil)
if err != nil {
return nil, err
}
req.Header.Set("Authorization", "Bearer "+r.client.Token.AccessToken)
req.Header.Set("Accept", "application/json")
req.Header.Set("Content-Type", "application/json")
res, err := r.client.HTTP.Do(req)
if err != nil {
return nil, err
}
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
return nil, errors.Errorf("%s: %s", res.Status, body)
}
out := map[string]interface{}{}
if err := json.Unmarshal(body, &out); err != nil {
return nil, err
}
spew.Dump(out)
count := r.processCounter("tesla", out["response"])
log.Printf("updated %d counters", count)
var resp VehicleDataResponse
if err := json.Unmarshal(body, &resp); err != nil {
return nil, errors.Wrapf(err, "unmarshalling vehicle_data")
}
return &resp.Response, nil
}
var counterStrs = map[string]float64{
"--": -1,
"NONE": -1,
"PRESENT": 1,
"ENGAGED": 1,
"DISENGAGED": 0,
"LATCHED": 1,
"NOMINAL": 1,
"FAULT": 0,
"ERROR": 0,
"DRIVE": 2,
"PARKED": 1,
"REVERSE": 3,
"NEUTRAL": 4,
"On": 1,
"Off": 0,
"Stopped": 0,
"IDLE": 0,
"ACTIVE": 1,
"on": 1,
"off": 0,
"yes": 1,
"no": 0,
"": -1,
}
func (r *RiceLa) processCounter(key string, v interface{}) int {
switch v := v.(type) {
case map[string]interface{}:
count := 0
for k, v := range v |
return count
case float64:
r.setCounter(key, v)
return 1
case int:
r.setCounter(key, float64(v))
return 1
case int64:
r.setCounter(key, float64(v))
return 1
case int32:
r.setCounter(key, float64(v))
return 1
case float32:
r.setCounter(key, float64(v))
return 1
case bool:
if v {
r.setCounter(key, 1)
} else {
r.setCounter(key, 0)
}
return 1
case string:
f, err := strconv.ParseFloat(v, 64)
if err == nil {
r.setCounter(key, f)
return 1
}
f, ok := counterStrs[v]
if ok {
r.setCounter(key, f)
return 1
}
return 0
default:
if v == nil {
r.setCounter(key, 0)
return 1
}
return 0
}
}
func (r *RiceLa) setCounter(key string, v float64) {
r.mu.Lock()
defer r.mu.Unlock()
g, ok := r.mu.gauges[key]
if !ok {
g = promauto.NewGauge(prometheus.GaugeOpts{Name: key})
r.mu.gauges[key] = g
}
g.Set(v)
}
type RiceLa struct {
client *tesla.Client
chargepoint *chargepoint.Client
mu struct {
sync.Mutex
charging bool
gauges map[string]prometheus.Gauge
}
}
func pollTime(data VehicleData) time.Duration {
if !data.VehicleState.Locked && (data.DriveState.ShiftState == nil || data.DriveState.ShiftState == "P" || data.DriveState.ShiftState == "R") && !data.ChargeState.ChargePortDoorOpen {
return *activePollTime
}
if data.DriveState.ShiftState == "D" || data.DriveState.ShiftState == "R" || data.DriveState.ShiftState == "N" || data.ClimateState.IsClimateOn {
return *drivePollTime
}
return *standbyPollTime
}
func (r *RiceLa) startNearbyCharging(ctx context.Context, data tesla.DriveState) error {
log.Println("starting charging")
latlng := s2.LatLngFromDegrees(data.Latitude, data.Longitude)
for _, charger := range knownChargers {
if charger.DistanceInMeters(latlng) < 20 {
return charger.Start(ctx, r)
}
}
return nil
}
func (r *RiceLa) stopCharging(ctx context.Context) error {
log.Println("stop charging")
userStatus, err := r.chargepoint.UserStatus(ctx)
log.Printf("Charge Point user status %+v", userStatus)
if err != nil {
return err
}
for _, station := range userStatus.Charging.Stations {
if err := r.chargepoint.StopSession(ctx, userStatus.Charging.SessionID, station.DeviceID); err != nil {
return err
}
}
return nil
}
func (r *RiceLa) setCharging(charging bool) {
r.mu.Lock()
defer r.mu.Unlock()
r.mu.charging = charging
}
func (r *RiceLa) charging() bool {
r.mu.Lock()
defer r.mu.Unlock()
return r.mu.charging
}
func (r *RiceLa) monitorVehicle(ctx context.Context, v *tesla.Vehicle) error {
var data, prevData *VehicleData
for {
b := backoff.NewExponentialBackOff()
b.MaxElapsedTime = 1 * time.Minute
if err := backoff.Retry(func() error {
var err error
data, err = r.getVehicleData(ctx, v)
if err != nil {
log.Printf("got error polling (likely retrying) %+v", err)
}
return err
}, b); err != nil {
return err
}
pilotCurrent, | {
key := key + ":" + k
count += r.processCounter(key, v)
} | conditional_block |
main.go | .Flags() | log.Lshortfile)
flag.Parse()
var r RiceLa
if err := r.run(); err != nil {
log.Fatalf("%+v", err)
}
}
type ClimateState struct {
InsideTemp float64 `json:"inside_temp"`
OutsideTemp float64 `json:"outside_temp"`
DriverTempSetting float64 `json:"driver_temp_setting"`
PassengerTempSetting float64 `json:"passenger_temp_setting"`
LeftTempDirection float64 `json:"left_temp_direction"`
RightTempDirection float64 `json:"right_temp_direction"`
IsAutoConditioningOn bool `json:"is_auto_conditioning_on"`
IsFrontDefrosterOn interface{} `json:"is_front_defroster_on"`
IsRearDefrosterOn bool `json:"is_rear_defroster_on"`
FanStatus interface{} `json:"fan_status"`
IsClimateOn bool `json:"is_climate_on"`
MinAvailTemp float64 `json:"min_avail_temp"`
MaxAvailTemp float64 `json:"max_avail_temp"`
SeatHeaterLeft int `json:"seat_heater_left"`
SeatHeaterRight int `json:"seat_heater_right"`
SeatHeaterRearLeft int `json:"seat_heater_rear_left"`
SeatHeaterRearRight int `json:"seat_heater_rear_right"`
SeatHeaterRearCenter int `json:"seat_heater_rear_center"`
SeatHeaterRearRightBack int `json:"seat_heater_rear_right_back"`
SeatHeaterRearLeftBack int `json:"seat_heater_rear_left_back"`
SmartPreconditioning bool `json:"smart_preconditioning"`
}
type VehicleData struct {
UserID int64 `json:"user_id"`
VehicleID int64 `json:"vehicle_id"`
VIN string `json:"vin"`
State string `json:"online"`
ChargeState tesla.ChargeState `json:"charge_state"`
VehicleState tesla.VehicleState `json:"vehicle_state"`
ClimateState ClimateState `json:"climate_state"`
DriveState tesla.DriveState `json:"drive_state"`
}
type VehicleDataResponse struct {
Response VehicleData `json:"response"`
}
func (r *RiceLa) getVehicleData(ctx context.Context, v *tesla.Vehicle) (*VehicleData, error) {
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
log.Printf("Polling %s: %v", v.DisplayName, v.ID)
req, err := http.NewRequestWithContext(ctx, "GET", tesla.BaseURL+"/vehicles/"+strconv.FormatInt(v.ID, 10)+"/vehicle_data", nil)
if err != nil {
return nil, err
}
req.Header.Set("Authorization", "Bearer "+r.client.Token.AccessToken)
req.Header.Set("Accept", "application/json")
req.Header.Set("Content-Type", "application/json")
res, err := r.client.HTTP.Do(req)
if err != nil {
return nil, err
}
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
return nil, errors.Errorf("%s: %s", res.Status, body)
}
out := map[string]interface{}{}
if err := json.Unmarshal(body, &out); err != nil {
return nil, err
}
spew.Dump(out)
count := r.processCounter("tesla", out["response"])
log.Printf("updated %d counters", count)
var resp VehicleDataResponse
if err := json.Unmarshal(body, &resp); err != nil {
return nil, errors.Wrapf(err, "unmarshalling vehicle_data")
}
return &resp.Response, nil
}
var counterStrs = map[string]float64{
"--": -1,
"NONE": -1,
"PRESENT": 1,
"ENGAGED": 1,
"DISENGAGED": 0,
"LATCHED": 1,
"NOMINAL": 1,
"FAULT": 0,
"ERROR": 0,
"DRIVE": 2,
"PARKED": 1,
"REVERSE": 3,
"NEUTRAL": 4,
"On": 1,
"Off": 0,
"Stopped": 0,
"IDLE": 0,
"ACTIVE": 1,
"on": 1,
"off": 0,
"yes": 1,
"no": 0,
"": -1,
}
func (r *RiceLa) | (key string, v interface{}) int {
switch v := v.(type) {
case map[string]interface{}:
count := 0
for k, v := range v {
key := key + ":" + k
count += r.processCounter(key, v)
}
return count
case float64:
r.setCounter(key, v)
return 1
case int:
r.setCounter(key, float64(v))
return 1
case int64:
r.setCounter(key, float64(v))
return 1
case int32:
r.setCounter(key, float64(v))
return 1
case float32:
r.setCounter(key, float64(v))
return 1
case bool:
if v {
r.setCounter(key, 1)
} else {
r.setCounter(key, 0)
}
return 1
case string:
f, err := strconv.ParseFloat(v, 64)
if err == nil {
r.setCounter(key, f)
return 1
}
f, ok := counterStrs[v]
if ok {
r.setCounter(key, f)
return 1
}
return 0
default:
if v == nil {
r.setCounter(key, 0)
return 1
}
return 0
}
}
func (r *RiceLa) setCounter(key string, v float64) {
r.mu.Lock()
defer r.mu.Unlock()
g, ok := r.mu.gauges[key]
if !ok {
g = promauto.NewGauge(prometheus.GaugeOpts{Name: key})
r.mu.gauges[key] = g
}
g.Set(v)
}
type RiceLa struct {
client *tesla.Client
chargepoint *chargepoint.Client
mu struct {
sync.Mutex
charging bool
gauges map[string]prometheus.Gauge
}
}
func pollTime(data VehicleData) time.Duration {
if !data.VehicleState.Locked && (data.DriveState.ShiftState == nil || data.DriveState.ShiftState == "P" || data.DriveState.ShiftState == "R") && !data.ChargeState.ChargePortDoorOpen {
return *activePollTime
}
if data.DriveState.ShiftState == "D" || data.DriveState.ShiftState == "R" || data.DriveState.ShiftState == "N" || data.ClimateState.IsClimateOn {
return *drivePollTime
}
return *standbyPollTime
}
func (r *RiceLa) startNearbyCharging(ctx context.Context, data tesla.DriveState) error {
log.Println("starting charging")
latlng := s2.LatLngFromDegrees(data.Latitude, data.Longitude)
for _, charger := range knownChargers {
if charger.DistanceInMeters(latlng) < 20 {
return charger.Start(ctx, r)
}
}
return nil
}
func (r *RiceLa) stopCharging(ctx context.Context) error {
log.Println("stop charging")
userStatus, err := r.chargepoint.UserStatus(ctx)
log.Printf("Charge Point user status %+v", userStatus)
if err != nil {
return err
}
for _, station := range userStatus.Charging.Stations {
if err := r.chargepoint.StopSession(ctx, userStatus.Charging.SessionID, station.DeviceID); err != nil {
return err
}
}
return nil
}
func (r *RiceLa) setCharging(charging bool) {
r.mu.Lock()
defer r.mu.Unlock()
r.mu.charging = charging
}
func (r *RiceLa) charging() bool {
r.mu.Lock()
defer r.mu.Unlock()
return r.mu.charging
}
func (r *RiceLa) monitorVehicle(ctx context.Context, v *tesla.Vehicle) error {
var data, prevData *VehicleData
for {
b := backoff.NewExponentialBackOff()
b.MaxElapsedTime = 1 * time.Minute
if err := backoff.Retry(func() error {
var err error
data, err = r.getVehicleData(ctx, v)
if err != nil {
log.Printf("got error polling (likely retrying) %+v", err)
}
return err
}, b); err != nil {
return err
}
pilotCurrent, _ | processCounter | identifier_name |
norace_test.go | nil {
t.Fatalf("Error on connect: %v", err)
}
defer nc1.Close()
nc2, err := nats.Connect(fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port))
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer nc2.Close()
data := make([]byte, 1024*1024) // 1MB payload
rand.Read(data)
expected := int32(500)
received := int32(0)
done := make(chan bool)
// Create Subscription.
nc1.Subscribe("slow.consumer", func(m *nats.Msg) {
// Just eat it so that we are not measuring
// code time, just delivery.
atomic.AddInt32(&received, 1)
if received >= expected {
done <- true
}
})
// Create Error handler
nc1.SetErrorHandler(func(c *nats.Conn, s *nats.Subscription, err error) {
t.Fatalf("Received an error on the subscription's connection: %v\n", err)
})
nc1.Flush()
for i := 0; i < int(expected); i++ {
nc2.Publish("slow.consumer", data)
}
nc2.Flush()
select {
case <-done:
return
case <-time.After(10 * time.Second):
r := atomic.LoadInt32(&received)
if s.NumSlowConsumers() > 0 {
t.Fatalf("Did not receive all large messages due to slow consumer status: %d of %d", r, expected)
}
t.Fatalf("Failed to receive all large messages: %d of %d\n", r, expected)
}
}
func TestNoRaceRoutedQueueAutoUnsubscribe(t *testing.T) {
optsA, _ := ProcessConfigFile("./configs/seed.conf")
optsA.NoSigs, optsA.NoLog = true, true
srvA := RunServer(optsA)
defer srvA.Shutdown()
srvARouteURL := fmt.Sprintf("nats://%s:%d", optsA.Cluster.Host, srvA.ClusterAddr().Port)
optsB := nextServerOpts(optsA)
optsB.Routes = RoutesFromStr(srvARouteURL)
srvB := RunServer(optsB)
defer srvB.Shutdown()
// Wait for these 2 to connect to each other
checkClusterFormed(t, srvA, srvB)
// Have a client connection to each server
ncA, err := nats.Connect(fmt.Sprintf("nats://%s:%d", optsA.Host, optsA.Port))
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer ncA.Close()
ncB, err := nats.Connect(fmt.Sprintf("nats://%s:%d", optsB.Host, optsB.Port))
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer ncB.Close()
rbar := int32(0)
barCb := func(m *nats.Msg) {
atomic.AddInt32(&rbar, 1)
}
rbaz := int32(0)
bazCb := func(m *nats.Msg) {
atomic.AddInt32(&rbaz, 1)
}
// Create 125 queue subs with auto-unsubscribe to each server for
// group bar and group baz. So 250 total per queue group.
cons := []*nats.Conn{ncA, ncB}
for _, c := range cons {
for i := 0; i < 125; i++ {
qsub, err := c.QueueSubscribe("foo", "bar", barCb)
if err != nil {
t.Fatalf("Error on subscribe: %v", err)
}
if err := qsub.AutoUnsubscribe(1); err != nil {
t.Fatalf("Error on auto-unsubscribe: %v", err)
}
qsub, err = c.QueueSubscribe("foo", "baz", bazCb)
if err != nil {
t.Fatalf("Error on subscribe: %v", err)
}
if err := qsub.AutoUnsubscribe(1); err != nil {
t.Fatalf("Error on auto-unsubscribe: %v", err)
}
}
c.Subscribe("TEST.COMPLETE", func(m *nats.Msg) {})
}
// We coelasce now so for each server we will have all local (250) plus
// two from the remote side for each queue group. We also create one more
// and will wait til each server has 254 subscriptions, that will make sure
// that we have everything setup.
checkFor(t, 10*time.Second, 100*time.Millisecond, func() error {
subsA := srvA.NumSubscriptions()
subsB := srvB.NumSubscriptions()
if subsA != 254 || subsB != 254 {
return fmt.Errorf("Not all subs processed yet: %d and %d", subsA, subsB)
}
return nil
})
expected := int32(250)
// Now send messages from each server
for i := int32(0); i < expected; i++ {
c := cons[i%2]
c.Publish("foo", []byte("Don't Drop Me!"))
}
for _, c := range cons {
c.Flush()
}
checkFor(t, 10*time.Second, 100*time.Millisecond, func() error {
nbar := atomic.LoadInt32(&rbar)
nbaz := atomic.LoadInt32(&rbaz)
if nbar == expected && nbaz == expected {
time.Sleep(500 * time.Millisecond)
return nil
}
return fmt.Errorf("Did not receive all %d queue messages, received %d for 'bar' and %d for 'baz'",
expected, atomic.LoadInt32(&rbar), atomic.LoadInt32(&rbaz))
})
}
func TestNoRaceClosedSlowConsumerWriteDeadline(t *testing.T) {
opts := DefaultOptions()
opts.WriteDeadline = 10 * time.Millisecond // Make very small to trip.
opts.MaxPending = 500 * 1024 * 1024 // Set high so it will not trip here.
s := RunServer(opts)
defer s.Shutdown()
c, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", opts.Host, opts.Port), 3*time.Second)
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer c.Close()
if _, err := c.Write([]byte("CONNECT {}\r\nPING\r\nSUB foo 1\r\n")); err != nil {
t.Fatalf("Error sending protocols to server: %v", err)
}
// Reduce socket buffer to increase reliability of data backing up in the server destined
// for our subscribed client.
c.(*net.TCPConn).SetReadBuffer(128)
url := fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port)
sender, err := nats.Connect(url)
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer sender.Close()
payload := make([]byte, 1024*1024)
for i := 0; i < 100; i++ {
if err := sender.Publish("foo", payload); err != nil {
t.Fatalf("Error on publish: %v", err)
}
}
// Flush sender connection to ensure that all data has been sent.
if err := sender.Flush(); err != nil {
t.Fatalf("Error on flush: %v", err)
}
// At this point server should have closed connection c.
checkClosedConns(t, s, 1, 2*time.Second)
conns := s.closedClients()
if lc := len(conns); lc != 1 {
t.Fatalf("len(conns) expected to be %d, got %d\n", 1, lc)
}
checkReason(t, conns[0].Reason, SlowConsumerWriteDeadline)
}
func TestNoRaceClosedSlowConsumerPendingBytes(t *testing.T) {
opts := DefaultOptions()
opts.WriteDeadline = 30 * time.Second // Wait for long time so write deadline does not trigger slow consumer.
opts.MaxPending = 1 * 1024 * 1024 // Set to low value (1MB) to allow SC to trip.
s := RunServer(opts)
defer s.Shutdown()
c, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", opts.Host, opts.Port), 3*time.Second)
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer c.Close()
if _, err := c.Write([]byte("CONNECT {}\r\nPING\r\nSUB foo 1\r\n")); err != nil |
// Reduce socket buffer to increase reliability of data backing up in the server destined
// for our subscribed client.
c.(*net.TCPConn).SetReadBuffer(128)
url := fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port)
sender, err := nats.Connect(url)
if err != nil {
t.Fatalf("Error | {
t.Fatalf("Error sending protocols to server: %v", err)
} | conditional_block |
norace_test.go | != nil {
t.Fatalf("Error on connect: %v", err)
}
defer nc1.Close()
nc2, err := nats.Connect(fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port))
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer nc2.Close()
data := make([]byte, 1024*1024) // 1MB payload
rand.Read(data)
expected := int32(500)
received := int32(0)
done := make(chan bool)
// Create Subscription.
nc1.Subscribe("slow.consumer", func(m *nats.Msg) {
// Just eat it so that we are not measuring
// code time, just delivery.
atomic.AddInt32(&received, 1)
if received >= expected {
done <- true
}
})
// Create Error handler
nc1.SetErrorHandler(func(c *nats.Conn, s *nats.Subscription, err error) {
t.Fatalf("Received an error on the subscription's connection: %v\n", err)
})
nc1.Flush()
for i := 0; i < int(expected); i++ {
nc2.Publish("slow.consumer", data)
}
nc2.Flush()
select {
case <-done:
return
case <-time.After(10 * time.Second):
r := atomic.LoadInt32(&received)
if s.NumSlowConsumers() > 0 {
t.Fatalf("Did not receive all large messages due to slow consumer status: %d of %d", r, expected)
}
t.Fatalf("Failed to receive all large messages: %d of %d\n", r, expected)
}
}
func TestNoRaceRoutedQueueAutoUnsubscribe(t *testing.T) {
optsA, _ := ProcessConfigFile("./configs/seed.conf")
optsA.NoSigs, optsA.NoLog = true, true
srvA := RunServer(optsA)
defer srvA.Shutdown()
srvARouteURL := fmt.Sprintf("nats://%s:%d", optsA.Cluster.Host, srvA.ClusterAddr().Port)
optsB := nextServerOpts(optsA)
optsB.Routes = RoutesFromStr(srvARouteURL)
srvB := RunServer(optsB)
defer srvB.Shutdown()
// Wait for these 2 to connect to each other
checkClusterFormed(t, srvA, srvB)
// Have a client connection to each server
ncA, err := nats.Connect(fmt.Sprintf("nats://%s:%d", optsA.Host, optsA.Port))
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer ncA.Close()
ncB, err := nats.Connect(fmt.Sprintf("nats://%s:%d", optsB.Host, optsB.Port))
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer ncB.Close()
rbar := int32(0)
barCb := func(m *nats.Msg) {
atomic.AddInt32(&rbar, 1)
}
rbaz := int32(0)
bazCb := func(m *nats.Msg) {
atomic.AddInt32(&rbaz, 1)
}
// Create 125 queue subs with auto-unsubscribe to each server for
// group bar and group baz. So 250 total per queue group.
cons := []*nats.Conn{ncA, ncB}
for _, c := range cons {
for i := 0; i < 125; i++ {
qsub, err := c.QueueSubscribe("foo", "bar", barCb)
if err != nil {
t.Fatalf("Error on subscribe: %v", err)
}
if err := qsub.AutoUnsubscribe(1); err != nil {
t.Fatalf("Error on auto-unsubscribe: %v", err)
}
qsub, err = c.QueueSubscribe("foo", "baz", bazCb)
if err != nil {
t.Fatalf("Error on subscribe: %v", err)
}
if err := qsub.AutoUnsubscribe(1); err != nil {
t.Fatalf("Error on auto-unsubscribe: %v", err)
}
}
c.Subscribe("TEST.COMPLETE", func(m *nats.Msg) {})
}
// We coelasce now so for each server we will have all local (250) plus
// two from the remote side for each queue group. We also create one more
// and will wait til each server has 254 subscriptions, that will make sure
// that we have everything setup.
checkFor(t, 10*time.Second, 100*time.Millisecond, func() error {
subsA := srvA.NumSubscriptions()
subsB := srvB.NumSubscriptions()
if subsA != 254 || subsB != 254 {
return fmt.Errorf("Not all subs processed yet: %d and %d", subsA, subsB)
}
return nil
})
expected := int32(250)
// Now send messages from each server
for i := int32(0); i < expected; i++ {
c := cons[i%2]
c.Publish("foo", []byte("Don't Drop Me!"))
}
for _, c := range cons {
c.Flush()
}
checkFor(t, 10*time.Second, 100*time.Millisecond, func() error {
nbar := atomic.LoadInt32(&rbar)
nbaz := atomic.LoadInt32(&rbaz)
if nbar == expected && nbaz == expected {
time.Sleep(500 * time.Millisecond)
return nil
}
return fmt.Errorf("Did not receive all %d queue messages, received %d for 'bar' and %d for 'baz'",
expected, atomic.LoadInt32(&rbar), atomic.LoadInt32(&rbaz))
})
}
func | (t *testing.T) {
opts := DefaultOptions()
opts.WriteDeadline = 10 * time.Millisecond // Make very small to trip.
opts.MaxPending = 500 * 1024 * 1024 // Set high so it will not trip here.
s := RunServer(opts)
defer s.Shutdown()
c, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", opts.Host, opts.Port), 3*time.Second)
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer c.Close()
if _, err := c.Write([]byte("CONNECT {}\r\nPING\r\nSUB foo 1\r\n")); err != nil {
t.Fatalf("Error sending protocols to server: %v", err)
}
// Reduce socket buffer to increase reliability of data backing up in the server destined
// for our subscribed client.
c.(*net.TCPConn).SetReadBuffer(128)
url := fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port)
sender, err := nats.Connect(url)
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer sender.Close()
payload := make([]byte, 1024*1024)
for i := 0; i < 100; i++ {
if err := sender.Publish("foo", payload); err != nil {
t.Fatalf("Error on publish: %v", err)
}
}
// Flush sender connection to ensure that all data has been sent.
if err := sender.Flush(); err != nil {
t.Fatalf("Error on flush: %v", err)
}
// At this point server should have closed connection c.
checkClosedConns(t, s, 1, 2*time.Second)
conns := s.closedClients()
if lc := len(conns); lc != 1 {
t.Fatalf("len(conns) expected to be %d, got %d\n", 1, lc)
}
checkReason(t, conns[0].Reason, SlowConsumerWriteDeadline)
}
func TestNoRaceClosedSlowConsumerPendingBytes(t *testing.T) {
opts := DefaultOptions()
opts.WriteDeadline = 30 * time.Second // Wait for long time so write deadline does not trigger slow consumer.
opts.MaxPending = 1 * 1024 * 1024 // Set to low value (1MB) to allow SC to trip.
s := RunServer(opts)
defer s.Shutdown()
c, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", opts.Host, opts.Port), 3*time.Second)
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer c.Close()
if _, err := c.Write([]byte("CONNECT {}\r\nPING\r\nSUB foo 1\r\n")); err != nil {
t.Fatalf("Error sending protocols to server: %v", err)
}
// Reduce socket buffer to increase reliability of data backing up in the server destined
// for our subscribed client.
c.(*net.TCPConn).SetReadBuffer(128)
url := fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port)
sender, err := nats.Connect(url)
if err != nil {
t.Fatalf("Error | TestNoRaceClosedSlowConsumerWriteDeadline | identifier_name |
norace_test.go | != nil {
t.Fatalf("Error on connect: %v", err)
}
defer nc1.Close()
nc2, err := nats.Connect(fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port))
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer nc2.Close()
data := make([]byte, 1024*1024) // 1MB payload
rand.Read(data)
expected := int32(500)
received := int32(0)
done := make(chan bool)
// Create Subscription.
nc1.Subscribe("slow.consumer", func(m *nats.Msg) {
// Just eat it so that we are not measuring
// code time, just delivery.
atomic.AddInt32(&received, 1)
if received >= expected {
done <- true
}
})
// Create Error handler
nc1.SetErrorHandler(func(c *nats.Conn, s *nats.Subscription, err error) {
t.Fatalf("Received an error on the subscription's connection: %v\n", err)
})
nc1.Flush()
for i := 0; i < int(expected); i++ {
nc2.Publish("slow.consumer", data)
}
nc2.Flush()
select {
case <-done:
return
case <-time.After(10 * time.Second):
r := atomic.LoadInt32(&received)
if s.NumSlowConsumers() > 0 {
t.Fatalf("Did not receive all large messages due to slow consumer status: %d of %d", r, expected)
}
t.Fatalf("Failed to receive all large messages: %d of %d\n", r, expected)
}
}
func TestNoRaceRoutedQueueAutoUnsubscribe(t *testing.T) {
optsA, _ := ProcessConfigFile("./configs/seed.conf")
optsA.NoSigs, optsA.NoLog = true, true
srvA := RunServer(optsA)
defer srvA.Shutdown()
srvARouteURL := fmt.Sprintf("nats://%s:%d", optsA.Cluster.Host, srvA.ClusterAddr().Port)
optsB := nextServerOpts(optsA)
optsB.Routes = RoutesFromStr(srvARouteURL)
srvB := RunServer(optsB)
defer srvB.Shutdown()
// Wait for these 2 to connect to each other
checkClusterFormed(t, srvA, srvB)
// Have a client connection to each server
ncA, err := nats.Connect(fmt.Sprintf("nats://%s:%d", optsA.Host, optsA.Port))
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer ncA.Close()
ncB, err := nats.Connect(fmt.Sprintf("nats://%s:%d", optsB.Host, optsB.Port))
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer ncB.Close()
rbar := int32(0)
barCb := func(m *nats.Msg) {
atomic.AddInt32(&rbar, 1)
}
rbaz := int32(0)
bazCb := func(m *nats.Msg) {
atomic.AddInt32(&rbaz, 1)
}
// Create 125 queue subs with auto-unsubscribe to each server for
// group bar and group baz. So 250 total per queue group.
cons := []*nats.Conn{ncA, ncB}
for _, c := range cons {
for i := 0; i < 125; i++ {
qsub, err := c.QueueSubscribe("foo", "bar", barCb)
if err != nil {
t.Fatalf("Error on subscribe: %v", err)
}
if err := qsub.AutoUnsubscribe(1); err != nil {
t.Fatalf("Error on auto-unsubscribe: %v", err)
}
qsub, err = c.QueueSubscribe("foo", "baz", bazCb)
if err != nil {
t.Fatalf("Error on subscribe: %v", err)
}
if err := qsub.AutoUnsubscribe(1); err != nil {
t.Fatalf("Error on auto-unsubscribe: %v", err)
}
}
c.Subscribe("TEST.COMPLETE", func(m *nats.Msg) {})
}
// We coelasce now so for each server we will have all local (250) plus
// two from the remote side for each queue group. We also create one more
// and will wait til each server has 254 subscriptions, that will make sure
// that we have everything setup.
checkFor(t, 10*time.Second, 100*time.Millisecond, func() error {
subsA := srvA.NumSubscriptions()
subsB := srvB.NumSubscriptions()
if subsA != 254 || subsB != 254 {
return fmt.Errorf("Not all subs processed yet: %d and %d", subsA, subsB)
}
return nil
})
expected := int32(250)
// Now send messages from each server
for i := int32(0); i < expected; i++ {
c := cons[i%2]
c.Publish("foo", []byte("Don't Drop Me!"))
}
for _, c := range cons {
c.Flush()
}
checkFor(t, 10*time.Second, 100*time.Millisecond, func() error {
nbar := atomic.LoadInt32(&rbar)
nbaz := atomic.LoadInt32(&rbaz)
if nbar == expected && nbaz == expected {
time.Sleep(500 * time.Millisecond)
return nil
}
return fmt.Errorf("Did not receive all %d queue messages, received %d for 'bar' and %d for 'baz'",
expected, atomic.LoadInt32(&rbar), atomic.LoadInt32(&rbaz))
})
}
func TestNoRaceClosedSlowConsumerWriteDeadline(t *testing.T) | sender, err := nats.Connect(url)
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer sender.Close()
payload := make([]byte, 1024*1024)
for i := 0; i < 100; i++ {
if err := sender.Publish("foo", payload); err != nil {
t.Fatalf("Error on publish: %v", err)
}
}
// Flush sender connection to ensure that all data has been sent.
if err := sender.Flush(); err != nil {
t.Fatalf("Error on flush: %v", err)
}
// At this point server should have closed connection c.
checkClosedConns(t, s, 1, 2*time.Second)
conns := s.closedClients()
if lc := len(conns); lc != 1 {
t.Fatalf("len(conns) expected to be %d, got %d\n", 1, lc)
}
checkReason(t, conns[0].Reason, SlowConsumerWriteDeadline)
}
func TestNoRaceClosedSlowConsumerPendingBytes(t *testing.T) {
opts := DefaultOptions()
opts.WriteDeadline = 30 * time.Second // Wait for long time so write deadline does not trigger slow consumer.
opts.MaxPending = 1 * 1024 * 1024 // Set to low value (1MB) to allow SC to trip.
s := RunServer(opts)
defer s.Shutdown()
c, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", opts.Host, opts.Port), 3*time.Second)
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer c.Close()
if _, err := c.Write([]byte("CONNECT {}\r\nPING\r\nSUB foo 1\r\n")); err != nil {
t.Fatalf("Error sending protocols to server: %v", err)
}
// Reduce socket buffer to increase reliability of data backing up in the server destined
// for our subscribed client.
c.(*net.TCPConn).SetReadBuffer(128)
url := fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port)
sender, err := nats.Connect(url)
if err != nil {
t.Fatalf("Error | {
opts := DefaultOptions()
opts.WriteDeadline = 10 * time.Millisecond // Make very small to trip.
opts.MaxPending = 500 * 1024 * 1024 // Set high so it will not trip here.
s := RunServer(opts)
defer s.Shutdown()
c, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", opts.Host, opts.Port), 3*time.Second)
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer c.Close()
if _, err := c.Write([]byte("CONNECT {}\r\nPING\r\nSUB foo 1\r\n")); err != nil {
t.Fatalf("Error sending protocols to server: %v", err)
}
// Reduce socket buffer to increase reliability of data backing up in the server destined
// for our subscribed client.
c.(*net.TCPConn).SetReadBuffer(128)
url := fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port) | identifier_body |
norace_test.go | // http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !race
package server
import (
"fmt"
"math/rand"
"net"
"sync/atomic"
"testing"
"time"
"github.com/nats-io/go-nats"
)
// IMPORTANT: Tests in this file are not executed when running with the -race flag.
// The test name should be prefixed with TestNoRace so we can run only
// those tests: go test -run=TestNoRace ...
func TestNoRaceAvoidSlowConsumerBigMessages(t *testing.T) {
opts := DefaultOptions() // Use defaults to make sure they avoid pending slow consumer.
s := RunServer(opts)
defer s.Shutdown()
nc1, err := nats.Connect(fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port))
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer nc1.Close()
nc2, err := nats.Connect(fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port))
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer nc2.Close()
data := make([]byte, 1024*1024) // 1MB payload
rand.Read(data)
expected := int32(500)
received := int32(0)
done := make(chan bool)
// Create Subscription.
nc1.Subscribe("slow.consumer", func(m *nats.Msg) {
// Just eat it so that we are not measuring
// code time, just delivery.
atomic.AddInt32(&received, 1)
if received >= expected {
done <- true
}
})
// Create Error handler
nc1.SetErrorHandler(func(c *nats.Conn, s *nats.Subscription, err error) {
t.Fatalf("Received an error on the subscription's connection: %v\n", err)
})
nc1.Flush()
for i := 0; i < int(expected); i++ {
nc2.Publish("slow.consumer", data)
}
nc2.Flush()
select {
case <-done:
return
case <-time.After(10 * time.Second):
r := atomic.LoadInt32(&received)
if s.NumSlowConsumers() > 0 {
t.Fatalf("Did not receive all large messages due to slow consumer status: %d of %d", r, expected)
}
t.Fatalf("Failed to receive all large messages: %d of %d\n", r, expected)
}
}
// TestNoRaceRoutedQueueAutoUnsubscribe checks queue-group delivery across a
// two-server route when every queue subscriber carries an AutoUnsubscribe(1)
// limit: after publishing 250 messages, each of the two queue groups ("bar"
// and "baz") must have received exactly 250 deliveries in total.
func TestNoRaceRoutedQueueAutoUnsubscribe(t *testing.T) {
	optsA, _ := ProcessConfigFile("./configs/seed.conf")
	optsA.NoSigs, optsA.NoLog = true, true
	srvA := RunServer(optsA)
	defer srvA.Shutdown()
	srvARouteURL := fmt.Sprintf("nats://%s:%d", optsA.Cluster.Host, srvA.ClusterAddr().Port)
	// Second server routes to the first, forming a two-node cluster.
	optsB := nextServerOpts(optsA)
	optsB.Routes = RoutesFromStr(srvARouteURL)
	srvB := RunServer(optsB)
	defer srvB.Shutdown()
	// Wait for these 2 to connect to each other
	checkClusterFormed(t, srvA, srvB)
	// Have a client connection to each server
	ncA, err := nats.Connect(fmt.Sprintf("nats://%s:%d", optsA.Host, optsA.Port))
	if err != nil {
		t.Fatalf("Error on connect: %v", err)
	}
	defer ncA.Close()
	ncB, err := nats.Connect(fmt.Sprintf("nats://%s:%d", optsB.Host, optsB.Port))
	if err != nil {
		t.Fatalf("Error on connect: %v", err)
	}
	defer ncB.Close()
	// Per-group delivery counters; updated atomically since the callbacks
	// run on the client library's delivery goroutines.
	rbar := int32(0)
	barCb := func(m *nats.Msg) {
		atomic.AddInt32(&rbar, 1)
	}
	rbaz := int32(0)
	bazCb := func(m *nats.Msg) {
		atomic.AddInt32(&rbaz, 1)
	}
	// Create 125 queue subs with auto-unsubscribe to each server for
	// group bar and group baz. So 250 total per queue group.
	cons := []*nats.Conn{ncA, ncB}
	for _, c := range cons {
		for i := 0; i < 125; i++ {
			qsub, err := c.QueueSubscribe("foo", "bar", barCb)
			if err != nil {
				t.Fatalf("Error on subscribe: %v", err)
			}
			if err := qsub.AutoUnsubscribe(1); err != nil {
				t.Fatalf("Error on auto-unsubscribe: %v", err)
			}
			qsub, err = c.QueueSubscribe("foo", "baz", bazCb)
			if err != nil {
				t.Fatalf("Error on subscribe: %v", err)
			}
			if err := qsub.AutoUnsubscribe(1); err != nil {
				t.Fatalf("Error on auto-unsubscribe: %v", err)
			}
		}
		// Extra plain subscription per client; accounts for the "+1" in the
		// 254-subscription target below.
		c.Subscribe("TEST.COMPLETE", func(m *nats.Msg) {})
	}
	// We coelasce now so for each server we will have all local (250) plus
	// two from the remote side for each queue group. We also create one more
	// and will wait til each server has 254 subscriptions, that will make sure
	// that we have everything setup.
	checkFor(t, 10*time.Second, 100*time.Millisecond, func() error {
		subsA := srvA.NumSubscriptions()
		subsB := srvB.NumSubscriptions()
		if subsA != 254 || subsB != 254 {
			return fmt.Errorf("Not all subs processed yet: %d and %d", subsA, subsB)
		}
		return nil
	})
	expected := int32(250)
	// Now send messages from each server
	for i := int32(0); i < expected; i++ {
		// Alternate publishes between the two client connections.
		c := cons[i%2]
		c.Publish("foo", []byte("Don't Drop Me!"))
	}
	for _, c := range cons {
		c.Flush()
	}
	checkFor(t, 10*time.Second, 100*time.Millisecond, func() error {
		nbar := atomic.LoadInt32(&rbar)
		nbaz := atomic.LoadInt32(&rbaz)
		if nbar == expected && nbaz == expected {
			// NOTE(review): brief pause before declaring success — presumably
			// to let any late/duplicate deliveries settle, though the counters
			// are not re-checked after the sleep; confirm intent.
			time.Sleep(500 * time.Millisecond)
			return nil
		}
		return fmt.Errorf("Did not receive all %d queue messages, received %d for 'bar' and %d for 'baz'",
			expected, atomic.LoadInt32(&rbar), atomic.LoadInt32(&rbaz))
	})
}
func TestNoRaceClosedSlowConsumerWriteDeadline(t *testing.T) {
opts := DefaultOptions()
opts.WriteDeadline = 10 * time.Millisecond // Make very small to trip.
opts.MaxPending = 500 * 1024 * 1024 // Set high so it will not trip here.
s := RunServer(opts)
defer s.Shutdown()
c, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", opts.Host, opts.Port), 3*time.Second)
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer c.Close()
if _, err := c.Write([]byte("CONNECT {}\r\nPING\r\nSUB foo 1\r\n")); err != nil {
t.Fatalf("Error sending protocols to server: %v", err)
}
// Reduce socket buffer to increase reliability of data backing up in the server destined
// for our subscribed client.
c.(*net.TCPConn).SetReadBuffer(128)
url := fmt.Sprintf("nats://%s:%d", opts.Host, opts.Port)
sender, err := nats.Connect(url)
if err != nil {
t.Fatalf("Error on connect: %v", err)
}
defer sender.Close()
payload := make([]byte, 1024*1024)
for i := 0; i < 100; i++ {
if err := sender.Publish("foo", payload); err != nil {
t.Fatalf("Error on publish: %v", err)
}
}
// Flush sender connection to ensure that all data has been sent.
if err := sender.Flush(); err != nil {
t.Fatalf("Error on flush: %v", err)
}
// At this point server should have closed connection c.
checkClosedConns(t, s, 1, 2*time.Second)
conns := s.closedClients()
if lc := len(conns); lc != 1 {
t.Fatalf("len(conns) expected to be %d, got %d\n", 1, lc)
| // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// | random_line_split | |
bootstrap.go | .TrustedProxy {
if ip := net.ParseIP(trustedProxy); ip != nil {
bs.config.Config.TrustedProxyIPs = append(bs.config.Config.TrustedProxyIPs, &ip)
continue
}
if _, ipNet, errParseCIDR := net.ParseCIDR(trustedProxy); errParseCIDR == nil {
bs.config.Config.TrustedProxyNets = append(bs.config.Config.TrustedProxyNets, ipNet)
continue
}
}
if len(bs.config.Config.TrustedProxyIPs) > 0 {
logger.Infoln("trusted proxy IPs", bs.config.Config.TrustedProxyIPs)
}
if len(bs.config.Config.TrustedProxyNets) > 0 {
logger.Infoln("trusted proxy networks", bs.config.Config.TrustedProxyNets)
}
if len(settings.AllowScope) > 0 {
bs.config.Config.AllowedScopes = settings.AllowScope
logger.Infoln("using custom allowed OAuth 2 scopes", bs.config.Config.AllowedScopes)
}
bs.config.Config.AllowClientGuests = settings.AllowClientGuests
if bs.config.Config.AllowClientGuests {
logger.Infoln("client controlled guests are enabled")
}
bs.config.Config.AllowDynamicClientRegistration = settings.AllowDynamicClientRegistration
if bs.config.Config.AllowDynamicClientRegistration {
logger.Infoln("dynamic client registration is enabled")
}
encryptionSecretFn := settings.EncryptionSecretFile
if encryptionSecretFn != "" {
logger.WithField("file", encryptionSecretFn).Infoln("loading encryption secret from file")
bs.config.EncryptionSecret, err = ioutil.ReadFile(encryptionSecretFn)
if err != nil {
return fmt.Errorf("failed to load encryption secret from file: %v", err)
}
if len(bs.config.EncryptionSecret) != encryption.KeySize {
return fmt.Errorf("invalid encryption secret size - must be %d bytes", encryption.KeySize)
}
} else {
logger.Warnf("missing --encryption-secret parameter, using random encyption secret with %d bytes", encryption.KeySize)
bs.config.EncryptionSecret = rndm.GenerateRandomBytes(encryption.KeySize)
}
bs.config.Config.ListenAddr = settings.Listen
bs.config.IdentifierClientDisabled = settings.IdentifierClientDisabled
bs.config.IdentifierClientPath = settings.IdentifierClientPath
bs.config.IdentifierRegistrationConf = settings.IdentifierRegistrationConf
if bs.config.IdentifierRegistrationConf != "" {
bs.config.IdentifierRegistrationConf, _ = filepath.Abs(bs.config.IdentifierRegistrationConf)
if _, errStat := os.Stat(bs.config.IdentifierRegistrationConf); errStat != nil {
return fmt.Errorf("identifier-registration-conf file not found or unable to access: %v", errStat)
}
bs.config.IdentifierAuthoritiesConf = bs.config.IdentifierRegistrationConf
}
bs.config.IdentifierScopesConf = settings.IdentifierScopesConf
if bs.config.IdentifierScopesConf != "" {
bs.config.IdentifierScopesConf, _ = filepath.Abs(bs.config.IdentifierScopesConf)
if _, errStat := os.Stat(bs.config.IdentifierScopesConf); errStat != nil {
return fmt.Errorf("identifier-scopes-conf file not found or unable to access: %v", errStat)
}
}
if settings.IdentifierDefaultBannerLogo != "" {
// Load from file.
b, errRead := ioutil.ReadFile(settings.IdentifierDefaultBannerLogo)
if errRead != nil {
return fmt.Errorf("identifier-default-banner-logo failed to open: %w", errRead)
}
bs.config.IdentifierDefaultBannerLogo = b
}
if settings.IdentifierDefaultSignInPageText != "" {
bs.config.IdentifierDefaultSignInPageText = &settings.IdentifierDefaultSignInPageText
}
if settings.IdentifierDefaultUsernameHintText != "" {
bs.config.IdentifierDefaultUsernameHintText = &settings.IdentifierDefaultUsernameHintText
}
bs.config.IdentifierUILocales = settings.IdentifierUILocales
bs.config.SigningKeyID = settings.SigningKid
bs.config.Signers = make(map[string]crypto.Signer)
bs.config.Validators = make(map[string]crypto.PublicKey)
bs.config.Certificates = make(map[string][]*x509.Certificate)
signingMethodString := settings.SigningMethod
bs.config.SigningMethod = jwt.GetSigningMethod(signingMethodString)
if bs.config.SigningMethod == nil {
return fmt.Errorf("unknown signing method: %s", signingMethodString)
}
signingKeyFns := settings.SigningPrivateKeyFiles
if len(signingKeyFns) > 0 {
first := true
for _, signingKeyFn := range signingKeyFns {
logger.WithField("path", signingKeyFn).Infoln("loading signing key")
err = addSignerWithIDFromFile(signingKeyFn, "", bs)
if err != nil {
return err
}
if first {
// Also add key under the provided id.
first = false
err = addSignerWithIDFromFile(signingKeyFn, bs.config.SigningKeyID, bs)
if err != nil {
return err
}
}
}
} else {
//NOTE(longsleep): remove me - create keypair a random key pair.
sm := jwt.SigningMethodPS256
bs.config.SigningMethod = sm
logger.WithField("alg", sm.Name).Warnf("missing --signing-private-key parameter, using random %d bit signing key", DefaultSigningKeyBits)
signer, _ := rsa.GenerateKey(rand.Reader, DefaultSigningKeyBits)
bs.config.Signers[bs.config.SigningKeyID] = signer
}
// Ensure we have a signer for the things we need.
err = validateSigners(bs)
if err != nil {
return err
}
validationKeysPath := settings.ValidationKeysPath
if validationKeysPath != "" {
logger.WithField("path", validationKeysPath).Infoln("loading validation keys")
err = addValidatorsFromPath(validationKeysPath, bs)
if err != nil {
return err
}
}
bs.config.Config.HTTPTransport = utils.HTTPTransportWithTLSClientConfig(bs.config.TLSClientConfig)
bs.config.AccessTokenDurationSeconds = settings.AccessTokenDurationSeconds
if bs.config.AccessTokenDurationSeconds == 0 {
bs.config.AccessTokenDurationSeconds = 60 * 10 // 10 Minutes
}
bs.config.IDTokenDurationSeconds = settings.IDTokenDurationSeconds
if bs.config.IDTokenDurationSeconds == 0 {
bs.config.IDTokenDurationSeconds = 60 * 60 // 1 Hour
}
bs.config.RefreshTokenDurationSeconds = settings.RefreshTokenDurationSeconds
if bs.config.RefreshTokenDurationSeconds == 0 {
bs.config.RefreshTokenDurationSeconds = 60 * 60 * 24 * 365 * 3 // 3 Years
}
bs.config.DyamicClientSecretDurationSeconds = settings.DyamicClientSecretDurationSeconds
return nil
}
// setup wires up all managers for this bootstrap: it creates the manager
// registry, registers the identity, guest and OIDC-provider managers,
// applies them, and finally initializes the provider metadata.
func (bs *bootstrap) setup(ctx context.Context, settings *Settings) error {
	mgrs, err := newManagers(ctx, bs)
	if err != nil {
		return err
	}
	bs.managers = mgrs

	idm, err := bs.setupIdentity(ctx, settings)
	if err != nil {
		return err
	}
	mgrs.Set("identity", idm)

	guests, err := bs.setupGuest(ctx, idm)
	if err != nil {
		return err
	}
	mgrs.Set("guest", guests)

	provider, err := bs.setupOIDCProvider(ctx)
	if err != nil {
		return err
	}
	mgrs.Set("oidc", provider)
	// The OIDC provider doubles as the default HTTP handler.
	mgrs.Set("handler", provider)

	if err = mgrs.Apply(); err != nil {
		return fmt.Errorf("failed to apply managers: %v", err)
	}

	// Final step: provider metadata depends on the applied managers.
	if err = provider.InitializeMetadata(); err != nil {
		return fmt.Errorf("failed to initialize provider metadata: %v", err)
	}
	return nil
}
// MakeURIPath returns the URI path for the given API type and subpath,
// rooted at the configured URI base path. It panics on an unknown API type.
func (bs *bootstrap) MakeURIPath(api APIType, subpath string) string {
	base := strings.TrimSuffix(bs.uriBasePath, "/")
	sub := strings.TrimPrefix(subpath, "/")

	var uriPath string
	switch api {
	case APITypeKonnect:
		uriPath = fmt.Sprintf("%s/konnect/v1/%s", base, sub)
	case APITypeSignin:
		uriPath = fmt.Sprintf("%s/signin/v1/%s", base, sub)
	default:
		panic("unknown api type")
	}

	// No subpath given: strip the trailing slash left by the format string.
	if sub == "" {
		uriPath = strings.TrimSuffix(uriPath, "/")
	}
	return uriPath
}
// MakeURI builds an absolute URL under the issuer identifier URI for the
// given API type and subpath.
func (bs *bootstrap) MakeURI(api APIType, subpath string) *url.URL {
	// Re-parse the issuer URI to get a private copy before mutating its path.
	uri, _ := url.Parse(bs.config.IssuerIdentifierURI.String())
	uri.Path = bs.MakeURIPath(api, subpath)
	return uri
}
func (bs *bootstrap) setupIdentity(ctx context.Context, settings *Settings) (identity.Manager, error) {
logger := bs.config.Config.Logger
if settings.IdentityManager == "" {
return nil, fmt.Errorf("identity-manager argument missing")
}
// Identity manager.
identityManagerName := settings.IdentityManager
identityManager, err := getIdentityManagerByName(identityManagerName, bs)
if err != nil | {
return nil, err
} | conditional_block | |
bootstrap.go | func (bs *bootstrap) Config() *Config {
return bs.config
}
// Managers returns bootstrapped identity-managers. The registry is
// populated during setup.
func (bs *bootstrap) Managers() *managers.Managers {
	return bs.managers
}
// Boot validates the given configuration and bootstraps the service. The
// returned Bootstrap exposes the configured identity-managers, their
// respective http-handlers and the config.
//
// This function should be used by consumers which want to embed this project
// as a library.
func Boot(ctx context.Context, settings *Settings, cfg *config.Config) (Bootstrap, error) {
	// NOTE(longsleep): Ensure to use same salt length as the hash size.
	// See https://www.ietf.org/mail-archive/web/jose/current/msg02901.html for
	// reference and https://github.com/golang-jwt/jwt/v4/issues/285 for
	// the issue in upstream jwt-go.
	for _, alg := range []string{jwt.SigningMethodPS256.Name, jwt.SigningMethodPS384.Name, jwt.SigningMethodPS512.Name} {
		if psMethod, ok := jwt.GetSigningMethod(alg).(*jwt.SigningMethodRSAPSS); ok {
			psMethod.Options.SaltLength = rsa.PSSSaltLengthEqualsHash
		}
	}

	boot := &bootstrap{
		config: &Config{
			Config:   cfg,
			Settings: settings,
		},
	}
	if err := boot.initialize(settings); err != nil {
		return nil, err
	}
	if err := boot.setup(ctx, settings); err != nil {
		return nil, err
	}

	return boot, nil
}
// initialize, parsed parameters from commandline with validation and adds them
// to the associated Bootstrap data.
func (bs *bootstrap) initialize(settings *Settings) error {
logger := bs.config.Config.Logger
var err error
if settings.IdentityManager == "" {
return fmt.Errorf("identity-manager argument missing, use one of kc, ldap, cookie, dummy")
}
bs.config.IssuerIdentifierURI, err = url.Parse(settings.Iss)
if err != nil {
return fmt.Errorf("invalid iss value, iss is not a valid URL), %v", err)
} else if settings.Iss == "" {
return fmt.Errorf("missing iss value, did you provide the --iss parameter?")
} else if bs.config.IssuerIdentifierURI.Scheme != "https" {
return fmt.Errorf("invalid iss value, URL must start with https://")
} else if bs.config.IssuerIdentifierURI.Host == "" {
return fmt.Errorf("invalid iss value, URL must have a host")
}
bs.uriBasePath = settings.URIBasePath
bs.config.SignInFormURI, err = url.Parse(settings.SignInURI)
if err != nil {
return fmt.Errorf("invalid sign-in URI, %v", err)
}
bs.config.SignedOutURI, err = url.Parse(settings.SignedOutURI)
if err != nil {
return fmt.Errorf("invalid signed-out URI, %v", err)
}
bs.config.AuthorizationEndpointURI, err = url.Parse(settings.AuthorizationEndpointURI)
if err != nil {
return fmt.Errorf("invalid authorization-endpoint-uri, %v", err)
}
bs.config.EndSessionEndpointURI, err = url.Parse(settings.EndsessionEndpointURI)
if err != nil {
return fmt.Errorf("invalid endsession-endpoint-uri, %v", err)
}
if settings.Insecure {
// NOTE(longsleep): This disable http2 client support. See https://github.com/golang/go/issues/14275 for reasons.
bs.config.TLSClientConfig = utils.InsecureSkipVerifyTLSConfig()
logger.Warnln("insecure mode, TLS client connections are susceptible to man-in-the-middle attacks")
} else {
bs.config.TLSClientConfig = utils.DefaultTLSConfig()
}
for _, trustedProxy := range settings.TrustedProxy {
if ip := net.ParseIP(trustedProxy); ip != nil {
bs.config.Config.TrustedProxyIPs = append(bs.config.Config.TrustedProxyIPs, &ip)
continue
}
if _, ipNet, errParseCIDR := net.ParseCIDR(trustedProxy); errParseCIDR == nil {
bs.config.Config.TrustedProxyNets = append(bs.config.Config.TrustedProxyNets, ipNet)
continue
}
}
if len(bs.config.Config.TrustedProxyIPs) > 0 {
logger.Infoln("trusted proxy IPs", bs.config.Config.TrustedProxyIPs)
}
if len(bs.config.Config.TrustedProxyNets) > 0 {
logger.Infoln("trusted proxy networks", bs.config.Config.TrustedProxyNets)
}
if len(settings.AllowScope) > 0 {
bs.config.Config.AllowedScopes = settings.AllowScope
logger.Infoln("using custom allowed OAuth 2 scopes", bs.config.Config.AllowedScopes)
}
bs.config.Config.AllowClientGuests = settings.AllowClientGuests
if bs.config.Config.AllowClientGuests {
logger.Infoln("client controlled guests are enabled")
}
bs.config.Config.AllowDynamicClientRegistration = settings.AllowDynamicClientRegistration
if bs.config.Config.AllowDynamicClientRegistration {
logger.Infoln("dynamic client registration is enabled")
}
encryptionSecretFn := settings.EncryptionSecretFile
if encryptionSecretFn != "" {
logger.WithField("file", encryptionSecretFn).Infoln("loading encryption secret from file")
bs.config.EncryptionSecret, err = ioutil.ReadFile(encryptionSecretFn)
if err != nil {
return fmt.Errorf("failed to load encryption secret from file: %v", err)
}
if len(bs.config.EncryptionSecret) != encryption.KeySize {
return fmt.Errorf("invalid encryption secret size - must be %d bytes", encryption.KeySize)
}
} else {
logger.Warnf("missing --encryption-secret parameter, using random encyption secret with %d bytes", encryption.KeySize)
bs.config.EncryptionSecret = rndm.GenerateRandomBytes(encryption.KeySize)
}
bs.config.Config.ListenAddr = settings.Listen
bs.config.IdentifierClientDisabled = settings.IdentifierClientDisabled
bs.config.IdentifierClientPath = settings.IdentifierClientPath
bs.config.IdentifierRegistrationConf = settings.IdentifierRegistrationConf
if bs.config.IdentifierRegistrationConf != "" {
bs.config.IdentifierRegistrationConf, _ = filepath.Abs(bs.config.IdentifierRegistrationConf)
if _, errStat := os.Stat(bs.config.IdentifierRegistrationConf); errStat != nil {
return fmt.Errorf("identifier-registration-conf file not found or unable to access: %v", errStat)
}
bs.config.IdentifierAuthoritiesConf = bs.config.IdentifierRegistrationConf
}
bs.config.IdentifierScopesConf = settings.IdentifierScopesConf
if bs.config.IdentifierScopesConf != "" {
bs.config.IdentifierScopesConf, _ = filepath.Abs(bs.config.IdentifierScopesConf)
if _, errStat := os.Stat(bs.config.IdentifierScopesConf); errStat != nil {
return fmt.Errorf("identifier-scopes-conf file not found or unable to access: %v", errStat)
}
}
if settings.IdentifierDefaultBannerLogo != "" {
// Load from file.
b, errRead := ioutil.ReadFile(settings.IdentifierDefaultBannerLogo)
if errRead != nil {
return fmt.Errorf("identifier-default-banner-logo failed to open: %w", errRead)
}
bs.config.IdentifierDefaultBannerLogo = b
}
if settings.IdentifierDefaultSignInPageText != "" {
bs.config.IdentifierDefaultSignInPageText = &settings.IdentifierDefaultSignInPageText
}
if settings.IdentifierDefaultUsernameHintText != "" {
bs.config.IdentifierDefaultUsernameHintText = &settings.IdentifierDefaultUsernameHintText
}
bs.config.IdentifierUILocales = settings.IdentifierUILocales
bs.config.SigningKeyID = settings.SigningKid
bs.config.Signers = make(map[string]crypto.Signer)
bs.config.Validators = make(map[string]crypto.PublicKey)
bs.config.Certificates = make(map[string][]*x509.Certificate)
signingMethodString := settings.SigningMethod
bs.config.SigningMethod = jwt.GetSigningMethod(signingMethodString)
if bs.config.SigningMethod == nil {
return fmt.Errorf("unknown signing method: %s", signingMethodString)
}
signingKeyFns := settings.SigningPrivateKeyFiles
if len(signingKeyFns) > 0 {
first := true
for _, signingKeyFn := range signingKeyFns {
logger.WithField("path", signingKeyFn).Infoln("loading signing key")
err = addSignerWithIDFromFile(signingKeyFn, "", bs)
if err != nil {
return err
}
if first {
// Also add key under the provided id.
first = false
err = addSignerWithIDFromFile(signingKeyFn, bs.config.SigningKeyID, bs)
if err != nil {
return err
}
}
}
} else {
//NOTE(longsleep): remove me - create keypair a random key pair.
sm := jwt.SigningMethodPS256
bs.config.SigningMethod = sm
logger.WithField("alg", sm.Name).Warnf("missing --signing-private-key parameter, using random %d bit signing key", DefaultSigningKeyBits)
signer, _ := rsa.GenerateKey(rand.Reader, DefaultSigningKeyBits)
bs.config.Signers[bs.config.Sign | // Config returns the bootstap configuration. | random_line_split | |
bootstrap.go | if err != nil {
return nil, err
}
err = bs.setup(ctx, settings)
if err != nil {
return nil, err
}
return bs, nil
}
// initialize, parsed parameters from commandline with validation and adds them
// to the associated Bootstrap data.
func (bs *bootstrap) initialize(settings *Settings) error {
logger := bs.config.Config.Logger
var err error
if settings.IdentityManager == "" {
return fmt.Errorf("identity-manager argument missing, use one of kc, ldap, cookie, dummy")
}
bs.config.IssuerIdentifierURI, err = url.Parse(settings.Iss)
if err != nil {
return fmt.Errorf("invalid iss value, iss is not a valid URL), %v", err)
} else if settings.Iss == "" {
return fmt.Errorf("missing iss value, did you provide the --iss parameter?")
} else if bs.config.IssuerIdentifierURI.Scheme != "https" {
return fmt.Errorf("invalid iss value, URL must start with https://")
} else if bs.config.IssuerIdentifierURI.Host == "" {
return fmt.Errorf("invalid iss value, URL must have a host")
}
bs.uriBasePath = settings.URIBasePath
bs.config.SignInFormURI, err = url.Parse(settings.SignInURI)
if err != nil {
return fmt.Errorf("invalid sign-in URI, %v", err)
}
bs.config.SignedOutURI, err = url.Parse(settings.SignedOutURI)
if err != nil {
return fmt.Errorf("invalid signed-out URI, %v", err)
}
bs.config.AuthorizationEndpointURI, err = url.Parse(settings.AuthorizationEndpointURI)
if err != nil {
return fmt.Errorf("invalid authorization-endpoint-uri, %v", err)
}
bs.config.EndSessionEndpointURI, err = url.Parse(settings.EndsessionEndpointURI)
if err != nil {
return fmt.Errorf("invalid endsession-endpoint-uri, %v", err)
}
if settings.Insecure {
// NOTE(longsleep): This disable http2 client support. See https://github.com/golang/go/issues/14275 for reasons.
bs.config.TLSClientConfig = utils.InsecureSkipVerifyTLSConfig()
logger.Warnln("insecure mode, TLS client connections are susceptible to man-in-the-middle attacks")
} else {
bs.config.TLSClientConfig = utils.DefaultTLSConfig()
}
for _, trustedProxy := range settings.TrustedProxy {
if ip := net.ParseIP(trustedProxy); ip != nil {
bs.config.Config.TrustedProxyIPs = append(bs.config.Config.TrustedProxyIPs, &ip)
continue
}
if _, ipNet, errParseCIDR := net.ParseCIDR(trustedProxy); errParseCIDR == nil {
bs.config.Config.TrustedProxyNets = append(bs.config.Config.TrustedProxyNets, ipNet)
continue
}
}
if len(bs.config.Config.TrustedProxyIPs) > 0 {
logger.Infoln("trusted proxy IPs", bs.config.Config.TrustedProxyIPs)
}
if len(bs.config.Config.TrustedProxyNets) > 0 {
logger.Infoln("trusted proxy networks", bs.config.Config.TrustedProxyNets)
}
if len(settings.AllowScope) > 0 {
bs.config.Config.AllowedScopes = settings.AllowScope
logger.Infoln("using custom allowed OAuth 2 scopes", bs.config.Config.AllowedScopes)
}
bs.config.Config.AllowClientGuests = settings.AllowClientGuests
if bs.config.Config.AllowClientGuests {
logger.Infoln("client controlled guests are enabled")
}
bs.config.Config.AllowDynamicClientRegistration = settings.AllowDynamicClientRegistration
if bs.config.Config.AllowDynamicClientRegistration {
logger.Infoln("dynamic client registration is enabled")
}
encryptionSecretFn := settings.EncryptionSecretFile
if encryptionSecretFn != "" {
logger.WithField("file", encryptionSecretFn).Infoln("loading encryption secret from file")
bs.config.EncryptionSecret, err = ioutil.ReadFile(encryptionSecretFn)
if err != nil {
return fmt.Errorf("failed to load encryption secret from file: %v", err)
}
if len(bs.config.EncryptionSecret) != encryption.KeySize {
return fmt.Errorf("invalid encryption secret size - must be %d bytes", encryption.KeySize)
}
} else {
logger.Warnf("missing --encryption-secret parameter, using random encyption secret with %d bytes", encryption.KeySize)
bs.config.EncryptionSecret = rndm.GenerateRandomBytes(encryption.KeySize)
}
bs.config.Config.ListenAddr = settings.Listen
bs.config.IdentifierClientDisabled = settings.IdentifierClientDisabled
bs.config.IdentifierClientPath = settings.IdentifierClientPath
bs.config.IdentifierRegistrationConf = settings.IdentifierRegistrationConf
if bs.config.IdentifierRegistrationConf != "" {
bs.config.IdentifierRegistrationConf, _ = filepath.Abs(bs.config.IdentifierRegistrationConf)
if _, errStat := os.Stat(bs.config.IdentifierRegistrationConf); errStat != nil {
return fmt.Errorf("identifier-registration-conf file not found or unable to access: %v", errStat)
}
bs.config.IdentifierAuthoritiesConf = bs.config.IdentifierRegistrationConf
}
bs.config.IdentifierScopesConf = settings.IdentifierScopesConf
if bs.config.IdentifierScopesConf != "" {
bs.config.IdentifierScopesConf, _ = filepath.Abs(bs.config.IdentifierScopesConf)
if _, errStat := os.Stat(bs.config.IdentifierScopesConf); errStat != nil {
return fmt.Errorf("identifier-scopes-conf file not found or unable to access: %v", errStat)
}
}
if settings.IdentifierDefaultBannerLogo != "" {
// Load from file.
b, errRead := ioutil.ReadFile(settings.IdentifierDefaultBannerLogo)
if errRead != nil {
return fmt.Errorf("identifier-default-banner-logo failed to open: %w", errRead)
}
bs.config.IdentifierDefaultBannerLogo = b
}
if settings.IdentifierDefaultSignInPageText != "" {
bs.config.IdentifierDefaultSignInPageText = &settings.IdentifierDefaultSignInPageText
}
if settings.IdentifierDefaultUsernameHintText != "" {
bs.config.IdentifierDefaultUsernameHintText = &settings.IdentifierDefaultUsernameHintText
}
bs.config.IdentifierUILocales = settings.IdentifierUILocales
bs.config.SigningKeyID = settings.SigningKid
bs.config.Signers = make(map[string]crypto.Signer)
bs.config.Validators = make(map[string]crypto.PublicKey)
bs.config.Certificates = make(map[string][]*x509.Certificate)
signingMethodString := settings.SigningMethod
bs.config.SigningMethod = jwt.GetSigningMethod(signingMethodString)
if bs.config.SigningMethod == nil {
return fmt.Errorf("unknown signing method: %s", signingMethodString)
}
signingKeyFns := settings.SigningPrivateKeyFiles
if len(signingKeyFns) > 0 {
first := true
for _, signingKeyFn := range signingKeyFns {
logger.WithField("path", signingKeyFn).Infoln("loading signing key")
err = addSignerWithIDFromFile(signingKeyFn, "", bs)
if err != nil {
return err
}
if first {
// Also add key under the provided id.
first = false
err = addSignerWithIDFromFile(signingKeyFn, bs.config.SigningKeyID, bs)
if err != nil {
return err
}
}
}
} else {
//NOTE(longsleep): remove me - create keypair a random key pair.
sm := jwt.SigningMethodPS256
bs.config.SigningMethod = sm
logger.WithField("alg", sm.Name).Warnf("missing --signing-private-key parameter, using random %d bit signing key", DefaultSigningKeyBits)
signer, _ := rsa.GenerateKey(rand.Reader, DefaultSigningKeyBits)
bs.config.Signers[bs.config.SigningKeyID] = signer
}
// Ensure we have a signer for the things we need.
err = validateSigners(bs)
if err != nil {
return err
}
validationKeysPath := settings.ValidationKeysPath
if validationKeysPath != "" {
logger.WithField("path", validationKeysPath).Infoln("loading validation keys")
err = addValidatorsFromPath(validationKeysPath, bs)
if err != nil {
return err
}
}
bs.config.Config.HTTPTransport = utils.HTTPTransportWithTLSClientConfig(bs.config.TLSClientConfig)
bs.config.AccessTokenDurationSeconds = settings.AccessTokenDurationSeconds
if bs | {
// NOTE(longsleep): Ensure to use same salt length as the hash size.
// See https://www.ietf.org/mail-archive/web/jose/current/msg02901.html for
// reference and https://github.com/golang-jwt/jwt/v4/issues/285 for
// the issue in upstream jwt-go.
for _, alg := range []string{jwt.SigningMethodPS256.Name, jwt.SigningMethodPS384.Name, jwt.SigningMethodPS512.Name} {
sm := jwt.GetSigningMethod(alg)
if signingMethodRSAPSS, ok := sm.(*jwt.SigningMethodRSAPSS); ok {
signingMethodRSAPSS.Options.SaltLength = rsa.PSSSaltLengthEqualsHash
}
}
bs := &bootstrap{
config: &Config{
Config: cfg,
Settings: settings,
},
}
err := bs.initialize(settings) | identifier_body | |
bootstrap.go | Proxy); errParseCIDR == nil {
bs.config.Config.TrustedProxyNets = append(bs.config.Config.TrustedProxyNets, ipNet)
continue
}
}
if len(bs.config.Config.TrustedProxyIPs) > 0 {
logger.Infoln("trusted proxy IPs", bs.config.Config.TrustedProxyIPs)
}
if len(bs.config.Config.TrustedProxyNets) > 0 {
logger.Infoln("trusted proxy networks", bs.config.Config.TrustedProxyNets)
}
if len(settings.AllowScope) > 0 {
bs.config.Config.AllowedScopes = settings.AllowScope
logger.Infoln("using custom allowed OAuth 2 scopes", bs.config.Config.AllowedScopes)
}
bs.config.Config.AllowClientGuests = settings.AllowClientGuests
if bs.config.Config.AllowClientGuests {
logger.Infoln("client controlled guests are enabled")
}
bs.config.Config.AllowDynamicClientRegistration = settings.AllowDynamicClientRegistration
if bs.config.Config.AllowDynamicClientRegistration {
logger.Infoln("dynamic client registration is enabled")
}
encryptionSecretFn := settings.EncryptionSecretFile
if encryptionSecretFn != "" {
logger.WithField("file", encryptionSecretFn).Infoln("loading encryption secret from file")
bs.config.EncryptionSecret, err = ioutil.ReadFile(encryptionSecretFn)
if err != nil {
return fmt.Errorf("failed to load encryption secret from file: %v", err)
}
if len(bs.config.EncryptionSecret) != encryption.KeySize {
return fmt.Errorf("invalid encryption secret size - must be %d bytes", encryption.KeySize)
}
} else {
logger.Warnf("missing --encryption-secret parameter, using random encyption secret with %d bytes", encryption.KeySize)
bs.config.EncryptionSecret = rndm.GenerateRandomBytes(encryption.KeySize)
}
bs.config.Config.ListenAddr = settings.Listen
bs.config.IdentifierClientDisabled = settings.IdentifierClientDisabled
bs.config.IdentifierClientPath = settings.IdentifierClientPath
bs.config.IdentifierRegistrationConf = settings.IdentifierRegistrationConf
if bs.config.IdentifierRegistrationConf != "" {
bs.config.IdentifierRegistrationConf, _ = filepath.Abs(bs.config.IdentifierRegistrationConf)
if _, errStat := os.Stat(bs.config.IdentifierRegistrationConf); errStat != nil {
return fmt.Errorf("identifier-registration-conf file not found or unable to access: %v", errStat)
}
bs.config.IdentifierAuthoritiesConf = bs.config.IdentifierRegistrationConf
}
bs.config.IdentifierScopesConf = settings.IdentifierScopesConf
if bs.config.IdentifierScopesConf != "" {
bs.config.IdentifierScopesConf, _ = filepath.Abs(bs.config.IdentifierScopesConf)
if _, errStat := os.Stat(bs.config.IdentifierScopesConf); errStat != nil {
return fmt.Errorf("identifier-scopes-conf file not found or unable to access: %v", errStat)
}
}
if settings.IdentifierDefaultBannerLogo != "" {
// Load from file.
b, errRead := ioutil.ReadFile(settings.IdentifierDefaultBannerLogo)
if errRead != nil {
return fmt.Errorf("identifier-default-banner-logo failed to open: %w", errRead)
}
bs.config.IdentifierDefaultBannerLogo = b
}
if settings.IdentifierDefaultSignInPageText != "" {
bs.config.IdentifierDefaultSignInPageText = &settings.IdentifierDefaultSignInPageText
}
if settings.IdentifierDefaultUsernameHintText != "" {
bs.config.IdentifierDefaultUsernameHintText = &settings.IdentifierDefaultUsernameHintText
}
bs.config.IdentifierUILocales = settings.IdentifierUILocales
bs.config.SigningKeyID = settings.SigningKid
bs.config.Signers = make(map[string]crypto.Signer)
bs.config.Validators = make(map[string]crypto.PublicKey)
bs.config.Certificates = make(map[string][]*x509.Certificate)
signingMethodString := settings.SigningMethod
bs.config.SigningMethod = jwt.GetSigningMethod(signingMethodString)
if bs.config.SigningMethod == nil {
return fmt.Errorf("unknown signing method: %s", signingMethodString)
}
signingKeyFns := settings.SigningPrivateKeyFiles
if len(signingKeyFns) > 0 {
first := true
for _, signingKeyFn := range signingKeyFns {
logger.WithField("path", signingKeyFn).Infoln("loading signing key")
err = addSignerWithIDFromFile(signingKeyFn, "", bs)
if err != nil {
return err
}
if first {
// Also add key under the provided id.
first = false
err = addSignerWithIDFromFile(signingKeyFn, bs.config.SigningKeyID, bs)
if err != nil {
return err
}
}
}
} else {
//NOTE(longsleep): remove me - create keypair a random key pair.
sm := jwt.SigningMethodPS256
bs.config.SigningMethod = sm
logger.WithField("alg", sm.Name).Warnf("missing --signing-private-key parameter, using random %d bit signing key", DefaultSigningKeyBits)
signer, _ := rsa.GenerateKey(rand.Reader, DefaultSigningKeyBits)
bs.config.Signers[bs.config.SigningKeyID] = signer
}
// Ensure we have a signer for the things we need.
err = validateSigners(bs)
if err != nil {
return err
}
validationKeysPath := settings.ValidationKeysPath
if validationKeysPath != "" {
logger.WithField("path", validationKeysPath).Infoln("loading validation keys")
err = addValidatorsFromPath(validationKeysPath, bs)
if err != nil {
return err
}
}
bs.config.Config.HTTPTransport = utils.HTTPTransportWithTLSClientConfig(bs.config.TLSClientConfig)
bs.config.AccessTokenDurationSeconds = settings.AccessTokenDurationSeconds
if bs.config.AccessTokenDurationSeconds == 0 {
bs.config.AccessTokenDurationSeconds = 60 * 10 // 10 Minutes
}
bs.config.IDTokenDurationSeconds = settings.IDTokenDurationSeconds
if bs.config.IDTokenDurationSeconds == 0 {
bs.config.IDTokenDurationSeconds = 60 * 60 // 1 Hour
}
bs.config.RefreshTokenDurationSeconds = settings.RefreshTokenDurationSeconds
if bs.config.RefreshTokenDurationSeconds == 0 {
bs.config.RefreshTokenDurationSeconds = 60 * 60 * 24 * 365 * 3 // 3 Years
}
bs.config.DyamicClientSecretDurationSeconds = settings.DyamicClientSecretDurationSeconds
return nil
}
// setup takes care of setting up the managers based on the associated
// Bootstrap's data.
func (bs *bootstrap) setup(ctx context.Context, settings *Settings) error {
managers, err := newManagers(ctx, bs)
if err != nil {
return err
}
bs.managers = managers
identityManager, err := bs.setupIdentity(ctx, settings)
if err != nil {
return err
}
managers.Set("identity", identityManager)
guestManager, err := bs.setupGuest(ctx, identityManager)
if err != nil {
return err
}
managers.Set("guest", guestManager)
oidcProvider, err := bs.setupOIDCProvider(ctx)
if err != nil {
return err
}
managers.Set("oidc", oidcProvider)
managers.Set("handler", oidcProvider) // Use OIDC provider as default HTTP handler.
err = managers.Apply()
if err != nil {
return fmt.Errorf("failed to apply managers: %v", err)
}
// Final steps
err = oidcProvider.InitializeMetadata()
if err != nil {
return fmt.Errorf("failed to initialize provider metadata: %v", err)
}
return nil
}
func (bs *bootstrap) MakeURIPath(api APIType, subpath string) string {
subpath = strings.TrimPrefix(subpath, "/")
uriPath := ""
switch api {
case APITypeKonnect:
uriPath = fmt.Sprintf("%s/konnect/v1/%s", strings.TrimSuffix(bs.uriBasePath, "/"), subpath)
case APITypeSignin:
uriPath = fmt.Sprintf("%s/signin/v1/%s", strings.TrimSuffix(bs.uriBasePath, "/"), subpath)
default:
panic("unknown api type")
}
if subpath == "" {
uriPath = strings.TrimSuffix(uriPath, "/")
}
return uriPath
}
func (bs *bootstrap) MakeURI(api APIType, subpath string) *url.URL {
uriPath := bs.MakeURIPath(api, subpath)
uri, _ := url.Parse(bs.config.IssuerIdentifierURI.String())
uri.Path = uriPath
return uri
}
func (bs *bootstrap) setupIdentity(ctx context.Context, settings *Settings) (identity.Manager, error) {
logger := bs.config.Config.Logger
if settings.IdentityManager == "" {
return nil, fmt.Errorf("identity-manager argument missing")
}
// Identity manager.
identityManagerName := settings.IdentityManager
identityManager, err := getIdentityManagerByName(identityManagerName, bs)
if err != nil {
return nil, err
}
logger.WithFields(logrus.Fields{
"name": identityManagerName,
"scopes": identityManager.ScopesSupported(nil),
"claims": identityManager.ClaimsSupported(nil),
}).Infoln("identity manager set up")
return identityManager, nil
}
func (bs *bootstrap) | setupGuest | identifier_name | |
source.rs |
/// ```
pub fn indent_of<T: LintContext>(cx: &T, span: Span) -> Option<usize> {
snippet_opt(cx, line_span(cx, span)).and_then(|snip| snip.find(|c: char| !c.is_whitespace()))
}
/// Gets a snippet of the indentation of the line of a span
pub fn snippet_indent<T: LintContext>(cx: &T, span: Span) -> Option<String> {
snippet_opt(cx, line_span(cx, span)).map(|mut s| {
let len = s.len() - s.trim_start().len();
s.truncate(len);
s
})
}
// If the snippet is empty, it's an attribute that was inserted during macro
// expansion and we want to ignore those, because they could come from external
// sources that the user has no control over.
// For some reason these attributes don't have any expansion info on them, so
// we have to check it this way until there is a better way.
pub fn is_present_in_source<T: LintContext>(cx: &T, span: Span) -> bool {
if let Some(snippet) = snippet_opt(cx, span) {
if snippet.is_empty() {
return false;
}
}
true
}
/// Returns the positon just before rarrow
///
/// ```rust,ignore
/// fn into(self) -> () {}
/// ^
/// // in case of unformatted code
/// fn into2(self)-> () {}
/// ^
/// fn into3(self) -> () {}
/// ^
/// ```
pub fn position_before_rarrow(s: &str) -> Option<usize> {
s.rfind("->").map(|rpos| {
let mut rpos = rpos;
let chars: Vec<char> = s.chars().collect();
while rpos > 1 {
if let Some(c) = chars.get(rpos - 1) {
if c.is_whitespace() {
rpos -= 1;
continue;
}
}
break;
}
rpos
})
}
/// Reindent a multiline string with possibility of ignoring the first line.
#[allow(clippy::needless_pass_by_value)]
pub fn reindent_multiline(s: Cow<'_, str>, ignore_first: bool, indent: Option<usize>) -> Cow<'_, str> {
let s_space = reindent_multiline_inner(&s, ignore_first, indent, ' ');
let s_tab = reindent_multiline_inner(&s_space, ignore_first, indent, '\t');
reindent_multiline_inner(&s_tab, ignore_first, indent, ' ').into()
}
fn reindent_multiline_inner(s: &str, ignore_first: bool, indent: Option<usize>, ch: char) -> String {
let x = s
.lines()
.skip(usize::from(ignore_first))
.filter_map(|l| {
if l.is_empty() {
None
} else {
// ignore empty lines
Some(l.char_indices().find(|&(_, x)| x != ch).unwrap_or((l.len(), ch)).0)
}
})
.min()
.unwrap_or(0);
let indent = indent.unwrap_or(0);
s.lines()
.enumerate()
.map(|(i, l)| {
if (ignore_first && i == 0) || l.is_empty() {
l.to_owned()
} else if x > indent | else {
" ".repeat(indent - x) + l
}
})
.collect::<Vec<String>>()
.join("\n")
}
/// Converts a span to a code snippet if available, otherwise returns the default.
///
/// This is useful if you want to provide suggestions for your lint or more generally, if you want
/// to convert a given `Span` to a `str`. To create suggestions consider using
/// [`snippet_with_applicability`] to ensure that the applicability stays correct.
///
/// # Example
/// ```rust,ignore
/// // Given two spans one for `value` and one for the `init` expression.
/// let value = Vec::new();
/// // ^^^^^ ^^^^^^^^^^
/// // span1 span2
///
/// // The snipped call would return the corresponding code snippet
/// snippet(cx, span1, "..") // -> "value"
/// snippet(cx, span2, "..") // -> "Vec::new()"
/// ```
pub fn snippet<'a, T: LintContext>(cx: &T, span: Span, default: &'a str) -> Cow<'a, str> {
snippet_opt(cx, span).map_or_else(|| Cow::Borrowed(default), From::from)
}
/// Same as [`snippet`], but it adapts the applicability level by following rules:
///
/// - Applicability level `Unspecified` will never be changed.
/// - If the span is inside a macro, change the applicability level to `MaybeIncorrect`.
/// - If the default value is used and the applicability level is `MachineApplicable`, change it to
/// `HasPlaceholders`
pub fn snippet_with_applicability<'a, T: LintContext>(
cx: &T,
span: Span,
default: &'a str,
applicability: &mut Applicability,
) -> Cow<'a, str> {
if *applicability != Applicability::Unspecified && span.from_expansion() {
*applicability = Applicability::MaybeIncorrect;
}
snippet_opt(cx, span).map_or_else(
|| {
if *applicability == Applicability::MachineApplicable {
*applicability = Applicability::HasPlaceholders;
}
Cow::Borrowed(default)
},
From::from,
)
}
/// Same as `snippet`, but should only be used when it's clear that the input span is
/// not a macro argument.
pub fn snippet_with_macro_callsite<'a, T: LintContext>(cx: &T, span: Span, default: &'a str) -> Cow<'a, str> {
snippet(cx, span.source_callsite(), default)
}
/// Converts a span to a code snippet. Returns `None` if not available.
pub fn snippet_opt<T: LintContext>(cx: &T, span: Span) -> Option<String> {
cx.sess().source_map().span_to_snippet(span).ok()
}
/// Converts a span (from a block) to a code snippet if available, otherwise use default.
///
/// This trims the code of indentation, except for the first line. Use it for blocks or block-like
/// things which need to be printed as such.
///
/// The `indent_relative_to` arg can be used, to provide a span, where the indentation of the
/// resulting snippet of the given span.
///
/// # Example
///
/// ```rust,ignore
/// snippet_block(cx, block.span, "..", None)
/// // where, `block` is the block of the if expr
/// if x {
/// y;
/// }
/// // will return the snippet
/// {
/// y;
/// }
/// ```
///
/// ```rust,ignore
/// snippet_block(cx, block.span, "..", Some(if_expr.span))
/// // where, `block` is the block of the if expr
/// if x {
/// y;
/// }
/// // will return the snippet
/// {
/// y;
/// } // aligned with `if`
/// ```
/// Note that the first line of the snippet always has 0 indentation.
pub fn snippet_block<'a, T: LintContext>(
cx: &T,
span: Span,
default: &'a str,
indent_relative_to: Option<Span>,
) -> Cow<'a, str> {
let snip = snippet(cx, span, default);
let indent = indent_relative_to.and_then(|s| indent_of(cx, s));
reindent_multiline(snip, true, indent)
}
/// Same as `snippet_block`, but adapts the applicability level by the rules of
/// `snippet_with_applicability`.
pub fn snippet_block_with_applicability<'a, T: LintContext>(
cx: &T,
span: Span,
default: &'a str,
indent_relative_to: Option<Span>,
applicability: &mut Applicability,
) -> Cow<'a, str> {
let snip = snippet_with_applicability(cx, span, default, applicability);
let indent = indent_relative_to.and_then(|s| indent_of(cx, s));
reindent_multiline(snip, true, indent)
}
/// Same as `snippet_with_applicability`, but first walks the span up to the given context. This
/// will result in the macro call, rather then the expansion, if the span is from a child context.
/// If the span is not from a child context, it will be used directly instead.
///
/// e.g. Given the expression `&vec![]`, getting a snippet from the span for `vec![]` as a HIR node
/// would result in `box []`. If given the context of the address of expression, this function will
/// correctly get a snippet of `vec![]`.
///
/// This will also return whether or not the snippet is a macro call.
pub fn snippet_with_context(
cx: &LateContext<'_>,
span: Span,
outer: SyntaxContext,
default: &'a str,
applicability: &mut Applicability | {
l.split_at(x - indent).1.to_owned()
} | conditional_block |
source.rs |
/// ```
pub fn indent_of<T: LintContext>(cx: &T, span: Span) -> Option<usize> {
snippet_opt(cx, line_span(cx, span)).and_then(|snip| snip.find(|c: char| !c.is_whitespace()))
}
/// Gets a snippet of the indentation of the line of a span
pub fn snippet_indent<T: LintContext>(cx: &T, span: Span) -> Option<String> {
snippet_opt(cx, line_span(cx, span)).map(|mut s| {
let len = s.len() - s.trim_start().len();
s.truncate(len);
s
})
}
// If the snippet is empty, it's an attribute that was inserted during macro
// expansion and we want to ignore those, because they could come from external
// sources that the user has no control over.
// For some reason these attributes don't have any expansion info on them, so
// we have to check it this way until there is a better way.
pub fn is_present_in_source<T: LintContext>(cx: &T, span: Span) -> bool {
if let Some(snippet) = snippet_opt(cx, span) {
if snippet.is_empty() {
return false;
}
}
true
}
/// Returns the positon just before rarrow
///
/// ```rust,ignore
/// fn into(self) -> () {}
/// ^
/// // in case of unformatted code
/// fn into2(self)-> () {}
/// ^
/// fn into3(self) -> () {}
/// ^
/// ```
pub fn position_before_rarrow(s: &str) -> Option<usize> {
s.rfind("->").map(|rpos| {
let mut rpos = rpos;
let chars: Vec<char> = s.chars().collect();
while rpos > 1 {
if let Some(c) = chars.get(rpos - 1) {
if c.is_whitespace() {
rpos -= 1;
continue;
}
}
break;
}
rpos
})
}
/// Reindent a multiline string with possibility of ignoring the first line.
#[allow(clippy::needless_pass_by_value)]
pub fn reindent_multiline(s: Cow<'_, str>, ignore_first: bool, indent: Option<usize>) -> Cow<'_, str> {
let s_space = reindent_multiline_inner(&s, ignore_first, indent, ' ');
let s_tab = reindent_multiline_inner(&s_space, ignore_first, indent, '\t');
reindent_multiline_inner(&s_tab, ignore_first, indent, ' ').into()
}
fn reindent_multiline_inner(s: &str, ignore_first: bool, indent: Option<usize>, ch: char) -> String {
let x = s
.lines()
.skip(usize::from(ignore_first))
.filter_map(|l| {
if l.is_empty() {
None
} else {
// ignore empty lines
Some(l.char_indices().find(|&(_, x)| x != ch).unwrap_or((l.len(), ch)).0)
}
})
.min()
.unwrap_or(0);
let indent = indent.unwrap_or(0);
s.lines()
.enumerate()
.map(|(i, l)| {
if (ignore_first && i == 0) || l.is_empty() {
l.to_owned()
} else if x > indent {
l.split_at(x - indent).1.to_owned()
} else {
" ".repeat(indent - x) + l
}
})
.collect::<Vec<String>>()
.join("\n") | /// Converts a span to a code snippet if available, otherwise returns the default.
///
/// This is useful if you want to provide suggestions for your lint or more generally, if you want
/// to convert a given `Span` to a `str`. To create suggestions consider using
/// [`snippet_with_applicability`] to ensure that the applicability stays correct.
///
/// # Example
/// ```rust,ignore
/// // Given two spans one for `value` and one for the `init` expression.
/// let value = Vec::new();
/// // ^^^^^ ^^^^^^^^^^
/// // span1 span2
///
/// // The snipped call would return the corresponding code snippet
/// snippet(cx, span1, "..") // -> "value"
/// snippet(cx, span2, "..") // -> "Vec::new()"
/// ```
pub fn snippet<'a, T: LintContext>(cx: &T, span: Span, default: &'a str) -> Cow<'a, str> {
snippet_opt(cx, span).map_or_else(|| Cow::Borrowed(default), From::from)
}
/// Same as [`snippet`], but it adapts the applicability level by following rules:
///
/// - Applicability level `Unspecified` will never be changed.
/// - If the span is inside a macro, change the applicability level to `MaybeIncorrect`.
/// - If the default value is used and the applicability level is `MachineApplicable`, change it to
/// `HasPlaceholders`
pub fn snippet_with_applicability<'a, T: LintContext>(
cx: &T,
span: Span,
default: &'a str,
applicability: &mut Applicability,
) -> Cow<'a, str> {
if *applicability != Applicability::Unspecified && span.from_expansion() {
*applicability = Applicability::MaybeIncorrect;
}
snippet_opt(cx, span).map_or_else(
|| {
if *applicability == Applicability::MachineApplicable {
*applicability = Applicability::HasPlaceholders;
}
Cow::Borrowed(default)
},
From::from,
)
}
/// Same as `snippet`, but should only be used when it's clear that the input span is
/// not a macro argument.
pub fn snippet_with_macro_callsite<'a, T: LintContext>(cx: &T, span: Span, default: &'a str) -> Cow<'a, str> {
snippet(cx, span.source_callsite(), default)
}
/// Converts a span to a code snippet. Returns `None` if not available.
pub fn snippet_opt<T: LintContext>(cx: &T, span: Span) -> Option<String> {
cx.sess().source_map().span_to_snippet(span).ok()
}
/// Converts a span (from a block) to a code snippet if available, otherwise use default.
///
/// This trims the code of indentation, except for the first line. Use it for blocks or block-like
/// things which need to be printed as such.
///
/// The `indent_relative_to` arg can be used, to provide a span, where the indentation of the
/// resulting snippet of the given span.
///
/// # Example
///
/// ```rust,ignore
/// snippet_block(cx, block.span, "..", None)
/// // where, `block` is the block of the if expr
/// if x {
/// y;
/// }
/// // will return the snippet
/// {
/// y;
/// }
/// ```
///
/// ```rust,ignore
/// snippet_block(cx, block.span, "..", Some(if_expr.span))
/// // where, `block` is the block of the if expr
/// if x {
/// y;
/// }
/// // will return the snippet
/// {
/// y;
/// } // aligned with `if`
/// ```
/// Note that the first line of the snippet always has 0 indentation.
pub fn snippet_block<'a, T: LintContext>(
cx: &T,
span: Span,
default: &'a str,
indent_relative_to: Option<Span>,
) -> Cow<'a, str> {
let snip = snippet(cx, span, default);
let indent = indent_relative_to.and_then(|s| indent_of(cx, s));
reindent_multiline(snip, true, indent)
}
/// Same as `snippet_block`, but adapts the applicability level by the rules of
/// `snippet_with_applicability`.
pub fn snippet_block_with_applicability<'a, T: LintContext>(
cx: &T,
span: Span,
default: &'a str,
indent_relative_to: Option<Span>,
applicability: &mut Applicability,
) -> Cow<'a, str> {
let snip = snippet_with_applicability(cx, span, default, applicability);
let indent = indent_relative_to.and_then(|s| indent_of(cx, s));
reindent_multiline(snip, true, indent)
}
/// Same as `snippet_with_applicability`, but first walks the span up to the given context. This
/// will result in the macro call, rather then the expansion, if the span is from a child context.
/// If the span is not from a child context, it will be used directly instead.
///
/// e.g. Given the expression `&vec![]`, getting a snippet from the span for `vec![]` as a HIR node
/// would result in `box []`. If given the context of the address of expression, this function will
/// correctly get a snippet of `vec![]`.
///
/// This will also return whether or not the snippet is a macro call.
pub fn snippet_with_context(
cx: &LateContext<'_>,
span: Span,
outer: SyntaxContext,
default: &'a str,
applicability: &mut Applicability,
| }
| random_line_split |
source.rs | <'a, T: LintContext>(
cx: &T,
expr: &Expr<'_>,
option: Option<String>,
default: &'a str,
indent_relative_to: Option<Span>,
) -> Cow<'a, str> {
let code = snippet_block(cx, expr.span, default, indent_relative_to);
let string = option.unwrap_or_default();
if expr.span.from_expansion() {
Cow::Owned(format!("{{ {} }}", snippet_with_macro_callsite(cx, expr.span, default)))
} else if let ExprKind::Block(_, _) = expr.kind {
Cow::Owned(format!("{}{}", code, string))
} else if string.is_empty() {
Cow::Owned(format!("{{ {} }}", code))
} else {
Cow::Owned(format!("{{\n{};\n{}\n}}", code, string))
}
}
/// Returns a new Span that extends the original Span to the first non-whitespace char of the first
/// line.
///
/// ```rust,ignore
/// let x = ();
/// // ^^
/// // will be converted to
/// let x = ();
/// // ^^^^^^^^^^
/// ```
pub fn first_line_of_span<T: LintContext>(cx: &T, span: Span) -> Span {
first_char_in_first_line(cx, span).map_or(span, |first_char_pos| span.with_lo(first_char_pos))
}
fn first_char_in_first_line<T: LintContext>(cx: &T, span: Span) -> Option<BytePos> {
let line_span = line_span(cx, span);
snippet_opt(cx, line_span).and_then(|snip| {
snip.find(|c: char| !c.is_whitespace())
.map(|pos| line_span.lo() + BytePos::from_usize(pos))
})
}
/// Returns the indentation of the line of a span
///
/// ```rust,ignore
/// let x = ();
/// // ^^ -- will return 0
/// let x = ();
/// // ^^ -- will return 4
/// ```
pub fn indent_of<T: LintContext>(cx: &T, span: Span) -> Option<usize> {
snippet_opt(cx, line_span(cx, span)).and_then(|snip| snip.find(|c: char| !c.is_whitespace()))
}
/// Gets a snippet of the indentation of the line of a span
pub fn snippet_indent<T: LintContext>(cx: &T, span: Span) -> Option<String> {
snippet_opt(cx, line_span(cx, span)).map(|mut s| {
let len = s.len() - s.trim_start().len();
s.truncate(len);
s
})
}
// If the snippet is empty, it's an attribute that was inserted during macro
// expansion and we want to ignore those, because they could come from external
// sources that the user has no control over.
// For some reason these attributes don't have any expansion info on them, so
// we have to check it this way until there is a better way.
pub fn is_present_in_source<T: LintContext>(cx: &T, span: Span) -> bool {
if let Some(snippet) = snippet_opt(cx, span) {
if snippet.is_empty() {
return false;
}
}
true
}
/// Returns the positon just before rarrow
///
/// ```rust,ignore
/// fn into(self) -> () {}
/// ^
/// // in case of unformatted code
/// fn into2(self)-> () {}
/// ^
/// fn into3(self) -> () {}
/// ^
/// ```
pub fn position_before_rarrow(s: &str) -> Option<usize> {
s.rfind("->").map(|rpos| {
let mut rpos = rpos;
let chars: Vec<char> = s.chars().collect();
while rpos > 1 {
if let Some(c) = chars.get(rpos - 1) {
if c.is_whitespace() {
rpos -= 1;
continue;
}
}
break;
}
rpos
})
}
/// Reindent a multiline string with possibility of ignoring the first line.
#[allow(clippy::needless_pass_by_value)]
pub fn reindent_multiline(s: Cow<'_, str>, ignore_first: bool, indent: Option<usize>) -> Cow<'_, str> {
let s_space = reindent_multiline_inner(&s, ignore_first, indent, ' ');
let s_tab = reindent_multiline_inner(&s_space, ignore_first, indent, '\t');
reindent_multiline_inner(&s_tab, ignore_first, indent, ' ').into()
}
fn reindent_multiline_inner(s: &str, ignore_first: bool, indent: Option<usize>, ch: char) -> String {
let x = s
.lines()
.skip(usize::from(ignore_first))
.filter_map(|l| {
if l.is_empty() {
None
} else {
// ignore empty lines
Some(l.char_indices().find(|&(_, x)| x != ch).unwrap_or((l.len(), ch)).0)
}
})
.min()
.unwrap_or(0);
let indent = indent.unwrap_or(0);
s.lines()
.enumerate()
.map(|(i, l)| {
if (ignore_first && i == 0) || l.is_empty() {
l.to_owned()
} else if x > indent {
l.split_at(x - indent).1.to_owned()
} else {
" ".repeat(indent - x) + l
}
})
.collect::<Vec<String>>()
.join("\n")
}
/// Converts a span to a code snippet if available, otherwise returns the default.
///
/// This is useful if you want to provide suggestions for your lint or more generally, if you want
/// to convert a given `Span` to a `str`. To create suggestions consider using
/// [`snippet_with_applicability`] to ensure that the applicability stays correct.
///
/// # Example
/// ```rust,ignore
/// // Given two spans one for `value` and one for the `init` expression.
/// let value = Vec::new();
/// // ^^^^^ ^^^^^^^^^^
/// // span1 span2
///
/// // The snipped call would return the corresponding code snippet
/// snippet(cx, span1, "..") // -> "value"
/// snippet(cx, span2, "..") // -> "Vec::new()"
/// ```
pub fn snippet<'a, T: LintContext>(cx: &T, span: Span, default: &'a str) -> Cow<'a, str> {
snippet_opt(cx, span).map_or_else(|| Cow::Borrowed(default), From::from)
}
/// Same as [`snippet`], but it adapts the applicability level by following rules:
///
/// - Applicability level `Unspecified` will never be changed.
/// - If the span is inside a macro, change the applicability level to `MaybeIncorrect`.
/// - If the default value is used and the applicability level is `MachineApplicable`, change it to
/// `HasPlaceholders`
pub fn snippet_with_applicability<'a, T: LintContext>(
cx: &T,
span: Span,
default: &'a str,
applicability: &mut Applicability,
) -> Cow<'a, str> {
if *applicability != Applicability::Unspecified && span.from_expansion() {
*applicability = Applicability::MaybeIncorrect;
}
snippet_opt(cx, span).map_or_else(
|| {
if *applicability == Applicability::MachineApplicable {
*applicability = Applicability::HasPlaceholders;
}
Cow::Borrowed(default)
},
From::from,
)
}
/// Same as `snippet`, but should only be used when it's clear that the input span is
/// not a macro argument.
pub fn snippet_with_macro_callsite<'a, T: LintContext>(cx: &T, span: Span, default: &'a str) -> Cow<'a, str> {
snippet(cx, span.source_callsite(), default)
}
/// Converts a span to a code snippet. Returns `None` if not available.
pub fn snippet_opt<T: LintContext>(cx: &T, span: Span) -> Option<String> {
cx.sess().source_map().span_to_snippet(span).ok()
}
/// Converts a span (from a block) to a code snippet if available, otherwise use default.
///
/// This trims the code of indentation, except for the first line. Use it for blocks or block-like
/// things which need to be printed as such.
///
/// The `indent_relative_to` arg can be used, to provide a span, where the indentation of the
/// resulting snippet of the given span.
///
/// # Example
///
/// ```rust,ignore
/// snippet_block(cx, block.span, "..", None)
/// // where, `block` is the block of the if expr
/// if x {
/// y;
/// }
/// // will return the snippet
/// {
/// y;
/// }
/// ```
///
/// ```rust,ignore
/// snippet_block(cx, block.span, "..", Some(if_expr.span))
/// // where, `block` is the block of the if expr
/// if x {
/// y;
/// }
/// // will return the snippet
/// {
/// y;
/// } | expr_block | identifier_name | |
source.rs |
/// ```
pub fn indent_of<T: LintContext>(cx: &T, span: Span) -> Option<usize> {
snippet_opt(cx, line_span(cx, span)).and_then(|snip| snip.find(|c: char| !c.is_whitespace()))
}
/// Gets a snippet of the indentation of the line of a span
pub fn snippet_indent<T: LintContext>(cx: &T, span: Span) -> Option<String> {
snippet_opt(cx, line_span(cx, span)).map(|mut s| {
let len = s.len() - s.trim_start().len();
s.truncate(len);
s
})
}
// If the snippet is empty, it's an attribute that was inserted during macro
// expansion and we want to ignore those, because they could come from external
// sources that the user has no control over.
// For some reason these attributes don't have any expansion info on them, so
// we have to check it this way until there is a better way.
pub fn is_present_in_source<T: LintContext>(cx: &T, span: Span) -> bool {
if let Some(snippet) = snippet_opt(cx, span) {
if snippet.is_empty() {
return false;
}
}
true
}
/// Returns the positon just before rarrow
///
/// ```rust,ignore
/// fn into(self) -> () {}
/// ^
/// // in case of unformatted code
/// fn into2(self)-> () {}
/// ^
/// fn into3(self) -> () {}
/// ^
/// ```
pub fn position_before_rarrow(s: &str) -> Option<usize> {
s.rfind("->").map(|rpos| {
let mut rpos = rpos;
let chars: Vec<char> = s.chars().collect();
while rpos > 1 {
if let Some(c) = chars.get(rpos - 1) {
if c.is_whitespace() {
rpos -= 1;
continue;
}
}
break;
}
rpos
})
}
/// Reindent a multiline string with possibility of ignoring the first line.
#[allow(clippy::needless_pass_by_value)]
pub fn reindent_multiline(s: Cow<'_, str>, ignore_first: bool, indent: Option<usize>) -> Cow<'_, str> {
let s_space = reindent_multiline_inner(&s, ignore_first, indent, ' ');
let s_tab = reindent_multiline_inner(&s_space, ignore_first, indent, '\t');
reindent_multiline_inner(&s_tab, ignore_first, indent, ' ').into()
}
fn reindent_multiline_inner(s: &str, ignore_first: bool, indent: Option<usize>, ch: char) -> String {
let x = s
.lines()
.skip(usize::from(ignore_first))
.filter_map(|l| {
if l.is_empty() {
None
} else {
// ignore empty lines
Some(l.char_indices().find(|&(_, x)| x != ch).unwrap_or((l.len(), ch)).0)
}
})
.min()
.unwrap_or(0);
let indent = indent.unwrap_or(0);
s.lines()
.enumerate()
.map(|(i, l)| {
if (ignore_first && i == 0) || l.is_empty() {
l.to_owned()
} else if x > indent {
l.split_at(x - indent).1.to_owned()
} else {
" ".repeat(indent - x) + l
}
})
.collect::<Vec<String>>()
.join("\n")
}
/// Converts a span to a code snippet if available, otherwise returns the default.
///
/// This is useful if you want to provide suggestions for your lint or more generally, if you want
/// to convert a given `Span` to a `str`. To create suggestions consider using
/// [`snippet_with_applicability`] to ensure that the applicability stays correct.
///
/// # Example
/// ```rust,ignore
/// // Given two spans one for `value` and one for the `init` expression.
/// let value = Vec::new();
/// // ^^^^^ ^^^^^^^^^^
/// // span1 span2
///
/// // The snipped call would return the corresponding code snippet
/// snippet(cx, span1, "..") // -> "value"
/// snippet(cx, span2, "..") // -> "Vec::new()"
/// ```
pub fn snippet<'a, T: LintContext>(cx: &T, span: Span, default: &'a str) -> Cow<'a, str> {
snippet_opt(cx, span).map_or_else(|| Cow::Borrowed(default), From::from)
}
/// Same as [`snippet`], but it adapts the applicability level by following rules:
///
/// - Applicability level `Unspecified` will never be changed.
/// - If the span is inside a macro, change the applicability level to `MaybeIncorrect`.
/// - If the default value is used and the applicability level is `MachineApplicable`, change it to
/// `HasPlaceholders`
pub fn snippet_with_applicability<'a, T: LintContext>(
cx: &T,
span: Span,
default: &'a str,
applicability: &mut Applicability,
) -> Cow<'a, str> |
/// Same as `snippet`, but should only be used when it's clear that the input span is
/// not a macro argument.
pub fn snippet_with_macro_callsite<'a, T: LintContext>(cx: &T, span: Span, default: &'a str) -> Cow<'a, str> {
snippet(cx, span.source_callsite(), default)
}
/// Converts a span to a code snippet. Returns `None` if not available.
pub fn snippet_opt<T: LintContext>(cx: &T, span: Span) -> Option<String> {
cx.sess().source_map().span_to_snippet(span).ok()
}
/// Converts a span (from a block) to a code snippet if available, otherwise use default.
///
/// This trims the code of indentation, except for the first line. Use it for blocks or block-like
/// things which need to be printed as such.
///
/// The `indent_relative_to` arg can be used, to provide a span, where the indentation of the
/// resulting snippet of the given span.
///
/// # Example
///
/// ```rust,ignore
/// snippet_block(cx, block.span, "..", None)
/// // where, `block` is the block of the if expr
/// if x {
/// y;
/// }
/// // will return the snippet
/// {
/// y;
/// }
/// ```
///
/// ```rust,ignore
/// snippet_block(cx, block.span, "..", Some(if_expr.span))
/// // where, `block` is the block of the if expr
/// if x {
/// y;
/// }
/// // will return the snippet
/// {
/// y;
/// } // aligned with `if`
/// ```
/// Note that the first line of the snippet always has 0 indentation.
pub fn snippet_block<'a, T: LintContext>(
cx: &T,
span: Span,
default: &'a str,
indent_relative_to: Option<Span>,
) -> Cow<'a, str> {
let snip = snippet(cx, span, default);
let indent = indent_relative_to.and_then(|s| indent_of(cx, s));
reindent_multiline(snip, true, indent)
}
/// Same as `snippet_block`, but adapts the applicability level by the rules of
/// `snippet_with_applicability`.
pub fn snippet_block_with_applicability<'a, T: LintContext>(
cx: &T,
span: Span,
default: &'a str,
indent_relative_to: Option<Span>,
applicability: &mut Applicability,
) -> Cow<'a, str> {
let snip = snippet_with_applicability(cx, span, default, applicability);
let indent = indent_relative_to.and_then(|s| indent_of(cx, s));
reindent_multiline(snip, true, indent)
}
/// Same as `snippet_with_applicability`, but first walks the span up to the given context. This
/// will result in the macro call, rather then the expansion, if the span is from a child context.
/// If the span is not from a child context, it will be used directly instead.
///
/// e.g. Given the expression `&vec![]`, getting a snippet from the span for `vec![]` as a HIR node
/// would result in `box []`. If given the context of the address of expression, this function will
/// correctly get a snippet of `vec![]`.
///
/// This will also return whether or not the snippet is a macro call.
pub fn snippet_with_context(
cx: &LateContext<'_>,
span: Span,
outer: SyntaxContext,
default: &'a str,
applicability: &mut Applic | {
if *applicability != Applicability::Unspecified && span.from_expansion() {
*applicability = Applicability::MaybeIncorrect;
}
snippet_opt(cx, span).map_or_else(
|| {
if *applicability == Applicability::MachineApplicable {
*applicability = Applicability::HasPlaceholders;
}
Cow::Borrowed(default)
},
From::from,
)
} | identifier_body |
ogre_unit.py | IS.py"""
import sys
import os
import os.path
paths = [os.path.join(os.getcwd(), 'plugins.cfg'),
'/etc/OGRE/plugins.cfg',
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'plugins.cfg')]
for path in paths:
if os.path.exists(path):
return path
sys.stderr.write("\n"
"** Warning: Unable to locate a suitable plugins.cfg file.\n"
"** Warning: Please check your ogre installation and copy a\n"
"** Warning: working plugins.cfg file to the current directory.\n\n")
raise ogre.Exception(0, "can't locate the 'plugins.cfg' file", "")
def setup_resources(resources_path = 'resources.cfg'):
'''Load resources, such as from 'resources.cfg'.'''
config = ogre.ConfigFile()
config.load(resources_path)
section_iter = config.getSectionIterator()
while section_iter.hasMoreElements():
section_name = section_iter.peekNextKey()
settings = section_iter.getNext()
for item in settings:
ogre.ResourceGroupManager.getSingleton().addResourceLocation(item.value, item.key, section_name)
def setup_root(plugins_path = getPluginPath(),
resources_path = 'resources.cfg'):
'''Return new root, sceneManager.'''
root = ogre.Root(plugins_path)
root.setFrameSmoothingPeriod(5.0)
setup_resources(resources_path)
sceneManager = root.createSceneManager(ogre.ST_GENERIC,"ExampleSMInstance")
return root, sceneManager
def initialise_null_render(plugins_path = getPluginPath()):
'Prepare to null renderer and return ogre root.'
ogre_root = ogre.Root(plugins_path)
rend_list = ogre_root.getAvailableRenderers()
ogre_root.setRenderSystem(rend_list[-1])
ogre_root.getRenderSystem()._initRenderTargets()
ogre_root.initialise(False)
return ogre_root
def setup_null_root(plugins_path = getPluginPath(),
resources_path = 'resources.cfg'):
'''Return root, sceneManager. Suitable for unit test without entity, camera, light.
>>> logManager, logListener = quiet_log()
>>> root, sceneManager = setup_null_root()
>>> assert root
>>> assert sceneManager
>>> for i in range(10):
... if not renderOneFrame(root): print False
>>> del sceneManager, root
>>> del logManager, logListener
'''
root = initialise_null_render(plugins_path)
setup_resources(resources_path)
sceneManager = root.createSceneManager(ogre.ST_GENERIC,"ExampleSMInstance")
return root, sceneManager
def setup_viewport(root, sceneManager):
'''Create render window and viewport from user selection, and return renderWindow and camera.'''
renderWindow = configure(root)
if not renderWindow:
return None
camera = sceneManager.createCamera('Camera')
viewport = renderWindow.addViewport(camera)
return renderWindow, camera
def configure(ogre_root):
"""This shows the config dialog and returns the renderWindow."""
user_confirmation = ogre_root.showConfigDialog()
if user_confirmation:
return ogre_root.initialise(True, "OGRE Render Window")
else:
return None
def setup_unittest():
'''With tiny render window and resources. Return root and sceneManager.
>>> logManager, logListener = quiet_log()
>>> root, sceneManager, renderWindow, camera = setup_unittest()
>>> assert root
>>> assert sceneManager
>>> assert renderWindow
>>> assert camera
>>> del sceneManager, root, renderWindow, camera
>>> del logManager, logListener
'''
root, sceneManager = setup_null_root()
renderWindow = root.createRenderWindow('test', 4, 3, False)
camera = sceneManager.createCamera('Camera')
ogre.ResourceGroupManager.getSingleton().initialiseAllResourceGroups()
return root, sceneManager, renderWindow, camera
def setup():
'''Set up minimal Ogre application and return root and sceneManager.
>>> logManager, logListener = quiet_log()
# TODO: Doctest crashes, although external call to setup works!
#>>> root, sceneManager, renderWindow, camera = setup()
#>>> assert root
#>>> assert sceneManager
#>>> assert renderWindow
#>>> assert camera
#>>> application = setup_quiet_application(setup_unittest)
#>>> for i in range(10):
#... print i,
#... if not renderOneFrame(root): print False
#0 1 2 3 4 5 6 7 8 9
#>>> sceneManager.clearScene()
#>>> del renderWindow
#>>> del camera
#>>> del sceneManager
#>>> del root
>>> del logManager, logListener
'''
root, sceneManager = setup_root()
renderWindow, camera = setup_viewport(root, sceneManager)
ogre.TextureManager.getSingleton().setDefaultNumMipmaps(5)
ogre.ResourceGroupManager.getSingleton().initialiseAllResourceGroups()
return root, sceneManager, renderWindow, camera
def run(root, sceneManager, renderWindow, camera):
'''Construct and render.'''
root, sceneManager, renderWindow, camera = setup()
if root and sceneManager:
root.startRendering()
def renderOneFrame(ogre_root):
'Render a frame. Return False if closed. Useful for unit test.'
ogre.WindowEventUtilities().messagePump()
return ogre_root.renderOneFrame()
# For applications that do not have logListener attribute.
logListener = None
logManager = None
| class quiet_logListener_class(ogre.LogListener):
def messageLogged(self, message, level, debug, logName):
'''Called by Ogre instead of logging.'''
pass
#print message
def quiet_log():
'''Replace log with quiet version. Useful for unit test.
Return logManager and logListener, which must destructed AFTER root.
>>> logManager, logListener = quiet_log()
>>> root, sceneManager, renderWindow, camera = setup_unittest()
Gotcha: If you encounter 'R6025 Pure virtual function call' error within a class, then write destructor to destroy root before logManager and logListener. http://www.indiegamer.com/archives/t-3533.html
>>> del root
>>> del sceneManager, renderWindow, camera
>>> del logManager, logListener
Derived from examples:
http://www.ogre3d.org/phpBB2addons/viewtopic.php?p=10887&sid=ce193664e1d3d7c4af509e6f4e2718c6
http://wiki.python-ogre.org/index.php/ChangeLog
'''
logManager = ogre.LogManager()
log = ogre.LogManager.getSingletonPtr().createLog(
'quiet.log', True, False, True)
logListener = quiet_logListener_class()
log.addListener(logListener)
return logManager, logListener
class application_class(object):
'''Minimal Ogre application, which needs reference to root.
>>> application = setup_quiet_application(setup_unittest)
>>> for i in range(10):
... print i,
... if not renderOneFrame(application.root): print False
0 1 2 3 4 5 6 7 8 9
>>> assert application
'''
def __init__(self):
self.root = None
self.sceneManager = None
self.renderWindow = None
self.camera = None
# For quiet_log
self.logManager = None
self.logListener = None
def __del__(self):
del self.sceneManager
del self.root
del self.renderWindow
del self.camera
# Must delete root before logManager and logListener
del self.logListener
del self.logManager
def setup_quiet_application(setup_function = setup, application = None):
'''Return a minimal, application with logging disabled.
>>> application = setup_quiet_application(setup_unittest)
Alternatively try to make an application be quiet.
>>> application = application_class()
>>> application = setup_quiet_application(setup_unittest, application)
'''
if not application:
application = application_class()
if hasattr(application, 'logManager') \
and hasattr(application, 'logListener'):
if quiet_logListener_class != type(application.logListener):
application.logManager, application.logListener = quiet_log()
else:
global logListener, logManager
logManager, logListener = quiet_log()
application.root, application.sceneManager, application.renderWindow, application.camera = setup_function()
return application
def setup_unittest_application(application = None):
'''Convenience function to avoid accessing namespace.
>>> application = setup_unittest_application()
>>> del application
Try to setup an existing application quietly.
>>> application = application_class()
>>> application = setup_unittest_application(application)
'''
return setup_quiet_application(setup_unittest, application)
def setup_unittest_sample_framework_application(application):
'''Setup a unit test for SampleFramework, by assigning camera and renderWindow.
>>> import ogre.renderer | random_line_split | |
ogre_unit.py | ():
"""Return the absolute path to a valid plugins.cfg file.
Copied from sf_OIS.py"""
import sys
import os
import os.path
paths = [os.path.join(os.getcwd(), 'plugins.cfg'),
'/etc/OGRE/plugins.cfg',
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'plugins.cfg')]
for path in paths:
if os.path.exists(path):
return path
sys.stderr.write("\n"
"** Warning: Unable to locate a suitable plugins.cfg file.\n"
"** Warning: Please check your ogre installation and copy a\n"
"** Warning: working plugins.cfg file to the current directory.\n\n")
raise ogre.Exception(0, "can't locate the 'plugins.cfg' file", "")
def setup_resources(resources_path = 'resources.cfg'):
'''Load resources, such as from 'resources.cfg'.'''
config = ogre.ConfigFile()
config.load(resources_path)
section_iter = config.getSectionIterator()
while section_iter.hasMoreElements():
section_name = section_iter.peekNextKey()
settings = section_iter.getNext()
for item in settings:
ogre.ResourceGroupManager.getSingleton().addResourceLocation(item.value, item.key, section_name)
def setup_root(plugins_path = getPluginPath(),
resources_path = 'resources.cfg'):
'''Return new root, sceneManager.'''
root = ogre.Root(plugins_path)
root.setFrameSmoothingPeriod(5.0)
setup_resources(resources_path)
sceneManager = root.createSceneManager(ogre.ST_GENERIC,"ExampleSMInstance")
return root, sceneManager
def initialise_null_render(plugins_path = getPluginPath()):
'Prepare to null renderer and return ogre root.'
ogre_root = ogre.Root(plugins_path)
rend_list = ogre_root.getAvailableRenderers()
ogre_root.setRenderSystem(rend_list[-1])
ogre_root.getRenderSystem()._initRenderTargets()
ogre_root.initialise(False)
return ogre_root
def setup_null_root(plugins_path = getPluginPath(),
resources_path = 'resources.cfg'):
'''Return root, sceneManager. Suitable for unit test without entity, camera, light.
>>> logManager, logListener = quiet_log()
>>> root, sceneManager = setup_null_root()
>>> assert root
>>> assert sceneManager
>>> for i in range(10):
... if not renderOneFrame(root): print False
>>> del sceneManager, root
>>> del logManager, logListener
'''
root = initialise_null_render(plugins_path)
setup_resources(resources_path)
sceneManager = root.createSceneManager(ogre.ST_GENERIC,"ExampleSMInstance")
return root, sceneManager
def setup_viewport(root, sceneManager):
'''Create render window and viewport from user selection, and return renderWindow and camera.'''
renderWindow = configure(root)
if not renderWindow:
return None
camera = sceneManager.createCamera('Camera')
viewport = renderWindow.addViewport(camera)
return renderWindow, camera
def configure(ogre_root):
"""This shows the config dialog and returns the renderWindow."""
user_confirmation = ogre_root.showConfigDialog()
if user_confirmation:
return ogre_root.initialise(True, "OGRE Render Window")
else:
return None
def setup_unittest():
'''With tiny render window and resources. Return root and sceneManager.
>>> logManager, logListener = quiet_log()
>>> root, sceneManager, renderWindow, camera = setup_unittest()
>>> assert root
>>> assert sceneManager
>>> assert renderWindow
>>> assert camera
>>> del sceneManager, root, renderWindow, camera
>>> del logManager, logListener
'''
root, sceneManager = setup_null_root()
renderWindow = root.createRenderWindow('test', 4, 3, False)
camera = sceneManager.createCamera('Camera')
ogre.ResourceGroupManager.getSingleton().initialiseAllResourceGroups()
return root, sceneManager, renderWindow, camera
def setup():
'''Set up minimal Ogre application and return root and sceneManager.
>>> logManager, logListener = quiet_log()
# TODO: Doctest crashes, although external call to setup works!
#>>> root, sceneManager, renderWindow, camera = setup()
#>>> assert root
#>>> assert sceneManager
#>>> assert renderWindow
#>>> assert camera
#>>> application = setup_quiet_application(setup_unittest)
#>>> for i in range(10):
#... print i,
#... if not renderOneFrame(root): print False
#0 1 2 3 4 5 6 7 8 9
#>>> sceneManager.clearScene()
#>>> del renderWindow
#>>> del camera
#>>> del sceneManager
#>>> del root
>>> del logManager, logListener
'''
root, sceneManager = setup_root()
renderWindow, camera = setup_viewport(root, sceneManager)
ogre.TextureManager.getSingleton().setDefaultNumMipmaps(5)
ogre.ResourceGroupManager.getSingleton().initialiseAllResourceGroups()
return root, sceneManager, renderWindow, camera
def run(root, sceneManager, renderWindow, camera):
'''Construct and render.'''
root, sceneManager, renderWindow, camera = setup()
if root and sceneManager:
root.startRendering()
def renderOneFrame(ogre_root):
'Render a frame. Return False if closed. Useful for unit test.'
ogre.WindowEventUtilities().messagePump()
return ogre_root.renderOneFrame()
# For applications that do not have logListener attribute.
logListener = None
logManager = None
class quiet_logListener_class(ogre.LogListener):
def messageLogged(self, message, level, debug, logName):
'''Called by Ogre instead of logging.'''
pass
#print message
def quiet_log():
'''Replace log with quiet version. Useful for unit test.
Return logManager and logListener, which must destructed AFTER root.
>>> logManager, logListener = quiet_log()
>>> root, sceneManager, renderWindow, camera = setup_unittest()
Gotcha: If you encounter 'R6025 Pure virtual function call' error within a class, then write destructor to destroy root before logManager and logListener. http://www.indiegamer.com/archives/t-3533.html
>>> del root
>>> del sceneManager, renderWindow, camera
>>> del logManager, logListener
Derived from examples:
http://www.ogre3d.org/phpBB2addons/viewtopic.php?p=10887&sid=ce193664e1d3d7c4af509e6f4e2718c6
http://wiki.python-ogre.org/index.php/ChangeLog
'''
logManager = ogre.LogManager()
log = ogre.LogManager.getSingletonPtr().createLog(
'quiet.log', True, False, True)
logListener = quiet_logListener_class()
log.addListener(logListener)
return logManager, logListener
class application_class(object):
'''Minimal Ogre application, which needs reference to root.
>>> application = setup_quiet_application(setup_unittest)
>>> for i in range(10):
... print i,
... if not renderOneFrame(application.root): print False
0 1 2 3 4 5 6 7 8 9
>>> assert application
'''
def __init__(self):
self.root = None
self.sceneManager = None
self.renderWindow = None
self.camera = None
# For quiet_log
self.logManager = None
self.logListener = None
def __del__(self):
del self.sceneManager
del self.root
del self.renderWindow
del self.camera
# Must delete root before logManager and logListener
del self.logListener
del self.logManager
def setup_quiet_application(setup_function = setup, application = None):
'''Return a minimal, application with logging disabled.
>>> application = setup_quiet_application(setup_unittest)
Alternatively try to make an application be quiet.
>>> application = application_class()
>>> application = setup_quiet_application(setup_unittest, application)
'''
if not application:
application = application_class()
if hasattr(application, 'logManager') \
and hasattr(application, 'logListener'):
if quiet_logListener_class != type(application.logListener):
application.logManager, application.logListener = quiet_log()
else:
global logListener, logManager
logManager, logListener = quiet_log()
application.root, application.sceneManager, application.renderWindow, application.camera = setup_function()
return application
def setup_unittest_application(application = None):
'''Convenience function to avoid accessing namespace.
>>> application = setup_unittest_application()
>>> del application
Try to setup an existing application quietly.
>>> application = application_class()
>>> application = setup_unittest_application(application)
'''
return setup_quiet_application(setup_unittest, application)
def setup_unittest_sample_framework_application(application):
| getPluginPath | identifier_name | |
ogre_unit.py | IS.py"""
import sys
import os
import os.path
paths = [os.path.join(os.getcwd(), 'plugins.cfg'),
'/etc/OGRE/plugins.cfg',
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'plugins.cfg')]
for path in paths:
if os.path.exists(path):
return path
sys.stderr.write("\n"
"** Warning: Unable to locate a suitable plugins.cfg file.\n"
"** Warning: Please check your ogre installation and copy a\n"
"** Warning: working plugins.cfg file to the current directory.\n\n")
raise ogre.Exception(0, "can't locate the 'plugins.cfg' file", "")
def setup_resources(resources_path = 'resources.cfg'):
|
def setup_root(plugins_path = getPluginPath(),
resources_path = 'resources.cfg'):
'''Return new root, sceneManager.'''
root = ogre.Root(plugins_path)
root.setFrameSmoothingPeriod(5.0)
setup_resources(resources_path)
sceneManager = root.createSceneManager(ogre.ST_GENERIC,"ExampleSMInstance")
return root, sceneManager
def initialise_null_render(plugins_path = getPluginPath()):
'Prepare to null renderer and return ogre root.'
ogre_root = ogre.Root(plugins_path)
rend_list = ogre_root.getAvailableRenderers()
ogre_root.setRenderSystem(rend_list[-1])
ogre_root.getRenderSystem()._initRenderTargets()
ogre_root.initialise(False)
return ogre_root
def setup_null_root(plugins_path = getPluginPath(),
resources_path = 'resources.cfg'):
'''Return root, sceneManager. Suitable for unit test without entity, camera, light.
>>> logManager, logListener = quiet_log()
>>> root, sceneManager = setup_null_root()
>>> assert root
>>> assert sceneManager
>>> for i in range(10):
... if not renderOneFrame(root): print False
>>> del sceneManager, root
>>> del logManager, logListener
'''
root = initialise_null_render(plugins_path)
setup_resources(resources_path)
sceneManager = root.createSceneManager(ogre.ST_GENERIC,"ExampleSMInstance")
return root, sceneManager
def setup_viewport(root, sceneManager):
'''Create render window and viewport from user selection, and return renderWindow and camera.'''
renderWindow = configure(root)
if not renderWindow:
return None
camera = sceneManager.createCamera('Camera')
viewport = renderWindow.addViewport(camera)
return renderWindow, camera
def configure(ogre_root):
"""This shows the config dialog and returns the renderWindow."""
user_confirmation = ogre_root.showConfigDialog()
if user_confirmation:
return ogre_root.initialise(True, "OGRE Render Window")
else:
return None
def setup_unittest():
'''With tiny render window and resources. Return root and sceneManager.
>>> logManager, logListener = quiet_log()
>>> root, sceneManager, renderWindow, camera = setup_unittest()
>>> assert root
>>> assert sceneManager
>>> assert renderWindow
>>> assert camera
>>> del sceneManager, root, renderWindow, camera
>>> del logManager, logListener
'''
root, sceneManager = setup_null_root()
renderWindow = root.createRenderWindow('test', 4, 3, False)
camera = sceneManager.createCamera('Camera')
ogre.ResourceGroupManager.getSingleton().initialiseAllResourceGroups()
return root, sceneManager, renderWindow, camera
def setup():
'''Set up minimal Ogre application and return root and sceneManager.
>>> logManager, logListener = quiet_log()
# TODO: Doctest crashes, although external call to setup works!
#>>> root, sceneManager, renderWindow, camera = setup()
#>>> assert root
#>>> assert sceneManager
#>>> assert renderWindow
#>>> assert camera
#>>> application = setup_quiet_application(setup_unittest)
#>>> for i in range(10):
#... print i,
#... if not renderOneFrame(root): print False
#0 1 2 3 4 5 6 7 8 9
#>>> sceneManager.clearScene()
#>>> del renderWindow
#>>> del camera
#>>> del sceneManager
#>>> del root
>>> del logManager, logListener
'''
root, sceneManager = setup_root()
renderWindow, camera = setup_viewport(root, sceneManager)
ogre.TextureManager.getSingleton().setDefaultNumMipmaps(5)
ogre.ResourceGroupManager.getSingleton().initialiseAllResourceGroups()
return root, sceneManager, renderWindow, camera
def run(root, sceneManager, renderWindow, camera):
'''Construct and render.'''
root, sceneManager, renderWindow, camera = setup()
if root and sceneManager:
root.startRendering()
def renderOneFrame(ogre_root):
'Render a frame. Return False if closed. Useful for unit test.'
ogre.WindowEventUtilities().messagePump()
return ogre_root.renderOneFrame()
# For applications that do not have logListener attribute.
logListener = None
logManager = None
class quiet_logListener_class(ogre.LogListener):
def messageLogged(self, message, level, debug, logName):
'''Called by Ogre instead of logging.'''
pass
#print message
def quiet_log():
'''Replace log with quiet version. Useful for unit test.
Return logManager and logListener, which must destructed AFTER root.
>>> logManager, logListener = quiet_log()
>>> root, sceneManager, renderWindow, camera = setup_unittest()
Gotcha: If you encounter 'R6025 Pure virtual function call' error within a class, then write destructor to destroy root before logManager and logListener. http://www.indiegamer.com/archives/t-3533.html
>>> del root
>>> del sceneManager, renderWindow, camera
>>> del logManager, logListener
Derived from examples:
http://www.ogre3d.org/phpBB2addons/viewtopic.php?p=10887&sid=ce193664e1d3d7c4af509e6f4e2718c6
http://wiki.python-ogre.org/index.php/ChangeLog
'''
logManager = ogre.LogManager()
log = ogre.LogManager.getSingletonPtr().createLog(
'quiet.log', True, False, True)
logListener = quiet_logListener_class()
log.addListener(logListener)
return logManager, logListener
class application_class(object):
'''Minimal Ogre application, which needs reference to root.
>>> application = setup_quiet_application(setup_unittest)
>>> for i in range(10):
... print i,
... if not renderOneFrame(application.root): print False
0 1 2 3 4 5 6 7 8 9
>>> assert application
'''
def __init__(self):
self.root = None
self.sceneManager = None
self.renderWindow = None
self.camera = None
# For quiet_log
self.logManager = None
self.logListener = None
def __del__(self):
del self.sceneManager
del self.root
del self.renderWindow
del self.camera
# Must delete root before logManager and logListener
del self.logListener
del self.logManager
def setup_quiet_application(setup_function = setup, application = None):
'''Return a minimal, application with logging disabled.
>>> application = setup_quiet_application(setup_unittest)
Alternatively try to make an application be quiet.
>>> application = application_class()
>>> application = setup_quiet_application(setup_unittest, application)
'''
if not application:
application = application_class()
if hasattr(application, 'logManager') \
and hasattr(application, 'logListener'):
if quiet_logListener_class != type(application.logListener):
application.logManager, application.logListener = quiet_log()
else:
global logListener, logManager
logManager, logListener = quiet_log()
application.root, application.sceneManager, application.renderWindow, application.camera = setup_function()
return application
def setup_unittest_application(application = None):
'''Convenience function to avoid accessing namespace.
>>> application = setup_unittest_application()
>>> del application
Try to setup an existing application quietly.
>>> application = application_class()
>>> application = setup_unittest_application(application)
'''
return setup_quiet_application(setup_unittest, application)
def setup_unittest_sample_framework_application(application):
'''Setup a unit test for SampleFramework, by assigning camera and renderWindow.
>>> import ogre.renderer | '''Load resources, such as from 'resources.cfg'.'''
config = ogre.ConfigFile()
config.load(resources_path)
section_iter = config.getSectionIterator()
while section_iter.hasMoreElements():
section_name = section_iter.peekNextKey()
settings = section_iter.getNext()
for item in settings:
ogre.ResourceGroupManager.getSingleton().addResourceLocation(item.value, item.key, section_name) | identifier_body |
ogre_unit.py | IS.py"""
import sys
import os
import os.path
paths = [os.path.join(os.getcwd(), 'plugins.cfg'),
'/etc/OGRE/plugins.cfg',
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'plugins.cfg')]
for path in paths:
|
sys.stderr.write("\n"
"** Warning: Unable to locate a suitable plugins.cfg file.\n"
"** Warning: Please check your ogre installation and copy a\n"
"** Warning: working plugins.cfg file to the current directory.\n\n")
raise ogre.Exception(0, "can't locate the 'plugins.cfg' file", "")
def setup_resources(resources_path = 'resources.cfg'):
'''Load resources, such as from 'resources.cfg'.'''
config = ogre.ConfigFile()
config.load(resources_path)
section_iter = config.getSectionIterator()
while section_iter.hasMoreElements():
section_name = section_iter.peekNextKey()
settings = section_iter.getNext()
for item in settings:
ogre.ResourceGroupManager.getSingleton().addResourceLocation(item.value, item.key, section_name)
def setup_root(plugins_path = getPluginPath(),
resources_path = 'resources.cfg'):
'''Return new root, sceneManager.'''
root = ogre.Root(plugins_path)
root.setFrameSmoothingPeriod(5.0)
setup_resources(resources_path)
sceneManager = root.createSceneManager(ogre.ST_GENERIC,"ExampleSMInstance")
return root, sceneManager
def initialise_null_render(plugins_path = getPluginPath()):
'Prepare to null renderer and return ogre root.'
ogre_root = ogre.Root(plugins_path)
rend_list = ogre_root.getAvailableRenderers()
ogre_root.setRenderSystem(rend_list[-1])
ogre_root.getRenderSystem()._initRenderTargets()
ogre_root.initialise(False)
return ogre_root
def setup_null_root(plugins_path = getPluginPath(),
resources_path = 'resources.cfg'):
'''Return root, sceneManager. Suitable for unit test without entity, camera, light.
>>> logManager, logListener = quiet_log()
>>> root, sceneManager = setup_null_root()
>>> assert root
>>> assert sceneManager
>>> for i in range(10):
... if not renderOneFrame(root): print False
>>> del sceneManager, root
>>> del logManager, logListener
'''
root = initialise_null_render(plugins_path)
setup_resources(resources_path)
sceneManager = root.createSceneManager(ogre.ST_GENERIC,"ExampleSMInstance")
return root, sceneManager
def setup_viewport(root, sceneManager):
'''Create render window and viewport from user selection, and return renderWindow and camera.'''
renderWindow = configure(root)
if not renderWindow:
return None
camera = sceneManager.createCamera('Camera')
viewport = renderWindow.addViewport(camera)
return renderWindow, camera
def configure(ogre_root):
"""This shows the config dialog and returns the renderWindow."""
user_confirmation = ogre_root.showConfigDialog()
if user_confirmation:
return ogre_root.initialise(True, "OGRE Render Window")
else:
return None
def setup_unittest():
'''With tiny render window and resources. Return root and sceneManager.
>>> logManager, logListener = quiet_log()
>>> root, sceneManager, renderWindow, camera = setup_unittest()
>>> assert root
>>> assert sceneManager
>>> assert renderWindow
>>> assert camera
>>> del sceneManager, root, renderWindow, camera
>>> del logManager, logListener
'''
root, sceneManager = setup_null_root()
renderWindow = root.createRenderWindow('test', 4, 3, False)
camera = sceneManager.createCamera('Camera')
ogre.ResourceGroupManager.getSingleton().initialiseAllResourceGroups()
return root, sceneManager, renderWindow, camera
def setup():
'''Set up minimal Ogre application and return root and sceneManager.
>>> logManager, logListener = quiet_log()
# TODO: Doctest crashes, although external call to setup works!
#>>> root, sceneManager, renderWindow, camera = setup()
#>>> assert root
#>>> assert sceneManager
#>>> assert renderWindow
#>>> assert camera
#>>> application = setup_quiet_application(setup_unittest)
#>>> for i in range(10):
#... print i,
#... if not renderOneFrame(root): print False
#0 1 2 3 4 5 6 7 8 9
#>>> sceneManager.clearScene()
#>>> del renderWindow
#>>> del camera
#>>> del sceneManager
#>>> del root
>>> del logManager, logListener
'''
root, sceneManager = setup_root()
renderWindow, camera = setup_viewport(root, sceneManager)
ogre.TextureManager.getSingleton().setDefaultNumMipmaps(5)
ogre.ResourceGroupManager.getSingleton().initialiseAllResourceGroups()
return root, sceneManager, renderWindow, camera
def run(root, sceneManager, renderWindow, camera):
'''Construct and render.'''
root, sceneManager, renderWindow, camera = setup()
if root and sceneManager:
root.startRendering()
def renderOneFrame(ogre_root):
'Render a frame. Return False if closed. Useful for unit test.'
ogre.WindowEventUtilities().messagePump()
return ogre_root.renderOneFrame()
# For applications that do not have logListener attribute.
logListener = None
logManager = None
class quiet_logListener_class(ogre.LogListener):
def messageLogged(self, message, level, debug, logName):
'''Called by Ogre instead of logging.'''
pass
#print message
def quiet_log():
'''Replace log with quiet version. Useful for unit test.
Return logManager and logListener, which must destructed AFTER root.
>>> logManager, logListener = quiet_log()
>>> root, sceneManager, renderWindow, camera = setup_unittest()
Gotcha: If you encounter 'R6025 Pure virtual function call' error within a class, then write destructor to destroy root before logManager and logListener. http://www.indiegamer.com/archives/t-3533.html
>>> del root
>>> del sceneManager, renderWindow, camera
>>> del logManager, logListener
Derived from examples:
http://www.ogre3d.org/phpBB2addons/viewtopic.php?p=10887&sid=ce193664e1d3d7c4af509e6f4e2718c6
http://wiki.python-ogre.org/index.php/ChangeLog
'''
logManager = ogre.LogManager()
log = ogre.LogManager.getSingletonPtr().createLog(
'quiet.log', True, False, True)
logListener = quiet_logListener_class()
log.addListener(logListener)
return logManager, logListener
class application_class(object):
'''Minimal Ogre application, which needs reference to root.
>>> application = setup_quiet_application(setup_unittest)
>>> for i in range(10):
... print i,
... if not renderOneFrame(application.root): print False
0 1 2 3 4 5 6 7 8 9
>>> assert application
'''
def __init__(self):
self.root = None
self.sceneManager = None
self.renderWindow = None
self.camera = None
# For quiet_log
self.logManager = None
self.logListener = None
def __del__(self):
del self.sceneManager
del self.root
del self.renderWindow
del self.camera
# Must delete root before logManager and logListener
del self.logListener
del self.logManager
def setup_quiet_application(setup_function = setup, application = None):
'''Return a minimal, application with logging disabled.
>>> application = setup_quiet_application(setup_unittest)
Alternatively try to make an application be quiet.
>>> application = application_class()
>>> application = setup_quiet_application(setup_unittest, application)
'''
if not application:
application = application_class()
if hasattr(application, 'logManager') \
and hasattr(application, 'logListener'):
if quiet_logListener_class != type(application.logListener):
application.logManager, application.logListener = quiet_log()
else:
global logListener, logManager
logManager, logListener = quiet_log()
application.root, application.sceneManager, application.renderWindow, application.camera = setup_function()
return application
def setup_unittest_application(application = None):
'''Convenience function to avoid accessing namespace.
>>> application = setup_unittest_application()
>>> del application
Try to setup an existing application quietly.
>>> application = application_class()
>>> application = setup_unittest_application(application)
'''
return setup_quiet_application(setup_unittest, application)
def setup_unittest_sample_framework_application(application):
'''Setup a unit test for SampleFramework, by assigning camera and renderWindow.
>>> import ogre.renderer.O | if os.path.exists(path):
return path | conditional_block |
data_loader.py | # self.paths = path is not None and librosa.util.find_files(path)
with open(path) as f:
self.paths = f.readlines()
self.sample_rate = sample_rate
self.noise_levels = noise_levels
def inject_noise(self, data):
noise_info_dic = json.loads(np.random.choice(self.paths))
noise_path = noise_info_dic['audio_filepath']
noise_level = np.random.uniform(*self.noise_levels)
return self.inject_noise_sample(data, noise_path, noise_level)
def inject_noise_sample(self, data, noise_path, noise_level):
# noise_len = get_audio_length(noise_path)
noise_len = sox.file_info.duration(noise_path)
data_len = len(data) / self.sample_rate
noise_start = np.random.rand() * (noise_len - data_len)
noise_end = noise_start + data_len
noise_dst = audio_with_sox(noise_path, self.sample_rate, noise_start, noise_end)
if len(data) != len(noise_dst):
data += 0
else:
|
return data
class SpectrogramParser(AudioParser):
def __init__(self,
audio_conf,
speed_volume_perturb=False,
reverberation=False):
"""
Parses audio file into spectrogram with optional normalization and various augmentations
:param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds
:param normalize(default False): Apply standard mean and deviation normalization to audio tensor
:param speed_volume_perturb(default False): Apply random tempo and gain perturbations
:param spec_augment(default False): Apply simple spectral augmentation to mel spectograms
"""
super(SpectrogramParser, self).__init__()
self.sample_rate = audio_conf['sample_rate']
self.speed_volume_perturb = speed_volume_perturb
self.reverberation = reverberation
self.noiseInjector = NoiseInjection(audio_conf['noise_dir'], self.sample_rate,
audio_conf['noise_levels']) if audio_conf.get(
'noise_dir') is not None else None
self.noise_prob = audio_conf.get('noise_prob')
self.reverb_prob = audio_conf.get('reverb_prob')
self.reverb = ReverbAugmentor(min_distance=3, max_distance=5)
def parse_audio(self, audio_path):
# os.system("cp {} {}/wav".format(audio_path, os.getcwd()))
if self.speed_volume_perturb:
y = load_randomly_augmented_audio(audio_path, self.sample_rate)
# sf.write('wav/aaaa_{}'.format(os.path.basename(audio_path)), y, self.sample_rate)
else:
y = load_audio(audio_path)
if self.reverberation:
add_reverb = np.random.binomial(1, self.reverb_prob)
if add_reverb:
y = self.reverb.add_reverb(y)
# sf.write('wav/bbbb_{}'.format(os.path.basename(audio_path)), y, self.sample_rate)
if self.noiseInjector:
add_noise = np.random.binomial(1, self.noise_prob)
if add_noise:
y = self.noiseInjector.inject_noise(y)
# sf.write('wav/cccc_{}'.format(os.path.basename(audio_path)), y, self.sample_rate)
y = torch.FloatTensor(y)
return y
def parse_transcript(self, transcript_path):
raise NotImplementedError
class SpectrogramDataset(Dataset, SpectrogramParser):
def __init__(self,
audio_conf,
manifest_filepath,
labels,
word_form,
speed_volume_perturb=False,
reverberation=False,
min_durations=0.0,
max_durations=60.0):
"""
Dataset that loads tensors via a csv containing file paths to audio files and transcripts separated by
a comma. Each new line is a different sample. Example below:
/path/to/audio.wav,/path/to/audio.txt
...
:param audio_conf: Dictionary containing the sample rate
:param manifest_filepath: Path to manifest csv as describe above
:param labels: list containing all the possible characters to map to
:param normalize: Apply standard mean and deviation normalization to audio tensor
:param speed_volume_perturb(default False): Apply random tempo and gain perturbations
:param spec_augment(default False): Apply simple spectral augmentation to mel spectograms
"""
self.word_form_fict = {"sinogram": "text", "pinyin": "pinyin", "english": "fully_pinyin"}
with open(manifest_filepath) as f:
ids = f.readlines()
ids = [i for i in ids if min_durations < float(json.loads(i)['duration']) <= max_durations]
self.ids = sorted(ids, key=lambda x: float(json.loads(x)['duration']), reverse=True)
self.size = len(ids)
self.word_form = word_form
self.labels_map = dict([(labels[i], i) for i in range(len(labels))])
super(SpectrogramDataset, self).__init__(audio_conf, speed_volume_perturb, reverberation)
def __getitem__(self, index):
sample = json.loads(self.ids[index])
# print("sample: {}".format(sample['duration']))
audio_path, transcripts = sample['audio_filepath'], sample[self.word_form_fict[self.word_form]]
raw_data = self.parse_audio(audio_path)
transcript_id = self.parse_transcript(transcripts)
return raw_data, transcript_id
def parse_transcript(self, transcript):
if self.word_form == 'pinyin':
transcript_id = [self.label_numerical(x) for x in transcript.split(' ')]
elif self.word_form == 'sinogram' or self.word_form == 'english':
transcript_id = list(filter(None, [self.labels_map.get(x) for x in list(transcript)]))
else:
raise ValueError('wrong word form: {}'.format(self.word_form))
return transcript_id
def __len__(self):
return self.size
def label_numerical(self, x):
if self.labels_map.get(x) is not None:
return self.labels_map.get(x)
else:
return self.labels_map.get('.')
def _collate_fn(batch):
def func(p):
return p[0].size(0)
batch = sorted(batch, key=lambda sample: sample[0].size(0), reverse=True)
longest_sample = max(batch, key=func)[0]
minibatch_size = len(batch)
max_seqlength = longest_sample.size(0)
inputs = torch.zeros(minibatch_size, max_seqlength)
input_percentages = torch.FloatTensor(minibatch_size)
target_sizes = torch.IntTensor(minibatch_size)
targets = []
for x in range(minibatch_size):
sample = batch[x]
tensor = sample[0]
target = sample[1]
seq_length = tensor.size(0)
inputs[x].narrow(0, 0, seq_length).copy_(tensor)
input_percentages[x] = seq_length / float(max_seqlength)
target_sizes[x] = len(target)
targets.extend(target)
targets = torch.IntTensor(targets)
return inputs, targets, input_percentages, target_sizes
class AudioDataLoader(DataLoader):
def __init__(self, *args, **kwargs):
"""
Creates a data loader for AudioDatasets.
"""
super(AudioDataLoader, self).__init__(*args, **kwargs)
self.collate_fn = _collate_fn
class DSRandomSampler(Sampler):
"""
Implementation of a Random Sampler for sampling the dataset.
Added to ensure we reset the start index when an epoch is finished.
This is essential since we support saving/loading state during an epoch.
"""
def __init__(self, dataset, batch_size=1, start_index=0):
super().__init__(data_source=dataset)
self.dataset = dataset
self.start_index = start_index
self.batch_size = batch_size
ids = list(range(len(self.dataset)))
self.bins = [ids[i:i + self.batch_size] for i in range(0, len(ids), self.batch_size)]
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = (
torch.randperm(len(self.bins) - self.start_index, generator=g)
.add(self.start_index)
.tolist()
)
for x in indices:
batch_ids = self.bins[x]
np.random.shuffle(batch_ids)
yield batch_ids
def __len__(self):
return len(self.bins) - self.start_index
def set_epoch(self, epoch):
self.epoch = epoch
def reset_training_step(self, training_step):
self.start_index = training_step
class DSDistributedSampler(DistributedSampler):
"""
Overrides the DistributedSampler to ensure we reset the start index when an epoch is finished.
This is essential since we support saving/loading state during an epoch.
"""
def __init__(self, dataset, num_replicas=None, rank=None, start_index=0, batch_size=1):
super().__init__(dataset=dataset, num_replicas=num_replicas, rank=rank)
self.start_index = start_index
| noise_energy = np.sqrt(noise_dst.dot(noise_dst) / noise_dst.size)
data_energy = np.sqrt(data.dot(data) / data.size)
data += noise_level * noise_dst * data_energy / noise_energy | conditional_block |
data_loader.py | 60.0):
"""
Dataset that loads tensors via a csv containing file paths to audio files and transcripts separated by
a comma. Each new line is a different sample. Example below:
/path/to/audio.wav,/path/to/audio.txt
...
:param audio_conf: Dictionary containing the sample rate
:param manifest_filepath: Path to manifest csv as describe above
:param labels: list containing all the possible characters to map to
:param normalize: Apply standard mean and deviation normalization to audio tensor
:param speed_volume_perturb(default False): Apply random tempo and gain perturbations
:param spec_augment(default False): Apply simple spectral augmentation to mel spectograms
"""
self.word_form_fict = {"sinogram": "text", "pinyin": "pinyin", "english": "fully_pinyin"}
with open(manifest_filepath) as f:
ids = f.readlines()
ids = [i for i in ids if min_durations < float(json.loads(i)['duration']) <= max_durations]
self.ids = sorted(ids, key=lambda x: float(json.loads(x)['duration']), reverse=True)
self.size = len(ids)
self.word_form = word_form
self.labels_map = dict([(labels[i], i) for i in range(len(labels))])
super(SpectrogramDataset, self).__init__(audio_conf, speed_volume_perturb, reverberation)
def __getitem__(self, index):
sample = json.loads(self.ids[index])
# print("sample: {}".format(sample['duration']))
audio_path, transcripts = sample['audio_filepath'], sample[self.word_form_fict[self.word_form]]
raw_data = self.parse_audio(audio_path)
transcript_id = self.parse_transcript(transcripts)
return raw_data, transcript_id
def parse_transcript(self, transcript):
if self.word_form == 'pinyin':
transcript_id = [self.label_numerical(x) for x in transcript.split(' ')]
elif self.word_form == 'sinogram' or self.word_form == 'english':
transcript_id = list(filter(None, [self.labels_map.get(x) for x in list(transcript)]))
else:
raise ValueError('wrong word form: {}'.format(self.word_form))
return transcript_id
def __len__(self):
return self.size
def label_numerical(self, x):
if self.labels_map.get(x) is not None:
return self.labels_map.get(x)
else:
return self.labels_map.get('.')
def _collate_fn(batch):
def func(p):
return p[0].size(0)
batch = sorted(batch, key=lambda sample: sample[0].size(0), reverse=True)
longest_sample = max(batch, key=func)[0]
minibatch_size = len(batch)
max_seqlength = longest_sample.size(0)
inputs = torch.zeros(minibatch_size, max_seqlength)
input_percentages = torch.FloatTensor(minibatch_size)
target_sizes = torch.IntTensor(minibatch_size)
targets = []
for x in range(minibatch_size):
sample = batch[x]
tensor = sample[0]
target = sample[1]
seq_length = tensor.size(0)
inputs[x].narrow(0, 0, seq_length).copy_(tensor)
input_percentages[x] = seq_length / float(max_seqlength)
target_sizes[x] = len(target)
targets.extend(target)
targets = torch.IntTensor(targets)
return inputs, targets, input_percentages, target_sizes
class AudioDataLoader(DataLoader):
def __init__(self, *args, **kwargs):
"""
Creates a data loader for AudioDatasets.
"""
super(AudioDataLoader, self).__init__(*args, **kwargs)
self.collate_fn = _collate_fn
class DSRandomSampler(Sampler):
"""
Implementation of a Random Sampler for sampling the dataset.
Added to ensure we reset the start index when an epoch is finished.
This is essential since we support saving/loading state during an epoch.
"""
def __init__(self, dataset, batch_size=1, start_index=0):
super().__init__(data_source=dataset)
self.dataset = dataset
self.start_index = start_index
self.batch_size = batch_size
ids = list(range(len(self.dataset)))
self.bins = [ids[i:i + self.batch_size] for i in range(0, len(ids), self.batch_size)]
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = (
torch.randperm(len(self.bins) - self.start_index, generator=g)
.add(self.start_index)
.tolist()
)
for x in indices:
batch_ids = self.bins[x]
np.random.shuffle(batch_ids)
yield batch_ids
def __len__(self):
return len(self.bins) - self.start_index
def set_epoch(self, epoch):
self.epoch = epoch
def reset_training_step(self, training_step):
self.start_index = training_step
class DSDistributedSampler(DistributedSampler):
"""
Overrides the DistributedSampler to ensure we reset the start index when an epoch is finished.
This is essential since we support saving/loading state during an epoch.
"""
def __init__(self, dataset, num_replicas=None, rank=None, start_index=0, batch_size=1):
super().__init__(dataset=dataset, num_replicas=num_replicas, rank=rank)
self.start_index = start_index
self.batch_size = batch_size
ids = list(range(len(dataset)))
self.bins = [ids[i:i + self.batch_size] for i in range(0, len(ids), self.batch_size)]
self.num_samples = int(
math.ceil(float(len(self.bins) - self.start_index) / self.num_replicas)
)
self.total_size = self.num_samples * self.num_replicas
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = (
torch.randperm(len(self.bins) - self.start_index, generator=g)
.add(self.start_index)
.tolist()
)
# print("self.bins : {}".format(self.bins))
indices = sorted(indices, reverse=False)
# print("indices : {}".format(indices))
# add extra samples to make it evenly divisible
indices += indices[: (self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank: self.total_size: self.num_replicas]
assert len(indices) == self.num_samples
for x in indices:
batch_ids = self.bins[x]
np.random.shuffle(batch_ids)
yield batch_ids
def __len__(self):
return self.num_samples
def reset_training_step(self, training_step):
self.start_index = training_step
self.num_samples = int(
math.ceil(float(len(self.bins) - self.start_index) / self.num_replicas)
)
self.total_size = self.num_samples * self.num_replicas
def audio_with_sox(path, sample_rate, start_time, end_time):
"""
crop and resample the recording with sox and loads it.
"""
try:
with NamedTemporaryFile(suffix=".wav") as tar_file:
tar_filename = tar_file.name
sox_params = "sox \"{}\" -r {} -c 1 -b 16 -e si {} trim {} ={} >/dev/null 2>&1".format(path, sample_rate,
tar_filename,
start_time,
end_time)
os.system(sox_params)
y = load_audio(tar_filename)
except Exception as E:
y = load_audio(path)
return y
def augment_audio_with_sox(path, sample_rate, tempo, gain):
"""
Changes tempo and gain of the recording with sox and loads it.
"""
try:
with NamedTemporaryFile(suffix=".wav") as augmented_file:
augmented_filename = augmented_file.name
sox_augment_params = ["tempo", "{:.3f}".format(tempo), "gain", "{:.3f}".format(gain)]
sox_params = "sox \"{}\" -r {} -c 1 -b 16 -e si {} {} >/dev/null 2>&1".format(path, sample_rate,
augmented_filename,
" ".join(sox_augment_params))
os.system(sox_params)
y = load_audio(augmented_filename)
except Exception as E:
y = load_audio(path)
return y
def load_randomly_augmented_audio(path, sample_rate=16000, tempo_range=(0.85, 1.15),
gain_range=(-6, 8)):
| """
Picks tempo and gain uniformly, applies it to the utterance by using sox utility.
Returns the augmented utterance.
"""
low_tempo, high_tempo = tempo_range
tempo_value = np.random.uniform(low=low_tempo, high=high_tempo)
low_gain, high_gain = gain_range
gain_value = np.random.uniform(low=low_gain, high=high_gain)
audio = augment_audio_with_sox(path=path, sample_rate=sample_rate,
tempo=tempo_value, gain=gain_value)
return audio | identifier_body | |
data_loader.py | # self.paths = path is not None and librosa.util.find_files(path)
with open(path) as f:
self.paths = f.readlines()
self.sample_rate = sample_rate
self.noise_levels = noise_levels
def inject_noise(self, data):
noise_info_dic = json.loads(np.random.choice(self.paths))
noise_path = noise_info_dic['audio_filepath']
noise_level = np.random.uniform(*self.noise_levels)
return self.inject_noise_sample(data, noise_path, noise_level)
def inject_noise_sample(self, data, noise_path, noise_level):
# noise_len = get_audio_length(noise_path)
noise_len = sox.file_info.duration(noise_path)
data_len = len(data) / self.sample_rate
noise_start = np.random.rand() * (noise_len - data_len)
noise_end = noise_start + data_len
noise_dst = audio_with_sox(noise_path, self.sample_rate, noise_start, noise_end)
if len(data) != len(noise_dst):
data += 0
else:
noise_energy = np.sqrt(noise_dst.dot(noise_dst) / noise_dst.size)
data_energy = np.sqrt(data.dot(data) / data.size)
data += noise_level * noise_dst * data_energy / noise_energy
return data
class SpectrogramParser(AudioParser):
def __init__(self,
audio_conf,
speed_volume_perturb=False,
reverberation=False):
"""
Parses audio file into spectrogram with optional normalization and various augmentations
:param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds
:param normalize(default False): Apply standard mean and deviation normalization to audio tensor
:param speed_volume_perturb(default False): Apply random tempo and gain perturbations
:param spec_augment(default False): Apply simple spectral augmentation to mel spectograms
"""
super(SpectrogramParser, self).__init__()
self.sample_rate = audio_conf['sample_rate']
self.speed_volume_perturb = speed_volume_perturb
self.reverberation = reverberation
self.noiseInjector = NoiseInjection(audio_conf['noise_dir'], self.sample_rate,
audio_conf['noise_levels']) if audio_conf.get(
'noise_dir') is not None else None
self.noise_prob = audio_conf.get('noise_prob')
self.reverb_prob = audio_conf.get('reverb_prob')
self.reverb = ReverbAugmentor(min_distance=3, max_distance=5)
def parse_audio(self, audio_path):
# os.system("cp {} {}/wav".format(audio_path, os.getcwd()))
if self.speed_volume_perturb:
y = load_randomly_augmented_audio(audio_path, self.sample_rate)
# sf.write('wav/aaaa_{}'.format(os.path.basename(audio_path)), y, self.sample_rate)
else:
y = load_audio(audio_path)
if self.reverberation:
add_reverb = np.random.binomial(1, self.reverb_prob)
if add_reverb:
y = self.reverb.add_reverb(y)
# sf.write('wav/bbbb_{}'.format(os.path.basename(audio_path)), y, self.sample_rate)
if self.noiseInjector:
add_noise = np.random.binomial(1, self.noise_prob)
if add_noise:
y = self.noiseInjector.inject_noise(y)
# sf.write('wav/cccc_{}'.format(os.path.basename(audio_path)), y, self.sample_rate)
y = torch.FloatTensor(y)
return y
def | (self, transcript_path):
raise NotImplementedError
class SpectrogramDataset(Dataset, SpectrogramParser):
def __init__(self,
audio_conf,
manifest_filepath,
labels,
word_form,
speed_volume_perturb=False,
reverberation=False,
min_durations=0.0,
max_durations=60.0):
"""
Dataset that loads tensors via a csv containing file paths to audio files and transcripts separated by
a comma. Each new line is a different sample. Example below:
/path/to/audio.wav,/path/to/audio.txt
...
:param audio_conf: Dictionary containing the sample rate
:param manifest_filepath: Path to manifest csv as describe above
:param labels: list containing all the possible characters to map to
:param normalize: Apply standard mean and deviation normalization to audio tensor
:param speed_volume_perturb(default False): Apply random tempo and gain perturbations
:param spec_augment(default False): Apply simple spectral augmentation to mel spectograms
"""
self.word_form_fict = {"sinogram": "text", "pinyin": "pinyin", "english": "fully_pinyin"}
with open(manifest_filepath) as f:
ids = f.readlines()
ids = [i for i in ids if min_durations < float(json.loads(i)['duration']) <= max_durations]
self.ids = sorted(ids, key=lambda x: float(json.loads(x)['duration']), reverse=True)
self.size = len(ids)
self.word_form = word_form
self.labels_map = dict([(labels[i], i) for i in range(len(labels))])
super(SpectrogramDataset, self).__init__(audio_conf, speed_volume_perturb, reverberation)
def __getitem__(self, index):
sample = json.loads(self.ids[index])
# print("sample: {}".format(sample['duration']))
audio_path, transcripts = sample['audio_filepath'], sample[self.word_form_fict[self.word_form]]
raw_data = self.parse_audio(audio_path)
transcript_id = self.parse_transcript(transcripts)
return raw_data, transcript_id
def parse_transcript(self, transcript):
if self.word_form == 'pinyin':
transcript_id = [self.label_numerical(x) for x in transcript.split(' ')]
elif self.word_form == 'sinogram' or self.word_form == 'english':
transcript_id = list(filter(None, [self.labels_map.get(x) for x in list(transcript)]))
else:
raise ValueError('wrong word form: {}'.format(self.word_form))
return transcript_id
def __len__(self):
return self.size
def label_numerical(self, x):
if self.labels_map.get(x) is not None:
return self.labels_map.get(x)
else:
return self.labels_map.get('.')
def _collate_fn(batch):
def func(p):
return p[0].size(0)
batch = sorted(batch, key=lambda sample: sample[0].size(0), reverse=True)
longest_sample = max(batch, key=func)[0]
minibatch_size = len(batch)
max_seqlength = longest_sample.size(0)
inputs = torch.zeros(minibatch_size, max_seqlength)
input_percentages = torch.FloatTensor(minibatch_size)
target_sizes = torch.IntTensor(minibatch_size)
targets = []
for x in range(minibatch_size):
sample = batch[x]
tensor = sample[0]
target = sample[1]
seq_length = tensor.size(0)
inputs[x].narrow(0, 0, seq_length).copy_(tensor)
input_percentages[x] = seq_length / float(max_seqlength)
target_sizes[x] = len(target)
targets.extend(target)
targets = torch.IntTensor(targets)
return inputs, targets, input_percentages, target_sizes
class AudioDataLoader(DataLoader):
def __init__(self, *args, **kwargs):
"""
Creates a data loader for AudioDatasets.
"""
super(AudioDataLoader, self).__init__(*args, **kwargs)
self.collate_fn = _collate_fn
class DSRandomSampler(Sampler):
"""
Implementation of a Random Sampler for sampling the dataset.
Added to ensure we reset the start index when an epoch is finished.
This is essential since we support saving/loading state during an epoch.
"""
def __init__(self, dataset, batch_size=1, start_index=0):
super().__init__(data_source=dataset)
self.dataset = dataset
self.start_index = start_index
self.batch_size = batch_size
ids = list(range(len(self.dataset)))
self.bins = [ids[i:i + self.batch_size] for i in range(0, len(ids), self.batch_size)]
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = (
torch.randperm(len(self.bins) - self.start_index, generator=g)
.add(self.start_index)
.tolist()
)
for x in indices:
batch_ids = self.bins[x]
np.random.shuffle(batch_ids)
yield batch_ids
def __len__(self):
return len(self.bins) - self.start_index
def set_epoch(self, epoch):
self.epoch = epoch
def reset_training_step(self, training_step):
self.start_index = training_step
class DSDistributedSampler(DistributedSampler):
"""
Overrides the DistributedSampler to ensure we reset the start index when an epoch is finished.
This is essential since we support saving/loading state during an epoch.
"""
def __init__(self, dataset, num_replicas=None, rank=None, start_index=0, batch_size=1):
super().__init__(dataset=dataset, num_replicas=num_replicas, rank=rank)
self.start_index = start_index
| parse_transcript | identifier_name |
data_loader.py |
# self.paths = path is not None and librosa.util.find_files(path)
with open(path) as f:
self.paths = f.readlines()
self.sample_rate = sample_rate
self.noise_levels = noise_levels
def inject_noise(self, data):
noise_info_dic = json.loads(np.random.choice(self.paths))
noise_path = noise_info_dic['audio_filepath']
noise_level = np.random.uniform(*self.noise_levels)
return self.inject_noise_sample(data, noise_path, noise_level)
def inject_noise_sample(self, data, noise_path, noise_level):
# noise_len = get_audio_length(noise_path)
noise_len = sox.file_info.duration(noise_path)
data_len = len(data) / self.sample_rate
noise_start = np.random.rand() * (noise_len - data_len)
noise_end = noise_start + data_len
noise_dst = audio_with_sox(noise_path, self.sample_rate, noise_start, noise_end)
if len(data) != len(noise_dst):
data += 0
else:
noise_energy = np.sqrt(noise_dst.dot(noise_dst) / noise_dst.size)
data_energy = np.sqrt(data.dot(data) / data.size)
data += noise_level * noise_dst * data_energy / noise_energy
return data
class SpectrogramParser(AudioParser):
def __init__(self,
audio_conf,
speed_volume_perturb=False,
reverberation=False):
"""
Parses audio file into spectrogram with optional normalization and various augmentations
:param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds
:param normalize(default False): Apply standard mean and deviation normalization to audio tensor
:param speed_volume_perturb(default False): Apply random tempo and gain perturbations
:param spec_augment(default False): Apply simple spectral augmentation to mel spectograms
"""
super(SpectrogramParser, self).__init__()
self.sample_rate = audio_conf['sample_rate']
self.speed_volume_perturb = speed_volume_perturb
self.reverberation = reverberation
self.noiseInjector = NoiseInjection(audio_conf['noise_dir'], self.sample_rate,
audio_conf['noise_levels']) if audio_conf.get(
'noise_dir') is not None else None
self.noise_prob = audio_conf.get('noise_prob')
self.reverb_prob = audio_conf.get('reverb_prob')
self.reverb = ReverbAugmentor(min_distance=3, max_distance=5)
def parse_audio(self, audio_path):
# os.system("cp {} {}/wav".format(audio_path, os.getcwd()))
if self.speed_volume_perturb: | add_reverb = np.random.binomial(1, self.reverb_prob)
if add_reverb:
y = self.reverb.add_reverb(y)
# sf.write('wav/bbbb_{}'.format(os.path.basename(audio_path)), y, self.sample_rate)
if self.noiseInjector:
add_noise = np.random.binomial(1, self.noise_prob)
if add_noise:
y = self.noiseInjector.inject_noise(y)
# sf.write('wav/cccc_{}'.format(os.path.basename(audio_path)), y, self.sample_rate)
y = torch.FloatTensor(y)
return y
def parse_transcript(self, transcript_path):
raise NotImplementedError
class SpectrogramDataset(Dataset, SpectrogramParser):
def __init__(self,
audio_conf,
manifest_filepath,
labels,
word_form,
speed_volume_perturb=False,
reverberation=False,
min_durations=0.0,
max_durations=60.0):
"""
Dataset that loads tensors via a csv containing file paths to audio files and transcripts separated by
a comma. Each new line is a different sample. Example below:
/path/to/audio.wav,/path/to/audio.txt
...
:param audio_conf: Dictionary containing the sample rate
:param manifest_filepath: Path to manifest csv as describe above
:param labels: list containing all the possible characters to map to
:param normalize: Apply standard mean and deviation normalization to audio tensor
:param speed_volume_perturb(default False): Apply random tempo and gain perturbations
:param spec_augment(default False): Apply simple spectral augmentation to mel spectograms
"""
self.word_form_fict = {"sinogram": "text", "pinyin": "pinyin", "english": "fully_pinyin"}
with open(manifest_filepath) as f:
ids = f.readlines()
ids = [i for i in ids if min_durations < float(json.loads(i)['duration']) <= max_durations]
self.ids = sorted(ids, key=lambda x: float(json.loads(x)['duration']), reverse=True)
self.size = len(ids)
self.word_form = word_form
self.labels_map = dict([(labels[i], i) for i in range(len(labels))])
super(SpectrogramDataset, self).__init__(audio_conf, speed_volume_perturb, reverberation)
def __getitem__(self, index):
sample = json.loads(self.ids[index])
# print("sample: {}".format(sample['duration']))
audio_path, transcripts = sample['audio_filepath'], sample[self.word_form_fict[self.word_form]]
raw_data = self.parse_audio(audio_path)
transcript_id = self.parse_transcript(transcripts)
return raw_data, transcript_id
def parse_transcript(self, transcript):
if self.word_form == 'pinyin':
transcript_id = [self.label_numerical(x) for x in transcript.split(' ')]
elif self.word_form == 'sinogram' or self.word_form == 'english':
transcript_id = list(filter(None, [self.labels_map.get(x) for x in list(transcript)]))
else:
raise ValueError('wrong word form: {}'.format(self.word_form))
return transcript_id
def __len__(self):
return self.size
def label_numerical(self, x):
if self.labels_map.get(x) is not None:
return self.labels_map.get(x)
else:
return self.labels_map.get('.')
def _collate_fn(batch):
def func(p):
return p[0].size(0)
batch = sorted(batch, key=lambda sample: sample[0].size(0), reverse=True)
longest_sample = max(batch, key=func)[0]
minibatch_size = len(batch)
max_seqlength = longest_sample.size(0)
inputs = torch.zeros(minibatch_size, max_seqlength)
input_percentages = torch.FloatTensor(minibatch_size)
target_sizes = torch.IntTensor(minibatch_size)
targets = []
for x in range(minibatch_size):
sample = batch[x]
tensor = sample[0]
target = sample[1]
seq_length = tensor.size(0)
inputs[x].narrow(0, 0, seq_length).copy_(tensor)
input_percentages[x] = seq_length / float(max_seqlength)
target_sizes[x] = len(target)
targets.extend(target)
targets = torch.IntTensor(targets)
return inputs, targets, input_percentages, target_sizes
class AudioDataLoader(DataLoader):
def __init__(self, *args, **kwargs):
"""
Creates a data loader for AudioDatasets.
"""
super(AudioDataLoader, self).__init__(*args, **kwargs)
self.collate_fn = _collate_fn
class DSRandomSampler(Sampler):
"""
Implementation of a Random Sampler for sampling the dataset.
Added to ensure we reset the start index when an epoch is finished.
This is essential since we support saving/loading state during an epoch.
"""
def __init__(self, dataset, batch_size=1, start_index=0):
super().__init__(data_source=dataset)
self.dataset = dataset
self.start_index = start_index
self.batch_size = batch_size
ids = list(range(len(self.dataset)))
self.bins = [ids[i:i + self.batch_size] for i in range(0, len(ids), self.batch_size)]
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = (
torch.randperm(len(self.bins) - self.start_index, generator=g)
.add(self.start_index)
.tolist()
)
for x in indices:
batch_ids = self.bins[x]
np.random.shuffle(batch_ids)
yield batch_ids
def __len__(self):
return len(self.bins) - self.start_index
def set_epoch(self, epoch):
self.epoch = epoch
def reset_training_step(self, training_step):
self.start_index = training_step
class DSDistributedSampler(DistributedSampler):
"""
Overrides the DistributedSampler to ensure we reset the start index when an epoch is finished.
This is essential since we support saving/loading state during an epoch.
"""
def __init__(self, dataset, num_replicas=None, rank=None, start_index=0, batch_size=1):
super().__init__(dataset=dataset, num_replicas=num_replicas, rank=rank)
self.start_index = start_index
| y = load_randomly_augmented_audio(audio_path, self.sample_rate)
# sf.write('wav/aaaa_{}'.format(os.path.basename(audio_path)), y, self.sample_rate)
else:
y = load_audio(audio_path)
if self.reverberation: | random_line_split |
IMMA2nc1.py | ME','CHE','AM','AH','UM','UH','SBI','SA','RI')
attachment['95'] = 'REANALYSES QC/FEEDBACK ATTACHMENT'
parameters['95'] = ('ICNR','FNR','DPRO','DPRP','UFR','MFGR','MFGSR','MAR','MASR','BCR','ARCR','CDR','ASIR')
attachment['96'] = 'ICOADS VALUE-ADDED DATABASE ATTACHMENT'
parameters['96'] = ('ICNI','FNI','JVAD','VAD','IVAU1','JVAU1','VAU1','IVAU2','JVAU2','VAU2','IVAU3','JVAU3','VAU3','VQC','ARCI','CDI','ASII')
attachment['97'] = 'ERROR ATTACHMENT'
parameters['97'] = ('ICNE','FNE','CEF','ERRD','ARCE','CDE','ASIE')
attachment['98'] = 'UNIQUE ID ATTACHMENT'
parameters['98'] = ('UID','RN1','RN2','RN3','RSA','IRF')
attachment['99'] = 'SUPPLEMENTAL DATA ATTACHMENT'
parameters['99'] = ('ATTE','SUPD')
def get_var_att(var):
idx = abbr.index(var)
if att_doc == 1:
att = {'abbr':var,'longname':longname[idx],'min_v':min_values[idx],'max_v': max_values[idx],'unit':units[idx], 'comment': comments[idx]}
elif att_doc == 2:
att = {'abbr':var,'ancillary':ancillary[idx],'standardname':standardname[idx],'scaledtype':scaledtype[idx],'longname':longname[idx],'min_v':min_values[idx],'max_v': max_values[idx],'unit':units[idx], 'comment': comments[idx], 'flagvalues': flagvalues[idx], 'flagmeanings':flagmeanings[idx]}
else:
print('Error: No attribute document found.')
return att
def get_ancillary(anc_QC, check_list):
var = anc_QC.split(';')
var = [x.split('-')[0].strip() for x in var]
var = [x for x in var if x in check_list ]
return ' '.join(var)
def getParameters(i):
return parameters["%02d" % i]
def save(out_file,data, **kwargs):
def duration(seconds):
t= []
for dm in (60, 60, 24, 7):
seconds, m = divmod(seconds, dm)
t.append(m)
t.append(seconds)
return ''.join('%d%s' % (num, unit)
for num, unit in zip(t[::-1], 'W DT H M S'.split())
if num)
def get_keywords(data):
keywords = []
for var in data.data.keys():
if var in abbr:
idx = abbr.index(var)
if len(keywords_list[idx])>0:
keywords.append(keywords_list[idx])
# print var, keywords_list[idx]
keywords = list(set(keywords))
keywords = ['Earth Science > %s' %x for x in keywords]
keywords = ', '.join(keywords)
return keywords
def Add_gattrs(ff):
lon_min = min(data['LON'])
lon_max = max(data['LON'])
lat_min = min(data['LAT'])
lat_max = max(data['LAT'])
start_time = min(data.data['Julian'])
end_time = max(data.data['Julian'])
dur_time = (end_time-start_time)*24.0*3600.0
start_time = jdutil.jd_to_datetime(start_time)
start_time_s = "%s-%02d-%02dT%02d:%02d:%02dZ" %(start_time.year,start_time.month,start_time.day,start_time.hour,start_time.minute,start_time.second)
end_time = jdutil.jd_to_datetime(end_time)
end_time_s = "%s-%02d-%02dT%02d:%02d:%02dZ" %(end_time.year,end_time.month,end_time.day,end_time.hour,end_time.minute,end_time.second)
version = out_file.split('_')[1]
#start_time_s = time.strftime(time_fmt,time.gmtime(float(start_time)))
#end_time_s = time.strftime(time_fmt,time.gmtime(float(end_time)))
ff.ncei_template_version = "NCEI_NetCDF_Point_Template_v2.0"
ff.featureType = "point"
ff.title = "International Comprehensive Ocean-Atmosphere Data Set (ICOADS) %s data collected from %s to %s." %(version, start_time_s, end_time_s)
ff.summary = "This file contains ICOADS %s data in netCDF4 format collected from %s to %s. The International Comprehensive Ocean-Atmosphere Data Set (ICOADS) offers surface marine data spanning the past three centuries, and simple gridded monthly summary products for 2-degree latitude x 2-degree longitude boxes back to 1800 (and 1degreex1degree boxes since 1960)--these data and products are freely distributed worldwide. As it contains observations from many different observing systems encompassing the evolution of measurement technology over hundreds of years, ICOADS is probably the most complete and heterogeneous collection of surface marine data in existence." %(version, start_time_s, end_time_s)
ff.keywords = get_keywords(data);
ff.Conventions = "CF-1.6, ACDD-1.3"
ff.id = out_file.split('.nc')[0].replace('IMMA1','ICOADS')
ff.naming_authority = "gov.noaa.ncei"
#ff.source = "http://rda.ucar.edu/data/ds548.0/imma1_r3.0.0/%s.tar" %out_file.split('-')[0]
ff.source = "%s.gz" %out_file.split('.nc')[0]
ff.processing_level = "Restructured from IMMA1 format to NetCDF4 format."
ff.acknowledgement = "Conversion of ICOADS data from IMMA1 to netCDF format by NCEI is supported by the NOAA Big Earth Data Initiative (BEDI)."
ff.license = "These data may be redistributed and used without restriction."
ff.standard_name_vocabulary = "CF Standard Name Table v31"
ff.date_created = time.strftime(time_fmt,time.gmtime())
ff.creator_name = "NCEI"
ff.creator_email = "ncei.info@noaa.gov"
ff.creator_url = "https://www.ncei.noaa.gov/"
ff.institution = "National Centers for Environmental Information (NCEI), NOAA"
ff.project = "International Comprehensive Ocean-Atmosphere Data Set (ICOADS) Project"
ff.publisher_name = "NCEI"
ff.publisher_email = "ncei.info@noaa.gov"
ff.publisher_url = "https://www.ncei.noaa.gov/"
ff.geospatial_bounds = "POLYGON ((%.4f %.4f,%.4f %.4f,%.4f %.4f,%.4f %.4f,%.4f %.4f))" %(lon_min,lat_min,lon_min,lat_max,lon_max,lat_max,lon_max,lat_min,lon_min,lat_min)
ff.geospatial_bounds_crs = "EPSG:4326"
ff.geospatial_lat_min = float("%.4f" %(lat_min))
ff.geospatial_lat_max = float("%.4f" %(lat_max))
ff.geospatial_lon_min = float("%.4f" %(lon_min))
ff.geospatial_lon_max = float("%.4f" %(lon_max))
ff.geospatial_lat_units = "degrees_north"
ff.geospatial_lon_units = "degrees_east"
ff.time_coverage_start = start_time_s
ff.time_coverage_end = end_time_s
ff.time_coverage_duration = 'P' + duration(dur_time)
ff.time_coverage_resolution = "vary"
ff.uuid = str(uuid.uuid4())
ff.sea_name = "World-Wide Distribution"
ff.creator_type = "group"
ff.creator_institution = "NOAA National Centers for Environmental Information (NCEI)"
ff.publisher_type = "institution"
ff.publisher_institution = "NOAA National Centers for Environmental Information (NCEI)"
ff.program = ""
ff.contributor_name = "Zhankun Wang; ICOADS team"
ff.contributor_role = "ICOADS Data Conversion to NetCDF; ICOADS IMMA1 Data Provider"
ff.date_modified = time.strftime(time_fmt,time.gmtime())
ff.date_issued = time.strftime(time_fmt,time.gmtime())
ff.date_metadata_modified = time.strftime(time_fmt,time.gmtime())
ff.product_version = "ICOADS %s netCDF4" %version
ff.keywords_vocabulary = "Global Change Master Directory (GCMD) 2015. GCMD Keywords, Version 8.1."
ff.cdm_data_type = 'Point'
#ff.metadata_link = 'http://rda.ucar.edu/datasets/ds548.0/#!docs'
ff.metadata_link = ''
if len(set(data.data['IM'])) == 1:
ff.IMMA_Version = str(data.data['IM'][0])
else:
| print('%s: check IMMA version' %out_file) | conditional_block | |
IMMA2nc1.py | 19','QI20','QI21','HDG','COG','SOG','SLL','SLHH','RWD','RWS','QI22','QI23','QI24','QI25','QI26','QI27','QI28','QI29','RH','RHI','AWSI','IMONO')
attachment['06'] = 'MODEL QUALITY CONTROL ATTACHMENT'
parameters['06'] = ('CCCC','BUID','FBSRC','BMP','BSWU','SWU','BSWV','SWV','BSAT','BSRH','SRH','BSST','MST','MSH','BY','BM','BD','BH','BFL')
attachment['07'] = 'SHIP METADATA ATTACHMENT'
parameters['07'] = ('MDS','C1M','OPM','KOV','COR','TOB','TOT','EOT','LOT','TOH','EOH','SIM','LOV','DOS','HOP','HOT','HOB','HOA','SMF','SME','SMV')
attachment['08'] = 'NEAR-SURFACE OCEANOGRAPHIC DATA ATTACHMENT'
parameters['08'] = ('OTV','OTZ','OSV','OSZ','OOV','OOZ','OPV','OPZ','OSIV','OSIZ','ONV','ONZ','OPHV','OPHZ','OCV','OCZ','OAV','OAZ','OPCV','OPCZ','ODV','ODZ','PUID')
attachment['09'] = 'EDITED CLOUD REPORT ATTACHMENT'
parameters['09'] = ('CCE','WWE','NE','NHE','HE','CLE','CME','CHE','AM','AH','UM','UH','SBI','SA','RI')
attachment['95'] = 'REANALYSES QC/FEEDBACK ATTACHMENT'
parameters['95'] = ('ICNR','FNR','DPRO','DPRP','UFR','MFGR','MFGSR','MAR','MASR','BCR','ARCR','CDR','ASIR')
attachment['96'] = 'ICOADS VALUE-ADDED DATABASE ATTACHMENT'
parameters['96'] = ('ICNI','FNI','JVAD','VAD','IVAU1','JVAU1','VAU1','IVAU2','JVAU2','VAU2','IVAU3','JVAU3','VAU3','VQC','ARCI','CDI','ASII')
attachment['97'] = 'ERROR ATTACHMENT'
parameters['97'] = ('ICNE','FNE','CEF','ERRD','ARCE','CDE','ASIE')
attachment['98'] = 'UNIQUE ID ATTACHMENT'
parameters['98'] = ('UID','RN1','RN2','RN3','RSA','IRF')
attachment['99'] = 'SUPPLEMENTAL DATA ATTACHMENT'
parameters['99'] = ('ATTE','SUPD')
def get_var_att(var):
idx = abbr.index(var)
if att_doc == 1:
att = {'abbr':var,'longname':longname[idx],'min_v':min_values[idx],'max_v': max_values[idx],'unit':units[idx], 'comment': comments[idx]}
elif att_doc == 2:
att = {'abbr':var,'ancillary':ancillary[idx],'standardname':standardname[idx],'scaledtype':scaledtype[idx],'longname':longname[idx],'min_v':min_values[idx],'max_v': max_values[idx],'unit':units[idx], 'comment': comments[idx], 'flagvalues': flagvalues[idx], 'flagmeanings':flagmeanings[idx]}
else:
print('Error: No attribute document found.')
return att
def get_ancillary(anc_QC, check_list):
var = anc_QC.split(';')
var = [x.split('-')[0].strip() for x in var]
var = [x for x in var if x in check_list ]
return ' '.join(var)
def getParameters(i):
return parameters["%02d" % i]
def | (out_file,data, **kwargs):
def duration(seconds):
t= []
for dm in (60, 60, 24, 7):
seconds, m = divmod(seconds, dm)
t.append(m)
t.append(seconds)
return ''.join('%d%s' % (num, unit)
for num, unit in zip(t[::-1], 'W DT H M S'.split())
if num)
def get_keywords(data):
keywords = []
for var in data.data.keys():
if var in abbr:
idx = abbr.index(var)
if len(keywords_list[idx])>0:
keywords.append(keywords_list[idx])
# print var, keywords_list[idx]
keywords = list(set(keywords))
keywords = ['Earth Science > %s' %x for x in keywords]
keywords = ', '.join(keywords)
return keywords
def Add_gattrs(ff):
lon_min = min(data['LON'])
lon_max = max(data['LON'])
lat_min = min(data['LAT'])
lat_max = max(data['LAT'])
start_time = min(data.data['Julian'])
end_time = max(data.data['Julian'])
dur_time = (end_time-start_time)*24.0*3600.0
start_time = jdutil.jd_to_datetime(start_time)
start_time_s = "%s-%02d-%02dT%02d:%02d:%02dZ" %(start_time.year,start_time.month,start_time.day,start_time.hour,start_time.minute,start_time.second)
end_time = jdutil.jd_to_datetime(end_time)
end_time_s = "%s-%02d-%02dT%02d:%02d:%02dZ" %(end_time.year,end_time.month,end_time.day,end_time.hour,end_time.minute,end_time.second)
version = out_file.split('_')[1]
#start_time_s = time.strftime(time_fmt,time.gmtime(float(start_time)))
#end_time_s = time.strftime(time_fmt,time.gmtime(float(end_time)))
ff.ncei_template_version = "NCEI_NetCDF_Point_Template_v2.0"
ff.featureType = "point"
ff.title = "International Comprehensive Ocean-Atmosphere Data Set (ICOADS) %s data collected from %s to %s." %(version, start_time_s, end_time_s)
ff.summary = "This file contains ICOADS %s data in netCDF4 format collected from %s to %s. The International Comprehensive Ocean-Atmosphere Data Set (ICOADS) offers surface marine data spanning the past three centuries, and simple gridded monthly summary products for 2-degree latitude x 2-degree longitude boxes back to 1800 (and 1degreex1degree boxes since 1960)--these data and products are freely distributed worldwide. As it contains observations from many different observing systems encompassing the evolution of measurement technology over hundreds of years, ICOADS is probably the most complete and heterogeneous collection of surface marine data in existence." %(version, start_time_s, end_time_s)
ff.keywords = get_keywords(data);
ff.Conventions = "CF-1.6, ACDD-1.3"
ff.id = out_file.split('.nc')[0].replace('IMMA1','ICOADS')
ff.naming_authority = "gov.noaa.ncei"
#ff.source = "http://rda.ucar.edu/data/ds548.0/imma1_r3.0.0/%s.tar" %out_file.split('-')[0]
ff.source = "%s.gz" %out_file.split('.nc')[0]
ff.processing_level = "Restructured from IMMA1 format to NetCDF4 format."
ff.acknowledgement = "Conversion of ICOADS data from IMMA1 to netCDF format by NCEI is supported by the NOAA Big Earth Data Initiative (BEDI)."
ff.license = "These data may be redistributed and used without restriction."
ff.standard_name_vocabulary = "CF Standard Name Table v31"
ff.date_created = time.strftime(time_fmt,time.gmtime())
ff.creator_name = "NCEI"
ff.creator_email = "ncei.info@noaa.gov"
ff.creator_url = "https://www.ncei.noaa.gov/"
ff.institution = "National Centers for Environmental Information (NCEI), NOAA"
ff.project = "International Comprehensive Ocean-Atmosphere Data Set (ICOADS) Project"
ff.publisher_name = "NCEI"
ff.publisher_email = "ncei.info@noaa.gov"
ff.publisher_url = "https://www.ncei.noaa.gov/"
ff.geospatial_bounds = "POLYGON ((%.4f %.4f,%.4f %.4f,%.4f %.4f,%.4f %.4f,%.4f %.4f))" %(lon_min,lat_min,lon_min,lat_max,lon_max,lat_max,lon_max,lat_min,lon_min,lat_min)
ff.geospatial_bounds_crs = "EPSG:4326"
ff.geospatial_lat_min = float("%.4f" %(lat_min))
ff.geospatial_lat_max = float("%.4f" %(lat_max))
ff.geospatial_lon_min = float("%.4f" | save | identifier_name |
IMMA2nc1.py | 19','QI20','QI21','HDG','COG','SOG','SLL','SLHH','RWD','RWS','QI22','QI23','QI24','QI25','QI26','QI27','QI28','QI29','RH','RHI','AWSI','IMONO')
attachment['06'] = 'MODEL QUALITY CONTROL ATTACHMENT'
parameters['06'] = ('CCCC','BUID','FBSRC','BMP','BSWU','SWU','BSWV','SWV','BSAT','BSRH','SRH','BSST','MST','MSH','BY','BM','BD','BH','BFL')
attachment['07'] = 'SHIP METADATA ATTACHMENT'
parameters['07'] = ('MDS','C1M','OPM','KOV','COR','TOB','TOT','EOT','LOT','TOH','EOH','SIM','LOV','DOS','HOP','HOT','HOB','HOA','SMF','SME','SMV')
attachment['08'] = 'NEAR-SURFACE OCEANOGRAPHIC DATA ATTACHMENT'
parameters['08'] = ('OTV','OTZ','OSV','OSZ','OOV','OOZ','OPV','OPZ','OSIV','OSIZ','ONV','ONZ','OPHV','OPHZ','OCV','OCZ','OAV','OAZ','OPCV','OPCZ','ODV','ODZ','PUID')
attachment['09'] = 'EDITED CLOUD REPORT ATTACHMENT'
parameters['09'] = ('CCE','WWE','NE','NHE','HE','CLE','CME','CHE','AM','AH','UM','UH','SBI','SA','RI')
attachment['95'] = 'REANALYSES QC/FEEDBACK ATTACHMENT'
parameters['95'] = ('ICNR','FNR','DPRO','DPRP','UFR','MFGR','MFGSR','MAR','MASR','BCR','ARCR','CDR','ASIR')
attachment['96'] = 'ICOADS VALUE-ADDED DATABASE ATTACHMENT'
parameters['96'] = ('ICNI','FNI','JVAD','VAD','IVAU1','JVAU1','VAU1','IVAU2','JVAU2','VAU2','IVAU3','JVAU3','VAU3','VQC','ARCI','CDI','ASII')
attachment['97'] = 'ERROR ATTACHMENT'
parameters['97'] = ('ICNE','FNE','CEF','ERRD','ARCE','CDE','ASIE')
attachment['98'] = 'UNIQUE ID ATTACHMENT'
parameters['98'] = ('UID','RN1','RN2','RN3','RSA','IRF')
attachment['99'] = 'SUPPLEMENTAL DATA ATTACHMENT'
parameters['99'] = ('ATTE','SUPD')
def get_var_att(var):
idx = abbr.index(var)
if att_doc == 1:
att = {'abbr':var,'longname':longname[idx],'min_v':min_values[idx],'max_v': max_values[idx],'unit':units[idx], 'comment': comments[idx]}
elif att_doc == 2:
att = {'abbr':var,'ancillary':ancillary[idx],'standardname':standardname[idx],'scaledtype':scaledtype[idx],'longname':longname[idx],'min_v':min_values[idx],'max_v': max_values[idx],'unit':units[idx], 'comment': comments[idx], 'flagvalues': flagvalues[idx], 'flagmeanings':flagmeanings[idx]}
else:
print('Error: No attribute document found.')
return att
def get_ancillary(anc_QC, check_list):
var = anc_QC.split(';')
var = [x.split('-')[0].strip() for x in var]
var = [x for x in var if x in check_list ]
return ' '.join(var)
def getParameters(i):
return parameters["%02d" % i]
def save(out_file,data, **kwargs):
| return keywords
def Add_gattrs(ff):
lon_min = min(data['LON'])
lon_max = max(data['LON'])
lat_min = min(data['LAT'])
lat_max = max(data['LAT'])
start_time = min(data.data['Julian'])
end_time = max(data.data['Julian'])
dur_time = (end_time-start_time)*24.0*3600.0
start_time = jdutil.jd_to_datetime(start_time)
start_time_s = "%s-%02d-%02dT%02d:%02d:%02dZ" %(start_time.year,start_time.month,start_time.day,start_time.hour,start_time.minute,start_time.second)
end_time = jdutil.jd_to_datetime(end_time)
end_time_s = "%s-%02d-%02dT%02d:%02d:%02dZ" %(end_time.year,end_time.month,end_time.day,end_time.hour,end_time.minute,end_time.second)
version = out_file.split('_')[1]
#start_time_s = time.strftime(time_fmt,time.gmtime(float(start_time)))
#end_time_s = time.strftime(time_fmt,time.gmtime(float(end_time)))
ff.ncei_template_version = "NCEI_NetCDF_Point_Template_v2.0"
ff.featureType = "point"
ff.title = "International Comprehensive Ocean-Atmosphere Data Set (ICOADS) %s data collected from %s to %s." %(version, start_time_s, end_time_s)
ff.summary = "This file contains ICOADS %s data in netCDF4 format collected from %s to %s. The International Comprehensive Ocean-Atmosphere Data Set (ICOADS) offers surface marine data spanning the past three centuries, and simple gridded monthly summary products for 2-degree latitude x 2-degree longitude boxes back to 1800 (and 1degreex1degree boxes since 1960)--these data and products are freely distributed worldwide. As it contains observations from many different observing systems encompassing the evolution of measurement technology over hundreds of years, ICOADS is probably the most complete and heterogeneous collection of surface marine data in existence." %(version, start_time_s, end_time_s)
ff.keywords = get_keywords(data);
ff.Conventions = "CF-1.6, ACDD-1.3"
ff.id = out_file.split('.nc')[0].replace('IMMA1','ICOADS')
ff.naming_authority = "gov.noaa.ncei"
#ff.source = "http://rda.ucar.edu/data/ds548.0/imma1_r3.0.0/%s.tar" %out_file.split('-')[0]
ff.source = "%s.gz" %out_file.split('.nc')[0]
ff.processing_level = "Restructured from IMMA1 format to NetCDF4 format."
ff.acknowledgement = "Conversion of ICOADS data from IMMA1 to netCDF format by NCEI is supported by the NOAA Big Earth Data Initiative (BEDI)."
ff.license = "These data may be redistributed and used without restriction."
ff.standard_name_vocabulary = "CF Standard Name Table v31"
ff.date_created = time.strftime(time_fmt,time.gmtime())
ff.creator_name = "NCEI"
ff.creator_email = "ncei.info@noaa.gov"
ff.creator_url = "https://www.ncei.noaa.gov/"
ff.institution = "National Centers for Environmental Information (NCEI), NOAA"
ff.project = "International Comprehensive Ocean-Atmosphere Data Set (ICOADS) Project"
ff.publisher_name = "NCEI"
ff.publisher_email = "ncei.info@noaa.gov"
ff.publisher_url = "https://www.ncei.noaa.gov/"
ff.geospatial_bounds = "POLYGON ((%.4f %.4f,%.4f %.4f,%.4f %.4f,%.4f %.4f,%.4f %.4f))" %(lon_min,lat_min,lon_min,lat_max,lon_max,lat_max,lon_max,lat_min,lon_min,lat_min)
ff.geospatial_bounds_crs = "EPSG:4326"
ff.geospatial_lat_min = float("%.4f" %(lat_min))
ff.geospatial_lat_max = float("%.4f" %(lat_max))
ff.geospatial_lon_min = float("%.4f" %( | def duration(seconds):
t= []
for dm in (60, 60, 24, 7):
seconds, m = divmod(seconds, dm)
t.append(m)
t.append(seconds)
return ''.join('%d%s' % (num, unit)
for num, unit in zip(t[::-1], 'W DT H M S'.split())
if num)
def get_keywords(data):
keywords = []
for var in data.data.keys():
if var in abbr:
idx = abbr.index(var)
if len(keywords_list[idx])>0:
keywords.append(keywords_list[idx])
# print var, keywords_list[idx]
keywords = list(set(keywords))
keywords = ['Earth Science > %s' %x for x in keywords]
keywords = ', '.join(keywords) | identifier_body |
IMMA2nc1.py | I19','QI20','QI21','HDG','COG','SOG','SLL','SLHH','RWD','RWS','QI22','QI23','QI24','QI25','QI26','QI27','QI28','QI29','RH','RHI','AWSI','IMONO')
attachment['06'] = 'MODEL QUALITY CONTROL ATTACHMENT'
parameters['06'] = ('CCCC','BUID','FBSRC','BMP','BSWU','SWU','BSWV','SWV','BSAT','BSRH','SRH','BSST','MST','MSH','BY','BM','BD','BH','BFL')
attachment['07'] = 'SHIP METADATA ATTACHMENT'
parameters['07'] = ('MDS','C1M','OPM','KOV','COR','TOB','TOT','EOT','LOT','TOH','EOH','SIM','LOV','DOS','HOP','HOT','HOB','HOA','SMF','SME','SMV')
attachment['08'] = 'NEAR-SURFACE OCEANOGRAPHIC DATA ATTACHMENT'
parameters['08'] = ('OTV','OTZ','OSV','OSZ','OOV','OOZ','OPV','OPZ','OSIV','OSIZ','ONV','ONZ','OPHV','OPHZ','OCV','OCZ','OAV','OAZ','OPCV','OPCZ','ODV','ODZ','PUID')
attachment['09'] = 'EDITED CLOUD REPORT ATTACHMENT'
parameters['09'] = ('CCE','WWE','NE','NHE','HE','CLE','CME','CHE','AM','AH','UM','UH','SBI','SA','RI')
attachment['95'] = 'REANALYSES QC/FEEDBACK ATTACHMENT'
parameters['95'] = ('ICNR','FNR','DPRO','DPRP','UFR','MFGR','MFGSR','MAR','MASR','BCR','ARCR','CDR','ASIR')
attachment['96'] = 'ICOADS VALUE-ADDED DATABASE ATTACHMENT'
parameters['96'] = ('ICNI','FNI','JVAD','VAD','IVAU1','JVAU1','VAU1','IVAU2','JVAU2','VAU2','IVAU3','JVAU3','VAU3','VQC','ARCI','CDI','ASII')
attachment['97'] = 'ERROR ATTACHMENT'
parameters['97'] = ('ICNE','FNE','CEF','ERRD','ARCE','CDE','ASIE')
attachment['98'] = 'UNIQUE ID ATTACHMENT'
parameters['98'] = ('UID','RN1','RN2','RN3','RSA','IRF')
attachment['99'] = 'SUPPLEMENTAL DATA ATTACHMENT'
parameters['99'] = ('ATTE','SUPD')
def get_var_att(var):
idx = abbr.index(var)
if att_doc == 1:
att = {'abbr':var,'longname':longname[idx],'min_v':min_values[idx],'max_v': max_values[idx],'unit':units[idx], 'comment': comments[idx]}
elif att_doc == 2:
att = {'abbr':var,'ancillary':ancillary[idx],'standardname':standardname[idx],'scaledtype':scaledtype[idx],'longname':longname[idx],'min_v':min_values[idx],'max_v': max_values[idx],'unit':units[idx], 'comment': comments[idx], 'flagvalues': flagvalues[idx], 'flagmeanings':flagmeanings[idx]}
else:
print('Error: No attribute document found.')
return att
def get_ancillary(anc_QC, check_list):
var = anc_QC.split(';')
var = [x.split('-')[0].strip() for x in var]
var = [x for x in var if x in check_list ]
return ' '.join(var)
def getParameters(i): | def save(out_file,data, **kwargs):
def duration(seconds):
t= []
for dm in (60, 60, 24, 7):
seconds, m = divmod(seconds, dm)
t.append(m)
t.append(seconds)
return ''.join('%d%s' % (num, unit)
for num, unit in zip(t[::-1], 'W DT H M S'.split())
if num)
def get_keywords(data):
keywords = []
for var in data.data.keys():
if var in abbr:
idx = abbr.index(var)
if len(keywords_list[idx])>0:
keywords.append(keywords_list[idx])
# print var, keywords_list[idx]
keywords = list(set(keywords))
keywords = ['Earth Science > %s' %x for x in keywords]
keywords = ', '.join(keywords)
return keywords
def Add_gattrs(ff):
lon_min = min(data['LON'])
lon_max = max(data['LON'])
lat_min = min(data['LAT'])
lat_max = max(data['LAT'])
start_time = min(data.data['Julian'])
end_time = max(data.data['Julian'])
dur_time = (end_time-start_time)*24.0*3600.0
start_time = jdutil.jd_to_datetime(start_time)
start_time_s = "%s-%02d-%02dT%02d:%02d:%02dZ" %(start_time.year,start_time.month,start_time.day,start_time.hour,start_time.minute,start_time.second)
end_time = jdutil.jd_to_datetime(end_time)
end_time_s = "%s-%02d-%02dT%02d:%02d:%02dZ" %(end_time.year,end_time.month,end_time.day,end_time.hour,end_time.minute,end_time.second)
version = out_file.split('_')[1]
#start_time_s = time.strftime(time_fmt,time.gmtime(float(start_time)))
#end_time_s = time.strftime(time_fmt,time.gmtime(float(end_time)))
ff.ncei_template_version = "NCEI_NetCDF_Point_Template_v2.0"
ff.featureType = "point"
ff.title = "International Comprehensive Ocean-Atmosphere Data Set (ICOADS) %s data collected from %s to %s." %(version, start_time_s, end_time_s)
ff.summary = "This file contains ICOADS %s data in netCDF4 format collected from %s to %s. The International Comprehensive Ocean-Atmosphere Data Set (ICOADS) offers surface marine data spanning the past three centuries, and simple gridded monthly summary products for 2-degree latitude x 2-degree longitude boxes back to 1800 (and 1degreex1degree boxes since 1960)--these data and products are freely distributed worldwide. As it contains observations from many different observing systems encompassing the evolution of measurement technology over hundreds of years, ICOADS is probably the most complete and heterogeneous collection of surface marine data in existence." %(version, start_time_s, end_time_s)
ff.keywords = get_keywords(data);
ff.Conventions = "CF-1.6, ACDD-1.3"
ff.id = out_file.split('.nc')[0].replace('IMMA1','ICOADS')
ff.naming_authority = "gov.noaa.ncei"
#ff.source = "http://rda.ucar.edu/data/ds548.0/imma1_r3.0.0/%s.tar" %out_file.split('-')[0]
ff.source = "%s.gz" %out_file.split('.nc')[0]
ff.processing_level = "Restructured from IMMA1 format to NetCDF4 format."
ff.acknowledgement = "Conversion of ICOADS data from IMMA1 to netCDF format by NCEI is supported by the NOAA Big Earth Data Initiative (BEDI)."
ff.license = "These data may be redistributed and used without restriction."
ff.standard_name_vocabulary = "CF Standard Name Table v31"
ff.date_created = time.strftime(time_fmt,time.gmtime())
ff.creator_name = "NCEI"
ff.creator_email = "ncei.info@noaa.gov"
ff.creator_url = "https://www.ncei.noaa.gov/"
ff.institution = "National Centers for Environmental Information (NCEI), NOAA"
ff.project = "International Comprehensive Ocean-Atmosphere Data Set (ICOADS) Project"
ff.publisher_name = "NCEI"
ff.publisher_email = "ncei.info@noaa.gov"
ff.publisher_url = "https://www.ncei.noaa.gov/"
ff.geospatial_bounds = "POLYGON ((%.4f %.4f,%.4f %.4f,%.4f %.4f,%.4f %.4f,%.4f %.4f))" %(lon_min,lat_min,lon_min,lat_max,lon_max,lat_max,lon_max,lat_min,lon_min,lat_min)
ff.geospatial_bounds_crs = "EPSG:4326"
ff.geospatial_lat_min = float("%.4f" %(lat_min))
ff.geospatial_lat_max = float("%.4f" %(lat_max))
ff.geospatial_lon_min = float("%.4f" | return parameters["%02d" % i]
| random_line_split |
JsPopup.js | Id);
$(this).after(miSign);
miSign.click(function(){
$(this).removeClass('shake');
});
});
var clearAbleObjs = container.find('.clear_able');
clearAbleObjs.each(function(){
var inputId = _pad_check_temp_id_to_jobj($(this));
var miGroup = _pad_check_input_group_parent($(this));
var inner_btn_clear = $('<span class="input_inner_btn icon-remove red"></span>');
inner_btn_clear.attr('input_id',inputId);
$(this).after(inner_btn_clear);
miGroup.hover(
function(){
inner_btn_clear.show();
}
,function(){
inner_btn_clear.hide();
}
);
inner_btn_clear.click(function(e){
e.stopPropagation();
e.preventDefault();
var targetId = $(this).attr('input_id');
var targetObj = $('#'+targetId);
targetObj.prop("value",'');
//targetObj.val('');
targetObj.removeData();
});
});
var _cp_colorPicker = $('#_cp_color_select_div');
if(_cp_colorPicker.length>0){
var colorableObjs = container.find('.color_picker');
colorableObjs.each(function(){
var inputId = _pad_check_temp_id_to_jobj($(this));
$(this).click(function(e){
e.stopPropagation();
e.preventDefault();
});
});
}
}
function _pad_check_temp_id_to_jobj(jobj){
var inputId = jobj.attr('id');
if(!inputId || inputId==''){
inputId = 'input_temp_id_'+_pad_temp_input_id_idx;
_pad_temp_input_id_idx ++;
jobj.attr('id',inputId);
}
return inputId;
}
function _pad_check_input_group_parent(jobj){
var parent = jobj.parent();
var retobj = null;
if(parent.is('.mi_group')){
retobj = parent;
}else{
retobj = $('<div class="mi_group"></div>');
jobj.wrap(retobj);
}
return jobj.parent();
}
function _pad_clear_container_old_data(containerId){
if(containerId.substring(0,1)!='#')
containerId = '#' + containerId;
var container = $(containerId);
container.attr('content_url','');
//不去掉就没法设置pageSize
//container.removeAttr(_pad_grid_page_size);
container.removeAttr('pageNo');
container.removeData(_pad_adv_filter_id);
container.removeData(_pad_search_params_id);
container.removeData(_pad_page_base_params_id);
try{
container.removeData(_grid_row_selected_row_ids);
}catch(e){}
try{
//_all_gridSearchClear(containerId); //页面暂时没有这个逻辑,vix中有
}catch(e){
alert(e);
}
}
function _pad_findGridByContainerId(containerId){
if(containerId.substring(0,1)!='#')
containerId = '#' + containerId;
var containerObj = $(containerId);
var anyGrid = containerObj.find('table.table:first');
if(anyGrid.length>0){
var ori_containerId = anyGrid.attr('containerId');
if(!ori_containerId || ori_containerId==''){
anyGrid.attr('containerId',containerId);
}
}
return anyGrid;
}
function _pad_add_pageInfo_to_loadPageHtml(jqHtml, pageContainerId, url){
var container = find_jquery_object(pageContainerId);
container.attr('content_url',url);
}
function _pad_mergeJsonObject(baseData, newData){
if(!baseData)
return newData;
if(!newData)
return baseData;
var resultJsonObject={};
for(var attr in baseData){
resultJsonObject[attr]=baseData[attr];
}
for(var attr in newData){
resultJsonObject[attr]=newData[attr];
}
return resultJsonObject;
}
function find_jquery_object(obj){
var jObj = null;
//check if obj is just id
if(obj instanceof jQuery){
jObj = obj;
}else{
if(typeof(obj)=='string'){
if(obj.substring(0,1)!='#')
obj = '#' + obj;
jObj = $(obj);
}else{
jObj = $(obj);
}
}
return jObj;
}
function _pad_add_param_to_post_data(data, paramName, paramValue){
if(!data || data.length==0){
data = paramName+'='+paramValue;
}else{
if(typeof(data)=='string'){
if(data!=''){
if(data.substring(0,1)=='&'){
data = data.substring(1);
}
if(data.substring(data.length-1)=='&'){
data = data.substring(0,data.length-1);
}
if(data!=''){
var param_arr = data.split('&');
data = {};
for(var pi=0;pi<param_arr.length;pi++){
var p_key_val = param_arr[pi].split('=');
if(p_key_val.length>1){
data[p_key_val[0]] = p_key_val[1];
}
}
}
}
}
data[paramName] = paramValue;
}
return data;
}
function isJson(obj){
return typeof(obj) == "object" && Object.prototype.toString.call(obj).toLowerCase() == "[object object]" && !obj.length;
}
/**
* 弹框显示内容
* @param ma_mark 弹框标识(非id),相同标识只弹出一个窗口
* @param title 窗口标题
* @param url 加载内容url,优先使用url再使用content
* @param content 窗口显示内容,当url有效时,可作为提交参数(数组格式)
* @param position 位置信息数组,可设置width,height,left/right,top/bottom
* @param callback 回调函数,可设置 afterinit,beforeclose(点击右上角关闭时),finishwork(在显示内容内根据自定义情况执行)
* @private
*/
function _add_moveable_popup(ma_mark,title,url,content,position,callback){
//此判断无法处理并发情况,暂时不深入处理
if(ma_mark && $('.moveable_popup_win[identity="'+ma_mark+'"]').length>0){
_close_moveable_popup(ma_mark);//改为关闭之前的
}
var mapop = $('<div class="moveable_popup_win any_focus_pop my_focus_pop"><div class="title_bar"></div><div class="close_btn icon-remove" ma_mark="'+ma_mark+'"></div><div class="content_here"></div><div class="button_here"></div></div>');
var ma_id = 'ma_pop_'+_ma_pop_idx;
mapop.attr('id',ma_id);
mapop.attr('idx',_ma_pop_idx);
if(!ma_mark || ma_mark==''){
ma_mark = ma_id;
}
if(ma_mark){
mapop.attr('identity',ma_mark);
}
$('.my_focus_pop').removeClass('my_focus_pop');
$('#main-content').append(mapop);
var content_obj = mapop.find('.content_here:first');
var ma_content_id = 'ma_pop_content_'+_ma_pop_idx;
content_obj.attr('id',ma_content_id);
_ma_pop_idx ++;
var titlebar = mapop.find('.title_bar:first');
titlebar.html(title);
if(!callback){
callback = {};
_ma_pop_callback[ma_mark] = null;
_ma_pop_callback_beforeclose[ma_mark] = null;
}else{
if(callback.finishwork){
_ma_pop_callback[ma_mark] = callback.finishwork;
}
if(callback.beforeclose) {
_ma_pop_callback_beforeclose[ma_mark] = callback.beforeclose;
}
}
mapop.resizable();
mapop.delegate('.close_btn','click',function(){
_close_moveable_popup($(this).attr('ma_mark'));
});
var mp_data = {};
mp_data.moveable_pop_mark = ma_mark;
if(url){
mp_data = _pad_mergeJsonObject(mp_data,content);
_pad_all_loadPage(url,ma_content_id,true,mp_data,function(){
_ma_check_button(mapop);
mapop.show();
_reposition_moveable_pop(mapop,position);
if(callback.afterinit){
callback.afterinit(ma_id,ma_content_id);
}
});
}else{
content_obj.html(content);
_reposition_moveable_pop(mapop,position);
_ma_check_button(mapop);
mapop.show();
if(callback.afterinit){
callback.afterinit(ma_id,ma_content_id);
}
}
_mp_last_focus_shift_time_millsec = new Date().getTime();
mapop.draggabilly({
handle: '.title_bar'
});
}
function _ma_check_button(mapop){
var button_old = mapop.find('.pop_win_buttons:first');
var button_new = mapop.find('.button_here:first');
var fixed_buttons = button_new.find('.ma_fixed_button');
var fixed_btn_group = $('<span class="ma_fixed_button_group"></span>');
fixed_buttons.each(function(){
fixed_btn_group.append($(this));
});
var close_btn = $('<a class="btn close_btn">关闭</a>');
close_btn.attr('ma_mark', mapop.attr('identity'));
if(button_old.length>0){
button_new.html('');
button_new.append(fixed_btn_group);
button_new.append(button_old.html());
button_new.appe | nd(close_btn);
| identifier_name | |
JsPopup.js | 0,1)!='#')
containerId = '#' + containerId;
var container = $(containerId);
container.attr('content_url','');
//不去掉就没法设置pageSize
//container.removeAttr(_pad_grid_page_size);
container.removeAttr('pageNo');
container.removeData(_pad_adv_filter_id);
container.removeData(_pad_search_params_id);
container.removeData(_pad_page_base_params_id);
try{
container.removeData(_grid_row_selected_row_ids);
}catch(e){}
try{
//_all_gridSearchClear(containerId); //页面暂时没有这个逻辑,vix中有
}catch(e){
alert(e);
}
}
function _pad_findGridByContainerId(containerId){
if(containerId.substring(0,1)!='#')
containerId = '#' + containerId;
var containerObj = $(containerId);
var anyGrid = containerObj.find('table.table:first');
if(anyGrid.length>0){
var ori_containerId = anyGrid.attr('containerId');
if(!ori_containerId || ori_containerId==''){
anyGrid.attr('containerId',containerId);
}
}
return anyGrid;
}
function _pad_add_pageInfo_to_loadPageHtml(jqHtml, pageContainerId, url){
var container = find_jquery_object(pageContainerId);
container.attr('content_url',url);
}
function _pad_mergeJsonObject(baseData, newData){
if(!baseData)
return newData;
if(!newData)
return baseData;
var resultJsonObject={};
for(var attr in baseData){
resultJsonObject[attr]=baseData[attr];
}
for(var attr in newData){
resultJsonObject[attr]=newData[attr];
}
return resultJsonObject;
}
function find_jquery_object(obj){
var jObj = null;
//check if obj is just id
if(obj instanceof jQuery){
jObj = obj;
}else{
if(typeof(obj)=='string'){
if(obj.substring(0,1)!='#')
obj = '#' + obj;
jObj = $(obj);
}else{
jObj = $(obj);
}
}
return jObj;
}
function _pad_add_param_to_post_data(data, paramName, paramValue){
if(!data || data.length==0){
data = paramName+'='+paramValue;
}else{
if(typeof(data)=='string'){
if(data!=''){
if(data.substring(0,1)=='&'){
data = data.substring(1);
}
if(data.substring(data.length-1)=='&'){
data = data.substring(0,data.length-1);
}
if(data!=''){
var param_arr = data.split('&');
data = {};
for(var pi=0;pi<param_arr.length;pi++){
var p_key_val = param_arr[pi].split('=');
if(p_key_val.length>1){
data[p_key_val[0]] = p_key_val[1];
}
}
}
}
}
data[paramName] = paramValue;
}
return data;
}
function isJson(obj){
return typeof(obj) == "object" && Object.prototype.toString.call(obj).toLowerCase() == "[object object]" && !obj.length;
}
/**
* 弹框显示内容
* @param ma_mark 弹框标识(非id),相同标识只弹出一个窗口
* @param title 窗口标题
* @param url 加载内容url,优先使用url再使用content
* @param content 窗口显示内容,当url有效时,可作为提交参数(数组格式)
* @param position 位置信息数组,可设置width,height,left/right,top/bottom
* @param callback 回调函数,可设置 afterinit,beforeclose(点击右上角关闭时),finishwork(在显示内容内根据自定义情况执行)
* @private
*/
function _add_moveable_popup(ma_mark,title,url,content,position,callback){
//此判断无法处理并发情况,暂时不深入处理
if(ma_mark && $('.moveable_popup_win[identity="'+ma_mark+'"]').length>0){
_close_moveable_popup(ma_mark);//改为关闭之前的
}
var mapop = $('<div class="moveable_popup_win any_focus_pop my_focus_pop"><div class="title_bar"></div><div class="close_btn icon-remove" ma_mark="'+ma_mark+'"></div><div class="content_here"></div><div class="button_here"></div></div>');
var ma_id = 'ma_pop_'+_ma_pop_idx;
mapop.attr('id',ma_id);
mapop.attr('idx',_ma_pop_idx);
if(!ma_mark || ma_mark==''){
ma_mark = ma_id;
}
if(ma_mark){
mapop.attr('identity',ma_mark);
}
$('.my_focus_pop').removeClass('my_focus_pop');
$('#main-content').append(mapop);
var content_obj = mapop.find('.content_here:first');
var ma_content_id = 'ma_pop_content_'+_ma_pop_idx;
content_obj.attr('id',ma_content_id);
_ma_pop_idx ++;
var titlebar = mapop.find('.title_bar:first');
titlebar.html(title);
if(!callback){
callback = {};
_ma_pop_callback[ma_mark] = null;
_ma_pop_callback_beforeclose[ma_mark] = null;
}else{
if(callback.finishwork){
_ma_pop_callback[ma_mark] = callback.finishwork;
}
if(callback.beforeclose) {
_ma_pop_callback_beforeclose[ma_mark] = callback.beforeclose;
}
}
mapop.resizable();
mapop.delegate('.close_btn','click',function(){
_close_moveable_popup($(this).attr('ma_mark'));
});
var mp_data = {};
mp_data.moveable_pop_mark = ma_mark;
if(url){
mp_data = _pad_mergeJsonObject(mp_data,content);
_pad_all_loadPage(url,ma_content_id,true,mp_data,function(){
_ma_check_button(mapop);
mapop.show();
_reposition_moveable_pop(mapop,position);
if(callback.afterinit){
callback.afterinit(ma_id,ma_content_id);
}
});
}else{
content_obj.html(content);
_reposition_moveable_pop(mapop,position);
_ma_check_button(mapop);
mapop.show();
if(callback.afterinit){
callback.afterinit(ma_id,ma_content_id);
}
}
_mp_last_focus_shift_time_millsec = new Date().getTime();
mapop.draggabilly({
handle: '.title_bar'
});
}
function _ma_check_button(mapop){
var button_old = mapop.find('.pop_win_buttons:first');
var button_new = mapop.find('.button_here:first');
var fixed_buttons = button_new.find('.ma_fixed_button');
var fixed_btn_group = $('<span class="ma_fixed_button_group"></span>');
fixed_buttons.each(function(){
fixed_btn_group.append($(this));
});
var close_btn = $('<a class="btn close_btn">关闭</a>');
close_btn.attr('ma_mark', mapop.attr('identity'));
if(button_old.length>0){
button_new.html('');
button_new.append(fixed_btn_group);
button_new.append(button_old.html());
button_new.append(close_btn);
button_old.remove();
}else{
button_new.html(close_btn);//默认添加关闭按键
button_new.prepend(fixed_btn_group);
}
}
function _reload_moveable_pop(ma_mark, more_parems){
if(ma_mark && $('.moveable_popup_win[identity="'+ma_mark+'"]').length>0){
_pad_all_reloadPage($('.moveable_popup_win[identity="'+ma_mark+'"]').find('.content_here:first').attr('id'),more_parems, function(){
_ma_check_button($('.moveable_popup_win[identity="'+ma_mark+'"]'));
});
}
}
function _find_moveable_pop_by_ma_mark(ma_mark){
return $('.moveable_popup_win[identity="'+ma_mark+'"]');
}
function _reposition_moveable_pop(mapop,position){
if(position){
if(position.width>0){
position.width = position.width + 30;
mapop.css('width',position.width);
}
var p_height = position.height;
if(isNaN(p_height)){
p_height = 0;
}
if(position.auto_height){
var content_container = mapop.find('.content_here:first');
var content_height = content_container[0].scrollHeight;
if(!isNaN(content_height)){
if(p_height>content_height){
p_height = content_height + 80;
}
if(p_height<230){
p_height = 230;
| }
}
mapop.css('height', p_height);
}else{
mapop.css('height', p_height+50);
}
if(!isNaN(position.left)){
mapop.css('left',position.left);
}else if(!isNaN(position.right)){
var width = mapop.outerWidth();
var win_width = $(document).innerWidth();
var left = win_width - width - position.right;
mapop.css('left',left);
}
if(!isNaN(position.top)){
mapop.css('top',position.top);
}else if(!isNaN(position.bottom)){
var height = mapop.outerHeight();
var win_height = $(window).innerHeight();
var top = win_height - height - position.bottom; | identifier_body | |
JsPopup.js | + containerId;
var container = $(containerId);
container.attr('content_url','');
//不去掉就没法设置pageSize
//container.removeAttr(_pad_grid_page_size);
container.removeAttr('pageNo');
container.removeData(_pad_adv_filter_id);
container.removeData(_pad_search_params_id);
container.removeData(_pad_page_base_params_id);
try{
container.removeData(_grid_row_selected_row_ids);
}catch(e){}
try{
//_all_gridSearchClear(containerId); //页面暂时没有这个逻辑,vix中有
}catch(e){
alert(e);
}
}
function _pad_findGridByContainerId(containerId){
if(containerId.substring(0,1)!='#')
containerId = '#' + containerId;
var containerObj = $(containerId);
var anyGrid = containerObj.find('table.table:first');
if(anyGrid.length>0){
var ori_containerId = anyGrid.attr('containerId');
if(!ori_containerId || ori_containerId==''){
anyGrid.attr('containerId',containerId);
}
}
return anyGrid;
}
function _pad_add_pageInfo_to_loadPageHtml(jqHtml, pageContainerId, url){
var container = find_jquery_object(pageContainerId);
container.attr('content_url',url);
}
function _pad_mergeJsonObject(baseData, newData){
if(!baseData)
return newData;
if(!newData)
return baseData;
var resultJsonObject={};
for(var attr in baseData){
resultJsonObject[attr]=baseData[attr];
}
for(var attr in newData){
resultJsonObject[attr]=newData[attr];
}
return resultJsonObject;
}
function find_jquery_object(obj){
var jObj = null;
//check if obj is just id
if(obj instanceof jQuery){
jObj = obj;
}else{
if(typeof(obj)=='string'){
if(obj.substring(0,1)!='#')
obj = '#' + obj;
jObj = $(obj);
}else{
jObj = $(obj);
}
}
return jObj;
}
function _pad_add_param_to_post_data(data, paramName, paramValue){
if(!data || data.length==0){
data = paramName+'='+paramValue;
}else{
if(typeof(data)=='string'){
if(data!=''){
if(data.substring(0,1)=='&'){
data = data.substring(1);
}
if(data.substring(data.length-1)=='&'){
data = data.substring(0,data.length-1);
}
if(data!=''){
var param_arr = data.split('&');
data = {};
for(var pi=0;pi<param_arr.length;pi++){
var p_key_val = param_arr[pi].split('=');
if(p_key_val.length>1){
data[p_key_val[0]] = p_key_val[1];
}
}
}
}
}
data[paramName] = paramValue;
}
return data;
}
function isJson(obj){
return typeof(obj) == "object" && Object.prototype.toString.call(obj).toLowerCase() == "[object object]" && !obj.length;
}
/**
* 弹框显示内容
* @param ma_mark 弹框标识(非id),相同标识只弹出一个窗口
* @param title 窗口标题
* @param url 加载内容url,优先使用url再使用content
* @param content 窗口显示内容,当url有效时,可作为提交参数(数组格式)
* @param position 位置信息数组,可设置width,height,left/right,top/bottom
* @param callback 回调函数,可设置 afterinit,beforeclose(点击右上角关闭时),finishwork(在显示内容内根据自定义情况执行)
* @private
*/
function _add_moveable_popup(ma_mark,title,url,content,position,callback){
//此判断无法处理并发情况,暂时不深入处理
if(ma_mark && $('.moveable_popup_win[identity="'+ma_mark+'"]').length>0){
_close_moveable_popup(ma_mark);//改为关闭之前的
}
var mapop = $('<div class="moveable_popup_win any_focus_pop my_focus_pop"><div class="title_bar"></div><div class="close_btn icon-remove" ma_mark="'+ma_mark+'"></div><div class="content_here"></div><div class="button_here"></div></div>');
var ma_id = 'ma_pop_'+_ma_pop_idx;
mapop.attr('id',ma_id);
mapop.attr('idx',_ma_pop_idx);
if(!ma_mark || ma_mark==''){
ma_mark = ma_id;
}
if(ma_mark){
mapop.attr('identity',ma_mark);
}
$('.my_focus_pop').removeClass('my_focus_pop');
$('#main-content').append(mapop);
var content_obj = mapop.find('.content_here:first');
var ma_content_id = 'ma_pop_content_'+_ma_pop_idx;
content_obj.attr('id',ma_content_id);
_ma_pop_idx ++;
var titlebar = mapop.find('.title_bar:first');
titlebar.html(title);
if(!callback){
callback = {};
_ma_pop_callback[ma_mark] = null;
_ma_pop_callback_beforeclose[ma_mark] = null;
}else{
if(callback.finishwork){
_ma_pop_callback[ma_mark] = callback.finishwork;
}
if(callback.beforeclose) {
_ma_pop_callback_beforeclose[ma_mark] = callback.beforeclose;
}
}
mapop.resizable();
mapop.delegate('.close_btn','click',function(){
_close_moveable_popup($(this).attr('ma_mark'));
});
var mp_data = {};
mp_data.moveable_pop_mark = ma_mark;
if(url){
mp_data = _pad_mergeJsonObject(mp_data,content);
_pad_all_loadPage(url,ma_content_id,true,mp_data,function(){
_ma_check_button(mapop);
mapop.show();
_reposition_moveable_pop(mapop,position);
if(callback.afterinit){
callback.afterinit(ma_id,ma_content_id);
}
});
}else{
content_obj.html(content);
_reposition_moveable_pop(mapop,position);
_ma_check_button(mapop);
mapop.show();
if(callback.afterinit){
callback.afterinit(ma_id,ma_content_id);
}
}
_mp_last_focus_shift_time_millsec = new Date().getTime();
mapop.draggabilly({
handle: '.title_bar'
});
}
function _ma_check_button(mapop){
var button_old = mapop.find('.pop_win_buttons:first');
var button_new = mapop.find('.button_here:first');
var fixed_buttons = button_new.find('.ma_fixed_button');
var fixed_btn_group = $('<span class="ma_fixed_button_group"></span>');
fixed_buttons.each(function(){
fixed_btn_group.append($(this));
});
var close_btn = $('<a class="btn close_btn">关闭</a>');
close_btn.attr('ma_mark', mapop.attr('identity'));
if(button_old.length>0){
button_new.html('');
button_new.append(fixed_btn_group);
button_new.append(button_old.html());
button_new.append(close_btn);
button_old.remove();
}else{
button_new.html(close_btn);//默认添加关闭按键
button_new.prepend(fixed_btn_group);
}
}
function _reload_moveable_pop(ma_mark, more_parems){
if(ma_mark && $('.moveable_popup_win[identity="'+ma_mark+'"]').length>0){
_pad_all_reloadPage($('.moveable_popup_win[identity="'+ma_mark+'"]').find('.content_here:first').attr('id'),more_parems, function(){
_ma_check_button($('.moveable_popup_win[identity="'+ma_mark+'"]'));
});
}
}
function _find_moveable_pop_by_ma_mark(ma_mark){
return $('.moveable_popup_win[identity="'+ma_mark+'"]');
}
function _reposition_moveable_pop(mapop,position){
if(position){
if(position.width>0){
position.width = position.width + 30;
mapop.css('width',position.width);
}
var p_height = position.height;
if(isNaN(p_height)){
p_height = 0;
}
if(position.auto_height){
var content_container = mapop.find('.content_here:first');
var content_height = content_container[0].scrollHeight;
if(!isNaN(content_height)){
if(p_height>content_height){
p_height = content_height + 80;
}
if(p_height<230){
p_height = 230;
}
}
mapop.css('height', p_height);
}else{
mapop.css('height', p_height+50);
}
if(!isNaN(position.left)){
mapop.css('left',position.left);
}else if(!isNaN(position.right)){
var width = mapop.outerWidth();
var win_width = $(document).innerWidth();
var left = win_width - width - position.right;
mapop.css('left',left);
}
if(!isNaN(position.top)){
mapop.css('top',position.top);
}else if(!isNaN(position.bottom)){
var height = mapop.outerHeight();
var win_height = $(window).innerHeight();
var top = win_height - height - position.bottom;
| if(top<0){
top = 0;
| conditional_block | |
JsPopup.js | 异常
if(html.err_text){
alert(html.err_text);
_hide_top_loading();
}
return;
}
_pad_add_pageInfo_to_loadPageHtml(html, pageContainerId, url); | if(tempIdx1!=-1){
tempIdx1 = tempIdx1 + 6;
var tempIdx2 = html.indexOf('>',tempIdx1);
var tempIdx3 = html.indexOf('table',tempIdx1);
if(tempIdx3!=-1 && tempIdx3< tempIdx2){
//尝试准确的定位"table.table:first"的table
html = html.substring(0,tempIdx1) + ' containerId="#'+pageContainerId+'"' + html.substring(tempIdx1);
}
}
containerObj.html(html);
containerObj.trigger('new_content_load');
//添加输入框相关效果,如 必填 等等
_pad_add_input_element(containerObj);
_update_pager_click_event(containerObj);
var anyGrid = _pad_findGridByContainerId(pageContainerId);
if(anyGrid.length>0){
add_event_for_jm_table(anyGrid);
}
//添加clearable输入框清楚按键
_pad_add_clearable_input_btn(pageContainerId);
if(callBack){
callBack(pageContainerId);
}
}
}).always(function(){});
}
function _pad_all_reloadPage(pageContainerId,more_parems,callback){
var container = find_jquery_object(pageContainerId);
if(container){
var url = container.attr('content_url');
_pad_all_loadPage(url, pageContainerId, true, more_parems, callback);
}
}
function _pad_add_clearable_input_btn(containerId){
var container = find_jquery_object(containerId);
container.find('input.clearable').each(function(){
$(this).wrap('<div class="clearable_container"></div>');
var clear_btn = $('<i class="icon-remove clear_btn"></i>');
clear_btn.click(function(){
$(this).prev('input').val('');
});
$(this).after(clear_btn);
});
}
function add_event_for_jm_table(gridTable){
var gridContainerId = gridTable.attr('containerId');
add_event_for_jm_table_sort(gridContainerId);
}
function add_event_for_jm_table_sort(containerId){
if(containerId.substring(0,1)!='#')
containerId = '#' + containerId;
var container = $(containerId);
var page_params = container.data(_pad_page_base_params_id);
if(page_params && page_params.sort_column){
var on_sorting = container.find('[sort_column="'+page_params.sort_column+'"]');
if(on_sorting.length==1){
on_sorting.attr('sort_type', page_params.sort_type);
}
}
container.find('.tab_sorter').each(function(){
var column = $(this);
var sort_column = column.attr('sort_column');
if(sort_column!=''){
var column_table = column.parents('[content_url]:first');
var sort_type = column.attr('sort_type');
column_table.removeClass('icon-sort-up icon-sort-down');
if(sort_type=='asc'){
column.addClass('icon-sort-up');
}else if(sort_type=='desc'){
column.addClass('icon-sort-down');
}else{
column.addClass('icon-sort');
}
column.unbind('click').bind('click',function(){
_show_top_loading();
var thiscolumn = $(this);
var thissort = thiscolumn.attr('sort_column');
var thistype = thiscolumn.attr('sort_type');
if(thiscolumn.is('.icon-sort-down')){
thistype = 'asc';
}else if(thiscolumn.is('.icon-sort-up')){
thistype = '';
}else{
thistype = 'desc';
}
thiscolumn.attr('sort_type',thistype);
var table_id = column_table.attr('id');
var table_url = column_table.attr('content_url');
var params = column_table.data(_pad_page_base_params_id);
if(!params){
params = {};
}
params = _pad_add_param_to_post_data(params,'sort_column',thissort);
params = _pad_add_param_to_post_data(params,'sort_type',thistype);
_p_a_load(table_url, table_id, null, params, function(){
_hide_top_loading();
});
});
}
});
}
function _hide_top_loading() {}
function _show_top_loading() {}
function _update_pager_click_event(container){
var pagerObjs = container.find('.pagination.in_tab');
pagerObjs.each(function(){
$(this).find('a').each(function() {
$(this).click(function (event) {
var url = $(this).attr('href');
_pad_all_loadPage(url,container.attr('id'),true);
return false;//阻止链接跳转
});
});
});
}
var _pad_temp_input_id_idx = 1;
function _pad_add_input_element(container){
var mustObjs = container.find('.must_input');
mustObjs.each(function(){
var inputId = _pad_check_temp_id_to_jobj($(this));
var miGroup = _pad_check_input_group_parent($(this));
var miSign = $('<i class="icon-warning-sign mi_sign" title="必填"> 必填</i>');
miSign.attr('input_id',inputId);
$(this).after(miSign);
miSign.click(function(){
$(this).removeClass('shake');
});
});
var clearAbleObjs = container.find('.clear_able');
clearAbleObjs.each(function(){
var inputId = _pad_check_temp_id_to_jobj($(this));
var miGroup = _pad_check_input_group_parent($(this));
var inner_btn_clear = $('<span class="input_inner_btn icon-remove red"></span>');
inner_btn_clear.attr('input_id',inputId);
$(this).after(inner_btn_clear);
miGroup.hover(
function(){
inner_btn_clear.show();
}
,function(){
inner_btn_clear.hide();
}
);
inner_btn_clear.click(function(e){
e.stopPropagation();
e.preventDefault();
var targetId = $(this).attr('input_id');
var targetObj = $('#'+targetId);
targetObj.prop("value",'');
//targetObj.val('');
targetObj.removeData();
});
});
var _cp_colorPicker = $('#_cp_color_select_div');
if(_cp_colorPicker.length>0){
var colorableObjs = container.find('.color_picker');
colorableObjs.each(function(){
var inputId = _pad_check_temp_id_to_jobj($(this));
$(this).click(function(e){
e.stopPropagation();
e.preventDefault();
});
});
}
}
function _pad_check_temp_id_to_jobj(jobj){
var inputId = jobj.attr('id');
if(!inputId || inputId==''){
inputId = 'input_temp_id_'+_pad_temp_input_id_idx;
_pad_temp_input_id_idx ++;
jobj.attr('id',inputId);
}
return inputId;
}
function _pad_check_input_group_parent(jobj){
var parent = jobj.parent();
var retobj = null;
if(parent.is('.mi_group')){
retobj = parent;
}else{
retobj = $('<div class="mi_group"></div>');
jobj.wrap(retobj);
}
return jobj.parent();
}
function _pad_clear_container_old_data(containerId){
if(containerId.substring(0,1)!='#')
containerId = '#' + containerId;
var container = $(containerId);
container.attr('content_url','');
//不去掉就没法设置pageSize
//container.removeAttr(_pad_grid_page_size);
container.removeAttr('pageNo');
container.removeData(_pad_adv_filter_id);
container.removeData(_pad_search_params_id);
container.removeData(_pad_page_base_params_id);
try{
container.removeData(_grid_row_selected_row_ids);
}catch(e){}
try{
//_all_gridSearchClear(containerId); //页面暂时没有这个逻辑,vix中有
}catch(e){
alert(e);
}
}
function _pad_findGridByContainerId(containerId){
if(containerId.substring(0,1)!='#')
containerId = '#' + containerId;
var containerObj = $(containerId);
var anyGrid = containerObj.find('table.table:first');
if(anyGrid.length>0){
var ori_containerId = anyGrid.attr('containerId');
if(!ori_containerId || ori_containerId==''){
anyGrid.attr('containerId',containerId);
}
}
return anyGrid;
}
function _pad_add_pageInfo_to_loadPageHtml(jqHtml, pageContainerId, url){
var container = find_jquery_object(pageContainerId);
container.attr('content_url',url);
}
function _pad_mergeJsonObject(baseData, newData){
if(!baseData)
return newData;
if(!newData)
return baseData;
var resultJsonObject={};
for(var attr in baseData){
resultJsonObject[attr]=baseData[attr];
}
for(var attr in newData){
resultJsonObject[attr]=newData[attr];
}
return resultJsonObject;
}
function find_jquery_object(obj){
var jObj = null;
//check if obj is just id
if(obj instanceof jQuery){
jObj = obj;
} | //处理如果html中有grid,为grid加上containerId
var tempIdx1 = html.indexOf('<table'); | random_line_split |
plot_utils.py | (disp, scanline_index, color, title):
coords = get_disparity_plot_coords(disp, scanline_index = scanline_index)
plt.plot(coords)
def get_disparity_plot_coords(disp, scanline_index=0):
current = next = disp[0,0]
current_plot_coords = [0,0]
for j in range ((disp.shape[1])):
next = disp[scanline_index, j]
coordinate_diff = get_disparity_scanline_move(current, next)
current_plot_coords.append(
(current_plot_coords[-1][0] + coordinate_diff[0],
current_plot_coords[-1][1] + coordinate_diff[1])
)
current = next
return current_plot_coords
def get_disparity_scanline_move(current, next):
if next==0:
return (1,0)
if(current==next):
return (1,1)
#if it is brighter?
if(next>current):
return (np.abs(next-current)+1, 1)
#if it is darker?
return (1,np.abs(next - current)+1)
def scatter_3d_results(x_label, y_label, metrix, FILE_PATH_OR_DATAFRAME, cmm="viridis"):
if(FILE_PATH_OR_DATAFRAME.__class__.__name__ == 'str'):
data = pd.read_csv(FILE_PATH_OR_DATAFRAME)
data.columns = np.array([str.strip(col) for col in data.columns])
else:
data = FILE_PATH_OR_DATAFRAME
x,y,z = data[x_label], data[y_label], data[metrix]
fig = plt.figure()
ax = fig.gca(projection="3d")
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_zlabel(metrix)
ax.scatter(x,y, z, c=z, cmap = "viridis")
plt.show()
def polyine_3d(x_label, y_label, metrix, data, occl_counted=False):
scenes = data["scene"].unique()
X = np.array(data[x_label].unique())
Y= np.array(data[y_label].unique())
data = data[data["are_occlusions_errors"] == occl_counted]
data = data.sort_values(by=[x_label, y_label])
z = pd.pivot_table(data, values=[metrix], columns=[x_label], index=[y_label]).values
Z = np.nan_to_num(z, nan=2000)
verts = []
mins = []
for i in range(X.shape[0]):
current_column = Z[:, i]
min_loc, min_val = X[np.argmin(current_column)], current_column.min()
temp = list(zip(Y, current_column))
verts.append(temp)
mins.append((min_loc, min_val, X[i]))
stop_here = 1
poly = PolyCollection(verts, facecolors=[get_random_color() for x in X])
poly.set_alpha(1)
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
surf_x, surf_y = np.meshgrid(Y, X)
surf_z = np.empty((len(X), len(Y)))
surf_z[:, :] = data[metrix].min()
ax.plot_surface(surf_x, surf_y, surf_z, color=(0.3,0.3,0.9, 0.6))
ax.add_collection3d(poly, zs = X, zdir='y')
#annotating minimums for enhanced readibility
for x,z,y in mins:
label = 'min: %.2f (%d, %d)' % (z, x, y)
ax.text(x, y, z, label)
ax.set_xlabel(y_label)
ax.set_xlim3d(0, Y[-1])
ax.set_ylabel(x_label)
ax.set_ylim3d(0, X[-1])
ax.set_zlabel(metrix)
ax.set_zlim3d(0, 2000)
plt.grid()
plt.show()
return fig, ax
def bar_3d_by_scenes(x_label, y_label, metrix, FILE_PATH_OR_DATAFRAME, occl_counted=False):
if (FILE_PATH_OR_DATAFRAME.__class__.__name__ == 'str'):
data = pd.read_csv(FILE_PATH_OR_DATAFRAME)
data.columns = np.array([str.strip(col) for col in data.columns])
else:
data = FILE_PATH_OR_DATAFRAME
scenes = data["scene"].unique()
data = data[data["are_occlusions_errors"]==occl_counted]
data = data.sort_values(by=[x_label, y_label])
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
colors = [get_random_color() for scene in scenes]
for x_param in data[x_label].unique():
temp_outer = data[data[x_label]==x_param]
for i, scene in enumerate(scenes):
temp_inner = temp_outer[(temp_outer["scene"]==scene)]
ax.bar(temp_inner[x_label], temp_inner[metrix], zs=temp_inner[y_label], zdir="y", color=colors[i], alpha=0.8)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_zlabel(metrix)
plt.show()
def get_random_color(alpha=0.8):
r = np.random.rand()
g = np.random.rand()
b = np.random.rand()
return (r,g,b, alpha)
def plot_3d_results(x_label,y_label,metrix, FILE_PATH_OR_DATAFRAME, steps=None):
if(FILE_PATH_OR_DATAFRAME.__class__.__name__ == 'str'):
data = pd.read_csv(FILE_PATH_OR_DATAFRAME)
data.columns = np.array([str.strip(col) for col in data.columns])
else:
data = FILE_PATH_OR_DATAFRAME
x = np.array(data[x_label].unique())[:, np.newaxis]
y = np.array(data[y_label].unique())[:, np.newaxis]
z = pd.pivot_table(data, values=[metrix], columns=[x_label], index=[y_label]).values
z = np.nan_to_num(z, nan=1000000)
print("Z's shape is: {0}".format(z.shape))
x_diff_step = (x.max() - x.min()) / z.shape[1] if steps is None else steps[1]
y_diff_step = (y.max()-y.min())/z.shape[0] if steps is None else steps[0]
X = np.arange(x.min(), x.max()+1, x_diff_step)[:, np.newaxis]
Y = np.arange(y.min(), y.max() +1, y_diff_step)[:, np.newaxis]
X, Y = np.meshgrid(X, Y)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_zlim(data[metrix].min()-100, data[metrix].max()+100)
ax.plot_surface(X, Y, z, cmap=cm.tab20b)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_zlabel(metrix)
plt.show()
row_with_min = data[metrix].idxmin()
min_row = data.loc[row_with_min]
return min_row
#it is 4d in reality
def plotly_4d_results(x_label,y_label, z_label, metrix, FILE_PATH_OR_DATAFRAME, ):
if(FILE_PATH_OR_DATAFRAME.__class__.__name__ == 'str'):
data = pd.read_csv(FILE_PATH_OR_DATAFRAME)
data.columns = np.array([str.strip(col) for col in data.columns])
else:
data = FILE_PATH_OR_DATAFRAME
data = data[[x_label, y_label, z_label, metrix]]
data = data.sort_values(by=[x_label, y_label, z_label], ascending = True)
values = np.nan_to_num(data[[metrix]].values)
x = np.array(data[x_label].unique())[:, np.newaxis]
y = np.array(data[y_label].unique())[:, np.newaxis]
z = np.array(data[z_label].unique())[:, np.newaxis]
x_step = (x.max() - x.min()) / (x.shape[0]-1)
y_step = (y.max() - y.min()) / (y.shape[0]-1)
z_step =(z.max() - z.min()) / (z.shape[0]-1)
# good cmap = px.colors.diverging.Spectral
# px.colors.sequential.Rainbow
# px.colors.sequential.Angset
X, Y, Z = np.mgrid[x.min():x.max()+x_step:x_step, y.min():y.max()+y_step:y_step, z.min():z.max()+z_step:z_step]
fig = go.Figure(data=go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=values.flatten(),
cmin=values.min(),
cmax=values.max(),
opacity=0.3, # needs to be small to see through all surfaces
surface_count=20,
colorscale=px.colors.diverging.Spectral
))
fig.show()
def plot_disparity_3d(disparity, cmm = cm.viridis):
x = np.arange(0, disparity.shape[0])[:, np.newaxis]
y = np.arange(0, disparity | plot_disp_line | identifier_name | |
plot_utils.py |
#if it is brighter?
if(next>current):
return (np.abs(next-current)+1, 1)
#if it is darker?
return (1,np.abs(next - current)+1)
def scatter_3d_results(x_label, y_label, metrix, FILE_PATH_OR_DATAFRAME, cmm="viridis"):
if(FILE_PATH_OR_DATAFRAME.__class__.__name__ == 'str'):
data = pd.read_csv(FILE_PATH_OR_DATAFRAME)
data.columns = np.array([str.strip(col) for col in data.columns])
else:
data = FILE_PATH_OR_DATAFRAME
x,y,z = data[x_label], data[y_label], data[metrix]
fig = plt.figure()
ax = fig.gca(projection="3d")
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_zlabel(metrix)
ax.scatter(x,y, z, c=z, cmap = "viridis")
plt.show()
def polyine_3d(x_label, y_label, metrix, data, occl_counted=False):
scenes = data["scene"].unique()
X = np.array(data[x_label].unique())
Y= np.array(data[y_label].unique())
data = data[data["are_occlusions_errors"] == occl_counted]
data = data.sort_values(by=[x_label, y_label])
z = pd.pivot_table(data, values=[metrix], columns=[x_label], index=[y_label]).values
Z = np.nan_to_num(z, nan=2000)
verts = []
mins = []
for i in range(X.shape[0]):
current_column = Z[:, i]
min_loc, min_val = X[np.argmin(current_column)], current_column.min()
temp = list(zip(Y, current_column))
verts.append(temp)
mins.append((min_loc, min_val, X[i]))
stop_here = 1
poly = PolyCollection(verts, facecolors=[get_random_color() for x in X])
poly.set_alpha(1)
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
surf_x, surf_y = np.meshgrid(Y, X)
surf_z = np.empty((len(X), len(Y)))
surf_z[:, :] = data[metrix].min()
ax.plot_surface(surf_x, surf_y, surf_z, color=(0.3,0.3,0.9, 0.6))
ax.add_collection3d(poly, zs = X, zdir='y')
#annotating minimums for enhanced readibility
for x,z,y in mins:
label = 'min: %.2f (%d, %d)' % (z, x, y)
ax.text(x, y, z, label)
ax.set_xlabel(y_label)
ax.set_xlim3d(0, Y[-1])
ax.set_ylabel(x_label)
ax.set_ylim3d(0, X[-1])
ax.set_zlabel(metrix)
ax.set_zlim3d(0, 2000)
plt.grid()
plt.show()
return fig, ax
def bar_3d_by_scenes(x_label, y_label, metrix, FILE_PATH_OR_DATAFRAME, occl_counted=False):
if (FILE_PATH_OR_DATAFRAME.__class__.__name__ == 'str'):
data = pd.read_csv(FILE_PATH_OR_DATAFRAME)
data.columns = np.array([str.strip(col) for col in data.columns])
else:
data = FILE_PATH_OR_DATAFRAME
scenes = data["scene"].unique()
data = data[data["are_occlusions_errors"]==occl_counted]
data = data.sort_values(by=[x_label, y_label])
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
colors = [get_random_color() for scene in scenes]
for x_param in data[x_label].unique():
temp_outer = data[data[x_label]==x_param]
for i, scene in enumerate(scenes):
temp_inner = temp_outer[(temp_outer["scene"]==scene)]
ax.bar(temp_inner[x_label], temp_inner[metrix], zs=temp_inner[y_label], zdir="y", color=colors[i], alpha=0.8)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_zlabel(metrix)
plt.show()
def get_random_color(alpha=0.8):
r = np.random.rand()
g = np.random.rand()
b = np.random.rand()
return (r,g,b, alpha)
def plot_3d_results(x_label,y_label,metrix, FILE_PATH_OR_DATAFRAME, steps=None):
if(FILE_PATH_OR_DATAFRAME.__class__.__name__ == 'str'):
data = pd.read_csv(FILE_PATH_OR_DATAFRAME)
data.columns = np.array([str.strip(col) for col in data.columns])
else:
data = FILE_PATH_OR_DATAFRAME
x = np.array(data[x_label].unique())[:, np.newaxis]
y = np.array(data[y_label].unique())[:, np.newaxis]
z = pd.pivot_table(data, values=[metrix], columns=[x_label], index=[y_label]).values
z = np.nan_to_num(z, nan=1000000)
print("Z's shape is: {0}".format(z.shape))
x_diff_step = (x.max() - x.min()) / z.shape[1] if steps is None else steps[1]
y_diff_step = (y.max()-y.min())/z.shape[0] if steps is None else steps[0]
X = np.arange(x.min(), x.max()+1, x_diff_step)[:, np.newaxis]
Y = np.arange(y.min(), y.max() +1, y_diff_step)[:, np.newaxis]
X, Y = np.meshgrid(X, Y)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_zlim(data[metrix].min()-100, data[metrix].max()+100)
ax.plot_surface(X, Y, z, cmap=cm.tab20b)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_zlabel(metrix)
plt.show()
row_with_min = data[metrix].idxmin()
min_row = data.loc[row_with_min]
return min_row
#it is 4d in reality
def plotly_4d_results(x_label,y_label, z_label, metrix, FILE_PATH_OR_DATAFRAME, ):
if(FILE_PATH_OR_DATAFRAME.__class__.__name__ == 'str'):
data = pd.read_csv(FILE_PATH_OR_DATAFRAME)
data.columns = np.array([str.strip(col) for col in data.columns])
else:
data = FILE_PATH_OR_DATAFRAME
data = data[[x_label, y_label, z_label, metrix]]
data = data.sort_values(by=[x_label, y_label, z_label], ascending = True)
values = np.nan_to_num(data[[metrix]].values)
x = np.array(data[x_label].unique())[:, np.newaxis]
y = np.array(data[y_label].unique())[:, np.newaxis]
z = np.array(data[z_label].unique())[:, np.newaxis]
x_step = (x.max() - x.min()) / (x.shape[0]-1)
y_step = (y.max() - y.min()) / (y.shape[0]-1)
z_step =(z.max() - z.min()) / (z.shape[0]-1)
# good cmap = px.colors.diverging.Spectral
# px.colors.sequential.Rainbow
# px.colors.sequential.Angset
X, Y, Z = np.mgrid[x.min():x.max()+x_step:x_step, y.min():y.max()+y_step:y_step, z.min():z.max()+z_step:z_step]
fig = go.Figure(data=go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=values.flatten(),
cmin=values.min(),
cmax=values.max(),
opacity=0.3, # needs to be small to see through all surfaces
surface_count=20,
colorscale=px.colors.diverging.Spectral
))
fig.show()
def plot_disparity_3d(disparity, cmm = cm.viridis):
x = np.arange(0, disparity.shape[0])[:, np.newaxis]
y = np.arange(0, disparity.shape[1])[:, np.newaxis]
fig = plt.figure()
ax = fig.gca(projection="3d")
ax.plot_surface(x,y.T, disparity, cmap = cmm)
def plot_images(imgs, titles, cmode = "gray", ncols= 4, hspace=0.5, wspace=0.5):
assert len(imgs) == len(titles)
n = len(imgs)
row_number = math.ceil(n / ncols)
fig = plt.subplots(figsize=[20, int(4*row_number)])
plt.subplots_adjust(hspace=hspace, wspace=wspace)
for i, img in enumerate(imgs):
ax = plt.subplot(row_number, ncols, i + 1)
ax.set_title("%s\n (%dx%d)" % (titles[i], img.shape[1], img.shape[0]))
plt.imshow(img, c | return (1,1) | conditional_block | |
plot_utils.py | _OR_DATAFRAME.__class__.__name__ == 'str'):
data = pd.read_csv(FILE_PATH_OR_DATAFRAME)
data.columns = np.array([str.strip(col) for col in data.columns])
else:
data = FILE_PATH_OR_DATAFRAME
x,y,z = data[x_label], data[y_label], data[metrix]
fig = plt.figure()
ax = fig.gca(projection="3d")
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_zlabel(metrix)
ax.scatter(x,y, z, c=z, cmap = "viridis")
plt.show()
def polyine_3d(x_label, y_label, metrix, data, occl_counted=False):
scenes = data["scene"].unique()
X = np.array(data[x_label].unique())
Y= np.array(data[y_label].unique())
data = data[data["are_occlusions_errors"] == occl_counted]
data = data.sort_values(by=[x_label, y_label])
z = pd.pivot_table(data, values=[metrix], columns=[x_label], index=[y_label]).values
Z = np.nan_to_num(z, nan=2000)
verts = []
mins = []
for i in range(X.shape[0]):
current_column = Z[:, i]
min_loc, min_val = X[np.argmin(current_column)], current_column.min()
temp = list(zip(Y, current_column))
verts.append(temp)
mins.append((min_loc, min_val, X[i]))
stop_here = 1
poly = PolyCollection(verts, facecolors=[get_random_color() for x in X])
poly.set_alpha(1)
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
surf_x, surf_y = np.meshgrid(Y, X)
surf_z = np.empty((len(X), len(Y)))
surf_z[:, :] = data[metrix].min()
ax.plot_surface(surf_x, surf_y, surf_z, color=(0.3,0.3,0.9, 0.6))
ax.add_collection3d(poly, zs = X, zdir='y')
#annotating minimums for enhanced readibility
for x,z,y in mins:
label = 'min: %.2f (%d, %d)' % (z, x, y)
ax.text(x, y, z, label)
ax.set_xlabel(y_label)
ax.set_xlim3d(0, Y[-1])
ax.set_ylabel(x_label)
ax.set_ylim3d(0, X[-1])
ax.set_zlabel(metrix)
ax.set_zlim3d(0, 2000)
plt.grid()
plt.show()
return fig, ax
| def bar_3d_by_scenes(x_label, y_label, metrix, FILE_PATH_OR_DATAFRAME, occl_counted=False):
if (FILE_PATH_OR_DATAFRAME.__class__.__name__ == 'str'):
data = pd.read_csv(FILE_PATH_OR_DATAFRAME)
data.columns = np.array([str.strip(col) for col in data.columns])
else:
data = FILE_PATH_OR_DATAFRAME
scenes = data["scene"].unique()
data = data[data["are_occlusions_errors"]==occl_counted]
data = data.sort_values(by=[x_label, y_label])
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
colors = [get_random_color() for scene in scenes]
for x_param in data[x_label].unique():
temp_outer = data[data[x_label]==x_param]
for i, scene in enumerate(scenes):
temp_inner = temp_outer[(temp_outer["scene"]==scene)]
ax.bar(temp_inner[x_label], temp_inner[metrix], zs=temp_inner[y_label], zdir="y", color=colors[i], alpha=0.8)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_zlabel(metrix)
plt.show()
def get_random_color(alpha=0.8):
r = np.random.rand()
g = np.random.rand()
b = np.random.rand()
return (r,g,b, alpha)
def plot_3d_results(x_label,y_label,metrix, FILE_PATH_OR_DATAFRAME, steps=None):
if(FILE_PATH_OR_DATAFRAME.__class__.__name__ == 'str'):
data = pd.read_csv(FILE_PATH_OR_DATAFRAME)
data.columns = np.array([str.strip(col) for col in data.columns])
else:
data = FILE_PATH_OR_DATAFRAME
x = np.array(data[x_label].unique())[:, np.newaxis]
y = np.array(data[y_label].unique())[:, np.newaxis]
z = pd.pivot_table(data, values=[metrix], columns=[x_label], index=[y_label]).values
z = np.nan_to_num(z, nan=1000000)
print("Z's shape is: {0}".format(z.shape))
x_diff_step = (x.max() - x.min()) / z.shape[1] if steps is None else steps[1]
y_diff_step = (y.max()-y.min())/z.shape[0] if steps is None else steps[0]
X = np.arange(x.min(), x.max()+1, x_diff_step)[:, np.newaxis]
Y = np.arange(y.min(), y.max() +1, y_diff_step)[:, np.newaxis]
X, Y = np.meshgrid(X, Y)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_zlim(data[metrix].min()-100, data[metrix].max()+100)
ax.plot_surface(X, Y, z, cmap=cm.tab20b)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_zlabel(metrix)
plt.show()
row_with_min = data[metrix].idxmin()
min_row = data.loc[row_with_min]
return min_row
#it is 4d in reality
def plotly_4d_results(x_label,y_label, z_label, metrix, FILE_PATH_OR_DATAFRAME, ):
if(FILE_PATH_OR_DATAFRAME.__class__.__name__ == 'str'):
data = pd.read_csv(FILE_PATH_OR_DATAFRAME)
data.columns = np.array([str.strip(col) for col in data.columns])
else:
data = FILE_PATH_OR_DATAFRAME
data = data[[x_label, y_label, z_label, metrix]]
data = data.sort_values(by=[x_label, y_label, z_label], ascending = True)
values = np.nan_to_num(data[[metrix]].values)
x = np.array(data[x_label].unique())[:, np.newaxis]
y = np.array(data[y_label].unique())[:, np.newaxis]
z = np.array(data[z_label].unique())[:, np.newaxis]
x_step = (x.max() - x.min()) / (x.shape[0]-1)
y_step = (y.max() - y.min()) / (y.shape[0]-1)
z_step =(z.max() - z.min()) / (z.shape[0]-1)
# good cmap = px.colors.diverging.Spectral
# px.colors.sequential.Rainbow
# px.colors.sequential.Angset
X, Y, Z = np.mgrid[x.min():x.max()+x_step:x_step, y.min():y.max()+y_step:y_step, z.min():z.max()+z_step:z_step]
fig = go.Figure(data=go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=values.flatten(),
cmin=values.min(),
cmax=values.max(),
opacity=0.3, # needs to be small to see through all surfaces
surface_count=20,
colorscale=px.colors.diverging.Spectral
))
fig.show()
def plot_disparity_3d(disparity, cmm = cm.viridis):
x = np.arange(0, disparity.shape[0])[:, np.newaxis]
y = np.arange(0, disparity.shape[1])[:, np.newaxis]
fig = plt.figure()
ax = fig.gca(projection="3d")
ax.plot_surface(x,y.T, disparity, cmap = cmm)
def plot_images(imgs, titles, cmode = "gray", ncols= 4, hspace=0.5, wspace=0.5):
assert len(imgs) == len(titles)
n = len(imgs)
row_number = math.ceil(n / ncols)
fig = plt.subplots(figsize=[20, int(4*row_number)])
plt.subplots_adjust(hspace=hspace, wspace=wspace)
for i, img in enumerate(imgs):
ax = plt.subplot(row_number, ncols, i + 1)
ax.set_title("%s\n (%dx%d)" % (titles[i], img.shape[1], img.shape[0]))
plt.imshow(img, cmode)
return fig
if __name__ == "__main__":
import sys
import os
from components.utils import middlebury_utils as mbu
import project_helpers
sys.path.append(os.path.join("..", ".."))
ROOT_PATH = project_helpers.get_project_dir()
EXPERIMENT_TITLE = "EXP_000-Baseline"
DATASET_FOLDER = os.path.join(ROOT | random_line_split | |
plot_utils.py |
def scatter_3d_results(x_label, y_label, metrix, FILE_PATH_OR_DATAFRAME, cmm="viridis"):
if(FILE_PATH_OR_DATAFRAME.__class__.__name__ == 'str'):
data = pd.read_csv(FILE_PATH_OR_DATAFRAME)
data.columns = np.array([str.strip(col) for col in data.columns])
else:
data = FILE_PATH_OR_DATAFRAME
x,y,z = data[x_label], data[y_label], data[metrix]
fig = plt.figure()
ax = fig.gca(projection="3d")
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_zlabel(metrix)
ax.scatter(x,y, z, c=z, cmap = "viridis")
plt.show()
def polyine_3d(x_label, y_label, metrix, data, occl_counted=False):
scenes = data["scene"].unique()
X = np.array(data[x_label].unique())
Y= np.array(data[y_label].unique())
data = data[data["are_occlusions_errors"] == occl_counted]
data = data.sort_values(by=[x_label, y_label])
z = pd.pivot_table(data, values=[metrix], columns=[x_label], index=[y_label]).values
Z = np.nan_to_num(z, nan=2000)
verts = []
mins = []
for i in range(X.shape[0]):
current_column = Z[:, i]
min_loc, min_val = X[np.argmin(current_column)], current_column.min()
temp = list(zip(Y, current_column))
verts.append(temp)
mins.append((min_loc, min_val, X[i]))
stop_here = 1
poly = PolyCollection(verts, facecolors=[get_random_color() for x in X])
poly.set_alpha(1)
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
surf_x, surf_y = np.meshgrid(Y, X)
surf_z = np.empty((len(X), len(Y)))
surf_z[:, :] = data[metrix].min()
ax.plot_surface(surf_x, surf_y, surf_z, color=(0.3,0.3,0.9, 0.6))
ax.add_collection3d(poly, zs = X, zdir='y')
#annotating minimums for enhanced readibility
for x,z,y in mins:
label = 'min: %.2f (%d, %d)' % (z, x, y)
ax.text(x, y, z, label)
ax.set_xlabel(y_label)
ax.set_xlim3d(0, Y[-1])
ax.set_ylabel(x_label)
ax.set_ylim3d(0, X[-1])
ax.set_zlabel(metrix)
ax.set_zlim3d(0, 2000)
plt.grid()
plt.show()
return fig, ax
def bar_3d_by_scenes(x_label, y_label, metrix, FILE_PATH_OR_DATAFRAME, occl_counted=False):
if (FILE_PATH_OR_DATAFRAME.__class__.__name__ == 'str'):
data = pd.read_csv(FILE_PATH_OR_DATAFRAME)
data.columns = np.array([str.strip(col) for col in data.columns])
else:
data = FILE_PATH_OR_DATAFRAME
scenes = data["scene"].unique()
data = data[data["are_occlusions_errors"]==occl_counted]
data = data.sort_values(by=[x_label, y_label])
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
colors = [get_random_color() for scene in scenes]
for x_param in data[x_label].unique():
temp_outer = data[data[x_label]==x_param]
for i, scene in enumerate(scenes):
temp_inner = temp_outer[(temp_outer["scene"]==scene)]
ax.bar(temp_inner[x_label], temp_inner[metrix], zs=temp_inner[y_label], zdir="y", color=colors[i], alpha=0.8)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_zlabel(metrix)
plt.show()
def get_random_color(alpha=0.8):
r = np.random.rand()
g = np.random.rand()
b = np.random.rand()
return (r,g,b, alpha)
def plot_3d_results(x_label,y_label,metrix, FILE_PATH_OR_DATAFRAME, steps=None):
if(FILE_PATH_OR_DATAFRAME.__class__.__name__ == 'str'):
data = pd.read_csv(FILE_PATH_OR_DATAFRAME)
data.columns = np.array([str.strip(col) for col in data.columns])
else:
data = FILE_PATH_OR_DATAFRAME
x = np.array(data[x_label].unique())[:, np.newaxis]
y = np.array(data[y_label].unique())[:, np.newaxis]
z = pd.pivot_table(data, values=[metrix], columns=[x_label], index=[y_label]).values
z = np.nan_to_num(z, nan=1000000)
print("Z's shape is: {0}".format(z.shape))
x_diff_step = (x.max() - x.min()) / z.shape[1] if steps is None else steps[1]
y_diff_step = (y.max()-y.min())/z.shape[0] if steps is None else steps[0]
X = np.arange(x.min(), x.max()+1, x_diff_step)[:, np.newaxis]
Y = np.arange(y.min(), y.max() +1, y_diff_step)[:, np.newaxis]
X, Y = np.meshgrid(X, Y)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_zlim(data[metrix].min()-100, data[metrix].max()+100)
ax.plot_surface(X, Y, z, cmap=cm.tab20b)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_zlabel(metrix)
plt.show()
row_with_min = data[metrix].idxmin()
min_row = data.loc[row_with_min]
return min_row
#it is 4d in reality
def plotly_4d_results(x_label,y_label, z_label, metrix, FILE_PATH_OR_DATAFRAME, ):
if(FILE_PATH_OR_DATAFRAME.__class__.__name__ == 'str'):
data = pd.read_csv(FILE_PATH_OR_DATAFRAME)
data.columns = np.array([str.strip(col) for col in data.columns])
else:
data = FILE_PATH_OR_DATAFRAME
data = data[[x_label, y_label, z_label, metrix]]
data = data.sort_values(by=[x_label, y_label, z_label], ascending = True)
values = np.nan_to_num(data[[metrix]].values)
x = np.array(data[x_label].unique())[:, np.newaxis]
y = np.array(data[y_label].unique())[:, np.newaxis]
z = np.array(data[z_label].unique())[:, np.newaxis]
x_step = (x.max() - x.min()) / (x.shape[0]-1)
y_step = (y.max() - y.min()) / (y.shape[0]-1)
z_step =(z.max() - z.min()) / (z.shape[0]-1)
# good cmap = px.colors.diverging.Spectral
# px.colors.sequential.Rainbow
# px.colors.sequential.Angset
X, Y, Z = np.mgrid[x.min():x.max()+x_step:x_step, y.min():y.max()+y_step:y_step, z.min():z.max()+z_step:z_step]
fig = go.Figure(data=go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=values.flatten(),
cmin=values.min(),
cmax=values.max(),
opacity=0.3, # needs to be small to see through all surfaces
surface_count=20,
colorscale=px.colors.diverging.Spectral
))
fig.show()
def plot_disparity_3d(disparity, cmm = cm.viridis):
x = np.arange(0, disparity.shape[0])[:, np.newaxis]
y = np.arange(0, disparity.shape[1])[:, np.newaxis]
fig = plt.figure()
ax = fig.gca(projection="3d")
ax.plot_surface(x,y.T, disparity, cmap = cmm)
def plot_images(imgs, titles, cmode = "gray", ncols= 4, hspace=0.5, wspace=0.5):
assert len(imgs) == len(titles)
n = len(imgs)
row_number = math.ceil(n / ncols)
fig = plt.subplots(figsize=[20, int(4*row_number)])
plt.subplots_adjust(hspace=hspace, wspace=wspace)
for i, img in enumerate(imgs):
ax = plt.subplot(row_number, ncols, i + 1)
ax.set_title("%s\n (%dx%d)" % ( | if next==0:
return (1,0)
if(current==next):
return (1,1)
#if it is brighter?
if(next>current):
return (np.abs(next-current)+1, 1)
#if it is darker?
return (1,np.abs(next - current)+1) | identifier_body | |
gui.py | () == '':
return 0
return 1
def apply(self):
print("apply hit")
print(str(self.new_course_ID.get()))
# self.parent.withdraw()
# TODO: Save this to the actual course data. Then draw window.
course_window(self.new_course_ID.get().strip(), self.new_course_name.get().strip())
class New_Student(simpledialog.Dialog): # inherit tkinter.simpledialog
def __init__(self, parent):
# inherited constructor needs original window
super().__init__(parent, title="Enter Student Information:")
def body(self, master):
tkinter.Label(master, text="Last Name").grid(
column=0, row=0, sticky='w')
tkinter.Label(master, text="First Name:").grid(
column=0, row=1)
self.last_name_entry = tkinter.Entry(master)
self.first_name_entry = tkinter.Entry(master)
self.last_name_entry.grid(column=1, row=0)
self.first_name_entry.grid(column=1, row=1)
return None
def validate(self):
if self.last_name_entry.get().strip() == '' or self.first_name_entry.get().strip() == '':
return 0
return 1
def apply(self):
print("apply hit")
self.last_name = self.last_name_entry.get().strip()
self.first_name = self.first_name_entry.get().strip()
class Section_Tree(ttk.Treeview): #table view. possibly rewrite with inheritance
def __init__(self, master, section=Course('MAC000', 'test_000').sectionList[0]):
# self.section_tree = section_tree
self.section = section
self.master = master
self.student_grade_list = self.section.student_grade_list
super().__init__(master)
#Tree view
#formatting columns
header_name_dict = {
# 'student_id':'ID',
'student_name': 'Name',
'attendance': 'Attendance',
'homework': 'Homework',
'quiz': 'Quiz',
'exam': 'Exam'
}
self['columns'] = list(header_name_dict.keys())
for key, value in header_name_dict.items():
self.column(key, width=50)
self.heading(key, text=value)
self.heading('#0', text='ID') #ID pertains to student ID
self.column('#0', width=40)
self.column('attendance', width=70)
self.column('homework', width=70)
self.column('student_name', width=180)
#inserting existing values from section
for student_grade in self.student_grade_list:
a, b, text, values = self.gen_child(student_grade)
#b, c, and d, e should be attendances, homeworks, quizzes, exams
self.insert(a, b, text=text, values=values)
def gen_child(self, student_grade, last_name = None, first_name = None):
#if options are provided, overwrite info with given info (should reflect in data as well)
if last_name != None and first_name != None:
student_grade['student'].last_name = last_name
student_grade['student'].first_name = first_name
student_grade['student'].updateLF()
student_ID = student_grade['student'].student_id
student_last = student_grade['student'].last_name
student_first = student_grade['student'].first_name
student_last_first = student_grade['student'].last_first
return ('', 'end', student_ID, (student_last_first , 'a', 'b', 'c', 'd'))
def add_student(self, last_name = 'test', first_name = 'name_man'):
# first gets the info. Then adds it to the section. then inserts the data into the tree.
# it's possible that the data may be reconstructed. so it's properly synced (need to decide best route)
# new_student = New_Student(self.master)
new_student = New_Student(self)
last_name = new_student.last_name
first_name = new_student.first_name
print('adding student:', last_name, first_name)
new_student_grade = self.section.addStudentGrade(last_name, first_name) #this adds a student_grade, not Student
print(new_student_grade)
a, b, text, values = self.gen_child(new_student_grade, last_name, first_name)
self.insert(a, b, text=text, values=values)
def edit_student(self):
# TODO: Have the whole student_grade be stored as a hidden value
focus = self.focus()
# print(focus[]
#this is dirty. doesn't save information properly
#Having the student as an argument would be best student would be best
l_f = self.item(focus)['values'][0].split(",")
last = l_f[0]
first = l_f[1]
print('test', l_f)
new_student = Edit_Student(self.master, last, first)
last_first = new_student.last_name + ', ' + new_student.first_name
self.item(focus, values=(last_first, new_student.att_avg, new_student.hw_avg, new_student.exam_avg, new_student.quiz_avg))
print(self.item(self.focus()))
class Edit_Student(simpledialog.Dialog): # inherit tkinter.simpledialog
def __init__(self, parent, last_name='l', first_name='f'):
# inherited constructor needs original window
self.last_name = last_name
self.first_name = first_name
super().__init__(parent, title="Edit Student Information:")
def body(self, master):
tkinter.Label(master, text="Last Name").grid(
column=0, row=0, sticky='e')
tkinter.Label(master, text="First Name:").grid(
column=0, row=1, sticky='e')
print(self.last_name, self.first_name)
self.last_name_entry = tkinter.Entry(master)
self.last_name_entry.insert(0, self.last_name)
self.first_name_entry = tkinter.Entry(master)
self.first_name_entry.insert(0, self.first_name)
self.last_name_entry.grid(column=1, row=0)
self.first_name_entry.grid(column=1, row=1)
tkinter.Label(master, text="Attendance\n(0 for absent, 1 for present):").grid(row=2)
tkinter.Label(master, text="Homework:\n(from 0 - 100)").grid(row=2, column=1)
tkinter.Label(master, text="Quiz:\n(from 0 - 100)").grid(row=2, column=2)
tkinter.Label(master, text="Exam:\n(from 0 - 100)").grid(row=2, column=3)
self.att_entry = []
for i in range(3,15):
entry = tkinter.Entry(master)
entry.insert(0, '0')
entry.grid(row=i, column = 0)
self.att_entry.append(entry)
self.hw_entry = []
for i in range(3, 15):
entry = tkinter.Entry(master)
entry.insert(0, '0')
entry.grid(row=i, column=1)
self.hw_entry.append(entry)
self.quiz_entry = []
for i in range(3, 15):
entry = tkinter.Entry(master)
entry.insert(0, '0')
entry.grid(row=i, column=2)
self.quiz_entry.append(entry)
self.exam_entry = []
for i in range(3, 7):
entry = tkinter.Entry(master)
entry.insert(0, '0')
entry.grid(row=i, column=3)
self.exam_entry.append(entry)
return None
def validate(self):
try:
for entry in self.att_entry:
value = int(entry.get().strip())
if value > 1 or value < 0:
return 0
for entry in self.hw_entry:
value = int(entry.get().strip())
if value > 100 or value < 0:
return 0
for entry in self.quiz_entry:
value = int(entry.get().strip())
if value > 100 or value < 0:
return 0
for entry in self.exam_entry:
|
except ValueError:
return 0
if self.last_name_entry.get().strip() == '' or self.first_name_entry.get().strip() == '':
return 0
return 1
def apply(self):
print("apply hit")
self.last_name = self.last_name_entry.get().strip()
self.first_name = self.first_name_entry.get().strip()
self.att_avg = 100 # reports back as percentage
att_sum = 0
for i in self.att_entry:
att_sum += int(i.get().strip())
print(att_sum)
self.att_avg = str(round(att_sum / len(self.att_entry) * 100, 2))+"%"
hw_sum = 0
for i in self.hw_entry:
hw_sum += int(i.get().strip()) / 100
self.hw_avg = str(round(hw_sum / len(self.hw_entry) * 100, 2))+"%"
quiz_sum = | value = int(entry.get().strip())
if value > 100 or value < 0:
return 0 | conditional_block |
gui.py | () == '':
return 0
return 1
def apply(self):
print("apply hit")
print(str(self.new_course_ID.get()))
# self.parent.withdraw()
# TODO: Save this to the actual course data. Then draw window.
course_window(self.new_course_ID.get().strip(), self.new_course_name.get().strip())
class New_Student(simpledialog.Dialog): # inherit tkinter.simpledialog
def __init__(self, parent):
# inherited constructor needs original window
super().__init__(parent, title="Enter Student Information:")
def body(self, master):
tkinter.Label(master, text="Last Name").grid(
column=0, row=0, sticky='w')
tkinter.Label(master, text="First Name:").grid(
column=0, row=1)
self.last_name_entry = tkinter.Entry(master)
self.first_name_entry = tkinter.Entry(master)
self.last_name_entry.grid(column=1, row=0)
self.first_name_entry.grid(column=1, row=1)
return None
def validate(self):
if self.last_name_entry.get().strip() == '' or self.first_name_entry.get().strip() == '':
return 0
return 1
def apply(self):
print("apply hit")
self.last_name = self.last_name_entry.get().strip()
self.first_name = self.first_name_entry.get().strip()
class Section_Tree(ttk.Treeview): #table view. possibly rewrite with inheritance
def __init__(self, master, section=Course('MAC000', 'test_000').sectionList[0]):
# self.section_tree = section_tree
self.section = section
self.master = master
self.student_grade_list = self.section.student_grade_list
super().__init__(master)
#Tree view
#formatting columns
header_name_dict = {
# 'student_id':'ID',
'student_name': 'Name',
'attendance': 'Attendance',
'homework': 'Homework',
'quiz': 'Quiz',
'exam': 'Exam'
}
self['columns'] = list(header_name_dict.keys())
for key, value in header_name_dict.items():
self.column(key, width=50)
self.heading(key, text=value)
self.heading('#0', text='ID') #ID pertains to student ID
self.column('#0', width=40)
self.column('attendance', width=70)
self.column('homework', width=70)
self.column('student_name', width=180)
#inserting existing values from section
for student_grade in self.student_grade_list:
a, b, text, values = self.gen_child(student_grade)
#b, c, and d, e should be attendances, homeworks, quizzes, exams
self.insert(a, b, text=text, values=values)
def gen_child(self, student_grade, last_name = None, first_name = None):
#if options are provided, overwrite info with given info (should reflect in data as well)
if last_name != None and first_name != None:
student_grade['student'].last_name = last_name
student_grade['student'].first_name = first_name
student_grade['student'].updateLF()
student_ID = student_grade['student'].student_id
student_last = student_grade['student'].last_name
student_first = student_grade['student'].first_name
student_last_first = student_grade['student'].last_first
return ('', 'end', student_ID, (student_last_first , 'a', 'b', 'c', 'd'))
def add_student(self, last_name = 'test', first_name = 'name_man'):
# first gets the info. Then adds it to the section. then inserts the data into the tree.
# it's possible that the data may be reconstructed. so it's properly synced (need to decide best route)
# new_student = New_Student(self.master)
new_student = New_Student(self)
last_name = new_student.last_name
first_name = new_student.first_name
print('adding student:', last_name, first_name)
new_student_grade = self.section.addStudentGrade(last_name, first_name) #this adds a student_grade, not Student
print(new_student_grade)
a, b, text, values = self.gen_child(new_student_grade, last_name, first_name)
self.insert(a, b, text=text, values=values)
def edit_student(self):
# TODO: Have the whole student_grade be stored as a hidden value
focus = self.focus()
# print(focus[]
#this is dirty. doesn't save information properly
#Having the student as an argument would be best student would be best
l_f = self.item(focus)['values'][0].split(",")
last = l_f[0]
first = l_f[1]
print('test', l_f)
new_student = Edit_Student(self.master, last, first)
last_first = new_student.last_name + ', ' + new_student.first_name
self.item(focus, values=(last_first, new_student.att_avg, new_student.hw_avg, new_student.exam_avg, new_student.quiz_avg))
print(self.item(self.focus()))
class Edit_Student(simpledialog.Dialog): # inherit tkinter.simpledialog
def __init__(self, parent, last_name='l', first_name='f'):
# inherited constructor needs original window
self.last_name = last_name
self.first_name = first_name
super().__init__(parent, title="Edit Student Information:")
def body(self, master):
tkinter.Label(master, text="Last Name").grid(
column=0, row=0, sticky='e')
tkinter.Label(master, text="First Name:").grid(
column=0, row=1, sticky='e')
print(self.last_name, self.first_name)
self.last_name_entry = tkinter.Entry(master)
self.last_name_entry.insert(0, self.last_name)
self.first_name_entry = tkinter.Entry(master)
self.first_name_entry.insert(0, self.first_name)
self.last_name_entry.grid(column=1, row=0)
self.first_name_entry.grid(column=1, row=1)
tkinter.Label(master, text="Attendance\n(0 for absent, 1 for present):").grid(row=2)
tkinter.Label(master, text="Homework:\n(from 0 - 100)").grid(row=2, column=1)
tkinter.Label(master, text="Quiz:\n(from 0 - 100)").grid(row=2, column=2)
tkinter.Label(master, text="Exam:\n(from 0 - 100)").grid(row=2, column=3)
self.att_entry = []
for i in range(3,15):
entry = tkinter.Entry(master)
entry.insert(0, '0')
entry.grid(row=i, column = 0)
self.att_entry.append(entry)
self.hw_entry = []
for i in range(3, 15):
entry = tkinter.Entry(master)
entry.insert(0, '0')
entry.grid(row=i, column=1)
self.hw_entry.append(entry)
self.quiz_entry = []
for i in range(3, 15):
entry = tkinter.Entry(master)
entry.insert(0, '0')
entry.grid(row=i, column=2)
self.quiz_entry.append(entry)
self.exam_entry = []
for i in range(3, 7):
entry = tkinter.Entry(master)
entry.insert(0, '0')
entry.grid(row=i, column=3)
self.exam_entry.append(entry)
return None
def validate(self):
try:
for entry in self.att_entry:
value = int(entry.get().strip())
if value > 1 or value < 0:
return 0
for entry in self.hw_entry:
value = int(entry.get().strip())
if value > 100 or value < 0:
return 0
for entry in self.quiz_entry:
value = int(entry.get().strip())
if value > 100 or value < 0:
return 0
for entry in self.exam_entry:
value = int(entry.get().strip())
if value > 100 or value < 0:
return 0
except ValueError:
return 0
if self.last_name_entry.get().strip() == '' or self.first_name_entry.get().strip() == '':
return 0
return 1
def | (self):
print("apply hit")
self.last_name = self.last_name_entry.get().strip()
self.first_name = self.first_name_entry.get().strip()
self.att_avg = 100 # reports back as percentage
att_sum = 0
for i in self.att_entry:
att_sum += int(i.get().strip())
print(att_sum)
self.att_avg = str(round(att_sum / len(self.att_entry) * 100, 2))+"%"
hw_sum = 0
for i in self.hw_entry:
hw_sum += int(i.get().strip()) / 100
self.hw_avg = str(round(hw_sum / len(self.hw_entry) * 100, 2))+"%"
quiz_sum | apply | identifier_name |
gui.py | () == '':
return 0
return 1
def apply(self):
print("apply hit")
print(str(self.new_course_ID.get()))
# self.parent.withdraw()
# TODO: Save this to the actual course data. Then draw window.
course_window(self.new_course_ID.get().strip(), self.new_course_name.get().strip())
class New_Student(simpledialog.Dialog): # inherit tkinter.simpledialog
def __init__(self, parent):
# inherited constructor needs original window |
tkinter.Label(master, text="Last Name").grid(
column=0, row=0, sticky='w')
tkinter.Label(master, text="First Name:").grid(
column=0, row=1)
self.last_name_entry = tkinter.Entry(master)
self.first_name_entry = tkinter.Entry(master)
self.last_name_entry.grid(column=1, row=0)
self.first_name_entry.grid(column=1, row=1)
return None
def validate(self):
if self.last_name_entry.get().strip() == '' or self.first_name_entry.get().strip() == '':
return 0
return 1
def apply(self):
print("apply hit")
self.last_name = self.last_name_entry.get().strip()
self.first_name = self.first_name_entry.get().strip()
class Section_Tree(ttk.Treeview): #table view. possibly rewrite with inheritance
def __init__(self, master, section=Course('MAC000', 'test_000').sectionList[0]):
# self.section_tree = section_tree
self.section = section
self.master = master
self.student_grade_list = self.section.student_grade_list
super().__init__(master)
#Tree view
#formatting columns
header_name_dict = {
# 'student_id':'ID',
'student_name': 'Name',
'attendance': 'Attendance',
'homework': 'Homework',
'quiz': 'Quiz',
'exam': 'Exam'
}
self['columns'] = list(header_name_dict.keys())
for key, value in header_name_dict.items():
self.column(key, width=50)
self.heading(key, text=value)
self.heading('#0', text='ID') #ID pertains to student ID
self.column('#0', width=40)
self.column('attendance', width=70)
self.column('homework', width=70)
self.column('student_name', width=180)
#inserting existing values from section
for student_grade in self.student_grade_list:
a, b, text, values = self.gen_child(student_grade)
#b, c, and d, e should be attendances, homeworks, quizzes, exams
self.insert(a, b, text=text, values=values)
def gen_child(self, student_grade, last_name = None, first_name = None):
#if options are provided, overwrite info with given info (should reflect in data as well)
if last_name != None and first_name != None:
student_grade['student'].last_name = last_name
student_grade['student'].first_name = first_name
student_grade['student'].updateLF()
student_ID = student_grade['student'].student_id
student_last = student_grade['student'].last_name
student_first = student_grade['student'].first_name
student_last_first = student_grade['student'].last_first
return ('', 'end', student_ID, (student_last_first , 'a', 'b', 'c', 'd'))
def add_student(self, last_name = 'test', first_name = 'name_man'):
# first gets the info. Then adds it to the section. then inserts the data into the tree.
# it's possible that the data may be reconstructed. so it's properly synced (need to decide best route)
# new_student = New_Student(self.master)
new_student = New_Student(self)
last_name = new_student.last_name
first_name = new_student.first_name
print('adding student:', last_name, first_name)
new_student_grade = self.section.addStudentGrade(last_name, first_name) #this adds a student_grade, not Student
print(new_student_grade)
a, b, text, values = self.gen_child(new_student_grade, last_name, first_name)
self.insert(a, b, text=text, values=values)
def edit_student(self):
# TODO: Have the whole student_grade be stored as a hidden value
focus = self.focus()
# print(focus[]
#this is dirty. doesn't save information properly
#Having the student as an argument would be best student would be best
l_f = self.item(focus)['values'][0].split(",")
last = l_f[0]
first = l_f[1]
print('test', l_f)
new_student = Edit_Student(self.master, last, first)
last_first = new_student.last_name + ', ' + new_student.first_name
self.item(focus, values=(last_first, new_student.att_avg, new_student.hw_avg, new_student.exam_avg, new_student.quiz_avg))
print(self.item(self.focus()))
class Edit_Student(simpledialog.Dialog): # inherit tkinter.simpledialog
def __init__(self, parent, last_name='l', first_name='f'):
# inherited constructor needs original window
self.last_name = last_name
self.first_name = first_name
super().__init__(parent, title="Edit Student Information:")
def body(self, master):
tkinter.Label(master, text="Last Name").grid(
column=0, row=0, sticky='e')
tkinter.Label(master, text="First Name:").grid(
column=0, row=1, sticky='e')
print(self.last_name, self.first_name)
self.last_name_entry = tkinter.Entry(master)
self.last_name_entry.insert(0, self.last_name)
self.first_name_entry = tkinter.Entry(master)
self.first_name_entry.insert(0, self.first_name)
self.last_name_entry.grid(column=1, row=0)
self.first_name_entry.grid(column=1, row=1)
tkinter.Label(master, text="Attendance\n(0 for absent, 1 for present):").grid(row=2)
tkinter.Label(master, text="Homework:\n(from 0 - 100)").grid(row=2, column=1)
tkinter.Label(master, text="Quiz:\n(from 0 - 100)").grid(row=2, column=2)
tkinter.Label(master, text="Exam:\n(from 0 - 100)").grid(row=2, column=3)
self.att_entry = []
for i in range(3,15):
entry = tkinter.Entry(master)
entry.insert(0, '0')
entry.grid(row=i, column = 0)
self.att_entry.append(entry)
self.hw_entry = []
for i in range(3, 15):
entry = tkinter.Entry(master)
entry.insert(0, '0')
entry.grid(row=i, column=1)
self.hw_entry.append(entry)
self.quiz_entry = []
for i in range(3, 15):
entry = tkinter.Entry(master)
entry.insert(0, '0')
entry.grid(row=i, column=2)
self.quiz_entry.append(entry)
self.exam_entry = []
for i in range(3, 7):
entry = tkinter.Entry(master)
entry.insert(0, '0')
entry.grid(row=i, column=3)
self.exam_entry.append(entry)
return None
def validate(self):
try:
for entry in self.att_entry:
value = int(entry.get().strip())
if value > 1 or value < 0:
return 0
for entry in self.hw_entry:
value = int(entry.get().strip())
if value > 100 or value < 0:
return 0
for entry in self.quiz_entry:
value = int(entry.get().strip())
if value > 100 or value < 0:
return 0
for entry in self.exam_entry:
value = int(entry.get().strip())
if value > 100 or value < 0:
return 0
except ValueError:
return 0
if self.last_name_entry.get().strip() == '' or self.first_name_entry.get().strip() == '':
return 0
return 1
def apply(self):
print("apply hit")
self.last_name = self.last_name_entry.get().strip()
self.first_name = self.first_name_entry.get().strip()
self.att_avg = 100 # reports back as percentage
att_sum = 0
for i in self.att_entry:
att_sum += int(i.get().strip())
print(att_sum)
self.att_avg = str(round(att_sum / len(self.att_entry) * 100, 2))+"%"
hw_sum = 0
for i in self.hw_entry:
hw_sum += int(i.get().strip()) / 100
self.hw_avg = str(round(hw_sum / len(self.hw_entry) * 100, 2))+"%"
quiz_sum = | super().__init__(parent, title="Enter Student Information:")
def body(self, master): | random_line_split |
gui.py | () == '':
return 0
return 1
def apply(self):
print("apply hit")
print(str(self.new_course_ID.get()))
# self.parent.withdraw()
# TODO: Save this to the actual course data. Then draw window.
course_window(self.new_course_ID.get().strip(), self.new_course_name.get().strip())
class New_Student(simpledialog.Dialog): # inherit tkinter.simpledialog
def __init__(self, parent):
# inherited constructor needs original window
super().__init__(parent, title="Enter Student Information:")
def body(self, master):
tkinter.Label(master, text="Last Name").grid(
column=0, row=0, sticky='w')
tkinter.Label(master, text="First Name:").grid(
column=0, row=1)
self.last_name_entry = tkinter.Entry(master)
self.first_name_entry = tkinter.Entry(master)
self.last_name_entry.grid(column=1, row=0)
self.first_name_entry.grid(column=1, row=1)
return None
def validate(self):
if self.last_name_entry.get().strip() == '' or self.first_name_entry.get().strip() == '':
return 0
return 1
def apply(self):
|
class Section_Tree(ttk.Treeview): #table view. possibly rewrite with inheritance
def __init__(self, master, section=Course('MAC000', 'test_000').sectionList[0]):
# self.section_tree = section_tree
self.section = section
self.master = master
self.student_grade_list = self.section.student_grade_list
super().__init__(master)
#Tree view
#formatting columns
header_name_dict = {
# 'student_id':'ID',
'student_name': 'Name',
'attendance': 'Attendance',
'homework': 'Homework',
'quiz': 'Quiz',
'exam': 'Exam'
}
self['columns'] = list(header_name_dict.keys())
for key, value in header_name_dict.items():
self.column(key, width=50)
self.heading(key, text=value)
self.heading('#0', text='ID') #ID pertains to student ID
self.column('#0', width=40)
self.column('attendance', width=70)
self.column('homework', width=70)
self.column('student_name', width=180)
#inserting existing values from section
for student_grade in self.student_grade_list:
a, b, text, values = self.gen_child(student_grade)
#b, c, and d, e should be attendances, homeworks, quizzes, exams
self.insert(a, b, text=text, values=values)
def gen_child(self, student_grade, last_name = None, first_name = None):
#if options are provided, overwrite info with given info (should reflect in data as well)
if last_name != None and first_name != None:
student_grade['student'].last_name = last_name
student_grade['student'].first_name = first_name
student_grade['student'].updateLF()
student_ID = student_grade['student'].student_id
student_last = student_grade['student'].last_name
student_first = student_grade['student'].first_name
student_last_first = student_grade['student'].last_first
return ('', 'end', student_ID, (student_last_first , 'a', 'b', 'c', 'd'))
def add_student(self, last_name = 'test', first_name = 'name_man'):
# first gets the info. Then adds it to the section. then inserts the data into the tree.
# it's possible that the data may be reconstructed. so it's properly synced (need to decide best route)
# new_student = New_Student(self.master)
new_student = New_Student(self)
last_name = new_student.last_name
first_name = new_student.first_name
print('adding student:', last_name, first_name)
new_student_grade = self.section.addStudentGrade(last_name, first_name) #this adds a student_grade, not Student
print(new_student_grade)
a, b, text, values = self.gen_child(new_student_grade, last_name, first_name)
self.insert(a, b, text=text, values=values)
def edit_student(self):
# TODO: Have the whole student_grade be stored as a hidden value
focus = self.focus()
# print(focus[]
#this is dirty. doesn't save information properly
#Having the student as an argument would be best student would be best
l_f = self.item(focus)['values'][0].split(",")
last = l_f[0]
first = l_f[1]
print('test', l_f)
new_student = Edit_Student(self.master, last, first)
last_first = new_student.last_name + ', ' + new_student.first_name
self.item(focus, values=(last_first, new_student.att_avg, new_student.hw_avg, new_student.exam_avg, new_student.quiz_avg))
print(self.item(self.focus()))
class Edit_Student(simpledialog.Dialog): # inherit tkinter.simpledialog
def __init__(self, parent, last_name='l', first_name='f'):
# inherited constructor needs original window
self.last_name = last_name
self.first_name = first_name
super().__init__(parent, title="Edit Student Information:")
def body(self, master):
tkinter.Label(master, text="Last Name").grid(
column=0, row=0, sticky='e')
tkinter.Label(master, text="First Name:").grid(
column=0, row=1, sticky='e')
print(self.last_name, self.first_name)
self.last_name_entry = tkinter.Entry(master)
self.last_name_entry.insert(0, self.last_name)
self.first_name_entry = tkinter.Entry(master)
self.first_name_entry.insert(0, self.first_name)
self.last_name_entry.grid(column=1, row=0)
self.first_name_entry.grid(column=1, row=1)
tkinter.Label(master, text="Attendance\n(0 for absent, 1 for present):").grid(row=2)
tkinter.Label(master, text="Homework:\n(from 0 - 100)").grid(row=2, column=1)
tkinter.Label(master, text="Quiz:\n(from 0 - 100)").grid(row=2, column=2)
tkinter.Label(master, text="Exam:\n(from 0 - 100)").grid(row=2, column=3)
self.att_entry = []
for i in range(3,15):
entry = tkinter.Entry(master)
entry.insert(0, '0')
entry.grid(row=i, column = 0)
self.att_entry.append(entry)
self.hw_entry = []
for i in range(3, 15):
entry = tkinter.Entry(master)
entry.insert(0, '0')
entry.grid(row=i, column=1)
self.hw_entry.append(entry)
self.quiz_entry = []
for i in range(3, 15):
entry = tkinter.Entry(master)
entry.insert(0, '0')
entry.grid(row=i, column=2)
self.quiz_entry.append(entry)
self.exam_entry = []
for i in range(3, 7):
entry = tkinter.Entry(master)
entry.insert(0, '0')
entry.grid(row=i, column=3)
self.exam_entry.append(entry)
return None
def validate(self):
try:
for entry in self.att_entry:
value = int(entry.get().strip())
if value > 1 or value < 0:
return 0
for entry in self.hw_entry:
value = int(entry.get().strip())
if value > 100 or value < 0:
return 0
for entry in self.quiz_entry:
value = int(entry.get().strip())
if value > 100 or value < 0:
return 0
for entry in self.exam_entry:
value = int(entry.get().strip())
if value > 100 or value < 0:
return 0
except ValueError:
return 0
if self.last_name_entry.get().strip() == '' or self.first_name_entry.get().strip() == '':
return 0
return 1
def apply(self):
print("apply hit")
self.last_name = self.last_name_entry.get().strip()
self.first_name = self.first_name_entry.get().strip()
self.att_avg = 100 # reports back as percentage
att_sum = 0
for i in self.att_entry:
att_sum += int(i.get().strip())
print(att_sum)
self.att_avg = str(round(att_sum / len(self.att_entry) * 100, 2))+"%"
hw_sum = 0
for i in self.hw_entry:
hw_sum += int(i.get().strip()) / 100
self.hw_avg = str(round(hw_sum / len(self.hw_entry) * 100, 2))+"%"
quiz_sum | print("apply hit")
self.last_name = self.last_name_entry.get().strip()
self.first_name = self.first_name_entry.get().strip() | identifier_body |
txn.go | }
}
txn.queuing = queueDuration(h, txn.start)
}
txn.attrs.agent.HostDisplayName = txn.Config.HostDisplayName
return txn
}
func (txn *txn) txnEventsEnabled() bool {
return txn.Config.TransactionEvents.Enabled &&
txn.Reply.CollectAnalyticsEvents
}
func (txn *txn) errorEventsEnabled() bool {
return txn.Config.ErrorCollector.CaptureEvents &&
txn.Reply.CollectErrorEvents
}
func (txn *txn) freezeName() {
if txn.ignore || ("" != txn.finalName) {
return
}
txn.finalName = CreateFullTxnName(txn.name, txn.Reply, txn.isWeb)
if "" == txn.finalName {
txn.ignore = true
}
}
func (txn *txn) getsApdex() bool {
return txn.isWeb
}
type createTxnMetricsArgs struct {
isWeb bool
duration time.Duration
exclusive time.Duration
name string
zone apdexZone
apdexThreshold time.Duration
errorsSeen uint64
}
func createTxnMetrics(args createTxnMetricsArgs, metrics *metricTable) {
// Duration Metrics
rollup := backgroundRollup
if args.isWeb {
rollup = webRollup
metrics.addDuration(dispatcherMetric, "", args.duration, 0, forced)
}
metrics.addDuration(args.name, "", args.duration, args.exclusive, forced)
metrics.addDuration(rollup, "", args.duration, args.exclusive, forced)
// Apdex Metrics
if args.zone != apdexNone {
metrics.addApdex(apdexRollup, "", args.apdexThreshold, args.zone, forced)
mname := apdexPrefix + removeFirstSegment(args.name)
metrics.addApdex(mname, "", args.apdexThreshold, args.zone, unforced)
}
// Error Metrics
if args.errorsSeen > 0 {
metrics.addSingleCount(errorsAll, forced)
if args.isWeb {
metrics.addSingleCount(errorsWeb, forced)
} else {
metrics.addSingleCount(errorsBackground, forced)
}
metrics.addSingleCount(errorsPrefix+args.name, forced)
}
}
func (txn *txn) mergeIntoHarvest(h *harvest) {
exclusive := time.Duration(0)
children := tracerRootChildren(&txn.tracer)
if txn.duration > children {
exclusive = txn.duration - children
}
createTxnMetrics(createTxnMetricsArgs{
isWeb: txn.isWeb,
duration: txn.duration,
exclusive: exclusive,
name: txn.finalName,
zone: txn.zone,
apdexThreshold: txn.apdexThreshold,
errorsSeen: txn.errorsSeen,
}, h.metrics)
if txn.queuing > 0 {
h.metrics.addDuration(queueMetric, "", txn.queuing, txn.queuing, forced)
}
mergeBreakdownMetrics(&txn.tracer, h.metrics, txn.finalName, txn.isWeb)
if txn.txnEventsEnabled() {
h.txnEvents.AddTxnEvent(&txnEvent{
Name: txn.finalName,
Timestamp: txn.start,
Duration: txn.duration,
queuing: txn.queuing,
zone: txn.zone,
attrs: txn.attrs,
datastoreExternalTotals: txn.tracer.datastoreExternalTotals,
})
}
requestURI := ""
if nil != txn.Request && nil != txn.Request.URL {
requestURI = safeURL(txn.Request.URL)
}
mergeTxnErrors(h.errorTraces, txn.errors, txn.finalName, requestURI, txn.attrs)
if txn.errorEventsEnabled() {
for _, e := range txn.errors {
h.errorEvents.Add(&errorEvent{
klass: e.klass,
msg: e.msg,
when: e.when,
txnName: txn.finalName,
duration: txn.duration,
queuing: txn.queuing,
attrs: txn.attrs,
datastoreExternalTotals: txn.tracer.datastoreExternalTotals,
})
}
}
}
func responseCodeIsError(cfg *api.Config, code int) bool {
if code < http.StatusBadRequest { // 400
return false
}
for _, ignoreCode := range cfg.ErrorCollector.IgnoreStatusCodes {
if code == ignoreCode {
return false
}
}
return true
}
var (
// statusCodeLookup avoids a strconv.Itoa call.
statusCodeLookup = map[int]string{
100: "100", 101: "101",
200: "200", 201: "201", 202: "202", 203: "203", 204: "204", 205: "205", 206: "206",
300: "300", 301: "301", 302: "302", 303: "303", 304: "304", 305: "305", 307: "307",
400: "400", 401: "401", 402: "402", 403: "403", 404: "404", 405: "405", 406: "406",
407: "407", 408: "408", 409: "409", 410: "410", 411: "411", 412: "412", 413: "413",
414: "414", 415: "415", 416: "416", 417: "417", 418: "418", 428: "428", 429: "429",
431: "431", 451: "451",
500: "500", 501: "501", 502: "502", 503: "503", 504: "504", 505: "505", 511: "511",
}
)
func headersJustWritten(txn *txn, code int) {
if txn.finished {
return
}
if txn.wroteHeader {
return
}
txn.wroteHeader = true
h := txn.W.Header()
txn.attrs.agent.ResponseHeadersContentType = h.Get("Content-Type")
if val := h.Get("Content-Length"); "" != val {
if x, err := strconv.Atoi(val); nil == err {
txn.attrs.agent.ResponseHeadersContentLength = x
}
}
txn.attrs.agent.ResponseCode = statusCodeLookup[code]
if txn.attrs.agent.ResponseCode == "" {
txn.attrs.agent.ResponseCode = strconv.Itoa(code)
}
if responseCodeIsError(&txn.Config, code) {
e := txnErrorFromResponseCode(code)
e.stack = getStackTrace(1)
txn.noticeErrorInternal(e)
}
}
func (txn *txn) Header() http.Header { return txn.W.Header() }
func (txn *txn) Write(b []byte) (int, error) {
n, err := txn.W.Write(b)
txn.Lock()
defer txn.Unlock()
headersJustWritten(txn, http.StatusOK)
return n, err
}
func (txn *txn) WriteHeader(code int) {
txn.W.WriteHeader(code)
txn.Lock()
defer txn.Unlock()
headersJustWritten(txn, code)
}
var (
// ErrAlreadyEnded is returned by public txn methods if End() has
// already been called.
ErrAlreadyEnded = errors.New("transaction has already ended")
)
func (txn *txn) End() error {
txn.Lock()
defer txn.Unlock()
if txn.finished {
return ErrAlreadyEnded
}
txn.finished = true
r := recover()
if nil != r {
e := txnErrorFromPanic(r)
e.stack = getStackTrace(0)
txn.noticeErrorInternal | {
txn := &txn{
txnInput: input,
start: time.Now(),
name: name,
isWeb: nil != input.Request,
attrs: newAttributes(input.attrConfig),
}
if nil != txn.Request {
h := input.Request.Header
txn.attrs.agent.RequestMethod = input.Request.Method
txn.attrs.agent.RequestAcceptHeader = h.Get("Accept")
txn.attrs.agent.RequestContentType = h.Get("Content-Type")
txn.attrs.agent.RequestHeadersHost = h.Get("Host")
txn.attrs.agent.RequestHeadersUserAgent = h.Get("User-Agent")
txn.attrs.agent.RequestHeadersReferer = safeURLFromString(h.Get("Referer"))
if cl := h.Get("Content-Length"); "" != cl {
if x, err := strconv.Atoi(cl); nil == err {
txn.attrs.agent.RequestContentLength = x | identifier_body | |
txn.go | ("Host")
txn.attrs.agent.RequestHeadersUserAgent = h.Get("User-Agent")
txn.attrs.agent.RequestHeadersReferer = safeURLFromString(h.Get("Referer"))
if cl := h.Get("Content-Length"); "" != cl {
if x, err := strconv.Atoi(cl); nil == err {
txn.attrs.agent.RequestContentLength = x
}
}
txn.queuing = queueDuration(h, txn.start)
}
txn.attrs.agent.HostDisplayName = txn.Config.HostDisplayName
return txn
}
func (txn *txn) txnEventsEnabled() bool {
return txn.Config.TransactionEvents.Enabled &&
txn.Reply.CollectAnalyticsEvents
}
func (txn *txn) errorEventsEnabled() bool {
return txn.Config.ErrorCollector.CaptureEvents &&
txn.Reply.CollectErrorEvents
}
func (txn *txn) freezeName() {
if txn.ignore || ("" != txn.finalName) {
return
}
txn.finalName = CreateFullTxnName(txn.name, txn.Reply, txn.isWeb)
if "" == txn.finalName {
txn.ignore = true
}
}
func (txn *txn) getsApdex() bool {
return txn.isWeb
}
type createTxnMetricsArgs struct {
isWeb bool
duration time.Duration
exclusive time.Duration
name string
zone apdexZone
apdexThreshold time.Duration
errorsSeen uint64
}
func createTxnMetrics(args createTxnMetricsArgs, metrics *metricTable) {
// Duration Metrics
rollup := backgroundRollup
if args.isWeb {
rollup = webRollup
metrics.addDuration(dispatcherMetric, "", args.duration, 0, forced)
}
metrics.addDuration(args.name, "", args.duration, args.exclusive, forced)
metrics.addDuration(rollup, "", args.duration, args.exclusive, forced)
// Apdex Metrics
if args.zone != apdexNone {
metrics.addApdex(apdexRollup, "", args.apdexThreshold, args.zone, forced)
mname := apdexPrefix + removeFirstSegment(args.name)
metrics.addApdex(mname, "", args.apdexThreshold, args.zone, unforced)
}
// Error Metrics
if args.errorsSeen > 0 {
metrics.addSingleCount(errorsAll, forced)
if args.isWeb {
metrics.addSingleCount(errorsWeb, forced)
} else {
metrics.addSingleCount(errorsBackground, forced)
}
metrics.addSingleCount(errorsPrefix+args.name, forced)
}
}
func (txn *txn) mergeIntoHarvest(h *harvest) {
exclusive := time.Duration(0)
children := tracerRootChildren(&txn.tracer)
if txn.duration > children {
exclusive = txn.duration - children
}
createTxnMetrics(createTxnMetricsArgs{
isWeb: txn.isWeb,
duration: txn.duration,
exclusive: exclusive,
name: txn.finalName,
zone: txn.zone,
apdexThreshold: txn.apdexThreshold,
errorsSeen: txn.errorsSeen,
}, h.metrics)
if txn.queuing > 0 {
h.metrics.addDuration(queueMetric, "", txn.queuing, txn.queuing, forced)
}
mergeBreakdownMetrics(&txn.tracer, h.metrics, txn.finalName, txn.isWeb)
if txn.txnEventsEnabled() {
h.txnEvents.AddTxnEvent(&txnEvent{
Name: txn.finalName,
Timestamp: txn.start,
Duration: txn.duration,
queuing: txn.queuing,
zone: txn.zone,
attrs: txn.attrs,
datastoreExternalTotals: txn.tracer.datastoreExternalTotals,
})
}
requestURI := ""
if nil != txn.Request && nil != txn.Request.URL {
requestURI = safeURL(txn.Request.URL)
}
mergeTxnErrors(h.errorTraces, txn.errors, txn.finalName, requestURI, txn.attrs)
if txn.errorEventsEnabled() {
for _, e := range txn.errors {
h.errorEvents.Add(&errorEvent{
klass: e.klass,
msg: e.msg,
when: e.when,
txnName: txn.finalName,
duration: txn.duration,
queuing: txn.queuing,
attrs: txn.attrs,
datastoreExternalTotals: txn.tracer.datastoreExternalTotals,
})
}
}
}
func responseCodeIsError(cfg *api.Config, code int) bool {
if code < http.StatusBadRequest { // 400
return false
}
for _, ignoreCode := range cfg.ErrorCollector.IgnoreStatusCodes {
if code == ignoreCode {
return false
}
}
return true
}
var (
// statusCodeLookup avoids a strconv.Itoa call.
statusCodeLookup = map[int]string{
100: "100", 101: "101",
200: "200", 201: "201", 202: "202", 203: "203", 204: "204", 205: "205", 206: "206",
300: "300", 301: "301", 302: "302", 303: "303", 304: "304", 305: "305", 307: "307",
400: "400", 401: "401", 402: "402", 403: "403", 404: "404", 405: "405", 406: "406",
407: "407", 408: "408", 409: "409", 410: "410", 411: "411", 412: "412", 413: "413",
414: "414", 415: "415", 416: "416", 417: "417", 418: "418", 428: "428", 429: "429",
431: "431", 451: "451",
500: "500", 501: "501", 502: "502", 503: "503", 504: "504", 505: "505", 511: "511",
}
)
func headersJustWritten(txn *txn, code int) {
if txn.finished {
return
}
if txn.wroteHeader {
return
}
txn.wroteHeader = true
h := txn.W.Header()
txn.attrs.agent.ResponseHeadersContentType = h.Get("Content-Type") | }
txn.attrs.agent.ResponseCode = statusCodeLookup[code]
if txn.attrs.agent.ResponseCode == "" {
txn.attrs.agent.ResponseCode = strconv.Itoa(code)
}
if responseCodeIsError(&txn.Config, code) {
e := txnErrorFromResponseCode(code)
e.stack = getStackTrace(1)
txn.noticeErrorInternal(e)
}
}
func (txn *txn) Header() http.Header { return txn.W.Header() }
func (txn *txn) Write(b []byte) (int, error) {
n, err := txn.W.Write(b)
txn.Lock()
defer txn.Unlock()
headersJustWritten(txn, http.StatusOK)
return n, err
}
func (txn *txn) WriteHeader(code int) {
txn.W.WriteHeader(code)
txn.Lock()
defer txn.Unlock()
headersJustWritten(txn, code)
}
var (
// ErrAlreadyEnded is returned by public txn methods if End() has
// already been called.
ErrAlreadyEnded = errors.New("transaction has already ended")
)
func (txn *txn) End() error {
txn.Lock()
defer txn.Unlock()
if txn.finished {
return ErrAlreadyEnded
}
txn.finished = true
r := recover()
if nil != r {
e := txnErrorFromPanic(r)
e.stack = getStackTrace(0)
txn.noticeErrorInternal(e)
}
txn.stop = time.Now()
txn.duration = txn.stop.Sub(txn.start)
txn.freezeName()
if txn.getsApdex() {
txn.apdexThreshold = calculateApdexThreshold(txn.Reply, txn.finalName)
if txn.errorsSeen > 0 {
txn.zone = apdexFailing
} else {
txn.zone = calculateApdexZone(txn.apdexThreshold, txn.duration)
}
} else {
txn.zone = apdex |
if val := h.Get("Content-Length"); "" != val {
if x, err := strconv.Atoi(val); nil == err {
txn.attrs.agent.ResponseHeadersContentLength = x
} | random_line_split |
txn.go | .isWeb,
duration: txn.duration,
exclusive: exclusive,
name: txn.finalName,
zone: txn.zone,
apdexThreshold: txn.apdexThreshold,
errorsSeen: txn.errorsSeen,
}, h.metrics)
if txn.queuing > 0 {
h.metrics.addDuration(queueMetric, "", txn.queuing, txn.queuing, forced)
}
mergeBreakdownMetrics(&txn.tracer, h.metrics, txn.finalName, txn.isWeb)
if txn.txnEventsEnabled() {
h.txnEvents.AddTxnEvent(&txnEvent{
Name: txn.finalName,
Timestamp: txn.start,
Duration: txn.duration,
queuing: txn.queuing,
zone: txn.zone,
attrs: txn.attrs,
datastoreExternalTotals: txn.tracer.datastoreExternalTotals,
})
}
requestURI := ""
if nil != txn.Request && nil != txn.Request.URL {
requestURI = safeURL(txn.Request.URL)
}
mergeTxnErrors(h.errorTraces, txn.errors, txn.finalName, requestURI, txn.attrs)
if txn.errorEventsEnabled() {
for _, e := range txn.errors {
h.errorEvents.Add(&errorEvent{
klass: e.klass,
msg: e.msg,
when: e.when,
txnName: txn.finalName,
duration: txn.duration,
queuing: txn.queuing,
attrs: txn.attrs,
datastoreExternalTotals: txn.tracer.datastoreExternalTotals,
})
}
}
}
func responseCodeIsError(cfg *api.Config, code int) bool {
if code < http.StatusBadRequest { // 400
return false
}
for _, ignoreCode := range cfg.ErrorCollector.IgnoreStatusCodes {
if code == ignoreCode {
return false
}
}
return true
}
var (
// statusCodeLookup avoids a strconv.Itoa call.
statusCodeLookup = map[int]string{
100: "100", 101: "101",
200: "200", 201: "201", 202: "202", 203: "203", 204: "204", 205: "205", 206: "206",
300: "300", 301: "301", 302: "302", 303: "303", 304: "304", 305: "305", 307: "307",
400: "400", 401: "401", 402: "402", 403: "403", 404: "404", 405: "405", 406: "406",
407: "407", 408: "408", 409: "409", 410: "410", 411: "411", 412: "412", 413: "413",
414: "414", 415: "415", 416: "416", 417: "417", 418: "418", 428: "428", 429: "429",
431: "431", 451: "451",
500: "500", 501: "501", 502: "502", 503: "503", 504: "504", 505: "505", 511: "511",
}
)
func headersJustWritten(txn *txn, code int) {
if txn.finished {
return
}
if txn.wroteHeader {
return
}
txn.wroteHeader = true
h := txn.W.Header()
txn.attrs.agent.ResponseHeadersContentType = h.Get("Content-Type")
if val := h.Get("Content-Length"); "" != val {
if x, err := strconv.Atoi(val); nil == err {
txn.attrs.agent.ResponseHeadersContentLength = x
}
}
txn.attrs.agent.ResponseCode = statusCodeLookup[code]
if txn.attrs.agent.ResponseCode == "" {
txn.attrs.agent.ResponseCode = strconv.Itoa(code)
}
if responseCodeIsError(&txn.Config, code) {
e := txnErrorFromResponseCode(code)
e.stack = getStackTrace(1)
txn.noticeErrorInternal(e)
}
}
func (txn *txn) Header() http.Header { return txn.W.Header() }
func (txn *txn) Write(b []byte) (int, error) {
n, err := txn.W.Write(b)
txn.Lock()
defer txn.Unlock()
headersJustWritten(txn, http.StatusOK)
return n, err
}
func (txn *txn) WriteHeader(code int) {
txn.W.WriteHeader(code)
txn.Lock()
defer txn.Unlock()
headersJustWritten(txn, code)
}
var (
// ErrAlreadyEnded is returned by public txn methods if End() has
// already been called.
ErrAlreadyEnded = errors.New("transaction has already ended")
)
func (txn *txn) End() error {
txn.Lock()
defer txn.Unlock()
if txn.finished {
return ErrAlreadyEnded
}
txn.finished = true
r := recover()
if nil != r {
e := txnErrorFromPanic(r)
e.stack = getStackTrace(0)
txn.noticeErrorInternal(e)
}
txn.stop = time.Now()
txn.duration = txn.stop.Sub(txn.start)
txn.freezeName()
if txn.getsApdex() {
txn.apdexThreshold = calculateApdexThreshold(txn.Reply, txn.finalName)
if txn.errorsSeen > 0 {
txn.zone = apdexFailing
} else {
txn.zone = calculateApdexZone(txn.apdexThreshold, txn.duration)
}
} else {
txn.zone = apdexNone
}
if log.DebugEnabled() {
log.Debug("transaction ended", log.Context{
"name": txn.finalName,
"duration_ms": txn.duration.Seconds() * 1000.0,
"ignored": txn.ignore,
"run": txn.Reply.RunID,
})
}
if !txn.ignore {
txn.Consumer.consume(txn.Reply.RunID, txn)
}
// Note that if a consumer uses `panic(nil)`, the panic will not
// propogate.
if nil != r {
panic(r)
}
return nil
}
func (txn *txn) AddAttribute(name string, value interface{}) error {
txn.Lock()
defer txn.Unlock()
if txn.finished {
return ErrAlreadyEnded
}
return addUserAttribute(txn.attrs, name, value, destAll)
}
var (
// ErrorsLocallyDisabled is returned if error capture is disabled by
// local configuration.
ErrorsLocallyDisabled = errors.New("errors locally disabled")
// ErrorsRemotelyDisabled is returned if error capture is disabled
// by remote configuration.
ErrorsRemotelyDisabled = errors.New("errors remotely disabled")
// ErrNilError is returned if the provided error is nil.
ErrNilError = errors.New("nil error")
)
const (
// HighSecurityErrorMsg is used in place of the error's message
// (err.String()) when high security moed is enabled.
HighSecurityErrorMsg = "message removed by high security setting"
)
func (txn *txn) noticeErrorInternal(err txnError) error {
// Increment errorsSeen even if errors are disabled: Error metrics do
// not depend on whether or not errors are enabled.
txn.errorsSeen++
if !txn.Config.ErrorCollector.Enabled {
return ErrorsLocallyDisabled
}
if !txn.Reply.CollectErrors {
return ErrorsRemotelyDisabled
}
if nil == txn.errors {
txn.errors = newTxnErrors(maxTxnErrors)
}
if txn.Config.HighSecurity {
err.msg = HighSecurityErrorMsg
}
err.when = time.Now()
txn.errors.Add(&err)
return nil
}
func (txn *txn) NoticeError(err error) error {
txn.Lock()
defer txn.Unlock()
if txn.finished {
return ErrAlreadyEnded
}
if nil == err {
return ErrNilError
}
e := txnErrorFromError(err)
e.stack = getStackTrace(2)
return txn.noticeErrorInternal(e)
}
func (txn *txn) SetName(name string) error {
txn.Lock()
defer txn.Unlock()
if txn.finished {
return ErrAlreadyEnded
}
txn.name = name
return nil
}
func (txn *txn) Ignore() error {
txn.Lock()
defer txn.Unlock()
if txn.finished {
return ErrAlreadyEnded
}
txn.ignore = true
return nil
}
func (txn *txn) | StartSegment | identifier_name | |
txn.go | ("Host")
txn.attrs.agent.RequestHeadersUserAgent = h.Get("User-Agent")
txn.attrs.agent.RequestHeadersReferer = safeURLFromString(h.Get("Referer"))
if cl := h.Get("Content-Length"); "" != cl {
if x, err := strconv.Atoi(cl); nil == err {
txn.attrs.agent.RequestContentLength = x
}
}
txn.queuing = queueDuration(h, txn.start)
}
txn.attrs.agent.HostDisplayName = txn.Config.HostDisplayName
return txn
}
func (txn *txn) txnEventsEnabled() bool {
return txn.Config.TransactionEvents.Enabled &&
txn.Reply.CollectAnalyticsEvents
}
func (txn *txn) errorEventsEnabled() bool {
return txn.Config.ErrorCollector.CaptureEvents &&
txn.Reply.CollectErrorEvents
}
func (txn *txn) freezeName() {
if txn.ignore || ("" != txn.finalName) {
return
}
txn.finalName = CreateFullTxnName(txn.name, txn.Reply, txn.isWeb)
if "" == txn.finalName {
txn.ignore = true
}
}
func (txn *txn) getsApdex() bool {
return txn.isWeb
}
type createTxnMetricsArgs struct {
isWeb bool
duration time.Duration
exclusive time.Duration
name string
zone apdexZone
apdexThreshold time.Duration
errorsSeen uint64
}
func createTxnMetrics(args createTxnMetricsArgs, metrics *metricTable) {
// Duration Metrics
rollup := backgroundRollup
if args.isWeb {
rollup = webRollup
metrics.addDuration(dispatcherMetric, "", args.duration, 0, forced)
}
metrics.addDuration(args.name, "", args.duration, args.exclusive, forced)
metrics.addDuration(rollup, "", args.duration, args.exclusive, forced)
// Apdex Metrics
if args.zone != apdexNone {
metrics.addApdex(apdexRollup, "", args.apdexThreshold, args.zone, forced)
mname := apdexPrefix + removeFirstSegment(args.name)
metrics.addApdex(mname, "", args.apdexThreshold, args.zone, unforced)
}
// Error Metrics
if args.errorsSeen > 0 {
metrics.addSingleCount(errorsAll, forced)
if args.isWeb | else {
metrics.addSingleCount(errorsBackground, forced)
}
metrics.addSingleCount(errorsPrefix+args.name, forced)
}
}
func (txn *txn) mergeIntoHarvest(h *harvest) {
exclusive := time.Duration(0)
children := tracerRootChildren(&txn.tracer)
if txn.duration > children {
exclusive = txn.duration - children
}
createTxnMetrics(createTxnMetricsArgs{
isWeb: txn.isWeb,
duration: txn.duration,
exclusive: exclusive,
name: txn.finalName,
zone: txn.zone,
apdexThreshold: txn.apdexThreshold,
errorsSeen: txn.errorsSeen,
}, h.metrics)
if txn.queuing > 0 {
h.metrics.addDuration(queueMetric, "", txn.queuing, txn.queuing, forced)
}
mergeBreakdownMetrics(&txn.tracer, h.metrics, txn.finalName, txn.isWeb)
if txn.txnEventsEnabled() {
h.txnEvents.AddTxnEvent(&txnEvent{
Name: txn.finalName,
Timestamp: txn.start,
Duration: txn.duration,
queuing: txn.queuing,
zone: txn.zone,
attrs: txn.attrs,
datastoreExternalTotals: txn.tracer.datastoreExternalTotals,
})
}
requestURI := ""
if nil != txn.Request && nil != txn.Request.URL {
requestURI = safeURL(txn.Request.URL)
}
mergeTxnErrors(h.errorTraces, txn.errors, txn.finalName, requestURI, txn.attrs)
if txn.errorEventsEnabled() {
for _, e := range txn.errors {
h.errorEvents.Add(&errorEvent{
klass: e.klass,
msg: e.msg,
when: e.when,
txnName: txn.finalName,
duration: txn.duration,
queuing: txn.queuing,
attrs: txn.attrs,
datastoreExternalTotals: txn.tracer.datastoreExternalTotals,
})
}
}
}
func responseCodeIsError(cfg *api.Config, code int) bool {
if code < http.StatusBadRequest { // 400
return false
}
for _, ignoreCode := range cfg.ErrorCollector.IgnoreStatusCodes {
if code == ignoreCode {
return false
}
}
return true
}
var (
// statusCodeLookup avoids a strconv.Itoa call.
statusCodeLookup = map[int]string{
100: "100", 101: "101",
200: "200", 201: "201", 202: "202", 203: "203", 204: "204", 205: "205", 206: "206",
300: "300", 301: "301", 302: "302", 303: "303", 304: "304", 305: "305", 307: "307",
400: "400", 401: "401", 402: "402", 403: "403", 404: "404", 405: "405", 406: "406",
407: "407", 408: "408", 409: "409", 410: "410", 411: "411", 412: "412", 413: "413",
414: "414", 415: "415", 416: "416", 417: "417", 418: "418", 428: "428", 429: "429",
431: "431", 451: "451",
500: "500", 501: "501", 502: "502", 503: "503", 504: "504", 505: "505", 511: "511",
}
)
func headersJustWritten(txn *txn, code int) {
if txn.finished {
return
}
if txn.wroteHeader {
return
}
txn.wroteHeader = true
h := txn.W.Header()
txn.attrs.agent.ResponseHeadersContentType = h.Get("Content-Type")
if val := h.Get("Content-Length"); "" != val {
if x, err := strconv.Atoi(val); nil == err {
txn.attrs.agent.ResponseHeadersContentLength = x
}
}
txn.attrs.agent.ResponseCode = statusCodeLookup[code]
if txn.attrs.agent.ResponseCode == "" {
txn.attrs.agent.ResponseCode = strconv.Itoa(code)
}
if responseCodeIsError(&txn.Config, code) {
e := txnErrorFromResponseCode(code)
e.stack = getStackTrace(1)
txn.noticeErrorInternal(e)
}
}
func (txn *txn) Header() http.Header { return txn.W.Header() }
func (txn *txn) Write(b []byte) (int, error) {
n, err := txn.W.Write(b)
txn.Lock()
defer txn.Unlock()
headersJustWritten(txn, http.StatusOK)
return n, err
}
func (txn *txn) WriteHeader(code int) {
txn.W.WriteHeader(code)
txn.Lock()
defer txn.Unlock()
headersJustWritten(txn, code)
}
var (
// ErrAlreadyEnded is returned by public txn methods if End() has
// already been called.
ErrAlreadyEnded = errors.New("transaction has already ended")
)
func (txn *txn) End() error {
txn.Lock()
defer txn.Unlock()
if txn.finished {
return ErrAlreadyEnded
}
txn.finished = true
r := recover()
if nil != r {
e := txnErrorFromPanic(r)
e.stack = getStackTrace(0)
txn.noticeErrorInternal(e)
}
txn.stop = time.Now()
txn.duration = txn.stop.Sub(txn.start)
txn.freezeName()
if txn.getsApdex() {
txn.apdexThreshold = calculateApdexThreshold(txn.Reply, txn.finalName)
if txn.errorsSeen > 0 {
txn.zone = apdexFailing
} else {
txn.zone = calculateApdexZone(txn.apdexThreshold, txn.duration)
}
} else {
txn.zone = apdex | {
metrics.addSingleCount(errorsWeb, forced)
} | conditional_block |
tools.py | self.encoder(batch_image_tensors)
class KeyPointHeatmapEncoder(nn.Module):
def __init__(self, layer_params, input_channel_num=1):
# layer_params {'filter num': [1, 2], 'operator':['conv2d','max_pool'], 'kernel sizes': [3, 3], 'strides': [1, 2]}
super(KeyPointHeatmapEncoder, self).__init__()
layers = []
layer_params['filter num'] = [input_channel_num] + layer_params['filter num']
for i in range(len(layer_params['filter num']) - 1):
if layer_params['operator'] == 'conv2d':
layers.append(nn.Conv2d(layer_params['filter num'][i], layer_params['filter num'][i + 1],
kernel_size=layer_params['kernel sizes'][i], stride=layer_params['strides'][i]))
layers.append(nn.BatchNorm2d(layer_params['filter num'][i+1]))
layers.append(nn.ReLU(inplace=True))
else:
layers.append(nn.MaxPool2d(kernel_size=layer_params['kernel sizes'][i], stride=layer_params['strides'][i]))
layers.append(View(-1, ))
self.encoder_conv = nn.Sequential(*layers)
def forward(self, batch_heatmap_tensor):
return self.encoder_conv(batch_heatmap_tensor)
#########################################################
# Graph Neural Network Library, a simple implementation #
"Any other graph neural network library could be used "
"TODO: use pytorchGeometrics lib"
"https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html"
#########################################################
class PairMessageGenerator(nn.Module):
def __init__(self, dim_hv, dim_hw, msg_dim):
"""
generate pair message between node Hv and Hw.
since the cat operation, msgs from hv -> hw and hw -> hv are different
"""
super(PairMessageGenerator, self).__init__()
self.dim_hv, self.dim_hw, self.msg_dim = dim_hv, dim_hw, msg_dim
self.in_dim = dim_hv + dim_hw # row * feature_dim, 2048
self.mlp = nn.Sequential(
nn.LayerNorm(self.in_dim), # this layer norm is important to create diversity
nn.Linear(self.in_dim, self.msg_dim),
nn.LeakyReLU(0.2)
)
def forward(self, Hv, Hw):
"""
Hv: m v nodes : node feature
Hw: m w nodes : node feature
"""
inputs = torch.cat((Hv, Hw), 1)
m_vw = self.mlp(inputs)
return m_vw
class MessagePassing(nn.Module):
def __init__(self, dim_h, msg_dim, msg_aggrgt='AVG'):
"""
input:
1 generate pair message between all connected nodes
2 do message aggregate
3 gru update, output all nodes' next h, only do one step
"""
super(MessagePassing, self).__init__()
self.dim_h = dim_h
self.msg_aggrgt = msg_aggrgt
self.msg_dim = msg_dim
self.msg_generator = PairMessageGenerator(dim_h, dim_h, msg_dim) # parameters shared
self.update = nn.GRUCell(msg_dim, dim_h) # parameters shared
def forward(self, Ht_batch, A):
"""
intput: Ht, hidden state of all nodes at time t, n instances
Ht, 3D matrix, instances : nodes : node vectors (dim_h)
A, Adjacent matrix, assuming all have the same adjacent matrix as all represent in one task kernel
steps: 1 generate pair message stored in a hash table (for undirected graph here)
2 aggregate msgs mt+1 for each node.
3 feed ht and mt+1 in GRU update module
output: Ht+1, hidden state of all nodes at time t+1, n instances
: 3D matrix, instance entries : nodes: node vectors
"""
# generate pair message
pair_msgs = {} ## hash msgs
device = Ht_batch.device
node_Ht_next_step = torch.zeros(Ht_batch.size()).to(device) # n instances : nodes_num : node vectors (dim_h)
# scan adjacent matrix
for i in range(A.shape[0]): # all the nodes
pair_msgs[i] = [] # connected_msg_num * n * msg_dim
Hv = Ht_batch[:, i, :] # n*dim_h
for j in range(A.shape[1]): # scan the other nodes
if A[i, j] == 1: ## connected nodeds
Hw = Ht_batch[:, j, :] # n*dim_h
# msg from Hv to Hw
msg_i_j = self.msg_generator(Hv, Hw) # n * dim_msgs
pair_msgs[i].append(msg_i_j)
# aggregate all connected msgs
msg_next_step = self.aggregate_msgs(pair_msgs[i]).to(device) # n*msg_dim
# update function, get next hidden state
Ht_next_step = self.update(msg_next_step, Hv) # n * dim_h
node_Ht_next_step[:, i, :] = Ht_next_step
return node_Ht_next_step
def aggregate_msgs(self, connected_msgs_list):
|
class GraphReadOut(nn.Module):
"""
that's the readout function
input: all nodes final hidden state
output: readout vector representing the graph
"""
def __init__(self, input_dim, node_num, output_dim):
super(GraphReadOut, self).__init__()
self.input_dim = input_dim # 1024
self.output_dim = output_dim
self.node_num = node_num
self.mlp = nn.Sequential(
View((-1, self.node_num * self.input_dim)),
nn.LayerNorm(self.node_num*self.input_dim), # layer norm is good
nn.Linear(self.node_num * self.input_dim, self.output_dim),
nn.LeakyReLU(0.2),
)
def forward(self, nodes_final_hidden):
"""
nodes_final_hidden: n graphs : node_num (as the sequence) : node_hidden_state_dim
output: n scalar values representing weight of each graph
"""
return self.mlp(nodes_final_hidden)
# ****************************************************************
# --Test-- : Fri, 16:02:00, Jun 7, 2019, MDT
# --Result-- : PASS / NG, Jun Jin, 16:02:00, Jun 7, 2019, MDT
# ****************************************************************
# Ht = torch.randn((120, 2, 128))
# readout = GraphReadOut(128, 2)
# outputs = readout(Ht)
# print (outputs.shape)
# error_vec = torch.ones(120)
# outputs.backward(error_vec)
# print(list(readout.parameters())[0].grad)
# # when include lstm, forward function is OK. but after backward, grad is None
# images = np.load('../raw/sample_img.npy')
# transform = T.Compose([
# T.ToPILImage(),
# T.ToTensor(),
# T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
# images = transform(images).to(device).unsqueeze(0)
# images = torch.randn((10,3,128,128)).to(device)
# deep_geometry_set = deep_geometry_set.to(device)
# basis_weighted_layer = deep_geometry_set(images) # dim N * (7*_gnn_output_dim)
def init_weights(m):
if type(m) == nn.Linear:
# torch.nn.init.xavier_uniform(m.weight)
torch.nn.init.kaiming_uniform_(m.weight, mode='fan_in', nonlinearity='relu')
def tie_weights_of_two_networks(src, tgt):
i = 0
params = list(tgt.parameters())
assert len(list(src.parameters())) == len(params), "failed, length not match!"
for f in src.parameters(): # set weights
f.data = params[i].data
i += 1
#########################################################
# baseline encoders #
"baseline encoders for ablation study"
#########################################################
class PixelE2E(nn.Module):
def __init__(self, input_channel_num=3, layer_params=None, output_dim=896):
# {'filter | """
# for n graph instances
# given edge index number
# this connected_msgs_list contains all connnected edges msg, say m connected edges
# connected_msgs_list: m items, each item is n*msg_dim matrix
# return n*msg_dim matrix, representing next step msg of this edge index
:param connected_msgs_list:
:return:
"""
msg_num = len(connected_msgs_list)
agg_msg = connected_msgs_list[0]
for i in range(1, msg_num):
agg_msg += connected_msgs_list[i]
if self.msg_aggrgt == 'AVG':
return agg_msg / msg_num
elif self.msg_aggrgt == 'SUM':
return agg_msg | identifier_body |
tools.py | self.encoder(batch_image_tensors)
class | (nn.Module):
def __init__(self, layer_params, input_channel_num=1):
# layer_params {'filter num': [1, 2], 'operator':['conv2d','max_pool'], 'kernel sizes': [3, 3], 'strides': [1, 2]}
super(KeyPointHeatmapEncoder, self).__init__()
layers = []
layer_params['filter num'] = [input_channel_num] + layer_params['filter num']
for i in range(len(layer_params['filter num']) - 1):
if layer_params['operator'] == 'conv2d':
layers.append(nn.Conv2d(layer_params['filter num'][i], layer_params['filter num'][i + 1],
kernel_size=layer_params['kernel sizes'][i], stride=layer_params['strides'][i]))
layers.append(nn.BatchNorm2d(layer_params['filter num'][i+1]))
layers.append(nn.ReLU(inplace=True))
else:
layers.append(nn.MaxPool2d(kernel_size=layer_params['kernel sizes'][i], stride=layer_params['strides'][i]))
layers.append(View(-1, ))
self.encoder_conv = nn.Sequential(*layers)
def forward(self, batch_heatmap_tensor):
return self.encoder_conv(batch_heatmap_tensor)
#########################################################
# Graph Neural Network Library, a simple implementation #
"Any other graph neural network library could be used "
"TODO: use pytorchGeometrics lib"
"https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html"
#########################################################
class PairMessageGenerator(nn.Module):
def __init__(self, dim_hv, dim_hw, msg_dim):
"""
generate pair message between node Hv and Hw.
since the cat operation, msgs from hv -> hw and hw -> hv are different
"""
super(PairMessageGenerator, self).__init__()
self.dim_hv, self.dim_hw, self.msg_dim = dim_hv, dim_hw, msg_dim
self.in_dim = dim_hv + dim_hw # row * feature_dim, 2048
self.mlp = nn.Sequential(
nn.LayerNorm(self.in_dim), # this layer norm is important to create diversity
nn.Linear(self.in_dim, self.msg_dim),
nn.LeakyReLU(0.2)
)
def forward(self, Hv, Hw):
"""
Hv: m v nodes : node feature
Hw: m w nodes : node feature
"""
inputs = torch.cat((Hv, Hw), 1)
m_vw = self.mlp(inputs)
return m_vw
class MessagePassing(nn.Module):
def __init__(self, dim_h, msg_dim, msg_aggrgt='AVG'):
"""
input:
1 generate pair message between all connected nodes
2 do message aggregate
3 gru update, output all nodes' next h, only do one step
"""
super(MessagePassing, self).__init__()
self.dim_h = dim_h
self.msg_aggrgt = msg_aggrgt
self.msg_dim = msg_dim
self.msg_generator = PairMessageGenerator(dim_h, dim_h, msg_dim) # parameters shared
self.update = nn.GRUCell(msg_dim, dim_h) # parameters shared
def forward(self, Ht_batch, A):
"""
intput: Ht, hidden state of all nodes at time t, n instances
Ht, 3D matrix, instances : nodes : node vectors (dim_h)
A, Adjacent matrix, assuming all have the same adjacent matrix as all represent in one task kernel
steps: 1 generate pair message stored in a hash table (for undirected graph here)
2 aggregate msgs mt+1 for each node.
3 feed ht and mt+1 in GRU update module
output: Ht+1, hidden state of all nodes at time t+1, n instances
: 3D matrix, instance entries : nodes: node vectors
"""
# generate pair message
pair_msgs = {} ## hash msgs
device = Ht_batch.device
node_Ht_next_step = torch.zeros(Ht_batch.size()).to(device) # n instances : nodes_num : node vectors (dim_h)
# scan adjacent matrix
for i in range(A.shape[0]): # all the nodes
pair_msgs[i] = [] # connected_msg_num * n * msg_dim
Hv = Ht_batch[:, i, :] # n*dim_h
for j in range(A.shape[1]): # scan the other nodes
if A[i, j] == 1: ## connected nodeds
Hw = Ht_batch[:, j, :] # n*dim_h
# msg from Hv to Hw
msg_i_j = self.msg_generator(Hv, Hw) # n * dim_msgs
pair_msgs[i].append(msg_i_j)
# aggregate all connected msgs
msg_next_step = self.aggregate_msgs(pair_msgs[i]).to(device) # n*msg_dim
# update function, get next hidden state
Ht_next_step = self.update(msg_next_step, Hv) # n * dim_h
node_Ht_next_step[:, i, :] = Ht_next_step
return node_Ht_next_step
def aggregate_msgs(self, connected_msgs_list):
"""
# for n graph instances
# given edge index number
# this connected_msgs_list contains all connnected edges msg, say m connected edges
# connected_msgs_list: m items, each item is n*msg_dim matrix
# return n*msg_dim matrix, representing next step msg of this edge index
:param connected_msgs_list:
:return:
"""
msg_num = len(connected_msgs_list)
agg_msg = connected_msgs_list[0]
for i in range(1, msg_num):
agg_msg += connected_msgs_list[i]
if self.msg_aggrgt == 'AVG':
return agg_msg / msg_num
elif self.msg_aggrgt == 'SUM':
return agg_msg
class GraphReadOut(nn.Module):
"""
that's the readout function
input: all nodes final hidden state
output: readout vector representing the graph
"""
def __init__(self, input_dim, node_num, output_dim):
super(GraphReadOut, self).__init__()
self.input_dim = input_dim # 1024
self.output_dim = output_dim
self.node_num = node_num
self.mlp = nn.Sequential(
View((-1, self.node_num * self.input_dim)),
nn.LayerNorm(self.node_num*self.input_dim), # layer norm is good
nn.Linear(self.node_num * self.input_dim, self.output_dim),
nn.LeakyReLU(0.2),
)
def forward(self, nodes_final_hidden):
"""
nodes_final_hidden: n graphs : node_num (as the sequence) : node_hidden_state_dim
output: n scalar values representing weight of each graph
"""
return self.mlp(nodes_final_hidden)
# ****************************************************************
# --Test-- : Fri, 16:02:00, Jun 7, 2019, MDT
# --Result-- : PASS / NG, Jun Jin, 16:02:00, Jun 7, 2019, MDT
# ****************************************************************
# Ht = torch.randn((120, 2, 128))
# readout = GraphReadOut(128, 2)
# outputs = readout(Ht)
# print (outputs.shape)
# error_vec = torch.ones(120)
# outputs.backward(error_vec)
# print(list(readout.parameters())[0].grad)
# # when include lstm, forward function is OK. but after backward, grad is None
# images = np.load('../raw/sample_img.npy')
# transform = T.Compose([
# T.ToPILImage(),
# T.ToTensor(),
# T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
# images = transform(images).to(device).unsqueeze(0)
# images = torch.randn((10,3,128,128)).to(device)
# deep_geometry_set = deep_geometry_set.to(device)
# basis_weighted_layer = deep_geometry_set(images) # dim N * (7*_gnn_output_dim)
def init_weights(m):
if type(m) == nn.Linear:
# torch.nn.init.xavier_uniform(m.weight)
torch.nn.init.kaiming_uniform_(m.weight, mode='fan_in', nonlinearity='relu')
def tie_weights_of_two_networks(src, tgt):
i = 0
params = list(tgt.parameters())
assert len(list(src.parameters())) == len(params), "failed, length not match!"
for f in src.parameters(): # set weights
f.data = params[i].data
i += 1
#########################################################
# baseline encoders #
"baseline encoders for ablation study"
#########################################################
class PixelE2E(nn.Module):
def __init__(self, input_channel_num=3, layer_params=None, output_dim=896):
# {' | KeyPointHeatmapEncoder | identifier_name |
tools.py | self.encoder(batch_image_tensors)
class KeyPointHeatmapEncoder(nn.Module):
def __init__(self, layer_params, input_channel_num=1):
# layer_params {'filter num': [1, 2], 'operator':['conv2d','max_pool'], 'kernel sizes': [3, 3], 'strides': [1, 2]}
super(KeyPointHeatmapEncoder, self).__init__()
layers = []
layer_params['filter num'] = [input_channel_num] + layer_params['filter num']
for i in range(len(layer_params['filter num']) - 1):
if layer_params['operator'] == 'conv2d':
|
else:
layers.append(nn.MaxPool2d(kernel_size=layer_params['kernel sizes'][i], stride=layer_params['strides'][i]))
layers.append(View(-1, ))
self.encoder_conv = nn.Sequential(*layers)
def forward(self, batch_heatmap_tensor):
return self.encoder_conv(batch_heatmap_tensor)
#########################################################
# Graph Neural Network Library, a simple implementation #
"Any other graph neural network library could be used "
"TODO: use pytorchGeometrics lib"
"https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html"
#########################################################
class PairMessageGenerator(nn.Module):
def __init__(self, dim_hv, dim_hw, msg_dim):
"""
generate pair message between node Hv and Hw.
since the cat operation, msgs from hv -> hw and hw -> hv are different
"""
super(PairMessageGenerator, self).__init__()
self.dim_hv, self.dim_hw, self.msg_dim = dim_hv, dim_hw, msg_dim
self.in_dim = dim_hv + dim_hw # row * feature_dim, 2048
self.mlp = nn.Sequential(
nn.LayerNorm(self.in_dim), # this layer norm is important to create diversity
nn.Linear(self.in_dim, self.msg_dim),
nn.LeakyReLU(0.2)
)
def forward(self, Hv, Hw):
"""
Hv: m v nodes : node feature
Hw: m w nodes : node feature
"""
inputs = torch.cat((Hv, Hw), 1)
m_vw = self.mlp(inputs)
return m_vw
class MessagePassing(nn.Module):
def __init__(self, dim_h, msg_dim, msg_aggrgt='AVG'):
"""
input:
1 generate pair message between all connected nodes
2 do message aggregate
3 gru update, output all nodes' next h, only do one step
"""
super(MessagePassing, self).__init__()
self.dim_h = dim_h
self.msg_aggrgt = msg_aggrgt
self.msg_dim = msg_dim
self.msg_generator = PairMessageGenerator(dim_h, dim_h, msg_dim) # parameters shared
self.update = nn.GRUCell(msg_dim, dim_h) # parameters shared
def forward(self, Ht_batch, A):
"""
intput: Ht, hidden state of all nodes at time t, n instances
Ht, 3D matrix, instances : nodes : node vectors (dim_h)
A, Adjacent matrix, assuming all have the same adjacent matrix as all represent in one task kernel
steps: 1 generate pair message stored in a hash table (for undirected graph here)
2 aggregate msgs mt+1 for each node.
3 feed ht and mt+1 in GRU update module
output: Ht+1, hidden state of all nodes at time t+1, n instances
: 3D matrix, instance entries : nodes: node vectors
"""
# generate pair message
pair_msgs = {} ## hash msgs
device = Ht_batch.device
node_Ht_next_step = torch.zeros(Ht_batch.size()).to(device) # n instances : nodes_num : node vectors (dim_h)
# scan adjacent matrix
for i in range(A.shape[0]): # all the nodes
pair_msgs[i] = [] # connected_msg_num * n * msg_dim
Hv = Ht_batch[:, i, :] # n*dim_h
for j in range(A.shape[1]): # scan the other nodes
if A[i, j] == 1: ## connected nodeds
Hw = Ht_batch[:, j, :] # n*dim_h
# msg from Hv to Hw
msg_i_j = self.msg_generator(Hv, Hw) # n * dim_msgs
pair_msgs[i].append(msg_i_j)
# aggregate all connected msgs
msg_next_step = self.aggregate_msgs(pair_msgs[i]).to(device) # n*msg_dim
# update function, get next hidden state
Ht_next_step = self.update(msg_next_step, Hv) # n * dim_h
node_Ht_next_step[:, i, :] = Ht_next_step
return node_Ht_next_step
def aggregate_msgs(self, connected_msgs_list):
"""
# for n graph instances
# given edge index number
# this connected_msgs_list contains all connnected edges msg, say m connected edges
# connected_msgs_list: m items, each item is n*msg_dim matrix
# return n*msg_dim matrix, representing next step msg of this edge index
:param connected_msgs_list:
:return:
"""
msg_num = len(connected_msgs_list)
agg_msg = connected_msgs_list[0]
for i in range(1, msg_num):
agg_msg += connected_msgs_list[i]
if self.msg_aggrgt == 'AVG':
return agg_msg / msg_num
elif self.msg_aggrgt == 'SUM':
return agg_msg
class GraphReadOut(nn.Module):
"""
that's the readout function
input: all nodes final hidden state
output: readout vector representing the graph
"""
def __init__(self, input_dim, node_num, output_dim):
super(GraphReadOut, self).__init__()
self.input_dim = input_dim # 1024
self.output_dim = output_dim
self.node_num = node_num
self.mlp = nn.Sequential(
View((-1, self.node_num * self.input_dim)),
nn.LayerNorm(self.node_num*self.input_dim), # layer norm is good
nn.Linear(self.node_num * self.input_dim, self.output_dim),
nn.LeakyReLU(0.2),
)
def forward(self, nodes_final_hidden):
"""
nodes_final_hidden: n graphs : node_num (as the sequence) : node_hidden_state_dim
output: n scalar values representing weight of each graph
"""
return self.mlp(nodes_final_hidden)
# ****************************************************************
# --Test-- : Fri, 16:02:00, Jun 7, 2019, MDT
# --Result-- : PASS / NG, Jun Jin, 16:02:00, Jun 7, 2019, MDT
# ****************************************************************
# Ht = torch.randn((120, 2, 128))
# readout = GraphReadOut(128, 2)
# outputs = readout(Ht)
# print (outputs.shape)
# error_vec = torch.ones(120)
# outputs.backward(error_vec)
# print(list(readout.parameters())[0].grad)
# # when include lstm, forward function is OK. but after backward, grad is None
# images = np.load('../raw/sample_img.npy')
# transform = T.Compose([
# T.ToPILImage(),
# T.ToTensor(),
# T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
# images = transform(images).to(device).unsqueeze(0)
# images = torch.randn((10,3,128,128)).to(device)
# deep_geometry_set = deep_geometry_set.to(device)
# basis_weighted_layer = deep_geometry_set(images) # dim N * (7*_gnn_output_dim)
def init_weights(m):
if type(m) == nn.Linear:
# torch.nn.init.xavier_uniform(m.weight)
torch.nn.init.kaiming_uniform_(m.weight, mode='fan_in', nonlinearity='relu')
def tie_weights_of_two_networks(src, tgt):
i = 0
params = list(tgt.parameters())
assert len(list(src.parameters())) == len(params), "failed, length not match!"
for f in src.parameters(): # set weights
f.data = params[i].data
i += 1
#########################################################
# baseline encoders #
"baseline encoders for ablation study"
#########################################################
class PixelE2E(nn.Module):
def __init__(self, input_channel_num=3, layer_params=None, output_dim=896):
# {' | layers.append(nn.Conv2d(layer_params['filter num'][i], layer_params['filter num'][i + 1],
kernel_size=layer_params['kernel sizes'][i], stride=layer_params['strides'][i]))
layers.append(nn.BatchNorm2d(layer_params['filter num'][i+1]))
layers.append(nn.ReLU(inplace=True)) | conditional_block |
tools.py | def __init__(self, size):
super(View, self).__init__()
self.size = size
def forward(self, tensor):
return tensor.view(self.size)
#########################################################
# image encoders #
"Any image encoders could replace my implementation here"
#########################################################
class ImageEncoder(nn.Module):
def __init__(self, input_channel_num=3, layer_params=None):
# {'filter num': [16, 16, 32, 32], 'kernel sizes': [7, 3, 3, 3], 'strides': [1, 1, 2, 1]}
super(ImageEncoder, self).__init__()
layers = []
layer_params['filter num'] = [input_channel_num] + list(layer_params['filter num'])
for i in range(len(layer_params['filter num'])-1):
layers.append(nn.Conv2d(layer_params['filter num'][i], layer_params['filter num'][i+1],
kernel_size=layer_params['kernel sizes'][i], stride=layer_params['strides'][i]))
# layers.append(nn.BatchNorm2d(layer_params['filter num'][i+1]))
layers.append(nn.LeakyReLU(inplace=True))
self.encoder = nn.Sequential(*layers)
def forward(self, batch_image_tensors):
return self.encoder(batch_image_tensors)
class KeyPointHeatmapEncoder(nn.Module):
def __init__(self, layer_params, input_channel_num=1):
# layer_params {'filter num': [1, 2], 'operator':['conv2d','max_pool'], 'kernel sizes': [3, 3], 'strides': [1, 2]}
super(KeyPointHeatmapEncoder, self).__init__()
layers = []
layer_params['filter num'] = [input_channel_num] + layer_params['filter num']
for i in range(len(layer_params['filter num']) - 1):
if layer_params['operator'] == 'conv2d':
layers.append(nn.Conv2d(layer_params['filter num'][i], layer_params['filter num'][i + 1],
kernel_size=layer_params['kernel sizes'][i], stride=layer_params['strides'][i]))
layers.append(nn.BatchNorm2d(layer_params['filter num'][i+1]))
layers.append(nn.ReLU(inplace=True))
else:
layers.append(nn.MaxPool2d(kernel_size=layer_params['kernel sizes'][i], stride=layer_params['strides'][i]))
layers.append(View(-1, ))
self.encoder_conv = nn.Sequential(*layers)
def forward(self, batch_heatmap_tensor):
return self.encoder_conv(batch_heatmap_tensor)
#########################################################
# Graph Neural Network Library, a simple implementation #
"Any other graph neural network library could be used "
"TODO: use pytorchGeometrics lib"
"https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html"
#########################################################
class PairMessageGenerator(nn.Module):
def __init__(self, dim_hv, dim_hw, msg_dim):
"""
generate pair message between node Hv and Hw.
since the cat operation, msgs from hv -> hw and hw -> hv are different
"""
super(PairMessageGenerator, self).__init__()
self.dim_hv, self.dim_hw, self.msg_dim = dim_hv, dim_hw, msg_dim
self.in_dim = dim_hv + dim_hw # row * feature_dim, 2048
self.mlp = nn.Sequential(
nn.LayerNorm(self.in_dim), # this layer norm is important to create diversity
nn.Linear(self.in_dim, self.msg_dim),
nn.LeakyReLU(0.2)
)
def forward(self, Hv, Hw):
"""
Hv: m v nodes : node feature
Hw: m w nodes : node feature
"""
inputs = torch.cat((Hv, Hw), 1)
m_vw = self.mlp(inputs)
return m_vw
class MessagePassing(nn.Module):
def __init__(self, dim_h, msg_dim, msg_aggrgt='AVG'):
"""
input:
1 generate pair message between all connected nodes
2 do message aggregate
3 gru update, output all nodes' next h, only do one step
"""
super(MessagePassing, self).__init__()
self.dim_h = dim_h
self.msg_aggrgt = msg_aggrgt
self.msg_dim = msg_dim
self.msg_generator = PairMessageGenerator(dim_h, dim_h, msg_dim) # parameters shared
self.update = nn.GRUCell(msg_dim, dim_h) # parameters shared
def forward(self, Ht_batch, A):
"""
intput: Ht, hidden state of all nodes at time t, n instances
Ht, 3D matrix, instances : nodes : node vectors (dim_h)
A, Adjacent matrix, assuming all have the same adjacent matrix as all represent in one task kernel
steps: 1 generate pair message stored in a hash table (for undirected graph here)
2 aggregate msgs mt+1 for each node.
3 feed ht and mt+1 in GRU update module
output: Ht+1, hidden state of all nodes at time t+1, n instances
: 3D matrix, instance entries : nodes: node vectors
"""
# generate pair message
pair_msgs = {} ## hash msgs
device = Ht_batch.device
node_Ht_next_step = torch.zeros(Ht_batch.size()).to(device) # n instances : nodes_num : node vectors (dim_h)
# scan adjacent matrix
for i in range(A.shape[0]): # all the nodes
pair_msgs[i] = [] # connected_msg_num * n * msg_dim
Hv = Ht_batch[:, i, :] # n*dim_h
for j in range(A.shape[1]): # scan the other nodes
if A[i, j] == 1: ## connected nodeds
Hw = Ht_batch[:, j, :] # n*dim_h
# msg from Hv to Hw
msg_i_j = self.msg_generator(Hv, Hw) # n * dim_msgs
pair_msgs[i].append(msg_i_j)
# aggregate all connected msgs
msg_next_step = self.aggregate_msgs(pair_msgs[i]).to(device) # n*msg_dim
# update function, get next hidden state
Ht_next_step = self.update(msg_next_step, Hv) # n * dim_h
node_Ht_next_step[:, i, :] = Ht_next_step
return node_Ht_next_step
def aggregate_msgs(self, connected_msgs_list):
"""
# for n graph instances
# given edge index number
# this connected_msgs_list contains all connnected edges msg, say m connected edges
# connected_msgs_list: m items, each item is n*msg_dim matrix
# return n*msg_dim matrix, representing next step msg of this edge index
:param connected_msgs_list:
:return:
"""
msg_num = len(connected_msgs_list)
agg_msg = connected_msgs_list[0]
for i in range(1, msg_num):
agg_msg += connected_msgs_list[i]
if self.msg_aggrgt == 'AVG':
return agg_msg / msg_num
elif self.msg_aggrgt == 'SUM':
return agg_msg
class GraphReadOut(nn.Module):
"""
that's the readout function
input: all nodes final hidden state
output: readout vector representing the graph
"""
def __init__(self, input_dim, node_num, output_dim):
super(GraphReadOut, self).__init__()
self.input_dim = input_dim # 1024
self.output_dim = output_dim
self.node_num = node_num
self.mlp = nn.Sequential(
View((-1, self.node_num * self.input_dim)),
nn.LayerNorm(self.node_num*self.input_dim), # layer norm is good
nn.Linear(self.node_num * self.input_dim, self.output_dim),
nn.LeakyReLU(0.2),
)
def forward(self, nodes_final_hidden):
"""
nodes_final_hidden: n graphs : node_num (as the sequence) : node_hidden_state_dim
output: n scalar values representing weight of each graph
"""
return self.mlp(nodes_final_hidden)
# ****************************************************************
# --Test-- : Fri, 16:02:00, Jun 7, 2019, MDT
# --Result-- : PASS / NG, Jun Jin, 16:02:00, Jun 7, 2019, MDT
# ****************************************************************
# Ht = torch.randn((120, 2, 128))
# readout = GraphReadOut(128, 2)
# outputs = readout(Ht)
# print (outputs.shape)
# error_vec = torch.ones(120)
# outputs.backward(error_vec)
# print(list(readout.parameters())[0].grad)
# # when include lstm, forward function is OK. but after backward, grad is None
# images = np.load('../raw/sample_img.npy')
# transform = T.Compose([
# T.ToPILImage(),
# T.ToTensor(),
# T.Normalize((0.485, 0.4 | random_line_split | ||
Project4.py | hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
print(cell, end=" ")
print()
def pretty_percentage(amount):
print(str(np.round(amount*100,2)) + '%')
def clean_text(text):
"""
1. Remove html like text from europarl e.g. <Chapter 1>
2. Remove line breaks
3. Reduce all whitespaces to 1
4. turn everything to lower case
""" | regex = re.compile('[\.|\-|\,|\?|\_|\:|\"|\)|\(\)\/|\\|\>|\<]')
text = text.lower() # Turn everything to lower case
text = regex.sub(' ', text).strip()
out = re.sub(' +', ' ', text) # Reduce whitespace down to one
return out
########################################################################################################
##################################### Preprocessing ####################################################
########################################################################################################
tok = TweetTokenizer()
path = '.'
os.chdir(path)
raw = pd.read_csv('imdb_master.csv',encoding='iso-8859-1')
raw = raw[raw['label'] != 'unsup']
data = list(raw.review)
labels = raw.label
del raw
labels = list(labels.replace({'pos': 1, 'neg': 0}))
for i in range(0,len(data)):
clean = tok.tokenize(clean_text(data[i]))
clean = [word for word in clean if word != 'br']
clean = ' '.join(clean)
data[i] = clean
print('Review ' + str(i+1) + ' cleaned. Completed ' + str(round(i * 100 / len(data), 2)) + '%')
with open('data', 'wb') as f:
pickle.dump(data, f)
with open('labels', 'wb') as f:
pickle.dump(labels, f)
"""
with open('data', 'rb') as f:
data = pickle.load(f)
with open('labels', 'rb') as f:
labels = pickle.load(f)
"""
########################################################################################################
######################################## Split into Sets ##############################################
########################################################################################################
X_Train, X_Tune, Y_Train, Y_Tune = train_test_split(data,
labels,
test_size=0.10,
random_state = 40)
X_Validation, X_Test, Y_Validation, Y_Test = train_test_split(X_Tune,
Y_Tune,
test_size=0.5,
random_state = 40)
print('Training set size:', len(X_Train))
print('Validation set size:', len(X_Validation))
print('Test set size:', len(X_Test))
del X_Tune, Y_Tune
gc.collect()
########################################################################################################
########################################## Create Grams ################################################
########################################################################################################
Unigram_vectorizer = TfidfVectorizer(ngram_range = (1, 1),
min_df = 10,
analyzer = 'word',
stop_words = stopwords.words('english'))
Uni_X_Train = Unigram_vectorizer.fit_transform(X_Train)
Uni_X_Validation = Unigram_vectorizer.transform(X_Validation)
Uni_X_Test = Unigram_vectorizer.transform(X_Test)
print('Unigram Training Dataset:',Uni_X_Train.shape)
print('Unigram Validation Dataset:',Uni_X_Validation.shape)
print('Unigram Test Dataset:',Uni_X_Test.shape)
########################################################################################################
######################## Perform Truncated SVD to reduce dimensions ####################################
########################################################################################################
from sklearn.decomposition import TruncatedSVD
tsvd1 = TruncatedSVD(n_components=10000, random_state=42)
svd_model_uni = tsvd1.fit(Uni_X_Train)
uni_ex_var = np.sum(svd_model_uni.explained_variance_ratio_)
reduced_uni_train = svd_model_uni.transform(Uni_X_Train)
reduced_uni_validation = svd_model_uni.transform(Uni_X_Validation)
reduced_uni_test = svd_model_uni.transform(Uni_X_Test)
print('Initial Shape of Unigram Data:',Uni_X_Train.shape)
print('Explained Variance of Unigram model: ')
pretty_percentage(uni_ex_var)
print('Reduced Shape of Unigram Data:',reduced_uni_train.shape)
with open('reduced_uni_train', 'wb') as f:
pickle.dump(reduced_uni_train, f)
with open('reduced_uni_validation', 'wb') as f:
pickle.dump(reduced_uni_validation, f)
with open('reduced_uni_test', 'wb') as f:
pickle.dump(reduced_uni_test, f)
with open('Y_Train', 'wb') as f:
pickle.dump(Y_Train, f)
with open('Y_Validation', 'wb') as f:
pickle.dump(Y_Validation, f)
with open('Y_Test', 'wb') as f:
pickle.dump(Y_Test, f)
"""
with open('reduced_uni_train', 'rb') as f:
reduced_uni_train = pickle.load(f)
with open('reduced_uni_validation', 'rb') as f:
reduced_uni_validation = pickle.load(f)
with open('reduced_uni_test', 'rb') as f:
reduced_uni_test = pickle.load(f)
with open('Y_Train', 'rb') as f:
Y_Train = pickle.load(f)
with open('Y_Validation', 'rb') as f:
Y_Validation = pickle.load(f)
with open('Y_Test', 'rb') as f:
Y_Test = pickle.load(f)
"""
########################################################################################################
###################################### Modelling #######################################################
########################################################################################################
#######################################################
## Baseline Classifier ##
#######################################################
Class_Freq_Train = pd.Series(Y_Train).value_counts()
print(Class_Freq_Train)
baseline_train = np.zeros((len(Y_Train)))
baseline_valid = np.zeros((len(Y_Validation)))
Score_Classifier(baseline_train, Y_Train, baseline_valid, Y_Validation, Classifier_Title = 'Baseline')
#######################################################
## MLP for Unigrams ##
#######################################################
from keras.callbacks import Callback
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
from keras.regularizers import l1
from keras.layers import LeakyReLU
class Metrics(Callback):
def on_train_begin(self, logs={}):
self.val_f1s = []
self.val_recalls = []
self.val_precisions = []
def on_epoch_end(self, epoch, logs={}):
val_predict = (np.asarray(self.model.predict(self.validation_data[0]))).round()
val_targ = self.validation_data[1]
_val_f1 = f1_score(Y_Validation, val_predict)
_val_recall = recall_score(val_targ, val_predict)
_val_precision = precision_score(val_targ, val_predict)
self.val_f1s.append(_val_f1)
self.val_recalls.append(_val_recall)
self.val_precisions.append(_val_precision)
print(' — val_f1: %f — val_precision: %f — val_recall %f' %(_val_f1, _val_precision, _val_recall))
print()
return
metrics = Metrics()
def Dense_Layer(input_tensor, n_neurons, l1_rate = 0.02, dropout_rate = 0):
X = Dense(n_neurons, kernel_regularizer= l1(l1_rate))(input_tensor)
X = BatchNormalization(axis = -1)(X)
X = LeakyReLU(alpha = 0.1)(X)
X = Dropout(dropout_rate)(X)
return X
def Create_Model(structure, l1_rate = 0, dropout_rate = 0, opt = 'adam', inpt = 10000):
X_input = Input((inpt,))
X = BatchNormalization(axis = -1)(X_input)
for i in range(0,len(structure)):
X = Dense_Layer(X, structure[i], l1_rate = l1_rate, dropout_rate = dropout_rate)
X = BatchNormalization(axis = -1)(X)
X = Dense(1, activation = 'sigmoid')(X)
model = Model(inputs = X_input, outputs = X, name='Sentiment Recognizer')
model.compile(optimizer = opt, loss = "binary_crossentropy", metrics = ['accuracy'])
return model
###############################################################################
###################### Structure Tuning ###################################
###############################################################################
scenarios = []
scenarios.append([10])
scenarios.append([50])
scenarios.append([10,10])
scenarios.append([50,50])
scenarios.append([10,10,10])
scenarios.append([50,50,50])
scenarios.append([10,10,10,10])
scenarios.append([50,50,50,50])
scenarios.append([10,10,10,10,10])
scenarios.append([50,50,50,50,50])
epochs = 15
cnt = 1
for scenario in scenarios:
structure1 = scenario
model1 = Create_Model(structure1, 0 | random_line_split | |
Project4.py | uni_ex_var)
print('Reduced Shape of Unigram Data:',reduced_uni_train.shape)
with open('reduced_uni_train', 'wb') as f:
pickle.dump(reduced_uni_train, f)
with open('reduced_uni_validation', 'wb') as f:
pickle.dump(reduced_uni_validation, f)
with open('reduced_uni_test', 'wb') as f:
pickle.dump(reduced_uni_test, f)
with open('Y_Train', 'wb') as f:
pickle.dump(Y_Train, f)
with open('Y_Validation', 'wb') as f:
pickle.dump(Y_Validation, f)
with open('Y_Test', 'wb') as f:
pickle.dump(Y_Test, f)
"""
with open('reduced_uni_train', 'rb') as f:
reduced_uni_train = pickle.load(f)
with open('reduced_uni_validation', 'rb') as f:
reduced_uni_validation = pickle.load(f)
with open('reduced_uni_test', 'rb') as f:
reduced_uni_test = pickle.load(f)
with open('Y_Train', 'rb') as f:
Y_Train = pickle.load(f)
with open('Y_Validation', 'rb') as f:
Y_Validation = pickle.load(f)
with open('Y_Test', 'rb') as f:
Y_Test = pickle.load(f)
"""
########################################################################################################
###################################### Modelling #######################################################
########################################################################################################
#######################################################
## Baseline Classifier ##
#######################################################
Class_Freq_Train = pd.Series(Y_Train).value_counts()
print(Class_Freq_Train)
baseline_train = np.zeros((len(Y_Train)))
baseline_valid = np.zeros((len(Y_Validation)))
Score_Classifier(baseline_train, Y_Train, baseline_valid, Y_Validation, Classifier_Title = 'Baseline')
#######################################################
## MLP for Unigrams ##
#######################################################
from keras.callbacks import Callback
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
from keras.regularizers import l1
from keras.layers import LeakyReLU
class Metrics(Callback):
def on_train_begin(self, logs={}):
self.val_f1s = []
self.val_recalls = []
self.val_precisions = []
def on_epoch_end(self, epoch, logs={}):
val_predict = (np.asarray(self.model.predict(self.validation_data[0]))).round()
val_targ = self.validation_data[1]
_val_f1 = f1_score(Y_Validation, val_predict)
_val_recall = recall_score(val_targ, val_predict)
_val_precision = precision_score(val_targ, val_predict)
self.val_f1s.append(_val_f1)
self.val_recalls.append(_val_recall)
self.val_precisions.append(_val_precision)
print(' — val_f1: %f — val_precision: %f — val_recall %f' %(_val_f1, _val_precision, _val_recall))
print()
return
metrics = Metrics()
def Dense_Layer(input_tensor, n_neurons, l1_rate = 0.02, dropout_rate = 0):
X = Dense(n_neurons, kernel_regularizer= l1(l1_rate))(input_tensor)
X = BatchNormalization(axis = -1)(X)
X = LeakyReLU(alpha = 0.1)(X)
X = Dropout(dropout_rate)(X)
return X
def Create_Model(structure, l1_rate = 0, dropout_rate = 0, opt = 'adam', inpt = 10000):
X_input = Input((inpt,))
X = BatchNormalization(axis = -1)(X_input)
for i in range(0,len(structure)):
X = Dense_Layer(X, structure[i], l1_rate = l1_rate, dropout_rate = dropout_rate)
X = BatchNormalization(axis = -1)(X)
X = Dense(1, activation = 'sigmoid')(X)
model = Model(inputs = X_input, outputs = X, name='Sentiment Recognizer')
model.compile(optimizer = opt, loss = "binary_crossentropy", metrics = ['accuracy'])
return model
###############################################################################
###################### Structure Tuning ###################################
###############################################################################
scenarios = []
scenarios.append([10])
scenarios.append([50])
scenarios.append([10,10])
scenarios.append([50,50])
scenarios.append([10,10,10])
scenarios.append([50,50,50])
scenarios.append([10,10,10,10])
scenarios.append([50,50,50,50])
scenarios.append([10,10,10,10,10])
scenarios.append([50,50,50,50,50])
epochs = 15
cnt = 1
for scenario in scenarios:
structure1 = scenario
model1 = Create_Model(structure1, 0.005, 0.3, 'adam',10000)
model1.summary()
history1 = model1.fit(reduced_uni_train, Y_Train,
validation_data=(reduced_uni_validation, Y_Validation),
epochs=15,
batch_size=256,
callbacks=[metrics])
model1.evaluate(reduced_uni_test, Y_Test)
print('Number of Hidden Layers:',str(len(structure1)))
print('Number of Neurons per Layer:',str(structure1[0]))
plot_history(history1, epochs, 'Scenario' + str(cnt))
cnt+=1
final_structure1 = scenarios[9]
###############################################################################
###################### Optimizer Tuning ##############################
###############################################################################
model1 = Create_Model(final_structure1, 0.005, 0.3, 'adagrad',10000)
model1.summary()
history1 = model1.fit(reduced_uni_train, Y_Train,
validation_data=(reduced_uni_validation, Y_Validation),
epochs=epochs,
batch_size=256,
callbacks=[metrics])
plot_history(history1, epochs, 'adagrad', 1.0)
###############################################################################
###################### Regularization Tuning #########################
###############################################################################
epochs = 15
model1 = Create_Model(final_structure1, 0.011, 0, 'adagrad',10000)
model1.summary()
history1 = model1.fit(reduced_uni_train, Y_Train,
validation_data=(reduced_uni_validation, Y_Validation),
epochs=epochs,
batch_size=256,
callbacks=[metrics])
###############################################################################
############################# Final Model ############################
###############################################################################
epochs = 15
model1 = Create_Model(final_structure1, 0.011, 0, 'adagrad',10000)
model1.summary()
history1 = model1.fit(reduced_uni_train, Y_Train,
validation_data=(reduced_uni_validation, Y_Validation),
epochs=epochs,
batch_size=256,
callbacks=[metrics])
tr_pred = model1.predict(reduced_uni_train)
tr_pred[tr_pred>=0.5] = 1
tr_pred[tr_pred<0.5] = 0
ts_pred = model1.predict(reduced_uni_test)
ts_pred[ts_pred>=0.5] = 1
ts_pred[ts_pred<0.5] = 0
Score_Classifier(tr_pred, Y_Train, ts_pred, Y_Test,
labels = ['Negative','Positive'], Classifier_Title = 'Final Model',
evaluation_type = 'Test')
###############################################################################
################# Error Overview ####################
###############################################################################
true = np.array(Y_Test).reshape(2500,)/1.
predicted = ts_pred.reshape(2500,)/1.
# False Positive
fps = np.where((predicted== 1.)& (true ==0))[0]
index = np.random.choice(fps.shape[0], 1)[0]
X_Test[index]
# False Negative
fps = np.where((predicted== 0.)& (true ==1))[0]
index = np.random.choice(fps.shape[0], 1)[0]
X_Test[index]
#######################################################
## BIDIRECTIONAL RNN WITH MLP ON TOP ##
#######################################################
#Custom keras layer for linear attention over RNNs output states
from keras.engine.topology import Layer
from keras import initializers as initializers, regularizers, constraints
from keras import backend as K
from keras.layers import InputSpec
class AttentionWeightedAverage(Layer):
"""
| Computes a weighted average attention mechanism
"""
def __init__(self, return_attention=False, **kwargs):
self.init = initializers.get('uniform')
self.supports_masking = True
self.return_attention = return_attention
super(AttentionWeightedAverage, self).__init__(** kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(ndim=3)]
assert len(input_shape) == 3
self.w = self.add_weight(shape=(input_shape[2], 1),
name='{}_w'.format(self.name),
initializer=self.init)
self.trainable_weights = [self.w]
super(AttentionWeightedAverage, self).build(input_shape)
def call(self, h, mask=None): | identifier_body | |
Project4.py | hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
print(cell, end=" ")
print()
def pretty_percentage(amount):
print(str(np.round(amount*100,2)) + '%')
def clean_text(text):
"""
1. Remove html like text from europarl e.g. <Chapter 1>
2. Remove line breaks
3. Reduce all whitespaces to 1
4. turn everything to lower case
"""
regex = re.compile('[\.|\-|\,|\?|\_|\:|\"|\)|\(\)\/|\\|\>|\<]')
text = text.lower() # Turn everything to lower case
text = regex.sub(' ', text).strip()
out = re.sub(' +', ' ', text) # Reduce whitespace down to one
return out
########################################################################################################
##################################### Preprocessing ####################################################
########################################################################################################
tok = TweetTokenizer()
path = '.'
os.chdir(path)
raw = pd.read_csv('imdb_master.csv',encoding='iso-8859-1')
raw = raw[raw['label'] != 'unsup']
data = list(raw.review)
labels = raw.label
del raw
labels = list(labels.replace({'pos': 1, 'neg': 0}))
for i in range(0,len(data)):
clean = tok.tokenize(clean_text(data[i]))
clean = [word for word in clean if word != 'br']
clean = ' '.join(clean)
data[i] = clean
print('Review ' + str(i+1) + ' cleaned. Completed ' + str(round(i * 100 / len(data), 2)) + '%')
with open('data', 'wb') as f:
pickle.dump(data, f)
with open('labels', 'wb') as f:
pickle.dump(labels, f)
"""
with open('data', 'rb') as f:
data = pickle.load(f)
with open('labels', 'rb') as f:
labels = pickle.load(f)
"""
########################################################################################################
######################################## Split into Sets ##############################################
########################################################################################################
X_Train, X_Tune, Y_Train, Y_Tune = train_test_split(data,
labels,
test_size=0.10,
random_state = 40)
X_Validation, X_Test, Y_Validation, Y_Test = train_test_split(X_Tune,
Y_Tune,
test_size=0.5,
random_state = 40)
print('Training set size:', len(X_Train))
print('Validation set size:', len(X_Validation))
print('Test set size:', len(X_Test))
del X_Tune, Y_Tune
gc.collect()
########################################################################################################
########################################## Create Grams ################################################
########################################################################################################
Unigram_vectorizer = TfidfVectorizer(ngram_range = (1, 1),
min_df = 10,
analyzer = 'word',
stop_words = stopwords.words('english'))
Uni_X_Train = Unigram_vectorizer.fit_transform(X_Train)
Uni_X_Validation = Unigram_vectorizer.transform(X_Validation)
Uni_X_Test = Unigram_vectorizer.transform(X_Test)
print('Unigram Training Dataset:',Uni_X_Train.shape)
print('Unigram Validation Dataset:',Uni_X_Validation.shape)
print('Unigram Test Dataset:',Uni_X_Test.shape)
########################################################################################################
######################## Perform Truncated SVD to reduce dimensions ####################################
########################################################################################################
from sklearn.decomposition import TruncatedSVD
tsvd1 = TruncatedSVD(n_components=10000, random_state=42)
svd_model_uni = tsvd1.fit(Uni_X_Train)
uni_ex_var = np.sum(svd_model_uni.explained_variance_ratio_)
reduced_uni_train = svd_model_uni.transform(Uni_X_Train)
reduced_uni_validation = svd_model_uni.transform(Uni_X_Validation)
reduced_uni_test = svd_model_uni.transform(Uni_X_Test)
print('Initial Shape of Unigram Data:',Uni_X_Train.shape)
print('Explained Variance of Unigram model: ')
pretty_percentage(uni_ex_var)
print('Reduced Shape of Unigram Data:',reduced_uni_train.shape)
with open('reduced_uni_train', 'wb') as f:
pickle.dump(reduced_uni_train, f)
with open('reduced_uni_validation', 'wb') as f:
pickle.dump(reduced_uni_validation, f)
with open('reduced_uni_test', 'wb') as f:
pickle.dump(reduced_uni_test, f)
with open('Y_Train', 'wb') as f:
pickle.dump(Y_Train, f)
with open('Y_Validation', 'wb') as f:
pickle.dump(Y_Validation, f)
with open('Y_Test', 'wb') as f:
pickle.dump(Y_Test, f)
"""
with open('reduced_uni_train', 'rb') as f:
reduced_uni_train = pickle.load(f)
with open('reduced_uni_validation', 'rb') as f:
reduced_uni_validation = pickle.load(f)
with open('reduced_uni_test', 'rb') as f:
reduced_uni_test = pickle.load(f)
with open('Y_Train', 'rb') as f:
Y_Train = pickle.load(f)
with open('Y_Validation', 'rb') as f:
Y_Validation = pickle.load(f)
with open('Y_Test', 'rb') as f:
Y_Test = pickle.load(f)
"""
########################################################################################################
###################################### Modelling #######################################################
########################################################################################################
#######################################################
## Baseline Classifier ##
#######################################################
Class_Freq_Train = pd.Series(Y_Train).value_counts()
print(Class_Freq_Train)
baseline_train = np.zeros((len(Y_Train)))
baseline_valid = np.zeros((len(Y_Validation)))
Score_Classifier(baseline_train, Y_Train, baseline_valid, Y_Validation, Classifier_Title = 'Baseline')
#######################################################
## MLP for Unigrams ##
#######################################################
from keras.callbacks import Callback
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
from keras.regularizers import l1
from keras.layers import LeakyReLU
class | (Callback):
def on_train_begin(self, logs={}):
self.val_f1s = []
self.val_recalls = []
self.val_precisions = []
def on_epoch_end(self, epoch, logs={}):
val_predict = (np.asarray(self.model.predict(self.validation_data[0]))).round()
val_targ = self.validation_data[1]
_val_f1 = f1_score(Y_Validation, val_predict)
_val_recall = recall_score(val_targ, val_predict)
_val_precision = precision_score(val_targ, val_predict)
self.val_f1s.append(_val_f1)
self.val_recalls.append(_val_recall)
self.val_precisions.append(_val_precision)
print(' — val_f1: %f — val_precision: %f — val_recall %f' %(_val_f1, _val_precision, _val_recall))
print()
return
metrics = Metrics()
def Dense_Layer(input_tensor, n_neurons, l1_rate = 0.02, dropout_rate = 0):
X = Dense(n_neurons, kernel_regularizer= l1(l1_rate))(input_tensor)
X = BatchNormalization(axis = -1)(X)
X = LeakyReLU(alpha = 0.1)(X)
X = Dropout(dropout_rate)(X)
return X
def Create_Model(structure, l1_rate = 0, dropout_rate = 0, opt = 'adam', inpt = 10000):
X_input = Input((inpt,))
X = BatchNormalization(axis = -1)(X_input)
for i in range(0,len(structure)):
X = Dense_Layer(X, structure[i], l1_rate = l1_rate, dropout_rate = dropout_rate)
X = BatchNormalization(axis = -1)(X)
X = Dense(1, activation = 'sigmoid')(X)
model = Model(inputs = X_input, outputs = X, name='Sentiment Recognizer')
model.compile(optimizer = opt, loss = "binary_crossentropy", metrics = ['accuracy'])
return model
###############################################################################
###################### Structure Tuning ###################################
###############################################################################
scenarios = []
scenarios.append([10])
scenarios.append([50])
scenarios.append([10,10])
scenarios.append([50,50])
scenarios.append([10,10,10])
scenarios.append([50,50,50])
scenarios.append([10,10,10,10])
scenarios.append([50,50,50,50])
scenarios.append([10,10,10,10,10])
scenarios.append([50,50,50,50,50])
epochs = 15
cnt = 1
for scenario in scenarios:
structure1 = scenario
model1 = Create_Model(structure1, | Metrics | identifier_name |
Project4.py | (_val_f1)
self.val_recalls.append(_val_recall)
self.val_precisions.append(_val_precision)
print(' — val_f1: %f — val_precision: %f — val_recall %f' %(_val_f1, _val_precision, _val_recall))
print()
return
metrics = Metrics()
def Dense_Layer(input_tensor, n_neurons, l1_rate = 0.02, dropout_rate = 0):
X = Dense(n_neurons, kernel_regularizer= l1(l1_rate))(input_tensor)
X = BatchNormalization(axis = -1)(X)
X = LeakyReLU(alpha = 0.1)(X)
X = Dropout(dropout_rate)(X)
return X
def Create_Model(structure, l1_rate = 0, dropout_rate = 0, opt = 'adam', inpt = 10000):
X_input = Input((inpt,))
X = BatchNormalization(axis = -1)(X_input)
for i in range(0,len(structure)):
X = Dense_Layer(X, structure[i], l1_rate = l1_rate, dropout_rate = dropout_rate)
X = BatchNormalization(axis = -1)(X)
X = Dense(1, activation = 'sigmoid')(X)
model = Model(inputs = X_input, outputs = X, name='Sentiment Recognizer')
model.compile(optimizer = opt, loss = "binary_crossentropy", metrics = ['accuracy'])
return model
###############################################################################
###################### Structure Tuning ###################################
###############################################################################
scenarios = []
scenarios.append([10])
scenarios.append([50])
scenarios.append([10,10])
scenarios.append([50,50])
scenarios.append([10,10,10])
scenarios.append([50,50,50])
scenarios.append([10,10,10,10])
scenarios.append([50,50,50,50])
scenarios.append([10,10,10,10,10])
scenarios.append([50,50,50,50,50])
epochs = 15
cnt = 1
for scenario in scenarios:
structure1 = scenario
model1 = Create_Model(structure1, 0.005, 0.3, 'adam',10000)
model1.summary()
history1 = model1.fit(reduced_uni_train, Y_Train,
validation_data=(reduced_uni_validation, Y_Validation),
epochs=15,
batch_size=256,
callbacks=[metrics])
model1.evaluate(reduced_uni_test, Y_Test)
print('Number of Hidden Layers:',str(len(structure1)))
print('Number of Neurons per Layer:',str(structure1[0]))
plot_history(history1, epochs, 'Scenario' + str(cnt))
cnt+=1
final_structure1 = scenarios[9]
###############################################################################
###################### Optimizer Tuning ##############################
###############################################################################
model1 = Create_Model(final_structure1, 0.005, 0.3, 'adagrad',10000)
model1.summary()
history1 = model1.fit(reduced_uni_train, Y_Train,
validation_data=(reduced_uni_validation, Y_Validation),
epochs=epochs,
batch_size=256,
callbacks=[metrics])
plot_history(history1, epochs, 'adagrad', 1.0)
###############################################################################
###################### Regularization Tuning #########################
###############################################################################
epochs = 15
model1 = Create_Model(final_structure1, 0.011, 0, 'adagrad',10000)
model1.summary()
history1 = model1.fit(reduced_uni_train, Y_Train,
validation_data=(reduced_uni_validation, Y_Validation),
epochs=epochs,
batch_size=256,
callbacks=[metrics])
###############################################################################
############################# Final Model ############################
###############################################################################
epochs = 15
model1 = Create_Model(final_structure1, 0.011, 0, 'adagrad',10000)
model1.summary()
history1 = model1.fit(reduced_uni_train, Y_Train,
validation_data=(reduced_uni_validation, Y_Validation),
epochs=epochs,
batch_size=256,
callbacks=[metrics])
tr_pred = model1.predict(reduced_uni_train)
tr_pred[tr_pred>=0.5] = 1
tr_pred[tr_pred<0.5] = 0
ts_pred = model1.predict(reduced_uni_test)
ts_pred[ts_pred>=0.5] = 1
ts_pred[ts_pred<0.5] = 0
Score_Classifier(tr_pred, Y_Train, ts_pred, Y_Test,
labels = ['Negative','Positive'], Classifier_Title = 'Final Model',
evaluation_type = 'Test')
###############################################################################
################# Error Overview ####################
###############################################################################
true = np.array(Y_Test).reshape(2500,)/1.
predicted = ts_pred.reshape(2500,)/1.
# False Positive
fps = np.where((predicted== 1.)& (true ==0))[0]
index = np.random.choice(fps.shape[0], 1)[0]
X_Test[index]
# False Negative
fps = np.where((predicted== 0.)& (true ==1))[0]
index = np.random.choice(fps.shape[0], 1)[0]
X_Test[index]
#######################################################
## BIDIRECTIONAL RNN WITH MLP ON TOP ##
#######################################################
#Custom keras layer for linear attention over RNNs output states
from keras.engine.topology import Layer
from keras import initializers as initializers, regularizers, constraints
from keras import backend as K
from keras.layers import InputSpec
class AttentionWeightedAverage(Layer):
"""
Computes a weighted average attention mechanism
"""
def __init__(self, return_attention=False, **kwargs):
self.init = initializers.get('uniform')
self.supports_masking = True
self.return_attention = return_attention
super(AttentionWeightedAverage, self).__init__(** kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(ndim=3)]
assert len(input_shape) == 3
self.w = self.add_weight(shape=(input_shape[2], 1),
name='{}_w'.format(self.name),
initializer=self.init)
self.trainable_weights = [self.w]
super(AttentionWeightedAverage, self).build(input_shape)
def call(self, h, mask=None):
h_shape = K.shape(h)
d_w, T = h_shape[0], h_shape[1]
logits = K.dot(h, self.w) # w^T h
logits = K.reshape(logits, (d_w, T))
alpha = K.exp(logits - K.max(logits, axis=-1, keepdims=True)) # exp
# masked timesteps have zero weight
if mask is not None:
mask = K.cast(mask, K.floatx())
alpha = alpha * mask
alpha = alpha / K.sum(alpha, axis=1, keepdims=True) # softmax
r = K.sum(h * K.expand_dims(alpha), axis=1) # r = h*alpha^T
h_star = K.tanh(r) # h^* = tanh(r)
if self.return_attention:
return [h_star, alpha]
return h_star
def get_output_shape_for(self, input_shape):
return self.compute_output_shape(input_shape)
def compute_output_shape(self, input_shape):
output_len = input_shape[2]
if self.return_attention:
return [(input_shape[0], output_len), (input_shape[0], input_shape[1])]
return (input_shape[0], output_len)
def compute_mask(self, input, input_mask=None):
if isinstance(input_mask, list):
return [None] * len(input_mask)
else:
return None
# Set Maximum number of words to be embedded
NUM_WORDS = 20000
# load tokenizer from keras
from keras.preprocessing.text import Tokenizer
# Define/Load Tokenize text function
tokenizer = Tokenizer(num_words=NUM_WORDS,filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n\'',lower=True)
# Fit the function on the text
tokenizer.fit_on_texts(X_Train)
# Count number of unique tokens
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
word_vectors = dict()
# load the whole embedding into memory
f = open('glove.6B.300d.txt', encoding="utf8")
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
word_vectors[word] = coefs
f.close()
print('Loaded %s word vectors.' % len(word_vectors))
EMBEDDING_DIM=300
vocabulary_size=min(len(word_index)+1,(NUM_WORDS))
embedding_matrix = np.zeros((vocabulary_size, EMBEDDING_DIM))
for word, i in word_index.items():
if i>=NUM_WORDS:
contin | ue
t | conditional_block | |
transformer.go | {"result"})
feastFeatureStatus = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: transformer.PromNamespace,
Name: "feast_feature_status_count",
Help: "Feature status by feature",
}, []string{"feature", "status"})
feastFeatureSummary = promauto.NewSummaryVec(prometheus.SummaryOpts{
Namespace: transformer.PromNamespace,
Name: "feast_feature_value",
Help: "Summary of feature value",
AgeBuckets: 1,
}, []string{"feature"})
)
// Options for the Feast transformer.
type Options struct {
ServingURL string `envconfig:"FEAST_SERVING_URL" required:"true"`
StatusMonitoringEnabled bool `envconfig:"FEAST_FEATURE_STATUS_MONITORING_ENABLED" default:"false"`
ValueMonitoringEnabled bool `envconfig:"FEAST_FEATURE_VALUE_MONITORING_ENABLED" default:"false"`
}
// Transformer wraps feast serving client to retrieve features.
type Transformer struct {
feastClient feast.Client
config *transformer.StandardTransformerConfig
logger *zap.Logger
options *Options
defaultValues map[string]*types.Value
compiledJsonPath map[string]*jsonpath.Compiled
compiledUdf map[string]*vm.Program
}
// NewTransformer initializes a new Transformer.
func NewTransformer(feastClient feast.Client, config *transformer.StandardTransformerConfig, options *Options, logger *zap.Logger) (*Transformer, error) {
defaultValues := make(map[string]*types.Value)
// populate default values
for _, ft := range config.TransformerConfig.Feast {
for _, f := range ft.Features {
if len(f.DefaultValue) != 0 {
feastValType := types.ValueType_Enum(types.ValueType_Enum_value[f.ValueType])
defVal, err := getValue(f.DefaultValue, feastValType)
if err != nil {
logger.Warn(fmt.Sprintf("invalid default value for %s : %v, %v", f.Name, f.DefaultValue, err))
continue
}
defaultValues[f.Name] = defVal
}
}
}
compiledJsonPath := make(map[string]*jsonpath.Compiled)
compiledUdf := make(map[string]*vm.Program)
for _, ft := range config.TransformerConfig.Feast {
for _, configEntity := range ft.Entities {
switch configEntity.Extractor.(type) {
case *transformer.Entity_JsonPath:
c, err := jsonpath.Compile(configEntity.GetJsonPath())
if err != nil {
return nil, fmt.Errorf("unable to compile jsonpath for entity %s: %s", configEntity.Name, configEntity.GetJsonPath())
}
compiledJsonPath[configEntity.GetJsonPath()] = c
case *transformer.Entity_Udf:
c, err := expr.Compile(configEntity.GetUdf(), expr.Env(UdfEnv{}))
if err != nil {
return nil, err
}
compiledUdf[configEntity.GetUdf()] = c
}
}
}
return &Transformer{
feastClient: feastClient,
config: config,
options: options,
logger: logger,
defaultValues: defaultValues,
compiledJsonPath: compiledJsonPath,
compiledUdf: compiledUdf,
}, nil
}
type FeastFeature struct {
Columns []string `json:"columns"`
Data [][]interface{} `json:"data"`
}
type result struct {
tableName string
feastFeature *FeastFeature
err error
}
// Transform retrieves the Feast features values and add them into the request.
func (t *Transformer) Transform(ctx context.Context, request []byte) ([]byte, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.Transform")
defer span.Finish()
feastFeatures := make(map[string]*FeastFeature, len(t.config.TransformerConfig.Feast))
// parallelize feast call per feature table
resChan := make(chan result, len(t.config.TransformerConfig.Feast))
for _, config := range t.config.TransformerConfig.Feast {
go func(cfg *transformer.FeatureTable) {
tableName := createTableName(cfg.Entities)
val, err := t.getFeastFeature(ctx, tableName, request, cfg)
resChan <- result{tableName, val, err}
}(config)
}
// collect result
for i := 0; i < cap(resChan); i++ {
res := <-resChan
if res.err != nil {
return nil, res.err
}
feastFeatures[res.tableName] = res.feastFeature
}
out, err := enrichRequest(ctx, request, feastFeatures)
if err != nil {
return nil, err
}
return out, err
}
func (t *Transformer) getFeastFeature(ctx context.Context, tableName string, request []byte, config *transformer.FeatureTable) (*FeastFeature, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.getFeastFeature")
span.SetTag("table.name", tableName)
defer span.Finish()
entities, err := t.buildEntitiesRequest(ctx, request, config.Entities)
if err != nil {
return nil, err
}
var features []string
for _, feature := range config.Features {
features = append(features, feature.Name)
}
feastRequest := feast.OnlineFeaturesRequest{
Project: config.Project,
Entities: entities,
Features: features,
}
t.logger.Debug("feast_request", zap.Any("feast_request", feastRequest))
startTime := time.Now()
feastResponse, err := t.feastClient.GetOnlineFeatures(ctx, &feastRequest)
durationMs := time.Now().Sub(startTime).Milliseconds()
if err != nil {
feastLatency.WithLabelValues("error").Observe(float64(durationMs))
feastError.Inc()
return nil, err
}
feastLatency.WithLabelValues("success").Observe(float64(durationMs))
t.logger.Debug("feast_response", zap.Any("feast_response", feastResponse.Rows()))
feastFeature, err := t.buildFeastFeatures(ctx, feastResponse, config)
if err != nil {
return nil, err
}
return feastFeature, nil
}
func (t *Transformer) buildEntitiesRequest(ctx context.Context, request []byte, configEntities []*transformer.Entity) ([]feast.Row, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.buildEntitiesRequest")
defer span.Finish()
var entities []feast.Row
var nodesBody interface{}
err := json.Unmarshal(request, &nodesBody)
if err != nil {
return nil, err
}
for _, configEntity := range configEntities {
switch configEntity.Extractor.(type) {
case *transformer.Entity_JsonPath:
_, ok := t.compiledJsonPath[configEntity.GetJsonPath()]
if !ok {
c, err := jsonpath.Compile(configEntity.GetJsonPath())
if err != nil {
return nil, fmt.Errorf("unable to compile jsonpath for entity %s: %s", configEntity.Name, configEntity.GetJsonPath())
}
t.compiledJsonPath[configEntity.GetJsonPath()] = c
}
}
vals, err := getValuesFromJSONPayload(nodesBody, configEntity, t.compiledJsonPath[configEntity.GetJsonPath()], t.compiledUdf[configEntity.GetUdf()])
if err != nil {
return nil, fmt.Errorf("unable to extract entity %s: %v", configEntity.Name, err)
}
if len(entities) == 0 { | for _, val := range vals {
entities = append(entities, feast.Row{
configEntity.Name: val,
})
}
} else {
newEntities := []feast.Row{}
for _, entity := range entities {
for _, val := range vals {
newFeastRow := feast.Row{}
for k, v := range entity {
newFeastRow[k] = v
}
newFeastRow[configEntity.Name] = val
newEntities = append(newEntities, newFeastRow)
}
}
entities = newEntities
}
}
return entities, nil
}
func (t *Transformer) buildFeastFeatures(ctx context.Context, feastResponse *feast.OnlineFeaturesResponse, config *transformer.FeatureTable) (*FeastFeature, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.buildFeastFeatures")
defer span.Finish()
var columns []string
for _, entity := range config.Entities {
columns = append(columns, entity.Name)
}
for _, feature := range config.Features {
columns = append(columns, feature.Name)
}
var data [][]interface{}
status := feastResponse.Statuses()
for i, feastRow := range feastResponse.Rows() {
var row []interface{}
for _, column := range columns {
featureStatus := status[i][column]
switch featureStatus {
case serving.GetOnlineFeaturesResponse_PRESENT:
rawValue := feastRow[column]
featVal, err := getFeatureValue(rawValue)
if err != nil {
return nil, err
}
row = append(row, featVal)
// put behind feature toggle since it will generate high cardinality metrics
if t.options.ValueMonitoringEnabled | random_line_split | |
transformer.go | feastValType)
if err != nil {
logger.Warn(fmt.Sprintf("invalid default value for %s : %v, %v", f.Name, f.DefaultValue, err))
continue
}
defaultValues[f.Name] = defVal
}
}
}
compiledJsonPath := make(map[string]*jsonpath.Compiled)
compiledUdf := make(map[string]*vm.Program)
for _, ft := range config.TransformerConfig.Feast {
for _, configEntity := range ft.Entities {
switch configEntity.Extractor.(type) {
case *transformer.Entity_JsonPath:
c, err := jsonpath.Compile(configEntity.GetJsonPath())
if err != nil {
return nil, fmt.Errorf("unable to compile jsonpath for entity %s: %s", configEntity.Name, configEntity.GetJsonPath())
}
compiledJsonPath[configEntity.GetJsonPath()] = c
case *transformer.Entity_Udf:
c, err := expr.Compile(configEntity.GetUdf(), expr.Env(UdfEnv{}))
if err != nil {
return nil, err
}
compiledUdf[configEntity.GetUdf()] = c
}
}
}
return &Transformer{
feastClient: feastClient,
config: config,
options: options,
logger: logger,
defaultValues: defaultValues,
compiledJsonPath: compiledJsonPath,
compiledUdf: compiledUdf,
}, nil
}
type FeastFeature struct {
Columns []string `json:"columns"`
Data [][]interface{} `json:"data"`
}
type result struct {
tableName string
feastFeature *FeastFeature
err error
}
// Transform retrieves the Feast features values and add them into the request.
func (t *Transformer) Transform(ctx context.Context, request []byte) ([]byte, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.Transform")
defer span.Finish()
feastFeatures := make(map[string]*FeastFeature, len(t.config.TransformerConfig.Feast))
// parallelize feast call per feature table
resChan := make(chan result, len(t.config.TransformerConfig.Feast))
for _, config := range t.config.TransformerConfig.Feast {
go func(cfg *transformer.FeatureTable) {
tableName := createTableName(cfg.Entities)
val, err := t.getFeastFeature(ctx, tableName, request, cfg)
resChan <- result{tableName, val, err}
}(config)
}
// collect result
for i := 0; i < cap(resChan); i++ {
res := <-resChan
if res.err != nil {
return nil, res.err
}
feastFeatures[res.tableName] = res.feastFeature
}
out, err := enrichRequest(ctx, request, feastFeatures)
if err != nil {
return nil, err
}
return out, err
}
func (t *Transformer) getFeastFeature(ctx context.Context, tableName string, request []byte, config *transformer.FeatureTable) (*FeastFeature, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.getFeastFeature")
span.SetTag("table.name", tableName)
defer span.Finish()
entities, err := t.buildEntitiesRequest(ctx, request, config.Entities)
if err != nil {
return nil, err
}
var features []string
for _, feature := range config.Features {
features = append(features, feature.Name)
}
feastRequest := feast.OnlineFeaturesRequest{
Project: config.Project,
Entities: entities,
Features: features,
}
t.logger.Debug("feast_request", zap.Any("feast_request", feastRequest))
startTime := time.Now()
feastResponse, err := t.feastClient.GetOnlineFeatures(ctx, &feastRequest)
durationMs := time.Now().Sub(startTime).Milliseconds()
if err != nil {
feastLatency.WithLabelValues("error").Observe(float64(durationMs))
feastError.Inc()
return nil, err
}
feastLatency.WithLabelValues("success").Observe(float64(durationMs))
t.logger.Debug("feast_response", zap.Any("feast_response", feastResponse.Rows()))
feastFeature, err := t.buildFeastFeatures(ctx, feastResponse, config)
if err != nil {
return nil, err
}
return feastFeature, nil
}
func (t *Transformer) buildEntitiesRequest(ctx context.Context, request []byte, configEntities []*transformer.Entity) ([]feast.Row, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.buildEntitiesRequest")
defer span.Finish()
var entities []feast.Row
var nodesBody interface{}
err := json.Unmarshal(request, &nodesBody)
if err != nil {
return nil, err
}
for _, configEntity := range configEntities {
switch configEntity.Extractor.(type) {
case *transformer.Entity_JsonPath:
_, ok := t.compiledJsonPath[configEntity.GetJsonPath()]
if !ok {
c, err := jsonpath.Compile(configEntity.GetJsonPath())
if err != nil {
return nil, fmt.Errorf("unable to compile jsonpath for entity %s: %s", configEntity.Name, configEntity.GetJsonPath())
}
t.compiledJsonPath[configEntity.GetJsonPath()] = c
}
}
vals, err := getValuesFromJSONPayload(nodesBody, configEntity, t.compiledJsonPath[configEntity.GetJsonPath()], t.compiledUdf[configEntity.GetUdf()])
if err != nil {
return nil, fmt.Errorf("unable to extract entity %s: %v", configEntity.Name, err)
}
if len(entities) == 0 {
for _, val := range vals {
entities = append(entities, feast.Row{
configEntity.Name: val,
})
}
} else {
newEntities := []feast.Row{}
for _, entity := range entities {
for _, val := range vals {
newFeastRow := feast.Row{}
for k, v := range entity {
newFeastRow[k] = v
}
newFeastRow[configEntity.Name] = val
newEntities = append(newEntities, newFeastRow)
}
}
entities = newEntities
}
}
return entities, nil
}
func (t *Transformer) buildFeastFeatures(ctx context.Context, feastResponse *feast.OnlineFeaturesResponse, config *transformer.FeatureTable) (*FeastFeature, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.buildFeastFeatures")
defer span.Finish()
var columns []string
for _, entity := range config.Entities {
columns = append(columns, entity.Name)
}
for _, feature := range config.Features {
columns = append(columns, feature.Name)
}
var data [][]interface{}
status := feastResponse.Statuses()
for i, feastRow := range feastResponse.Rows() {
var row []interface{}
for _, column := range columns {
featureStatus := status[i][column]
switch featureStatus {
case serving.GetOnlineFeaturesResponse_PRESENT:
rawValue := feastRow[column]
featVal, err := getFeatureValue(rawValue)
if err != nil {
return nil, err
}
row = append(row, featVal)
// put behind feature toggle since it will generate high cardinality metrics
if t.options.ValueMonitoringEnabled {
v, err := getFloatValue(featVal)
if err != nil {
continue
}
feastFeatureSummary.WithLabelValues(column).Observe(v)
}
case serving.GetOnlineFeaturesResponse_NOT_FOUND, serving.GetOnlineFeaturesResponse_NULL_VALUE, serving.GetOnlineFeaturesResponse_OUTSIDE_MAX_AGE:
defVal, ok := t.defaultValues[column]
if !ok {
row = append(row, nil)
continue
}
featVal, err := getFeatureValue(defVal)
if err != nil {
return nil, err
}
row = append(row, featVal)
default:
return nil, fmt.Errorf("Unsupported feature retrieval status: %s", featureStatus)
}
// put behind feature toggle since it will generate high cardinality metrics
if t.options.StatusMonitoringEnabled {
feastFeatureStatus.WithLabelValues(column, featureStatus.String()).Inc()
}
}
data = append(data, row)
}
return &FeastFeature{
Columns: columns,
Data: data,
}, nil
}
func getFloatValue(val interface{}) (float64, error) {
switch i := val.(type) {
case float64:
return i, nil
case float32:
return float64(i), nil
case int64:
return float64(i), nil
case int32:
return float64(i), nil
default:
return math.NaN(), errors.New("getFloat: unknown value is of incompatible type")
}
}
func createTableName(entities []*transformer.Entity) string | {
entityNames := make([]string, 0)
for _, n := range entities {
entityNames = append(entityNames, n.Name)
}
return strings.Join(entityNames, "_")
} | identifier_body | |
transformer.go |
compiledJsonPath map[string]*jsonpath.Compiled
compiledUdf map[string]*vm.Program
}
// NewTransformer initializes a new Transformer.
func NewTransformer(feastClient feast.Client, config *transformer.StandardTransformerConfig, options *Options, logger *zap.Logger) (*Transformer, error) {
defaultValues := make(map[string]*types.Value)
// populate default values
for _, ft := range config.TransformerConfig.Feast {
for _, f := range ft.Features {
if len(f.DefaultValue) != 0 {
feastValType := types.ValueType_Enum(types.ValueType_Enum_value[f.ValueType])
defVal, err := getValue(f.DefaultValue, feastValType)
if err != nil {
logger.Warn(fmt.Sprintf("invalid default value for %s : %v, %v", f.Name, f.DefaultValue, err))
continue
}
defaultValues[f.Name] = defVal
}
}
}
compiledJsonPath := make(map[string]*jsonpath.Compiled)
compiledUdf := make(map[string]*vm.Program)
for _, ft := range config.TransformerConfig.Feast {
for _, configEntity := range ft.Entities {
switch configEntity.Extractor.(type) {
case *transformer.Entity_JsonPath:
c, err := jsonpath.Compile(configEntity.GetJsonPath())
if err != nil {
return nil, fmt.Errorf("unable to compile jsonpath for entity %s: %s", configEntity.Name, configEntity.GetJsonPath())
}
compiledJsonPath[configEntity.GetJsonPath()] = c
case *transformer.Entity_Udf:
c, err := expr.Compile(configEntity.GetUdf(), expr.Env(UdfEnv{}))
if err != nil {
return nil, err
}
compiledUdf[configEntity.GetUdf()] = c
}
}
}
return &Transformer{
feastClient: feastClient,
config: config,
options: options,
logger: logger,
defaultValues: defaultValues,
compiledJsonPath: compiledJsonPath,
compiledUdf: compiledUdf,
}, nil
}
type FeastFeature struct {
Columns []string `json:"columns"`
Data [][]interface{} `json:"data"`
}
type result struct {
tableName string
feastFeature *FeastFeature
err error
}
// Transform retrieves the Feast features values and add them into the request.
func (t *Transformer) Transform(ctx context.Context, request []byte) ([]byte, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.Transform")
defer span.Finish()
feastFeatures := make(map[string]*FeastFeature, len(t.config.TransformerConfig.Feast))
// parallelize feast call per feature table
resChan := make(chan result, len(t.config.TransformerConfig.Feast))
for _, config := range t.config.TransformerConfig.Feast {
go func(cfg *transformer.FeatureTable) {
tableName := createTableName(cfg.Entities)
val, err := t.getFeastFeature(ctx, tableName, request, cfg)
resChan <- result{tableName, val, err}
}(config)
}
// collect result
for i := 0; i < cap(resChan); i++ {
res := <-resChan
if res.err != nil {
return nil, res.err
}
feastFeatures[res.tableName] = res.feastFeature
}
out, err := enrichRequest(ctx, request, feastFeatures)
if err != nil {
return nil, err
}
return out, err
}
func (t *Transformer) getFeastFeature(ctx context.Context, tableName string, request []byte, config *transformer.FeatureTable) (*FeastFeature, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.getFeastFeature")
span.SetTag("table.name", tableName)
defer span.Finish()
entities, err := t.buildEntitiesRequest(ctx, request, config.Entities)
if err != nil {
return nil, err
}
var features []string
for _, feature := range config.Features {
features = append(features, feature.Name)
}
feastRequest := feast.OnlineFeaturesRequest{
Project: config.Project,
Entities: entities,
Features: features,
}
t.logger.Debug("feast_request", zap.Any("feast_request", feastRequest))
startTime := time.Now()
feastResponse, err := t.feastClient.GetOnlineFeatures(ctx, &feastRequest)
durationMs := time.Now().Sub(startTime).Milliseconds()
if err != nil {
feastLatency.WithLabelValues("error").Observe(float64(durationMs))
feastError.Inc()
return nil, err
}
feastLatency.WithLabelValues("success").Observe(float64(durationMs))
t.logger.Debug("feast_response", zap.Any("feast_response", feastResponse.Rows()))
feastFeature, err := t.buildFeastFeatures(ctx, feastResponse, config)
if err != nil {
return nil, err
}
return feastFeature, nil
}
func (t *Transformer) buildEntitiesRequest(ctx context.Context, request []byte, configEntities []*transformer.Entity) ([]feast.Row, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.buildEntitiesRequest")
defer span.Finish()
var entities []feast.Row
var nodesBody interface{}
err := json.Unmarshal(request, &nodesBody)
if err != nil {
return nil, err
}
for _, configEntity := range configEntities {
switch configEntity.Extractor.(type) {
case *transformer.Entity_JsonPath:
_, ok := t.compiledJsonPath[configEntity.GetJsonPath()]
if !ok {
c, err := jsonpath.Compile(configEntity.GetJsonPath())
if err != nil {
return nil, fmt.Errorf("unable to compile jsonpath for entity %s: %s", configEntity.Name, configEntity.GetJsonPath())
}
t.compiledJsonPath[configEntity.GetJsonPath()] = c
}
}
vals, err := getValuesFromJSONPayload(nodesBody, configEntity, t.compiledJsonPath[configEntity.GetJsonPath()], t.compiledUdf[configEntity.GetUdf()])
if err != nil {
return nil, fmt.Errorf("unable to extract entity %s: %v", configEntity.Name, err)
}
if len(entities) == 0 {
for _, val := range vals {
entities = append(entities, feast.Row{
configEntity.Name: val,
})
}
} else {
newEntities := []feast.Row{}
for _, entity := range entities {
for _, val := range vals {
newFeastRow := feast.Row{}
for k, v := range entity {
newFeastRow[k] = v
}
newFeastRow[configEntity.Name] = val
newEntities = append(newEntities, newFeastRow)
}
}
entities = newEntities
}
}
return entities, nil
}
func (t *Transformer) buildFeastFeatures(ctx context.Context, feastResponse *feast.OnlineFeaturesResponse, config *transformer.FeatureTable) (*FeastFeature, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.buildFeastFeatures")
defer span.Finish()
var columns []string
for _, entity := range config.Entities {
columns = append(columns, entity.Name)
}
for _, feature := range config.Features {
columns = append(columns, feature.Name)
}
var data [][]interface{}
status := feastResponse.Statuses()
for i, feastRow := range feastResponse.Rows() {
var row []interface{}
for _, column := range columns {
featureStatus := status[i][column]
switch featureStatus {
case serving.GetOnlineFeaturesResponse_PRESENT:
rawValue := feastRow[column]
featVal, err := getFeatureValue(rawValue)
if err != nil {
return nil, err
}
row = append(row, featVal)
// put behind feature toggle since it will generate high cardinality metrics
if t.options.ValueMonitoringEnabled {
v, err := getFloatValue(featVal)
if err != nil {
continue
}
feastFeatureSummary.WithLabelValues(column).Observe(v)
}
case serving.GetOnlineFeaturesResponse_NOT_FOUND, serving.GetOnlineFeaturesResponse_NULL_VALUE, serving.GetOnlineFeaturesResponse_OUTSIDE_MAX_AGE:
defVal, ok := t.defaultValues[column]
if !ok {
row = append(row, nil)
continue
}
featVal, err := getFeatureValue(defVal)
if err != nil {
return nil, err
}
row = append(row, featVal)
default:
return nil, fmt.Errorf("Unsupported feature retrieval status: %s", featureStatus)
}
// put behind feature toggle since it will generate high cardinality metrics
if t.options.StatusMonitoringEnabled {
feastFeatureStatus.WithLabelValues(column, featureStatus.String()).Inc()
}
}
data = append(data, row)
}
return &FeastFeature{
Columns: columns,
Data: data,
}, nil
}
func | getFloatValue | identifier_name | |
transformer.go | Help: "Feature status by feature",
}, []string{"feature", "status"})
feastFeatureSummary = promauto.NewSummaryVec(prometheus.SummaryOpts{
Namespace: transformer.PromNamespace,
Name: "feast_feature_value",
Help: "Summary of feature value",
AgeBuckets: 1,
}, []string{"feature"})
)
// Options for the Feast transformer.
type Options struct {
ServingURL string `envconfig:"FEAST_SERVING_URL" required:"true"`
StatusMonitoringEnabled bool `envconfig:"FEAST_FEATURE_STATUS_MONITORING_ENABLED" default:"false"`
ValueMonitoringEnabled bool `envconfig:"FEAST_FEATURE_VALUE_MONITORING_ENABLED" default:"false"`
}
// Transformer wraps feast serving client to retrieve features.
type Transformer struct {
feastClient feast.Client
config *transformer.StandardTransformerConfig
logger *zap.Logger
options *Options
defaultValues map[string]*types.Value
compiledJsonPath map[string]*jsonpath.Compiled
compiledUdf map[string]*vm.Program
}
// NewTransformer initializes a new Transformer.
func NewTransformer(feastClient feast.Client, config *transformer.StandardTransformerConfig, options *Options, logger *zap.Logger) (*Transformer, error) {
defaultValues := make(map[string]*types.Value)
// populate default values
for _, ft := range config.TransformerConfig.Feast {
for _, f := range ft.Features {
if len(f.DefaultValue) != 0 {
feastValType := types.ValueType_Enum(types.ValueType_Enum_value[f.ValueType])
defVal, err := getValue(f.DefaultValue, feastValType)
if err != nil {
logger.Warn(fmt.Sprintf("invalid default value for %s : %v, %v", f.Name, f.DefaultValue, err))
continue
}
defaultValues[f.Name] = defVal
}
}
}
compiledJsonPath := make(map[string]*jsonpath.Compiled)
compiledUdf := make(map[string]*vm.Program)
for _, ft := range config.TransformerConfig.Feast {
for _, configEntity := range ft.Entities {
switch configEntity.Extractor.(type) {
case *transformer.Entity_JsonPath:
c, err := jsonpath.Compile(configEntity.GetJsonPath())
if err != nil {
return nil, fmt.Errorf("unable to compile jsonpath for entity %s: %s", configEntity.Name, configEntity.GetJsonPath())
}
compiledJsonPath[configEntity.GetJsonPath()] = c
case *transformer.Entity_Udf:
c, err := expr.Compile(configEntity.GetUdf(), expr.Env(UdfEnv{}))
if err != nil {
return nil, err
}
compiledUdf[configEntity.GetUdf()] = c
}
}
}
return &Transformer{
feastClient: feastClient,
config: config,
options: options,
logger: logger,
defaultValues: defaultValues,
compiledJsonPath: compiledJsonPath,
compiledUdf: compiledUdf,
}, nil
}
type FeastFeature struct {
Columns []string `json:"columns"`
Data [][]interface{} `json:"data"`
}
type result struct {
tableName string
feastFeature *FeastFeature
err error
}
// Transform retrieves the Feast features values and add them into the request.
func (t *Transformer) Transform(ctx context.Context, request []byte) ([]byte, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.Transform")
defer span.Finish()
feastFeatures := make(map[string]*FeastFeature, len(t.config.TransformerConfig.Feast))
// parallelize feast call per feature table
resChan := make(chan result, len(t.config.TransformerConfig.Feast))
for _, config := range t.config.TransformerConfig.Feast {
go func(cfg *transformer.FeatureTable) {
tableName := createTableName(cfg.Entities)
val, err := t.getFeastFeature(ctx, tableName, request, cfg)
resChan <- result{tableName, val, err}
}(config)
}
// collect result
for i := 0; i < cap(resChan); i++ {
res := <-resChan
if res.err != nil {
return nil, res.err
}
feastFeatures[res.tableName] = res.feastFeature
}
out, err := enrichRequest(ctx, request, feastFeatures)
if err != nil {
return nil, err
}
return out, err
}
func (t *Transformer) getFeastFeature(ctx context.Context, tableName string, request []byte, config *transformer.FeatureTable) (*FeastFeature, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.getFeastFeature")
span.SetTag("table.name", tableName)
defer span.Finish()
entities, err := t.buildEntitiesRequest(ctx, request, config.Entities)
if err != nil {
return nil, err
}
var features []string
for _, feature := range config.Features {
features = append(features, feature.Name)
}
feastRequest := feast.OnlineFeaturesRequest{
Project: config.Project,
Entities: entities,
Features: features,
}
t.logger.Debug("feast_request", zap.Any("feast_request", feastRequest))
startTime := time.Now()
feastResponse, err := t.feastClient.GetOnlineFeatures(ctx, &feastRequest)
durationMs := time.Now().Sub(startTime).Milliseconds()
if err != nil {
feastLatency.WithLabelValues("error").Observe(float64(durationMs))
feastError.Inc()
return nil, err
}
feastLatency.WithLabelValues("success").Observe(float64(durationMs))
t.logger.Debug("feast_response", zap.Any("feast_response", feastResponse.Rows()))
feastFeature, err := t.buildFeastFeatures(ctx, feastResponse, config)
if err != nil {
return nil, err
}
return feastFeature, nil
}
func (t *Transformer) buildEntitiesRequest(ctx context.Context, request []byte, configEntities []*transformer.Entity) ([]feast.Row, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.buildEntitiesRequest")
defer span.Finish()
var entities []feast.Row
var nodesBody interface{}
err := json.Unmarshal(request, &nodesBody)
if err != nil {
return nil, err
}
for _, configEntity := range configEntities {
switch configEntity.Extractor.(type) {
case *transformer.Entity_JsonPath:
_, ok := t.compiledJsonPath[configEntity.GetJsonPath()]
if !ok {
c, err := jsonpath.Compile(configEntity.GetJsonPath())
if err != nil {
return nil, fmt.Errorf("unable to compile jsonpath for entity %s: %s", configEntity.Name, configEntity.GetJsonPath())
}
t.compiledJsonPath[configEntity.GetJsonPath()] = c
}
}
vals, err := getValuesFromJSONPayload(nodesBody, configEntity, t.compiledJsonPath[configEntity.GetJsonPath()], t.compiledUdf[configEntity.GetUdf()])
if err != nil {
return nil, fmt.Errorf("unable to extract entity %s: %v", configEntity.Name, err)
}
if len(entities) == 0 {
for _, val := range vals {
entities = append(entities, feast.Row{
configEntity.Name: val,
})
}
} else {
newEntities := []feast.Row{}
for _, entity := range entities {
for _, val := range vals {
newFeastRow := feast.Row{}
for k, v := range entity {
newFeastRow[k] = v
}
newFeastRow[configEntity.Name] = val
newEntities = append(newEntities, newFeastRow)
}
}
entities = newEntities
}
}
return entities, nil
}
func (t *Transformer) buildFeastFeatures(ctx context.Context, feastResponse *feast.OnlineFeaturesResponse, config *transformer.FeatureTable) (*FeastFeature, error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "feast.buildFeastFeatures")
defer span.Finish()
var columns []string
for _, entity := range config.Entities {
columns = append(columns, entity.Name)
}
for _, feature := range config.Features {
columns = append(columns, feature.Name)
}
var data [][]interface{}
status := feastResponse.Statuses()
for i, feastRow := range feastResponse.Rows() | {
var row []interface{}
for _, column := range columns {
featureStatus := status[i][column]
switch featureStatus {
case serving.GetOnlineFeaturesResponse_PRESENT:
rawValue := feastRow[column]
featVal, err := getFeatureValue(rawValue)
if err != nil {
return nil, err
}
row = append(row, featVal)
// put behind feature toggle since it will generate high cardinality metrics
if t.options.ValueMonitoringEnabled {
v, err := getFloatValue(featVal)
if err != nil {
continue
}
feastFeatureSummary.WithLabelValues(column).Observe(v) | conditional_block | |
start.py | , anchor_ratios)
# for inference , the batch size is 1, the model output shape is [1, N, 4],
# so we expand dim for anchors to [1, anchor_num, 4]
anchors_exp = np.expand_dims(anchors, axis=0)
id2class = {0: 'Mask', 1: 'NoMask'}
# 人脸对齐方法
def inference(image,
conf_thresh=0.5,
iou_thresh=0.4,
target_shape=(160, 160),
draw_result=True,
show_result=True
):
'''
Main function of detection inference
:param image: 3D numpy array of image
:param conf_thresh: the min threshold of classification probabity.
:param iou_thresh: the IOU threshold of NMS
:param target_shape: the model input size.
:param draw_result: whether to daw bounding box to the image.
:param show_result: whether to display the image.
:return:
'''
output_info = []
height, width, _ = image.shape
image_resized = cv2.resize(image, target_shape)
image_np = image_resized / 255.0 # 归一化到0~1
image_exp = np.expand_dims(image_np, axis=0)
# 输出回归框和人脸得分
y_bboxes_output, y_cls_output = tf_inference(sess, graph, image_exp)
# remove the batch dimension, for batch is always 1 for inference.
y_bboxes = decode_bbox(anchors_exp, y_bboxes_output)[0]
y_cls = y_cls_output[0]
# To speed up, do single class NMS, not multiple classes NMS.
bbox_max_scores = np.max(y_cls, axis=1)
bbox_max_score_classes = np.argmax(y_cls, axis=1)
# 对单个人脸进行非极大抑制
keep_idxs = single_class_non_max_suppression(y_bboxes,
bbox_max_scores,
conf_thresh=conf_thresh,
iou_thresh=iou_thresh,
)
for idx in keep_idxs:
conf = float(bbox_max_scores[idx])
class_id = bbox_max_score_classes[idx]
bbox = y_bboxes[idx]
# clip the coordinate, avoid the value exceed the image boundary.
xmin = max(0, int(bbox[0] * width))
ymin = max(0, int(bbox[1] * height))
xmax = min(int(bbox[2] * width), width)
ymax = min(int(bbox[3] * height), height)
if draw_result:
if class_id == 0:
color = (0, 255, 0)
else:
color = (255, 0, 0)
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 2)
cv2.putText(image, "%s: %.2f" % (id2class[class_id], conf), (xmin + 2, ymin - 2),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, color)
output_info.append([class_id, conf, xmin, ymin, xmax, ymax])
print(output_info)
if show_result:
Image.fromarray(image).show()
return output_info
def main():
global colours, img_size
args = parse_args()
videos_dir = args.video | for filename in os.listdir(videos_dir):
logger.info('All files:{}'.format(filename))
for filename in os.listdir(videos_dir):
suffix = filename.split('.')[1]
if suffix != 'mp4' and suffix != 'avi': # you can specify more video formats if you need
continue
video_name = os.path.join(videos_dir, filename)
directoryname = os.path.join(output_path, filename.split('.')[0])
logger.info('Video_name:{}'.format(video_name))
cam = cv2.VideoCapture(video_name)
c = 0
while True:
final_faces = []
addtional_attribute_list = []
ret, frame = cam.read()
if not ret:
logger.warning("ret false")
break
if frame is None:
logger.warning("frame drop")
break
frame = cv2.resize(frame, (0, 0), fx=scale_rate, fy=scale_rate)
r_g_b_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# 间隔取帧,默认每帧都取
# if c % detect_interval == 0:
# img_size = np.asarray(frame.shape)[0:2]
# faces = inference(r_g_b_frame, show_result=True, target_shape=(260, 260))
with tf.Graph().as_default():
with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True),
log_device_placement=False)) as sess:
pnet, rnet, onet = detect_face.create_mtcnn(sess, os.path.join(project_dir, "align"))
minsize = 40 # minimum size of face for mtcnn to detect
threshold = [0.6, 0.7, 0.7] # three steps's threshold
factor = 0.709 # scale factor
for filename in os.listdir(videos_dir):
logger.info('All files:{}'.format(filename))
for filename in os.listdir(videos_dir):
suffix = filename.split('.')[1]
if suffix != 'mp4' and suffix != 'avi': # you can specify more video formats if you need
continue
video_name = os.path.join(videos_dir, filename)
directoryname = os.path.join(output_path, filename.split('.')[0])
logger.info('Video_name:{}'.format(video_name))
cam = cv2.VideoCapture(video_name)
c = 0
while True:
final_faces = []
addtional_attribute_list = []
ret, frame = cam.read()
if not ret:
logger.warning("ret false")
break
if frame is None:
logger.warning("frame drop")
break
frame = cv2.resize(frame, (0, 0), fx=scale_rate, fy=scale_rate)
r_g_b_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if c % detect_interval == 0:
img_size = np.asarray(frame.shape)[0:2]
mtcnn_starttime = time()
faces, points = detect_face.detect_face(r_g_b_frame, minsize, pnet, rnet, onet, threshold,
factor)
logger.info("MTCNN detect face cost time : {} s".format(
round(time() - mtcnn_starttime, 3))) # mtcnn detect ,slow
print('testttttt')
print(type(faces))
face_sums = faces.shape[0]
if face_sums > 0:
face_list = []
for i, item in enumerate(faces):
print(item)
score = round(faces[i, 4], 6)
if score > face_score_threshold:
det = np.squeeze(faces[i, 0:4])
# face rectangle
det[0] = np.maximum(det[0] - margin, 0)
det[1] = np.maximum(det[1] - margin, 0)
det[2] = np.minimum(det[2] + margin, img_size[1])
det[3] = np.minimum(det[3] + margin, img_size[0])
face_list.append(item)
# face cropped
bb = np.array(det, dtype=np.int32)
# use 5 face landmarks to judge the face is front or side
squeeze_points = np.squeeze(points[:, i])
tolist = squeeze_points.tolist()
facial_landmarks = []
for j in range(5):
item = [tolist[j], tolist[(j + 5)]]
facial_landmarks.append(item)
if args.face_landmarks:
for (x, y) in facial_landmarks:
cv2.circle(frame, (int(x), int(y)), 3, (0, 255, 0), -1)
cropped = frame[bb[1]:bb[3], bb[0]:bb[2], :].copy()
dist_rate, high_ratio_variance, width_rate = judge_side_face(
np.array(facial_landmarks))
# face addtional attribute(index 0:face score; index 1:0 represents front face and 1 for side face )
item_list = [cropped, score | s_dir
output_path = args.output_path
no_display = args.no_display
detect_interval = args.detect_interval # 间隔一帧检测一次
margin = args.margin # 脸边距(默认10)
scale_rate = args.scale_rate # 检测图像的尺寸设置
show_rate = args.show_rate # 展示图像的尺寸设置
face_score_threshold = args.face_score_threshold # 人脸判别阈值
mkdir(output_path)
# for display
if not no_display:
colours = np.random.rand(32, 3)
# 初始化追踪器
tracker = Sort() # create instance of the SORT tracker
logger.info('Start track and extract......')
# 影像处理 | identifier_body |
start.py | _sizes, anchor_ratios)
# for inference , the batch size is 1, the model output shape is [1, N, 4],
# so we expand dim for anchors to [1, anchor_num, 4]
anchors_exp = np.expand_dims(anchors, axis=0)
id2class = {0: 'Mask', 1: 'NoMask'}
# 人脸对齐方法
def inference(image,
conf_thresh=0.5,
iou_thresh=0.4,
target_shape=(160, 160),
draw_result=True, | :param image: 3D numpy array of image
:param conf_thresh: the min threshold of classification probabity.
:param iou_thresh: the IOU threshold of NMS
:param target_shape: the model input size.
:param draw_result: whether to daw bounding box to the image.
:param show_result: whether to display the image.
:return:
'''
output_info = []
height, width, _ = image.shape
image_resized = cv2.resize(image, target_shape)
image_np = image_resized / 255.0 # 归一化到0~1
image_exp = np.expand_dims(image_np, axis=0)
# 输出回归框和人脸得分
y_bboxes_output, y_cls_output = tf_inference(sess, graph, image_exp)
# remove the batch dimension, for batch is always 1 for inference.
y_bboxes = decode_bbox(anchors_exp, y_bboxes_output)[0]
y_cls = y_cls_output[0]
# To speed up, do single class NMS, not multiple classes NMS.
bbox_max_scores = np.max(y_cls, axis=1)
bbox_max_score_classes = np.argmax(y_cls, axis=1)
# 对单个人脸进行非极大抑制
keep_idxs = single_class_non_max_suppression(y_bboxes,
bbox_max_scores,
conf_thresh=conf_thresh,
iou_thresh=iou_thresh,
)
for idx in keep_idxs:
conf = float(bbox_max_scores[idx])
class_id = bbox_max_score_classes[idx]
bbox = y_bboxes[idx]
# clip the coordinate, avoid the value exceed the image boundary.
xmin = max(0, int(bbox[0] * width))
ymin = max(0, int(bbox[1] * height))
xmax = min(int(bbox[2] * width), width)
ymax = min(int(bbox[3] * height), height)
if draw_result:
if class_id == 0:
color = (0, 255, 0)
else:
color = (255, 0, 0)
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 2)
cv2.putText(image, "%s: %.2f" % (id2class[class_id], conf), (xmin + 2, ymin - 2),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, color)
output_info.append([class_id, conf, xmin, ymin, xmax, ymax])
print(output_info)
if show_result:
Image.fromarray(image).show()
return output_info
def main():
global colours, img_size
args = parse_args()
videos_dir = args.videos_dir
output_path = args.output_path
no_display = args.no_display
detect_interval = args.detect_interval # 间隔一帧检测一次
margin = args.margin # 脸边距(默认10)
scale_rate = args.scale_rate # 检测图像的尺寸设置
show_rate = args.show_rate # 展示图像的尺寸设置
face_score_threshold = args.face_score_threshold # 人脸判别阈值
mkdir(output_path)
# for display
if not no_display:
colours = np.random.rand(32, 3)
# 初始化追踪器
tracker = Sort() # create instance of the SORT tracker
logger.info('Start track and extract......')
# 影像处理
for filename in os.listdir(videos_dir):
logger.info('All files:{}'.format(filename))
for filename in os.listdir(videos_dir):
suffix = filename.split('.')[1]
if suffix != 'mp4' and suffix != 'avi': # you can specify more video formats if you need
continue
video_name = os.path.join(videos_dir, filename)
directoryname = os.path.join(output_path, filename.split('.')[0])
logger.info('Video_name:{}'.format(video_name))
cam = cv2.VideoCapture(video_name)
c = 0
while True:
final_faces = []
addtional_attribute_list = []
ret, frame = cam.read()
if not ret:
logger.warning("ret false")
break
if frame is None:
logger.warning("frame drop")
break
frame = cv2.resize(frame, (0, 0), fx=scale_rate, fy=scale_rate)
r_g_b_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# 间隔取帧,默认每帧都取
# if c % detect_interval == 0:
# img_size = np.asarray(frame.shape)[0:2]
# faces = inference(r_g_b_frame, show_result=True, target_shape=(260, 260))
with tf.Graph().as_default():
with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True),
log_device_placement=False)) as sess:
pnet, rnet, onet = detect_face.create_mtcnn(sess, os.path.join(project_dir, "align"))
minsize = 40 # minimum size of face for mtcnn to detect
threshold = [0.6, 0.7, 0.7] # three steps's threshold
factor = 0.709 # scale factor
for filename in os.listdir(videos_dir):
logger.info('All files:{}'.format(filename))
for filename in os.listdir(videos_dir):
suffix = filename.split('.')[1]
if suffix != 'mp4' and suffix != 'avi': # you can specify more video formats if you need
continue
video_name = os.path.join(videos_dir, filename)
directoryname = os.path.join(output_path, filename.split('.')[0])
logger.info('Video_name:{}'.format(video_name))
cam = cv2.VideoCapture(video_name)
c = 0
while True:
final_faces = []
addtional_attribute_list = []
ret, frame = cam.read()
if not ret:
logger.warning("ret false")
break
if frame is None:
logger.warning("frame drop")
break
frame = cv2.resize(frame, (0, 0), fx=scale_rate, fy=scale_rate)
r_g_b_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if c % detect_interval == 0:
img_size = np.asarray(frame.shape)[0:2]
mtcnn_starttime = time()
faces, points = detect_face.detect_face(r_g_b_frame, minsize, pnet, rnet, onet, threshold,
factor)
logger.info("MTCNN detect face cost time : {} s".format(
round(time() - mtcnn_starttime, 3))) # mtcnn detect ,slow
print('testttttt')
print(type(faces))
face_sums = faces.shape[0]
if face_sums > 0:
face_list = []
for i, item in enumerate(faces):
print(item)
score = round(faces[i, 4], 6)
if score > face_score_threshold:
det = np.squeeze(faces[i, 0:4])
# face rectangle
det[0] = np.maximum(det[0] - margin, 0)
det[1] = np.maximum(det[1] - margin, 0)
det[2] = np.minimum(det[2] + margin, img_size[1])
det[3] = np.minimum(det[3] + margin, img_size[0])
face_list.append(item)
# face cropped
bb = np.array(det, dtype=np.int32)
# use 5 face landmarks to judge the face is front or side
squeeze_points = np.squeeze(points[:, i])
tolist = squeeze_points.tolist()
facial_landmarks = []
for j in range(5):
item = [tolist[j], tolist[(j + 5)]]
facial_landmarks.append(item)
if args.face_landmarks:
for (x, y) in facial_landmarks:
cv2.circle(frame, (int(x), int(y)), 3, (0, 255, 0), -1)
cropped = frame[bb[1]:bb[3], bb[0]:bb[2], :].copy()
dist_rate, high_ratio_variance, width_rate = judge_side_face(
np.array(facial_landmarks))
# face addtional attribute(index 0:face score; index 1:0 represents front face and 1 for side face )
item_list = [cropped, | show_result=True
):
'''
Main function of detection inference | random_line_split |
start.py | anchor_ratios)
# for inference , the batch size is 1, the model output shape is [1, N, 4],
# so we expand dim for anchors to [1, anchor_num, 4]
anchors_exp = np.expand_dims(anchors, axis=0)
id2class = {0: 'Mask', 1: 'NoMask'}
# 人脸对齐方法
def inference(image,
| nf_thresh=0.5,
iou_thresh=0.4,
target_shape=(160, 160),
draw_result=True,
show_result=True
):
'''
Main function of detection inference
:param image: 3D numpy array of image
:param conf_thresh: the min threshold of classification probabity.
:param iou_thresh: the IOU threshold of NMS
:param target_shape: the model input size.
:param draw_result: whether to daw bounding box to the image.
:param show_result: whether to display the image.
:return:
'''
output_info = []
height, width, _ = image.shape
image_resized = cv2.resize(image, target_shape)
image_np = image_resized / 255.0 # 归一化到0~1
image_exp = np.expand_dims(image_np, axis=0)
# 输出回归框和人脸得分
y_bboxes_output, y_cls_output = tf_inference(sess, graph, image_exp)
# remove the batch dimension, for batch is always 1 for inference.
y_bboxes = decode_bbox(anchors_exp, y_bboxes_output)[0]
y_cls = y_cls_output[0]
# To speed up, do single class NMS, not multiple classes NMS.
bbox_max_scores = np.max(y_cls, axis=1)
bbox_max_score_classes = np.argmax(y_cls, axis=1)
# 对单个人脸进行非极大抑制
keep_idxs = single_class_non_max_suppression(y_bboxes,
bbox_max_scores,
conf_thresh=conf_thresh,
iou_thresh=iou_thresh,
)
for idx in keep_idxs:
conf = float(bbox_max_scores[idx])
class_id = bbox_max_score_classes[idx]
bbox = y_bboxes[idx]
# clip the coordinate, avoid the value exceed the image boundary.
xmin = max(0, int(bbox[0] * width))
ymin = max(0, int(bbox[1] * height))
xmax = min(int(bbox[2] * width), width)
ymax = min(int(bbox[3] * height), height)
if draw_result:
if class_id == 0:
color = (0, 255, 0)
else:
color = (255, 0, 0)
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 2)
cv2.putText(image, "%s: %.2f" % (id2class[class_id], conf), (xmin + 2, ymin - 2),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, color)
output_info.append([class_id, conf, xmin, ymin, xmax, ymax])
print(output_info)
if show_result:
Image.fromarray(image).show()
return output_info
def main():
global colours, img_size
args = parse_args()
videos_dir = args.videos_dir
output_path = args.output_path
no_display = args.no_display
detect_interval = args.detect_interval # 间隔一帧检测一次
margin = args.margin # 脸边距(默认10)
scale_rate = args.scale_rate # 检测图像的尺寸设置
show_rate = args.show_rate # 展示图像的尺寸设置
face_score_threshold = args.face_score_threshold # 人脸判别阈值
mkdir(output_path)
# for display
if not no_display:
colours = np.random.rand(32, 3)
# 初始化追踪器
tracker = Sort() # create instance of the SORT tracker
logger.info('Start track and extract......')
# 影像处理
for filename in os.listdir(videos_dir):
logger.info('All files:{}'.format(filename))
for filename in os.listdir(videos_dir):
suffix = filename.split('.')[1]
if suffix != 'mp4' and suffix != 'avi': # you can specify more video formats if you need
continue
video_name = os.path.join(videos_dir, filename)
directoryname = os.path.join(output_path, filename.split('.')[0])
logger.info('Video_name:{}'.format(video_name))
cam = cv2.VideoCapture(video_name)
c = 0
while True:
final_faces = []
addtional_attribute_list = []
ret, frame = cam.read()
if not ret:
logger.warning("ret false")
break
if frame is None:
logger.warning("frame drop")
break
frame = cv2.resize(frame, (0, 0), fx=scale_rate, fy=scale_rate)
r_g_b_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# 间隔取帧,默认每帧都取
# if c % detect_interval == 0:
# img_size = np.asarray(frame.shape)[0:2]
# faces = inference(r_g_b_frame, show_result=True, target_shape=(260, 260))
with tf.Graph().as_default():
with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True),
log_device_placement=False)) as sess:
pnet, rnet, onet = detect_face.create_mtcnn(sess, os.path.join(project_dir, "align"))
minsize = 40 # minimum size of face for mtcnn to detect
threshold = [0.6, 0.7, 0.7] # three steps's threshold
factor = 0.709 # scale factor
for filename in os.listdir(videos_dir):
logger.info('All files:{}'.format(filename))
for filename in os.listdir(videos_dir):
suffix = filename.split('.')[1]
if suffix != 'mp4' and suffix != 'avi': # you can specify more video formats if you need
continue
video_name = os.path.join(videos_dir, filename)
directoryname = os.path.join(output_path, filename.split('.')[0])
logger.info('Video_name:{}'.format(video_name))
cam = cv2.VideoCapture(video_name)
c = 0
while True:
final_faces = []
addtional_attribute_list = []
ret, frame = cam.read()
if not ret:
logger.warning("ret false")
break
if frame is None:
logger.warning("frame drop")
break
frame = cv2.resize(frame, (0, 0), fx=scale_rate, fy=scale_rate)
r_g_b_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if c % detect_interval == 0:
img_size = np.asarray(frame.shape)[0:2]
mtcnn_starttime = time()
faces, points = detect_face.detect_face(r_g_b_frame, minsize, pnet, rnet, onet, threshold,
factor)
logger.info("MTCNN detect face cost time : {} s".format(
round(time() - mtcnn_starttime, 3))) # mtcnn detect ,slow
print('testttttt')
print(type(faces))
face_sums = faces.shape[0]
if face_sums > 0:
face_list = []
for i, item in enumerate(faces):
print(item)
score = round(faces[i, 4], 6)
if score > face_score_threshold:
det = np.squeeze(faces[i, 0:4])
# face rectangle
det[0] = np.maximum(det[0] - margin, 0)
det[1] = np.maximum(det[1] - margin, 0)
det[2] = np.minimum(det[2] + margin, img_size[1])
det[3] = np.minimum(det[3] + margin, img_size[0])
face_list.append(item)
# face cropped
bb = np.array(det, dtype=np.int32)
# use 5 face landmarks to judge the face is front or side
squeeze_points = np.squeeze(points[:, i])
tolist = squeeze_points.tolist()
facial_landmarks = []
for j in range(5):
item = [tolist[j], tolist[(j + 5)]]
facial_landmarks.append(item)
if args.face_landmarks:
for (x, y) in facial_landmarks:
cv2.circle(frame, (int(x), int(y)), 3, (0, 255, 0), -1)
cropped = frame[bb[1]:bb[3], bb[0]:bb[2], :].copy()
dist_rate, high_ratio_variance, width_rate = judge_side_face(
np.array(facial_landmarks))
# face addtional attribute(index 0:face score; index 1:0 represents front face and 1 for side face )
item_list = [cropped | co | identifier_name |
start.py | anchor_ratios)
# for inference , the batch size is 1, the model output shape is [1, N, 4],
# so we expand dim for anchors to [1, anchor_num, 4]
anchors_exp = np.expand_dims(anchors, axis=0)
id2class = {0: 'Mask', 1: 'NoMask'}
# 人脸对齐方法
def inference(image,
conf_thresh=0.5,
iou_thresh=0.4,
target_shape=(160, 160),
draw_result=True,
show_result=True
):
'''
Main function of detection inference
:param image: 3D numpy array of image
:param conf_thresh: the min threshold of classification probabity.
:param iou_thresh: the IOU threshold of NMS
:param target_shape: the model input size.
:param draw_result: whether to daw bounding box to the image.
:param show_result: whether to display the image.
:return:
'''
output_info = []
height, width, _ = image.shape
image_resized = cv2.resize(image, target_shape)
image_np = image_resized / 255.0 # 归一化到0~1
image_exp = np.expand_dims(image_np, axis=0)
# 输出回归框和人脸得分
y_bboxes_output, y_cls_output = tf_inference(sess, graph, image_exp)
# remove the batch dimension, for batch is always 1 for inference.
y_bboxes = decode_bbox(anchors_exp, y_bboxes_output)[0]
y_cls = y_cls_output[0]
# To speed up, do single class NMS, not multiple classes NMS.
bbox_max_scores = np.max(y_cls, axis=1)
bbox_max_score_classes = np.argmax(y_cls, axis=1)
# 对单个人脸进行非极大抑制
keep_idxs = single_class_non_max_suppression(y_bboxes,
bbox_max_scores,
conf_thresh=conf_thresh,
iou_thresh=iou_thresh,
)
for idx in keep_idxs:
conf = float(bbox_max_scores[idx])
class_id = bbox_max_score_classes[idx]
bbox = y_bboxes[idx]
# clip the coordinate, avoid the value exceed the image boundary.
xmin = max(0, int(bbox[0] * width))
ymin = max(0, int(bbox[1] * height))
xmax = min(int(bbox[2] * width), width)
ymax = min(int(bbox[3] * height), height)
if draw_result:
if class_id == 0:
color = (0, 255, 0)
else:
color = (255, 0, 0)
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 2)
cv2.putText(image, "%s: %.2f" % (id2class[class_id], conf), (xmin + 2, ymin - 2),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, color)
output_info.append([class_id, conf, xmin, ymin, xmax, ymax])
print(output_info)
if show_result:
Image.fromarray(image).show()
return output_info
def main():
global colours, img_size
args = parse_args()
videos_dir = args.videos_dir
output_path = args.output_path
no_display = args.no_display
detect_interval = args.detect_interval # 间隔一帧检测一次
margin = args.margin # 脸边距(默认10)
scale_rate = args.scale_rate # 检测图像的尺寸设置
show_rate = args.show_rate # 展示图像的尺寸设置
face_score_threshold = args.face_score_threshold # 人脸判别阈值
mkdir(output_path)
# for display
if not no_display:
colours = np.random.rand(32, 3)
# 初始化追踪器
tracker = Sort() # create instance of the SORT tracker
logger.info('Start track and extract......')
# 影像处理
for filename in os.listdir(videos_dir):
logger.info('All files:{}'.format(filename))
for filename in os.listdir(videos_dir):
suffix = filename.split('.')[1]
if suffix != 'mp4' and suffix != 'avi': # you can specify more video formats if you need
continue
video_name = os.path.join(videos_dir, filename)
directoryname = os.path.join(output_path, filename.split('.')[0])
logger.info('Video_name:{}'.format(video_name))
cam = cv2.VideoCapture(video_name)
c = 0
while True:
final_faces = []
addtional_attribute_list = []
ret, frame = cam.read()
if not ret:
logger.warning("ret false")
break
if frame is None:
logger.warning("frame drop")
break
frame = cv2.resize(frame, (0, 0), fx=scale_rate, fy=scale_rate)
r_g_b_frame = cv2.cvtColor(frame, | # if c % detect_interval == 0:
# img_size = np.asarray(frame.shape)[0:2]
# faces = inference(r_g_b_frame, show_result=True, target_shape=(260, 260))
with tf.Graph().as_default():
with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True),
log_device_placement=False)) as sess:
pnet, rnet, onet = detect_face.create_mtcnn(sess, os.path.join(project_dir, "align"))
minsize = 40 # minimum size of face for mtcnn to detect
threshold = [0.6, 0.7, 0.7] # three steps's threshold
factor = 0.709 # scale factor
for filename in os.listdir(videos_dir):
logger.info('All files:{}'.format(filename))
for filename in os.listdir(videos_dir):
suffix = filename.split('.')[1]
if suffix != 'mp4' and suffix != 'avi': # you can specify more video formats if you need
continue
video_name = os.path.join(videos_dir, filename)
directoryname = os.path.join(output_path, filename.split('.')[0])
logger.info('Video_name:{}'.format(video_name))
cam = cv2.VideoCapture(video_name)
c = 0
while True:
final_faces = []
addtional_attribute_list = []
ret, frame = cam.read()
if not ret:
logger.warning("ret false")
break
if frame is None:
logger.warning("frame drop")
break
frame = cv2.resize(frame, (0, 0), fx=scale_rate, fy=scale_rate)
r_g_b_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if c % detect_interval == 0:
img_size = np.asarray(frame.shape)[0:2]
mtcnn_starttime = time()
faces, points = detect_face.detect_face(r_g_b_frame, minsize, pnet, rnet, onet, threshold,
factor)
logger.info("MTCNN detect face cost time : {} s".format(
round(time() - mtcnn_starttime, 3))) # mtcnn detect ,slow
print('testttttt')
print(type(faces))
face_sums = faces.shape[0]
if face_sums > 0:
face_list = []
for i, item in enumerate(faces):
print(item)
score = round(faces[i, 4], 6)
if score > face_score_threshold:
det = np.squeeze(faces[i, 0:4])
# face rectangle
det[0] = np.maximum(det[0] - margin, 0)
det[1] = np.maximum(det[1] - margin, 0)
det[2] = np.minimum(det[2] + margin, img_size[1])
det[3] = np.minimum(det[3] + margin, img_size[0])
face_list.append(item)
# face cropped
bb = np.array(det, dtype=np.int32)
# use 5 face landmarks to judge the face is front or side
squeeze_points = np.squeeze(points[:, i])
tolist = squeeze_points.tolist()
facial_landmarks = []
for j in range(5):
item = [tolist[j], tolist[(j + 5)]]
facial_landmarks.append(item)
if args.face_landmarks:
for (x, y) in facial_landmarks:
cv2.circle(frame, (int(x), int(y)), 3, (0, 255, 0), -1)
cropped = frame[bb[1]:bb[3], bb[0]:bb[2], :].copy()
dist_rate, high_ratio_variance, width_rate = judge_side_face(
np.array(facial_landmarks))
# face addtional attribute(index 0:face score; index 1:0 represents front face and 1 for side face )
item_list = [cropped | cv2.COLOR_BGR2RGB)
# 间隔取帧,默认每帧都取
| conditional_block |
h5t.go | _t dtype_id, size_tsize )
func (t *DataType) SetSize(sz int) error {
err := C.H5Tset_size(t.id, C.size_t(sz))
return togo_err(err)
}
// ---------------------------------------------------------------------------
// array data type
type ArrayType struct {
DataType
}
func new_array_type(id C.hid_t) *ArrayType {
t := &ArrayType{DataType{id: id}}
//runtime.SetFinalizer(t, (*DataType).h5t_finalizer)
return t
}
func NewArrayType(base_type *DataType, dims []int) (*ArrayType, error) {
ndims := C.uint(len(dims))
c_dims := (*C.hsize_t)(unsafe.Pointer(&dims[0]))
hid := C.H5Tarray_create2(base_type.id, ndims, c_dims)
err := togo_err(C.herr_t(int(hid)))
if err != nil {
return nil, err
}
t := new_array_type(hid)
return t, err
}
// Returns the rank of an array datatype.
// int H5Tget_array_ndims( hid_t adtype_id )
func (t *ArrayType) NDims() int {
return int(C.H5Tget_array_ndims(t.id))
}
// Retrieves sizes of array dimensions.
// int H5Tget_array_dims2( hid_t adtype_id, hsize_t dims[] )
func (t *ArrayType) ArrayDims() []int {
rank := t.NDims()
dims := make([]int, rank)
// fixme: int/hsize_t size!
c_dims := (*C.hsize_t)(unsafe.Pointer(&dims[0]))
c_rank := int(C.H5Tget_array_dims2(t.id, c_dims))
if c_rank == rank {
return dims
}
return nil
}
// ---------------------------------------------------------------------------
// variable length array data type
type VarLenType struct {
DataType
}
func NewVarLenType(base_type *DataType) (*VarLenType, error) {
hid := C.H5Tvlen_create(base_type.id)
err := togo_err(C.herr_t(int(hid)))
if err != nil {
return nil, err
}
dt := new_vltype(hid)
return dt, err
}
func new_vltype(id C.hid_t) *VarLenType {
t := &VarLenType{DataType{id: id}}
//runtime.SetFinalizer(t, (*DataType).h5t_finalizer)
return t
}
// Determines whether datatype is a variable-length string.
// htri_t H5Tis_variable_str( hid_t dtype_id )
func (vl *VarLenType) IsVariableStr() bool {
o := int(C.H5Tis_variable_str(vl.id))
if o > 0 {
return true
}
return false
}
// ---------------------------------------------------------------------------
// compound data type
type CompType struct {
DataType
}
// Retrieves the number of elements in a compound or enumeration datatype.
// int H5Tget_nmembers( hid_t dtype_id )
func (t *CompType) NMembers() int {
return int(C.H5Tget_nmembers(t.id))
}
// Returns datatype class of compound datatype member.
// H5T_class_t H5Tget_member_class( hid_t cdtype_id, unsigned member_no )
func (t *CompType) MemberClass(mbr_idx int) TypeClass {
return TypeClass(C.H5Tget_member_class(t.id, C.uint(mbr_idx)))
}
// Retrieves the name of a compound or enumeration datatype member.
// char * H5Tget_member_name( hid_t dtype_id, unsigned field_idx )
func (t *CompType) MemberName(mbr_idx int) string {
c_name := C.H5Tget_member_name(t.id, C.uint(mbr_idx))
return C.GoString(c_name)
}
// Retrieves the index of a compound or enumeration datatype member.
// int H5Tget_member_index( hid_t dtype_id, const char * field_name )
func (t *CompType) MemberIndex(name string) int {
c_name := C.CString(name)
defer C.free(unsafe.Pointer(c_name))
return int(C.H5Tget_member_index(t.id, c_name))
}
// Retrieves the offset of a field of a compound datatype.
// size_t H5Tget_member_offset( hid_t dtype_id, unsigned memb_no )
func (t *CompType) MemberOffset(mbr_idx int) int {
return int(C.H5Tget_member_offset(t.id, C.uint(mbr_idx)))
}
// Returns the datatype of the specified member.
// hid_t H5Tget_member_type( hid_t dtype_id, unsigned field_idx )
func (t *CompType) MemberType(mbr_idx int) (*DataType, error) {
hid := C.H5Tget_member_type(t.id, C.uint(mbr_idx))
err := togo_err(C.herr_t(int(hid)))
if err != nil {
return nil, err
}
dt := new_dtype(hid, t.rt.Field(mbr_idx).Type)
return dt, nil
}
// Adds a new member to a compound datatype.
// herr_t H5Tinsert( hid_t dtype_id, const char * name, size_t offset, hid_t field_id )
func (t *CompType) Insert(name string, offset int, field *DataType) error {
c_name := C.CString(name)
defer C.free(unsafe.Pointer(c_name))
//fmt.Printf("inserting [%s] at offset:%d (id=%d)...\n", name, offset, field.id)
err := C.H5Tinsert(t.id, c_name, C.size_t(offset), field.id)
return togo_err(err)
}
// Recursively removes padding from within a compound datatype.
// herr_t H5Tpack( hid_t dtype_id )
func (t *CompType) Pack() error {
err := C.H5Tpack(t.id)
return togo_err(err)
}
// --- opaque type ---
type OpaqueDataType struct {
DataType
}
// Tags an opaque datatype.
// herr_t H5Tset_tag( hid_t dtype_id const char *tag )
func (t *OpaqueDataType) SetTag(tag string) error {
c_tag := C.CString(tag)
defer C.free(unsafe.Pointer(c_tag))
err := C.H5Tset_tag(t.id, c_tag)
return togo_err(err)
}
// Gets the tag associated with an opaque datatype.
// char *H5Tget_tag( hid_t dtype_id )
func (t *OpaqueDataType) Tag() string {
c_name := C.H5Tget_tag(t.id)
if c_name != nil {
return C.GoString(c_name)
}
return ""
}
// -----------------------------------------
// create a data-type from a golang value
func NewDataTypeFromValue(v interface{}) *DataType {
t := reflect.TypeOf(v)
return new_dataTypeFromType(t)
}
func new_dataTypeFromType(t reflect.Type) *DataType {
var dt *DataType = nil
switch t.Kind() {
case reflect.Int:
dt = T_NATIVE_INT // FIXME: .Copy() instead ?
case reflect.Int8:
dt = T_NATIVE_INT8
case reflect.Int16:
dt = T_NATIVE_INT16
case reflect.Int32:
dt = T_NATIVE_INT32
case reflect.Int64:
dt = T_NATIVE_INT64
case reflect.Uint:
dt = T_NATIVE_UINT // FIXME: .Copy() instead ?
case reflect.Uint8:
dt = T_NATIVE_UINT8
case reflect.Uint16:
dt = T_NATIVE_UINT16
case reflect.Uint32:
dt = T_NATIVE_UINT32
case reflect.Uint64:
dt = T_NATIVE_UINT64
case reflect.Float32:
dt = T_NATIVE_FLOAT
case reflect.Float64:
dt = T_NATIVE_DOUBLE
case reflect.String:
dt = T_GO_STRING
//dt = T_C_S1
case reflect.Array:
elem_type := new_dataTypeFromType(t.Elem())
n := t.Len()
dims := []int{n}
adt, err := NewArrayType(elem_type, dims)
if err != nil {
panic(err)
}
dt, err = adt.Copy()
if err != nil {
panic(err)
}
case reflect.Slice:
elem_type := new_dataTypeFromType(t.Elem())
vlen_dt, err := NewVarLenType(elem_type)
if err != nil {
panic(err)
}
dt, err = vlen_dt.Copy()
if err != nil {
panic(err)
}
case reflect.Struct:
sz := int(t.Size())
hdf_dt, err := CreateDataType(T_COMPOUND, sz)
if err != nil {
panic(err)
}
cdt := &CompType{*hdf_dt}
n := t.NumField()
for i := 0; i < n; i++ | {
f := t.Field(i)
var field_dt *DataType = nil
field_dt = new_dataTypeFromType(f.Type)
offset := int(f.Offset + 0)
if field_dt == nil {
panic(fmt.Sprintf("pb with field [%d-%s]", i, f.Name))
}
field_name := string(f.Tag)
if len(field_name) == 0 {
field_name = f.Name
}
err = cdt.Insert(field_name, offset, field_dt)
if err != nil {
panic(fmt.Sprintf("pb with field [%d-%s]: %s", i, f.Name, err))
}
} | conditional_block | |
h5t.go | .hid_t, rt reflect.Type) *DataType {
t := &DataType{id: id, rt: rt}
//runtime.SetFinalizer(t, (*DataType).h5t_finalizer)
return t
}
// Creates a new datatype.
// hid_t H5Tcreate( H5T_class_t class, size_tsize )
func CreateDataType(class TypeClass, size int) (t *DataType, err error) {
t = nil
err = nil
hid := C.H5Tcreate(C.H5T_class_t(class), C.size_t(size))
err = togo_err(C.herr_t(int(hid)))
if err != nil {
return
}
t = new_dtype(hid, _type_cls_to_go_type[class])
return
}
func (t *DataType) h5t_finalizer() {
err := t.Close()
if err != nil {
panic(fmt.Sprintf("error closing datatype: %s", err))
}
}
// Releases a datatype.
// herr_t H5Tclose( hid_t dtype_id )
func (t *DataType) Close() error {
if t.id > 0 {
fmt.Printf("--- closing dtype [%d]...\n", t.id)
err := togo_err(C.H5Tclose(t.id))
t.id = 0
return err
}
return nil
}
// Commits a transient datatype, linking it into the file and creating a new named datatype.
// herr_t H5Tcommit( hid_t loc_id, const char *name, hid_t dtype_id, hid_t lcpl_id, hid_t tcpl_id, hid_t tapl_id )
//func (t *DataType) Commit()
// Determines whether a datatype is a named type or a transient type.
// htri_tH5Tcommitted( hid_t dtype_id )
func (t *DataType) Committed() bool {
o := int(C.H5Tcommitted(t.id))
if o > 0 {
return true
}
return false
}
// Copies an existing datatype.
// hid_t H5Tcopy( hid_t dtype_id )
func (t *DataType) Copy() (*DataType, error) {
hid := C.H5Tcopy(t.id)
err := togo_err(C.herr_t(int(hid)))
if err != nil {
return nil, err
}
o := new_dtype(hid, t.rt)
return o, err
}
// Determines whether two datatype identifiers refer to the same datatype.
// htri_t H5Tequal( hid_t dtype_id1, hid_t dtype_id2 )
func (t *DataType) Equal(o *DataType) bool {
v := int(C.H5Tequal(t.id, o.id))
if v > 0 {
return true
}
return false
}
// Locks a datatype.
// herr_t H5Tlock( hid_t dtype_id )
func (t *DataType) Lock() error {
return togo_err(C.H5Tlock(t.id))
}
// Returns the size of a datatype.
// size_t H5Tget_size( hid_t dtype_id )
func (t *DataType) Size() int {
return int(C.H5Tget_size(t.id))
}
// Sets the total size for an atomic datatype.
// herr_t H5Tset_size( hid_t dtype_id, size_tsize )
func (t *DataType) SetSize(sz int) error {
err := C.H5Tset_size(t.id, C.size_t(sz))
return togo_err(err)
}
// ---------------------------------------------------------------------------
// array data type
type ArrayType struct {
DataType
}
func new_array_type(id C.hid_t) *ArrayType {
t := &ArrayType{DataType{id: id}}
//runtime.SetFinalizer(t, (*DataType).h5t_finalizer)
return t
}
func NewArrayType(base_type *DataType, dims []int) (*ArrayType, error) {
ndims := C.uint(len(dims))
c_dims := (*C.hsize_t)(unsafe.Pointer(&dims[0]))
hid := C.H5Tarray_create2(base_type.id, ndims, c_dims)
err := togo_err(C.herr_t(int(hid)))
if err != nil {
return nil, err
}
t := new_array_type(hid)
return t, err
}
// Returns the rank of an array datatype.
// int H5Tget_array_ndims( hid_t adtype_id )
func (t *ArrayType) NDims() int {
return int(C.H5Tget_array_ndims(t.id))
}
// Retrieves sizes of array dimensions.
// int H5Tget_array_dims2( hid_t adtype_id, hsize_t dims[] )
func (t *ArrayType) ArrayDims() []int {
rank := t.NDims()
dims := make([]int, rank)
// fixme: int/hsize_t size!
c_dims := (*C.hsize_t)(unsafe.Pointer(&dims[0]))
c_rank := int(C.H5Tget_array_dims2(t.id, c_dims))
if c_rank == rank {
return dims
}
return nil
}
// ---------------------------------------------------------------------------
// variable length array data type
type VarLenType struct {
DataType
}
func NewVarLenType(base_type *DataType) (*VarLenType, error) {
hid := C.H5Tvlen_create(base_type.id)
err := togo_err(C.herr_t(int(hid)))
if err != nil {
return nil, err
}
dt := new_vltype(hid)
return dt, err
}
func new_vltype(id C.hid_t) *VarLenType {
t := &VarLenType{DataType{id: id}}
//runtime.SetFinalizer(t, (*DataType).h5t_finalizer)
return t
}
// Determines whether datatype is a variable-length string.
// htri_t H5Tis_variable_str( hid_t dtype_id )
func (vl *VarLenType) IsVariableStr() bool {
o := int(C.H5Tis_variable_str(vl.id))
if o > 0 {
return true
}
return false
}
// ---------------------------------------------------------------------------
// compound data type
type CompType struct {
DataType
}
// Retrieves the number of elements in a compound or enumeration datatype.
// int H5Tget_nmembers( hid_t dtype_id )
func (t *CompType) NMembers() int {
return int(C.H5Tget_nmembers(t.id))
}
// Returns datatype class of compound datatype member.
// H5T_class_t H5Tget_member_class( hid_t cdtype_id, unsigned member_no )
func (t *CompType) MemberClass(mbr_idx int) TypeClass {
return TypeClass(C.H5Tget_member_class(t.id, C.uint(mbr_idx)))
}
// Retrieves the name of a compound or enumeration datatype member.
// char * H5Tget_member_name( hid_t dtype_id, unsigned field_idx )
func (t *CompType) MemberName(mbr_idx int) string {
c_name := C.H5Tget_member_name(t.id, C.uint(mbr_idx))
return C.GoString(c_name)
}
// Retrieves the index of a compound or enumeration datatype member.
// int H5Tget_member_index( hid_t dtype_id, const char * field_name )
func (t *CompType) MemberIndex(name string) int {
c_name := C.CString(name)
defer C.free(unsafe.Pointer(c_name))
return int(C.H5Tget_member_index(t.id, c_name))
}
// Retrieves the offset of a field of a compound datatype.
// size_t H5Tget_member_offset( hid_t dtype_id, unsigned memb_no )
func (t *CompType) MemberOffset(mbr_idx int) int {
return int(C.H5Tget_member_offset(t.id, C.uint(mbr_idx)))
}
// Returns the datatype of the specified member.
// hid_t H5Tget_member_type( hid_t dtype_id, unsigned field_idx )
func (t *CompType) MemberType(mbr_idx int) (*DataType, error) {
hid := C.H5Tget_member_type(t.id, C.uint(mbr_idx))
err := togo_err(C.herr_t(int(hid)))
if err != nil {
return nil, err
}
dt := new_dtype(hid, t.rt.Field(mbr_idx).Type)
return dt, nil
}
// Adds a new member to a compound datatype.
// herr_t H5Tinsert( hid_t dtype_id, const char * name, size_t offset, hid_t field_id )
func (t *CompType) Insert(name string, offset int, field *DataType) error {
c_name := C.CString(name)
defer C.free(unsafe.Pointer(c_name))
//fmt.Printf("inserting [%s] at offset:%d (id=%d)...\n", name, offset, field.id)
err := C.H5Tinsert(t.id, c_name, C.size_t(offset), field.id)
return togo_err(err)
}
// Recursively removes padding from within a compound datatype.
// herr_t H5Tpack( hid_t dtype_id )
func (t *CompType) Pack() error {
err := C.H5Tpack(t.id)
return togo_err(err)
}
// --- opaque type ---
type OpaqueDataType struct {
DataType
}
// Tags an opaque datatype.
// herr_t H5Tset_tag( hid_t dtype_id const char *tag )
func (t *OpaqueDataType) SetTag(tag string) error | {
c_tag := C.CString(tag)
defer C.free(unsafe.Pointer(c_tag))
err := C.H5Tset_tag(t.id, c_tag)
return togo_err(err)
} | identifier_body | |
h5t.go | 32(0))
_go_uint64_t reflect.Type = reflect.TypeOf(uint64(0))
_go_float32_t reflect.Type = reflect.TypeOf(float32(0))
_go_float64_t reflect.Type = reflect.TypeOf(float64(0))
_go_array_t reflect.Type = reflect.TypeOf([1]int{0})
_go_slice_t reflect.Type = reflect.TypeOf([]int{0})
_go_struct_t reflect.Type = reflect.TypeOf(dummy_struct{})
_go_ptr_t reflect.Type = reflect.PtrTo(_go_int_t)
)
type typeClassToType map[TypeClass]reflect.Type
var (
// mapping of type-class to go-type
_type_cls_to_go_type typeClassToType = typeClassToType{
T_NO_CLASS: nil,
T_INTEGER: _go_int_t,
T_FLOAT: _go_float32_t,
T_TIME: nil,
T_STRING: _go_string_t,
T_BITFIELD: nil,
T_OPAQUE: nil,
T_COMPOUND: _go_struct_t,
T_REFERENCE: _go_ptr_t,
T_ENUM: _go_int_t,
T_VLEN: _go_slice_t,
T_ARRAY: _go_array_t,
}
)
func new_dtype(id C.hid_t, rt reflect.Type) *DataType {
t := &DataType{id: id, rt: rt}
//runtime.SetFinalizer(t, (*DataType).h5t_finalizer)
return t
}
// Creates a new datatype.
// hid_t H5Tcreate( H5T_class_t class, size_tsize )
func CreateDataType(class TypeClass, size int) (t *DataType, err error) {
t = nil
err = nil
hid := C.H5Tcreate(C.H5T_class_t(class), C.size_t(size))
err = togo_err(C.herr_t(int(hid)))
if err != nil {
return
}
t = new_dtype(hid, _type_cls_to_go_type[class])
return
}
func (t *DataType) | () {
err := t.Close()
if err != nil {
panic(fmt.Sprintf("error closing datatype: %s", err))
}
}
// Releases a datatype.
// herr_t H5Tclose( hid_t dtype_id )
func (t *DataType) Close() error {
if t.id > 0 {
fmt.Printf("--- closing dtype [%d]...\n", t.id)
err := togo_err(C.H5Tclose(t.id))
t.id = 0
return err
}
return nil
}
// Commits a transient datatype, linking it into the file and creating a new named datatype.
// herr_t H5Tcommit( hid_t loc_id, const char *name, hid_t dtype_id, hid_t lcpl_id, hid_t tcpl_id, hid_t tapl_id )
//func (t *DataType) Commit()
// Determines whether a datatype is a named type or a transient type.
// htri_tH5Tcommitted( hid_t dtype_id )
// Committed reports whether the datatype is a named (committed) type
// rather than a transient one.
// htri_t H5Tcommitted( hid_t dtype_id )
func (t *DataType) Committed() bool {
	return int(C.H5Tcommitted(t.id)) > 0
}
// Copies an existing datatype.
// hid_t H5Tcopy( hid_t dtype_id )
// Copy duplicates the datatype, carrying the Go-type mapping over to
// the copy.
// hid_t H5Tcopy( hid_t dtype_id )
func (t *DataType) Copy() (*DataType, error) {
	hid := C.H5Tcopy(t.id)
	if err := togo_err(C.herr_t(int(hid))); err != nil {
		return nil, err
	}
	return new_dtype(hid, t.rt), nil
}
// Determines whether two datatype identifiers refer to the same datatype.
// htri_t H5Tequal( hid_t dtype_id1, hid_t dtype_id2 )
// Equal reports whether both identifiers refer to the same datatype.
// htri_t H5Tequal( hid_t dtype_id1, hid_t dtype_id2 )
func (t *DataType) Equal(o *DataType) bool {
	return int(C.H5Tequal(t.id, o.id)) > 0
}
// Locks a datatype.
// herr_t H5Tlock( hid_t dtype_id )
// Lock makes the datatype immutable; a locked type cannot be modified
// or closed for the remainder of the library's lifetime.
func (t *DataType) Lock() error {
	return togo_err(C.H5Tlock(t.id))
}
// Returns the size of a datatype.
// size_t H5Tget_size( hid_t dtype_id )
// Size returns the total size of the datatype in bytes.
func (t *DataType) Size() int {
	return int(C.H5Tget_size(t.id))
}
// Sets the total size for an atomic datatype.
// herr_t H5Tset_size( hid_t dtype_id, size_tsize )
// SetSize sets the total size in bytes for an atomic datatype.
// herr_t H5Tset_size( hid_t dtype_id, size_t size )
func (t *DataType) SetSize(sz int) error {
	return togo_err(C.H5Tset_size(t.id, C.size_t(sz)))
}
// ---------------------------------------------------------------------------
// array data type
// ArrayType is a fixed-size array datatype. It embeds DataType, so
// every generic datatype operation applies to it as well.
type ArrayType struct {
	DataType
}
// new_array_type wraps an array-datatype id; the reflect.Type mapping
// is left unset.
func new_array_type(id C.hid_t) *ArrayType {
	return &ArrayType{DataType{id: id}}
}
// NewArrayType creates a fixed-size array datatype with the given base
// type and per-dimension sizes.
// hid_t H5Tarray_create2( hid_t base_id, unsigned ndims, const hsize_t dim[] )
func NewArrayType(base_type *DataType, dims []int) (*ArrayType, error) {
	// Guard: &dims[0] on an empty slice would panic, and HDF5 requires
	// at least one dimension anyway.
	if len(dims) == 0 {
		return nil, fmt.Errorf("hdf5: NewArrayType requires at least one dimension")
	}
	// Copy into a properly typed buffer instead of casting *int to
	// *C.hsize_t: Go's int and HDF5's hsize_t may differ in width
	// (e.g. 32-bit int vs 64-bit hsize_t), which would corrupt the
	// dimension list.
	c_dims := make([]C.hsize_t, len(dims))
	for i, d := range dims {
		c_dims[i] = C.hsize_t(d)
	}
	hid := C.H5Tarray_create2(base_type.id, C.uint(len(dims)), &c_dims[0])
	if err := togo_err(C.herr_t(int(hid))); err != nil {
		return nil, err
	}
	return new_array_type(hid), nil
}
// Returns the rank of an array datatype.
// int H5Tget_array_ndims( hid_t adtype_id )
// NDims returns the rank (number of dimensions) of the array datatype.
func (t *ArrayType) NDims() int {
	return int(C.H5Tget_array_ndims(t.id))
}
// Retrieves sizes of array dimensions.
// int H5Tget_array_dims2( hid_t adtype_id, hsize_t dims[] )
// ArrayDims retrieves the size of each dimension of the array
// datatype, or nil if the rank cannot be determined.
// int H5Tget_array_dims2( hid_t adtype_id, hsize_t dims[] )
func (t *ArrayType) ArrayDims() []int {
	rank := t.NDims()
	if rank <= 0 {
		return nil
	}
	// Resolves the old "fixme: int/hsize_t size!": read into an hsize_t
	// buffer and convert element-wise instead of aliasing []int, which
	// breaks whenever int and hsize_t have different widths.
	c_dims := make([]C.hsize_t, rank)
	if int(C.H5Tget_array_dims2(t.id, &c_dims[0])) != rank {
		return nil
	}
	dims := make([]int, rank)
	for i, d := range c_dims {
		dims[i] = int(d)
	}
	return dims
}
// ---------------------------------------------------------------------------
// variable length array data type
// VarLenType is a variable-length sequence datatype. It embeds
// DataType, so every generic datatype operation applies to it as well.
type VarLenType struct {
	DataType
}
// NewVarLenType creates a variable-length datatype whose elements have
// the given base type.
func NewVarLenType(base_type *DataType) (*VarLenType, error) {
	hid := C.H5Tvlen_create(base_type.id)
	if err := togo_err(C.herr_t(int(hid))); err != nil {
		return nil, err
	}
	return new_vltype(hid), nil
}
// new_vltype wraps a variable-length-datatype id; the reflect.Type
// mapping is left unset.
func new_vltype(id C.hid_t) *VarLenType {
	return &VarLenType{DataType{id: id}}
}
// Determines whether datatype is a variable-length string.
// htri_t H5Tis_variable_str( hid_t dtype_id )
// IsVariableStr reports whether the datatype is a variable-length
// string.
// htri_t H5Tis_variable_str( hid_t dtype_id )
func (vl *VarLenType) IsVariableStr() bool {
	return int(C.H5Tis_variable_str(vl.id)) > 0
}
// ---------------------------------------------------------------------------
// compound data type
// CompType is a compound (struct-like) datatype. It embeds DataType,
// so every generic datatype operation applies to it as well.
type CompType struct {
	DataType
}
// Retrieves the number of elements in a compound or enumeration datatype.
// int H5Tget_nmembers( hid_t dtype_id )
// NMembers returns the number of members in the compound or
// enumeration datatype.
func (t *CompType) NMembers() int {
	return int(C.H5Tget_nmembers(t.id))
}
// Returns datatype class of compound datatype member.
// H5T_class_t H5Tget_member_class( hid_t cdtype_id, unsigned member_no )
// MemberClass returns the datatype class of the member at mbr_idx.
func (t *CompType) MemberClass(mbr_idx int) TypeClass {
	return TypeClass(C.H5Tget_member_class(t.id, C.uint(mbr_idx)))
}
// Retrieves the name of a compound or enumeration datatype member.
// char * H5Tget_member_name( hid_t dtype_id, unsigned field_idx )
// MemberName returns the name of the member at mbr_idx.
// The C string returned by H5Tget_member_name is heap-allocated by the
// library and was previously never released — a leak on every call.
// char * H5Tget_member_name( hid_t dtype_id, unsigned field_idx )
func (t *CompType) MemberName(mbr_idx int) string {
	c_name := C.H5Tget_member_name(t.id, C.uint(mbr_idx))
	defer C.free(unsafe.Pointer(c_name))
	return C.GoString(c_name)
}
// Retrieves the index of a compound or enumeration datatype member.
// int H5Tget_member_index( hid_t dtype_id, const char * field_name )
// MemberIndex returns the index of the member called name.
// int H5Tget_member_index( hid_t dtype_id, const char * field_name )
func (t *CompType) MemberIndex(name string) int {
	cname := C.CString(name)
	defer C.free(unsafe.Pointer(cname))
	return int(C.H5Tget_member_index(t.id, cname))
}
// Retrieves the offset of a field of a compound datatype.
// size_t H5Tget_member_offset( hid_t dtype_id, unsigned memb_no )
// MemberOffset returns the byte offset of the member at mbr_idx within
// the compound datatype.
func (t *CompType) MemberOffset(mbr_idx int) int {
	return int(C.H5Tget_member_offset(t.id, C.uint(mbr_idx)))
}
// Returns the datatype of the specified member.
// hid_t H5Tget_member_type( hid_t dtype_id, unsigned field_idx )
// MemberType returns the datatype of the member at mbr_idx, paired
// with the corresponding Go struct field's reflect.Type.
func (t *CompType) MemberType(mbr_idx int) (*DataType, error) {
	hid := C.H5Tget_member_type(t.id, C.uint(mbr_idx))
	err := togo_err(C.herr_t(int(hid)))
	if err != nil {
		return nil, err
	}
	// NOTE(review): assumes t.rt is a non-nil struct type with at least
	// mbr_idx+1 fields; Field() panics otherwise — confirm callers
	// always construct CompType with rt populated.
	dt := new_dtype(hid, t.rt.Field(mbr_idx).Type)
	return dt, nil
}
// | h5t_finalizer | identifier_name |
h5t.go | T_TIME: nil,
T_STRING: _go_string_t,
T_BITFIELD: nil,
T_OPAQUE: nil,
T_COMPOUND: _go_struct_t,
T_REFERENCE: _go_ptr_t,
T_ENUM: _go_int_t,
T_VLEN: _go_slice_t,
T_ARRAY: _go_array_t,
}
)
func new_dtype(id C.hid_t, rt reflect.Type) *DataType {
t := &DataType{id: id, rt: rt}
//runtime.SetFinalizer(t, (*DataType).h5t_finalizer)
return t
}
// Creates a new datatype.
// hid_t H5Tcreate( H5T_class_t class, size_tsize )
func CreateDataType(class TypeClass, size int) (t *DataType, err error) {
t = nil
err = nil
hid := C.H5Tcreate(C.H5T_class_t(class), C.size_t(size))
err = togo_err(C.herr_t(int(hid)))
if err != nil {
return
}
t = new_dtype(hid, _type_cls_to_go_type[class])
return
}
func (t *DataType) h5t_finalizer() {
err := t.Close()
if err != nil {
panic(fmt.Sprintf("error closing datatype: %s", err))
}
}
// Releases a datatype.
// herr_t H5Tclose( hid_t dtype_id )
// Close releases the datatype. It is safe to call more than once: the
// id is zeroed after the first release, so later calls are no-ops.
// (Removed a leftover debug fmt.Printf that spammed stdout on every
// close — library code must not print.)
// herr_t H5Tclose( hid_t dtype_id )
func (t *DataType) Close() error {
	if t.id > 0 {
		err := togo_err(C.H5Tclose(t.id))
		t.id = 0
		return err
	}
	return nil
}
// Commits a transient datatype, linking it into the file and creating a new named datatype.
// herr_t H5Tcommit( hid_t loc_id, const char *name, hid_t dtype_id, hid_t lcpl_id, hid_t tcpl_id, hid_t tapl_id )
//func (t *DataType) Commit()
// Determines whether a datatype is a named type or a transient type.
// htri_tH5Tcommitted( hid_t dtype_id )
func (t *DataType) Committed() bool {
o := int(C.H5Tcommitted(t.id))
if o > 0 {
return true
}
return false
}
// Copies an existing datatype.
// hid_t H5Tcopy( hid_t dtype_id )
func (t *DataType) Copy() (*DataType, error) {
hid := C.H5Tcopy(t.id)
err := togo_err(C.herr_t(int(hid)))
if err != nil {
return nil, err
}
o := new_dtype(hid, t.rt)
return o, err
}
// Determines whether two datatype identifiers refer to the same datatype.
// htri_t H5Tequal( hid_t dtype_id1, hid_t dtype_id2 )
func (t *DataType) Equal(o *DataType) bool {
v := int(C.H5Tequal(t.id, o.id))
if v > 0 {
return true
}
return false
}
// Locks a datatype.
// herr_t H5Tlock( hid_t dtype_id )
func (t *DataType) Lock() error {
return togo_err(C.H5Tlock(t.id))
}
// Returns the size of a datatype.
// size_t H5Tget_size( hid_t dtype_id )
func (t *DataType) Size() int {
return int(C.H5Tget_size(t.id))
}
// Sets the total size for an atomic datatype.
// herr_t H5Tset_size( hid_t dtype_id, size_tsize )
func (t *DataType) SetSize(sz int) error {
err := C.H5Tset_size(t.id, C.size_t(sz))
return togo_err(err)
}
// ---------------------------------------------------------------------------
// array data type
type ArrayType struct {
DataType
}
func new_array_type(id C.hid_t) *ArrayType {
t := &ArrayType{DataType{id: id}}
//runtime.SetFinalizer(t, (*DataType).h5t_finalizer)
return t
}
// NewArrayType creates a fixed-size array datatype with the given base
// type and per-dimension sizes.
// hid_t H5Tarray_create2( hid_t base_id, unsigned ndims, const hsize_t dim[] )
func NewArrayType(base_type *DataType, dims []int) (*ArrayType, error) {
	// Guard: &dims[0] on an empty slice would panic, and HDF5 requires
	// at least one dimension anyway.
	if len(dims) == 0 {
		return nil, fmt.Errorf("hdf5: NewArrayType requires at least one dimension")
	}
	// Copy into a properly typed buffer instead of casting *int to
	// *C.hsize_t: Go's int and HDF5's hsize_t may differ in width
	// (e.g. 32-bit int vs 64-bit hsize_t), which would corrupt the
	// dimension list.
	c_dims := make([]C.hsize_t, len(dims))
	for i, d := range dims {
		c_dims[i] = C.hsize_t(d)
	}
	hid := C.H5Tarray_create2(base_type.id, C.uint(len(dims)), &c_dims[0])
	if err := togo_err(C.herr_t(int(hid))); err != nil {
		return nil, err
	}
	return new_array_type(hid), nil
}
// Returns the rank of an array datatype.
// int H5Tget_array_ndims( hid_t adtype_id )
func (t *ArrayType) NDims() int {
return int(C.H5Tget_array_ndims(t.id))
}
// Retrieves sizes of array dimensions.
// int H5Tget_array_dims2( hid_t adtype_id, hsize_t dims[] )
// ArrayDims retrieves the size of each dimension of the array
// datatype, or nil if the rank cannot be determined.
// int H5Tget_array_dims2( hid_t adtype_id, hsize_t dims[] )
func (t *ArrayType) ArrayDims() []int {
	rank := t.NDims()
	if rank <= 0 {
		return nil
	}
	// Resolves the old "fixme: int/hsize_t size!": read into an hsize_t
	// buffer and convert element-wise instead of aliasing []int, which
	// breaks whenever int and hsize_t have different widths.
	c_dims := make([]C.hsize_t, rank)
	if int(C.H5Tget_array_dims2(t.id, &c_dims[0])) != rank {
		return nil
	}
	dims := make([]int, rank)
	for i, d := range c_dims {
		dims[i] = int(d)
	}
	return dims
}
// ---------------------------------------------------------------------------
// variable length array data type
type VarLenType struct {
DataType
}
func NewVarLenType(base_type *DataType) (*VarLenType, error) {
hid := C.H5Tvlen_create(base_type.id)
err := togo_err(C.herr_t(int(hid)))
if err != nil {
return nil, err
}
dt := new_vltype(hid)
return dt, err
}
func new_vltype(id C.hid_t) *VarLenType {
t := &VarLenType{DataType{id: id}}
//runtime.SetFinalizer(t, (*DataType).h5t_finalizer)
return t
}
// Determines whether datatype is a variable-length string.
// htri_t H5Tis_variable_str( hid_t dtype_id )
func (vl *VarLenType) IsVariableStr() bool {
o := int(C.H5Tis_variable_str(vl.id))
if o > 0 {
return true
}
return false
}
// ---------------------------------------------------------------------------
// compound data type
type CompType struct {
DataType
}
// Retrieves the number of elements in a compound or enumeration datatype.
// int H5Tget_nmembers( hid_t dtype_id )
func (t *CompType) NMembers() int {
return int(C.H5Tget_nmembers(t.id))
}
// Returns datatype class of compound datatype member.
// H5T_class_t H5Tget_member_class( hid_t cdtype_id, unsigned member_no )
func (t *CompType) MemberClass(mbr_idx int) TypeClass {
return TypeClass(C.H5Tget_member_class(t.id, C.uint(mbr_idx)))
}
// Retrieves the name of a compound or enumeration datatype member.
// char * H5Tget_member_name( hid_t dtype_id, unsigned field_idx )
// MemberName returns the name of the member at mbr_idx.
// The C string returned by H5Tget_member_name is heap-allocated by the
// library and was previously never released — a leak on every call.
// char * H5Tget_member_name( hid_t dtype_id, unsigned field_idx )
func (t *CompType) MemberName(mbr_idx int) string {
	c_name := C.H5Tget_member_name(t.id, C.uint(mbr_idx))
	defer C.free(unsafe.Pointer(c_name))
	return C.GoString(c_name)
}
// Retrieves the index of a compound or enumeration datatype member.
// int H5Tget_member_index( hid_t dtype_id, const char * field_name )
func (t *CompType) MemberIndex(name string) int {
c_name := C.CString(name)
defer C.free(unsafe.Pointer(c_name))
return int(C.H5Tget_member_index(t.id, c_name))
}
// Retrieves the offset of a field of a compound datatype.
// size_t H5Tget_member_offset( hid_t dtype_id, unsigned memb_no )
func (t *CompType) MemberOffset(mbr_idx int) int {
return int(C.H5Tget_member_offset(t.id, C.uint(mbr_idx)))
}
// Returns the datatype of the specified member.
// hid_t H5Tget_member_type( hid_t dtype_id, unsigned field_idx )
func (t *CompType) MemberType(mbr_idx int) (*DataType, error) {
hid := C.H5Tget_member_type(t.id, C.uint(mbr_idx))
err := togo_err(C.herr_t(int(hid)))
if err != nil {
return nil, err
}
dt := new_dtype(hid, t.rt.Field(mbr_idx).Type)
return dt, nil
}
// Adds a new member to a compound datatype.
// herr_t H5Tinsert( hid_t dtype_id, const char * name, size_t offset, hid_t field_id )
// Insert adds a member called name at byte offset offset, with the
// given field datatype, to the compound type.
// herr_t H5Tinsert( hid_t dtype_id, const char * name, size_t offset, hid_t field_id )
func (t *CompType) Insert(name string, offset int, field *DataType) error {
	cname := C.CString(name)
	defer C.free(unsafe.Pointer(cname))
	return togo_err(C.H5Tinsert(t.id, cname, C.size_t(offset), field.id))
}
// Recursively removes padding from within a compound datatype.
// herr_t H5Tpack( hid_t dtype_id )
func (t *CompType) Pack() error {
err := C.H5Tpack(t.id)
return togo_err(err)
} | random_line_split | ||
ai.py | between two angles.
"""
import math
import pymunk
from pymunk import Vec2d
import gameobjects | def angle_between_vectors(vec1, vec2):
"""
Since Vec2d operates in a cartesian coordinate space we have to
convert the resulting vector to get the correct angle for our space.
"""
vec = vec1 - vec2
vec = vec.perpendicular()
return vec.angle
def periodic_difference_of_angles(angle1, angle2):
    """Return the signed difference angle1 - angle2 wrapped to [-pi, pi).

    Bug fix: the old version only reduced each operand mod 2*pi before
    subtracting, so the result could lie anywhere in (-2*pi, 2*pi).
    A tank 0.1 rad from its target bearing could then see a "difference"
    of ~6.2 rad and keep spinning; wrapping the difference itself gives
    the true shortest angular offset.
    """
    return (angle1 - angle2 + math.pi) % (2 * math.pi) - math.pi
class Ai:
"""
A simple ai that finds the shortest path to the target using
a breadth first search. Also capable of shooting other tanks and or
wooden boxes.
"""
def __init__(self, tank, game_objects_list, tanks_list, space, currentmap):
self.tank = tank
self.game_objects_list = game_objects_list
self.tanks_list = tanks_list
self.space = space
self.currentmap = currentmap
self.flag = None
self.MAX_X = currentmap.width - 1
self.MAX_Y = currentmap.height - 1
self.last_distance = 1
self.path = deque()
self.move_cycle = self.move_cycle_gen()
self.update_grid_pos()
def update_grid_pos(self):
"""
This should only be called in the beginning, or at the
end of a move_cycle.
"""
self.grid_pos = self.get_tile_of_position(self.tank.body.position)
def decide(self):
"""
Main decision function that gets called on every
tick of the game.
"""
self.maybe_shoot()
next(self.move_cycle)
pass
def maybe_shoot(self):
    """Cast a ray straight ahead of the barrel and fire when it hits
    another tank or a destructible (wooden) box.

    Bug fix: the old bare ``except: pass`` swallowed *every* exception,
    hiding real bugs; only the expected missing-attribute case is
    tolerated now.
    """
    sin_a = math.sin(self.tank.body.angle)
    cos_a = math.cos(self.tank.body.angle)
    pos = self.tank.body.position
    # Start just in front of our own hull so the ray cannot hit us;
    # end 10 units ahead along the barrel direction.
    start = (pos[0] - 0.6 * sin_a, pos[1] + 0.6 * cos_a)
    end = (pos[0] - 10 * sin_a, pos[1] + 10 * cos_a)
    res = self.space.segment_query_first(start, end, 0, pymunk.ShapeFilter())
    if res is None or not hasattr(res, 'shape'):
        return
    try:
        # Shapes created outside our code may lack parent/boxmodel.
        target = getattr(res.shape, 'parent', None)
        if isinstance(target, gameobjects.Tank):
            should_fire = True
        elif isinstance(target, gameobjects.Box):
            should_fire = target.boxmodel.destructable is True
        else:
            should_fire = False
    except AttributeError:
        return
    if should_fire:
        bullet = self.tank.shoot(self.space)
        if bullet is not None:
            self.game_objects_list.append(bullet)
def move_cycle_gen(self):
"""
A generator that iteratively goes through all the required
steps to move to our goal.
"""
while True:
self.update_grid_pos()
path = self.find_shortest_path("without_metalbox")
if not path:
path = self.find_shortest_path("metalbox")
yield
if not path:
continue
next_coord = path.pop()
next_coord += Vec2d(0.5, 0.5)
yield
target_angle = \
angle_between_vectors(Vec2d(self.tank.body.position), next_coord)
angle_tank = self.tank.body.angle
self.turn(angle_tank, target_angle)
while not self.correct_angle(angle_tank, target_angle):
angle_tank = self.tank.body.angle
target_angle = \
angle_between_vectors(Vec2d(self.tank.body.position),
next_coord)
yield
self.tank.accelerate()
while not self.correct_pos(next_coord, self.last_distance):
yield
yield
def correct_pos(self, target_pos, last_distance):
"""
Checks if the tank is on the correct position, compared from the
last one.
"""
tank_pos = Vec2d(self.tank.body.position)
current_distance = target_pos.get_distance(tank_pos)
self.last_distance = current_distance
if last_distance < current_distance:
return True
else:
return False
def turn(self, tank_angle, target_angle):
    """Turn towards target_angle along the shorter direction.

    Bug fix: the old condition used ``% 2 * math.pi``, which parses as
    ``(... % 2) * math.pi`` — NOT modulo by a full turn — so the tank
    sometimes span the long way around (the old "WIP" note).
    """
    angle_diff = periodic_difference_of_angles(tank_angle, target_angle)
    if abs(angle_diff) <= MIN_ANGLE_DIF:
        return
    # Wrap into [0, 2*pi): values >= pi mean the target is quicker to
    # reach by turning left.
    wrapped = (angle_diff + 2 * math.pi) % (2 * math.pi)
    self.tank.stop_moving()
    if wrapped >= math.pi:
        self.tank.turn_left()
    else:
        self.tank.turn_right()
def correct_angle(self, tank_angle, target_angle):
"""
If the tank has the correct angle to the next tile; stop turning.
"""
angle_diff = periodic_difference_of_angles(target_angle, tank_angle)
if abs(angle_diff) <= MIN_ANGLE_DIF:
self.tank.stop_turning()
return True
else:
return False
def find_shortest_path(self, box_indicator):
    """Breadth-first search over integer tile coordinates.

    Returns a deque of tiles running from the goal back towards (but
    not including) the start tile, so the caller can pop() the next
    step from the right end. Returns an empty deque when the goal is
    unreachable.

    Fixes: no longer shadows the builtin ``dict``, and the goal tile is
    computed once instead of on every BFS iteration.
    """
    target = self.get_target_tile().int_tuple
    came_from = {}  # child tile -> parent tile, for path reconstruction
    visited = set()
    queue = deque([self.grid_pos])
    goal_node = None
    while queue:
        node = Vec2d(queue.popleft())
        if node == target:
            goal_node = node.int_tuple
            break
        for neighbor in self.get_tile_neighbors(node, box_indicator):
            neighbor = neighbor.int_tuple
            if neighbor not in visited:
                queue.append(neighbor)
                visited.add(neighbor)
                came_from[neighbor] = node.int_tuple
    if goal_node is None:
        return deque([])
    shortest_path = []
    key = goal_node
    while key != self.grid_pos.int_tuple:
        shortest_path.append(Vec2d(key))
        key = came_from[key]
    return deque(shortest_path)
def get_target_tile(self):
"""
Returns position of the flag if we don't have it. If we
do have the flag, return the position of our home base.
"""
if self.tank.flag is not None:
x, y = self.tank.start_position
else:
self.get_flag() # Ensure that we have initialized it.
x, y = self.flag.x, self.flag.y
return Vec2d(int(x), int(y))
def get_flag(self):
"""
This has to be called to get the flag, since we don't know
where it is when the Ai object is initialized.
"""
if self.flag is None:
# Find the flag in the game objects list
for obj in self.game_objects_list:
if isinstance(obj, gameobjects.Flag):
self.flag = obj
break
return self.flag
def get_tile_of_position(self, position_vector):
"""
Converts and returns the float position of our tank to an
integer position.
"""
x, y = position_vector
return Vec2d(int(x), int(y))
def get_tile_neighbors(self, coord_vec, box_indicator):
"""
Returns all bordering grid squares of the input coordinate.
A bordering square is only considered accessible if it is grass
or a wooden box.
"""
neighbors = [] # Find the coordinates of the tiles' four neighbors
neighbors.append(coord_vec + Vec2d(1, 0))
neighbors.append(coord_vec + Vec2d(-1, 0))
neighbors.append(coord_vec + Vec2d(0, 1))
neighbors.append(coord_vec + Vec2d(0, -1))
if box_indicator == "without_metalbox":
return filter(self.filter_tile_neighbors, neighbors)
else:
return filter(self.filter_tile_neighbors_metalbox, neighbors)
def filter_tile_neighbors(self, coord):
"""
Filter for all the tiles around the tank. This filter removes
the immovable stones so we don't count those tiles to find the
shortest path.
"""
coord = coord.int_tuple
if coord[1] <= self.MAX_Y and coord[0] <= self.MAX_X and coord[1] >= \
0 and coord[0] >=\
0 and (self.currentmap.boxAt(coord[0], coord[1])
== 0 or self.currentmap.boxAt(coord[0], coord[1]) == 2):
return True
return False
def filter_tile_neighbors_metalbox(self, coord):
"""
Filter for all the tiles around the tank, metalboxes included. This
filter removes the immovable stones so we don't count those tiles to
find the shortest path.
"""
coord = coord.int_tuple
if coord[1] <= self.MAX_Y and coord[0] <= self.MAX | from collections import defaultdict, deque
MIN_ANGLE_DIF = math.radians(5)
| random_line_split |
ai.py | between two angles.
"""
import math
import pymunk
from pymunk import Vec2d
import gameobjects
from collections import defaultdict, deque
MIN_ANGLE_DIF = math.radians(5)
def angle_between_vectors(vec1, vec2):
    """Return the bearing of the vector from vec2 to vec1, rotated a
    quarter turn so pymunk's cartesian space maps onto our game space.
    """
    delta = vec1 - vec2
    return delta.perpendicular().angle
def periodic_difference_of_angles(angle1, angle2):
    """Return the signed difference angle1 - angle2 wrapped to [-pi, pi).

    Bug fix: the old version only reduced each operand mod 2*pi before
    subtracting, so the result could lie anywhere in (-2*pi, 2*pi).
    A tank 0.1 rad from its target bearing could then see a "difference"
    of ~6.2 rad and keep spinning; wrapping the difference itself gives
    the true shortest angular offset.
    """
    return (angle1 - angle2 + math.pi) % (2 * math.pi) - math.pi
class Ai:
"""
A simple ai that finds the shortest path to the target using
a breadth first search. Also capable of shooting other tanks and or
wooden boxes.
"""
def __init__(self, tank, game_objects_list, tanks_list, space, currentmap):
    # The tank this AI controls, plus the shared game state it acts on.
    self.tank = tank
    self.game_objects_list = game_objects_list
    self.tanks_list = tanks_list
    self.space = space
    self.currentmap = currentmap
    # The flag object is unknown at construction time; it is resolved
    # lazily by get_flag().
    self.flag = None
    # Inclusive upper bounds for valid tile coordinates.
    self.MAX_X = currentmap.width - 1
    self.MAX_Y = currentmap.height - 1
    # Distance to the current waypoint on the previous tick; seeded
    # with 1 so the first correct_pos() comparison has a value.
    self.last_distance = 1
    self.path = deque()
    # Generator implementing the movement state machine; decide()
    # advances it by one step per game tick.
    self.move_cycle = self.move_cycle_gen()
    self.update_grid_pos()
def update_grid_pos(self):
    """Cache the tank's current integer tile coordinate.

    This should only be called in the beginning, or at the end of a
    move_cycle, so the cached tile stays fixed within one cycle step.
    """
    self.grid_pos = self.get_tile_of_position(self.tank.body.position)
def decide(self):
    """Per-tick entry point: fire if something shootable is ahead,
    then advance the movement state machine by one step.
    """
    self.maybe_shoot()
    next(self.move_cycle)
def maybe_shoot(self):
"""
Makes a raycast query in front of the tank. If another tank
or a wooden box is found, then we shoot.
"""
res = self.space.segment_query_first((self.tank.body.position[0] - \
0.6 * math.sin(self.tank.body.angle), self.tank.body.position[1] +\
0.6 * math.cos(self.tank.body.angle)), (self.tank.body.position[0] -\
10*math.sin(self.tank.body.angle), self.tank.body.position[1] + \
10*math.cos(self.tank.body.angle)), 0, pymunk.ShapeFilter())
if res is not None:
try:
if hasattr(res, 'shape'):
if isinstance(res.shape.parent, gameobjects.Tank):
|
elif isinstance(res.shape.parent, gameobjects.Box):
if res.shape.parent.boxmodel.destructable is True:
bullet = self.tank.shoot(self.space)
if bullet is not None:
self.game_objects_list.append(bullet)
except:
pass
def move_cycle_gen(self):
    """
    A generator that iteratively goes through all the required
    steps to move to our goal.

    Each `yield` returns control to decide() for one game tick, so the
    plan (replan path -> turn -> drive) is spread over many ticks
    instead of blocking the game loop.
    """
    while True:
        self.update_grid_pos()
        # Prefer a route avoiding metal-box tiles; fall back to a grid
        # that includes them when no such route exists.
        path = self.find_shortest_path("without_metalbox")
        if not path:
            path = self.find_shortest_path("metalbox")
        yield
        if not path:
            continue
        # Aim for the centre of the next tile (tile coords are corners).
        next_coord = path.pop()
        next_coord += Vec2d(0.5, 0.5)
        yield
        target_angle = \
            angle_between_vectors(Vec2d(self.tank.body.position), next_coord)
        angle_tank = self.tank.body.angle
        self.turn(angle_tank, target_angle)
        # Re-read bearing and target every tick while turning.
        while not self.correct_angle(angle_tank, target_angle):
            angle_tank = self.tank.body.angle
            target_angle = \
                angle_between_vectors(Vec2d(self.tank.body.position),
                                      next_coord)
            yield
        self.tank.accelerate()
        # Drive until we stop closing in on the waypoint (reached or
        # passed it), then restart the cycle with a fresh path.
        while not self.correct_pos(next_coord, self.last_distance):
            yield
        yield
def correct_pos(self, target_pos, last_distance):
    """Return True once the tank has stopped closing in on target_pos
    (its distance grew since the previous tick, i.e. the waypoint was
    reached or passed). Records the current distance for the next call.
    """
    current = target_pos.get_distance(Vec2d(self.tank.body.position))
    self.last_distance = current
    return last_distance < current
def turn(self, tank_angle, target_angle):
    """Turn towards target_angle along the shorter direction.

    Bug fix: the old condition used ``% 2 * math.pi``, which parses as
    ``(... % 2) * math.pi`` — NOT modulo by a full turn — so the tank
    sometimes span the long way around (the old "WIP" note).
    """
    angle_diff = periodic_difference_of_angles(tank_angle, target_angle)
    if abs(angle_diff) <= MIN_ANGLE_DIF:
        return
    # Wrap into [0, 2*pi): values >= pi mean the target is quicker to
    # reach by turning left.
    wrapped = (angle_diff + 2 * math.pi) % (2 * math.pi)
    self.tank.stop_moving()
    if wrapped >= math.pi:
        self.tank.turn_left()
    else:
        self.tank.turn_right()
def correct_angle(self, tank_angle, target_angle):
    """Return True (and stop turning) once the tank is within
    MIN_ANGLE_DIF of the target bearing; otherwise keep turning.
    """
    diff = periodic_difference_of_angles(target_angle, tank_angle)
    if abs(diff) > MIN_ANGLE_DIF:
        return False
    self.tank.stop_turning()
    return True
def find_shortest_path(self, box_indicator):
    """Breadth-first search over integer tile coordinates.

    Returns a deque of tiles running from the goal back towards (but
    not including) the start tile, so the caller can pop() the next
    step from the right end. Returns an empty deque when the goal is
    unreachable.

    Fixes: no longer shadows the builtin ``dict``, and the goal tile is
    computed once instead of on every BFS iteration.
    """
    target = self.get_target_tile().int_tuple
    came_from = {}  # child tile -> parent tile, for path reconstruction
    visited = set()
    queue = deque([self.grid_pos])
    goal_node = None
    while queue:
        node = Vec2d(queue.popleft())
        if node == target:
            goal_node = node.int_tuple
            break
        for neighbor in self.get_tile_neighbors(node, box_indicator):
            neighbor = neighbor.int_tuple
            if neighbor not in visited:
                queue.append(neighbor)
                visited.add(neighbor)
                came_from[neighbor] = node.int_tuple
    if goal_node is None:
        return deque([])
    shortest_path = []
    key = goal_node
    while key != self.grid_pos.int_tuple:
        shortest_path.append(Vec2d(key))
        key = came_from[key]
    return deque(shortest_path)
def get_target_tile(self):
    """Tile to drive towards: our home base while carrying the flag,
    otherwise the flag's current tile.
    """
    if self.tank.flag is not None:
        x, y = self.tank.start_position
    else:
        flag = self.get_flag()  # lazily resolved on first use
        x, y = flag.x, flag.y
    return Vec2d(int(x), int(y))
def get_flag(self):
    """Locate and cache the flag object; its identity is unknown when
    the Ai is constructed, so it is searched for on first use.
    """
    if self.flag is None:
        self.flag = next(
            (obj for obj in self.game_objects_list
             if isinstance(obj, gameobjects.Flag)),
            None)
    return self.flag
def get_tile_of_position(self, position_vector):
    """Truncate a float world position to its integer tile coordinate."""
    tile_x, tile_y = position_vector
    return Vec2d(int(tile_x), int(tile_y))
def get_tile_neighbors(self, coord_vec, box_indicator):
    """Return the accessible tiles bordering coord_vec (right, left,
    up, down). Which tiles count as accessible depends on
    box_indicator: metal-box tiles are only allowed on the fallback
    grid.
    """
    offsets = (Vec2d(1, 0), Vec2d(-1, 0), Vec2d(0, 1), Vec2d(0, -1))
    candidates = [coord_vec + off for off in offsets]
    if box_indicator == "without_metalbox":
        return filter(self.filter_tile_neighbors, candidates)
    return filter(self.filter_tile_neighbors_metalbox, candidates)
def filter_tile_neighbors(self, coord):
    """Return True when coord lies on the map and holds grass (0) or a
    wooden box (2) — i.e. a tile the tank may path through. Stones and
    metal boxes are excluded.
    """
    x, y = coord.int_tuple
    if not (0 <= x <= self.MAX_X and 0 <= y <= self.MAX_Y):
        return False
    return self.currentmap.boxAt(x, y) in (0, 2)
def filter_tile_neighbors_metalbox(self, coord):
"""
Filter for all the tiles around the tank, metalboxes included. This
filter removes the immovable stones so we don't count those tiles to
find the shortest path.
"""
coord = coord.int_tuple
if coord[1] <= self.MAX_Y and coord[0] <= | bullet = self.tank.shoot(self.space)
if bullet is not None:
self.game_objects_list.append(bullet) | conditional_block |
ai.py | between two angles.
"""
import math
import pymunk
from pymunk import Vec2d
import gameobjects
from collections import defaultdict, deque
MIN_ANGLE_DIF = math.radians(5)
def angle_between_vectors(vec1, vec2):
"""
Since Vec2d operates in a cartesian coordinate space we have to
convert the resulting vector to get the correct angle for our space.
"""
vec = vec1 - vec2
vec = vec.perpendicular()
return vec.angle
def periodic_difference_of_angles(angle1, angle2):
return (angle1 % (2*math.pi)) - (angle2 % (2*math.pi))
class Ai:
"""
A simple ai that finds the shortest path to the target using
a breadth first search. Also capable of shooting other tanks and or
wooden boxes.
"""
def __init__(self, tank, game_objects_list, tanks_list, space, currentmap):
self.tank = tank
self.game_objects_list = game_objects_list
self.tanks_list = tanks_list
self.space = space
self.currentmap = currentmap
self.flag = None
self.MAX_X = currentmap.width - 1
self.MAX_Y = currentmap.height - 1
self.last_distance = 1
self.path = deque()
self.move_cycle = self.move_cycle_gen()
self.update_grid_pos()
def update_grid_pos(self):
"""
This should only be called in the beginning, or at the
end of a move_cycle.
"""
self.grid_pos = self.get_tile_of_position(self.tank.body.position)
def decide(self):
"""
Main decision function that gets called on every
tick of the game.
"""
self.maybe_shoot()
next(self.move_cycle)
pass
def maybe_shoot(self):
    """Cast a ray straight ahead of the barrel and fire when it hits
    another tank or a destructible (wooden) box.

    Bug fix: the old bare ``except: pass`` swallowed *every* exception,
    hiding real bugs; only the expected missing-attribute case is
    tolerated now.
    """
    sin_a = math.sin(self.tank.body.angle)
    cos_a = math.cos(self.tank.body.angle)
    pos = self.tank.body.position
    # Start just in front of our own hull so the ray cannot hit us;
    # end 10 units ahead along the barrel direction.
    start = (pos[0] - 0.6 * sin_a, pos[1] + 0.6 * cos_a)
    end = (pos[0] - 10 * sin_a, pos[1] + 10 * cos_a)
    res = self.space.segment_query_first(start, end, 0, pymunk.ShapeFilter())
    if res is None or not hasattr(res, 'shape'):
        return
    try:
        # Shapes created outside our code may lack parent/boxmodel.
        target = getattr(res.shape, 'parent', None)
        if isinstance(target, gameobjects.Tank):
            should_fire = True
        elif isinstance(target, gameobjects.Box):
            should_fire = target.boxmodel.destructable is True
        else:
            should_fire = False
    except AttributeError:
        return
    if should_fire:
        bullet = self.tank.shoot(self.space)
        if bullet is not None:
            self.game_objects_list.append(bullet)
def move_cycle_gen(self):
| angle_tank = self.tank.body.angle
target_angle = \
angle_between_vectors(Vec2d(self.tank.body.position),
next_coord)
yield
self.tank.accelerate()
while not self.correct_pos(next_coord, self.last_distance):
yield
yield
def correct_pos(self, target_pos, last_distance):
"""
Checks if the tank is on the correct position, compared from the
last one.
"""
tank_pos = Vec2d(self.tank.body.position)
current_distance = target_pos.get_distance(tank_pos)
self.last_distance = current_distance
if last_distance < current_distance:
return True
else:
return False
def turn(self, tank_angle, target_angle):
    """Turn towards target_angle along the shorter direction.

    Bug fix: the old condition used ``% 2 * math.pi``, which parses as
    ``(... % 2) * math.pi`` — NOT modulo by a full turn — so the tank
    sometimes span the long way around (the old "WIP" note).
    """
    angle_diff = periodic_difference_of_angles(tank_angle, target_angle)
    if abs(angle_diff) <= MIN_ANGLE_DIF:
        return
    # Wrap into [0, 2*pi): values >= pi mean the target is quicker to
    # reach by turning left.
    wrapped = (angle_diff + 2 * math.pi) % (2 * math.pi)
    self.tank.stop_moving()
    if wrapped >= math.pi:
        self.tank.turn_left()
    else:
        self.tank.turn_right()
def correct_angle(self, tank_angle, target_angle):
"""
If the tank has the correct angle to the next tile; stop turning.
"""
angle_diff = periodic_difference_of_angles(target_angle, tank_angle)
if abs(angle_diff) <= MIN_ANGLE_DIF:
self.tank.stop_turning()
return True
else:
return False
def find_shortest_path(self, box_indicator):
    """Breadth-first search over integer tile coordinates.

    Returns a deque of tiles running from the goal back towards (but
    not including) the start tile, so the caller can pop() the next
    step from the right end. Returns an empty deque when the goal is
    unreachable.

    Fixes: no longer shadows the builtin ``dict``, and the goal tile is
    computed once instead of on every BFS iteration.
    """
    target = self.get_target_tile().int_tuple
    came_from = {}  # child tile -> parent tile, for path reconstruction
    visited = set()
    queue = deque([self.grid_pos])
    goal_node = None
    while queue:
        node = Vec2d(queue.popleft())
        if node == target:
            goal_node = node.int_tuple
            break
        for neighbor in self.get_tile_neighbors(node, box_indicator):
            neighbor = neighbor.int_tuple
            if neighbor not in visited:
                queue.append(neighbor)
                visited.add(neighbor)
                came_from[neighbor] = node.int_tuple
    if goal_node is None:
        return deque([])
    shortest_path = []
    key = goal_node
    while key != self.grid_pos.int_tuple:
        shortest_path.append(Vec2d(key))
        key = came_from[key]
    return deque(shortest_path)
def get_target_tile(self):
"""
Returns position of the flag if we don't have it. If we
do have the flag, return the position of our home base.
"""
if self.tank.flag is not None:
x, y = self.tank.start_position
else:
self.get_flag() # Ensure that we have initialized it.
x, y = self.flag.x, self.flag.y
return Vec2d(int(x), int(y))
def get_flag(self):
"""
This has to be called to get the flag, since we don't know
where it is when the Ai object is initialized.
"""
if self.flag is None:
# Find the flag in the game objects list
for obj in self.game_objects_list:
if isinstance(obj, gameobjects.Flag):
self.flag = obj
break
return self.flag
def get_tile_of_position(self, position_vector):
"""
Converts and returns the float position of our tank to an
integer position.
"""
x, y = position_vector
return Vec2d(int(x), int(y))
def get_tile_neighbors(self, coord_vec, box_indicator):
"""
Returns all bordering grid squares of the input coordinate.
A bordering square is only considered accessible if it is grass
or a wooden box.
"""
neighbors = [] # Find the coordinates of the tiles' four neighbors
neighbors.append(coord_vec + Vec2d(1, 0))
neighbors.append(coord_vec + Vec2d(-1, 0))
neighbors.append(coord_vec + Vec2d(0, 1))
neighbors.append(coord_vec + Vec2d(0, -1))
if box_indicator == "without_metalbox":
return filter(self.filter_tile_neighbors, neighbors)
else:
return filter(self.filter_tile_neighbors_metalbox, neighbors)
def filter_tile_neighbors(self, coord):
"""
Filter for all the tiles around the tank. This filter removes
the immovable stones so we don't count those tiles to find the
shortest path.
"""
coord = coord.int_tuple
if coord[1] <= self.MAX_Y and coord[0] <= self.MAX_X and coord[1] >= \
0 and coord[0] >=\
0 and (self.currentmap.boxAt(coord[0], coord[1])
== 0 or self.currentmap.boxAt(coord[0], coord[1]) == 2):
return True
return False
def filter_tile_neighbors_metalbox(self, coord):
"""
Filter for all the tiles around the tank, metalboxes included. This
filter removes the immovable stones so we don't count those tiles to
find the shortest path.
"""
coord = coord.int_tuple
if coord[1] <= self.MAX_Y and coord[0] <= self | """
A generator that iteratively goes through all the required
steps to move to our goal.
"""
while True:
self.update_grid_pos()
path = self.find_shortest_path("without_metalbox")
if not path:
path = self.find_shortest_path("metalbox")
yield
if not path:
continue
next_coord = path.pop()
next_coord += Vec2d(0.5, 0.5)
yield
target_angle = \
angle_between_vectors(Vec2d(self.tank.body.position), next_coord)
angle_tank = self.tank.body.angle
self.turn(angle_tank, target_angle)
while not self.correct_angle(angle_tank, target_angle): | identifier_body |
ai.py | between two angles.
"""
import math
import pymunk
from pymunk import Vec2d
import gameobjects
from collections import defaultdict, deque
MIN_ANGLE_DIF = math.radians(5)
def angle_between_vectors(vec1, vec2):
"""
Since Vec2d operates in a cartesian coordinate space we have to
convert the resulting vector to get the correct angle for our space.
"""
vec = vec1 - vec2
vec = vec.perpendicular()
return vec.angle
def periodic_difference_of_angles(angle1, angle2):
return (angle1 % (2*math.pi)) - (angle2 % (2*math.pi))
class Ai:
"""
A simple ai that finds the shortest path to the target using
a breadth first search. Also capable of shooting other tanks and or
wooden boxes.
"""
def __init__(self, tank, game_objects_list, tanks_list, space, currentmap):
self.tank = tank
self.game_objects_list = game_objects_list
self.tanks_list = tanks_list
self.space = space
self.currentmap = currentmap
self.flag = None
self.MAX_X = currentmap.width - 1
self.MAX_Y = currentmap.height - 1
self.last_distance = 1
self.path = deque()
self.move_cycle = self.move_cycle_gen()
self.update_grid_pos()
def update_grid_pos(self):
"""
This should only be called in the beginning, or at the
end of a move_cycle.
"""
self.grid_pos = self.get_tile_of_position(self.tank.body.position)
def decide(self):
"""
Main decision function that gets called on every
tick of the game.
"""
self.maybe_shoot()
next(self.move_cycle)
pass
def maybe_shoot(self):
"""
Makes a raycast query in front of the tank. If another tank
or a wooden box is found, then we shoot.
"""
res = self.space.segment_query_first((self.tank.body.position[0] - \
0.6 * math.sin(self.tank.body.angle), self.tank.body.position[1] +\
0.6 * math.cos(self.tank.body.angle)), (self.tank.body.position[0] -\
10*math.sin(self.tank.body.angle), self.tank.body.position[1] + \
10*math.cos(self.tank.body.angle)), 0, pymunk.ShapeFilter())
if res is not None:
try:
if hasattr(res, 'shape'):
if isinstance(res.shape.parent, gameobjects.Tank):
bullet = self.tank.shoot(self.space)
if bullet is not None:
self.game_objects_list.append(bullet)
elif isinstance(res.shape.parent, gameobjects.Box):
if res.shape.parent.boxmodel.destructable is True:
bullet = self.tank.shoot(self.space)
if bullet is not None:
self.game_objects_list.append(bullet)
except:
pass
def move_cycle_gen(self):
"""
A generator that iteratively goes through all the required
steps to move to our goal.
"""
while True:
self.update_grid_pos()
path = self.find_shortest_path("without_metalbox")
if not path:
path = self.find_shortest_path("metalbox")
yield
if not path:
continue
next_coord = path.pop()
next_coord += Vec2d(0.5, 0.5)
yield
target_angle = \
angle_between_vectors(Vec2d(self.tank.body.position), next_coord)
angle_tank = self.tank.body.angle
self.turn(angle_tank, target_angle)
while not self.correct_angle(angle_tank, target_angle):
angle_tank = self.tank.body.angle
target_angle = \
angle_between_vectors(Vec2d(self.tank.body.position),
next_coord)
yield
self.tank.accelerate()
while not self.correct_pos(next_coord, self.last_distance):
yield
yield
def correct_pos(self, target_pos, last_distance):
"""
Checks if the tank is on the correct position, compared from the
last one.
"""
tank_pos = Vec2d(self.tank.body.position)
current_distance = target_pos.get_distance(tank_pos)
self.last_distance = current_distance
if last_distance < current_distance:
return True
else:
return False
def turn(self, tank_angle, target_angle):
"""
Finds the angle closest to next tile, and turns accordingly.
WIP: Sometimes it turns to the other side.
"""
angle_diff = periodic_difference_of_angles(tank_angle, target_angle)
if ((angle_diff + 2 * math.pi) % 2
* math.pi >= math.pi and abs(angle_diff) > MIN_ANGLE_DIF):
self.tank.stop_moving()
self.tank.turn_left()
elif ((angle_diff + 2 * math.pi) % 2 * math.pi
< math.pi and abs(angle_diff) > MIN_ANGLE_DIF):
self.tank.stop_moving()
self.tank.turn_right()
def correct_angle(self, tank_angle, target_angle):
"""
If the tank has the correct angle to the next tile; stop turning.
"""
angle_diff = periodic_difference_of_angles(target_angle, tank_angle)
if abs(angle_diff) <= MIN_ANGLE_DIF:
self.tank.stop_turning()
return True
else:
return False
def find_shortest_path(self, box_indicator):
"""
A simple Breadth First Search using integer coordinates as our
nodes. Edges are calculated as we go, using an external function.
"""
# To be implemented
dict = {}
shortest_path = []
visited = set()
queue = deque()
queue.append(self.grid_pos)
goal_node = None
while queue:
node = Vec2d(queue.popleft())
if node == self.get_target_tile().int_tuple:
goal_node = node.int_tuple
break
for neighbor in self.get_tile_neighbors(node, box_indicator):
neighbor = neighbor.int_tuple
if neighbor not in visited:
queue.append(neighbor)
visited.add(neighbor)
dict[neighbor] = node.int_tuple
if goal_node is None:
return deque([])
else:
key = goal_node
while key != self.grid_pos.int_tuple:
shortest_path.append(Vec2d(key))
parent_node = dict[key]
key = parent_node
return deque(shortest_path)
def get_target_tile(self):
"""
Returns position of the flag if we don't have it. If we
do have the flag, return the position of our home base.
"""
if self.tank.flag is not None:
x, y = self.tank.start_position
else:
self.get_flag() # Ensure that we have initialized it.
x, y = self.flag.x, self.flag.y
return Vec2d(int(x), int(y))
def get_flag(self):
"""
This has to be called to get the flag, since we don't know
where it is when the Ai object is initialized.
"""
if self.flag is None:
# Find the flag in the game objects list
for obj in self.game_objects_list:
if isinstance(obj, gameobjects.Flag):
self.flag = obj
break
return self.flag
def get_tile_of_position(self, position_vector):
"""
Converts and returns the float position of our tank to an
integer position.
"""
x, y = position_vector
return Vec2d(int(x), int(y))
def get_tile_neighbors(self, coord_vec, box_indicator):
"""
Returns all bordering grid squares of the input coordinate.
A bordering square is only considered accessible if it is grass
or a wooden box.
"""
neighbors = [] # Find the coordinates of the tiles' four neighbors
neighbors.append(coord_vec + Vec2d(1, 0))
neighbors.append(coord_vec + Vec2d(-1, 0))
neighbors.append(coord_vec + Vec2d(0, 1))
neighbors.append(coord_vec + Vec2d(0, -1))
if box_indicator == "without_metalbox":
return filter(self.filter_tile_neighbors, neighbors)
else:
return filter(self.filter_tile_neighbors_metalbox, neighbors)
def filter_tile_neighbors(self, coord):
"""
Filter for all the tiles around the tank. This filter removes
the immovable stones so we don't count those tiles to find the
shortest path.
"""
coord = coord.int_tuple
if coord[1] <= self.MAX_Y and coord[0] <= self.MAX_X and coord[1] >= \
0 and coord[0] >=\
0 and (self.currentmap.boxAt(coord[0], coord[1])
== 0 or self.currentmap.boxAt(coord[0], coord[1]) == 2):
return True
return False
def | (self, coord):
"""
Filter for all the tiles around the tank, metalboxes included. This
filter removes the immovable stones so we don't count those tiles to
find the shortest path.
"""
coord = coord.int_tuple
if coord[1] <= self.MAX_Y and coord[0] <= | filter_tile_neighbors_metalbox | identifier_name |
mole.go | struct {
Conf *Configuration
Tunnel *tunnel.Tunnel
sigs chan os.Signal
}
// New initializes a new mole's client.
func New(conf *Configuration) *Client {
cli = &Client{
Conf: conf,
sigs: make(chan os.Signal, 1),
}
return cli
}
// Start kicks off mole's client, establishing the tunnel and its channels
// based on the client configuration attributes.
func (c *Client) Start() error {
// memguard is used to securely keep sensitive information in memory.
// This call makes sure all data will be destroy when the program exits.
defer memguard.Purge()
if c.Conf.Id == "" {
u, err := uuid.NewV4()
if err != nil {
return fmt.Errorf("could not auto generate app instance id: %v", err)
}
c.Conf.Id = u.String()[:8]
}
r, err := c.Running()
if err != nil {
log.WithFields(log.Fields{
"id": c.Conf.Id,
}).WithError(err).Error("error while checking for another instance using the same id")
return err
}
if r {
log.WithFields(log.Fields{
"id": c.Conf.Id,
}).Error("can't start. Another instance is already using the same id")
return fmt.Errorf("can't start. Another instance is already using the same id %s", c.Conf.Id)
}
log.Infof("instance identifier is %s", c.Conf.Id)
if c.Conf.Detach {
var err error
ic, err := NewDetachedInstance(c.Conf.Id)
if err != nil {
log.WithError(err).Errorf("error while creating directory to store mole instance related files")
return err
}
err = startDaemonProcess(ic)
if err != nil {
log.WithFields(log.Fields{
"id": c.Conf.Id,
}).WithError(err).Error("error starting ssh tunnel")
return err
}
} else {
go c.handleSignals()
}
if c.Conf.Verbose {
log.SetLevel(log.DebugLevel)
}
d, err := fsutils.CreateInstanceDir(c.Conf.Id)
if err != nil {
log.WithFields(log.Fields{
"id": c.Conf.Id,
}).WithError(err).Error("error creating directory for mole instance")
return err
}
if c.Conf.Rpc {
addr, err := rpc.Start(c.Conf.RpcAddress)
if err != nil {
return err
}
rd := filepath.Join(d.Dir, "rpc")
err = ioutil.WriteFile(rd, []byte(addr.String()), 0644)
if err != nil {
log.WithFields(log.Fields{
"id": c.Conf.Id,
}).WithError(err).Error("error creating file with rpc address")
return err
}
c.Conf.RpcAddress = addr.String()
log.Infof("rpc server address saved on %s", rd)
}
t, err := createTunnel(c.Conf)
if err != nil {
log.WithFields(log.Fields{
"id": c.Conf.Id,
}).WithError(err).Error("error creating tunnel")
return err
}
c.Tunnel = t
if err = c.Tunnel.Start(); err != nil {
log.WithFields(log.Fields{
"tunnel": c.Tunnel.String(),
}).WithError(err).Error("error while starting tunnel")
return err
}
return nil
}
// Stop shuts down a detached mole's application instance.
func (c *Client) Stop() error {
pfp, err := fsutils.GetPidFileLocation(c.Conf.Id)
if err != nil {
return fmt.Errorf("error getting information about aliases directory: %v", err)
}
if _, err := os.Stat(pfp); os.IsNotExist(err) {
return fmt.Errorf("no instance of mole with id %s is running", c.Conf.Id)
}
cntxt := &daemon.Context{
PidFileName: pfp,
}
d, err := cntxt.Search()
if err != nil {
return err
}
if c.Conf.Detach {
err = os.RemoveAll(pfp)
if err != nil {
return err
}
} else {
d, err := fsutils.InstanceDir(c.Conf.Id)
if err != nil {
return err
}
err = os.RemoveAll(d.Dir)
if err != nil {
return err
}
}
err = d.Kill()
if err != nil {
return err
}
return nil
}
func (c *Client) handleSignals() {
signal.Notify(c.sigs, syscall.SIGINT, syscall.SIGTERM, os.Interrupt)
sig := <-c.sigs
log.Debugf("process signal %s received", sig)
err := c.Stop()
if err != nil {
log.WithError(err).Error("instance not properly stopped")
}
}
// Merge overwrites Configuration from the given Alias.
//
// Certain attributes like Verbose, Insecure and Detach will be overwritten
// only if they are found on the givenFlags which should contain the name of
// all flags given by the user through UI (e.g. CLI).
func (c *Configuration) Merge(al *alias.Alias, givenFlags []string) error {
var fl flags = givenFlags
if !fl.lookup("verbose") {
c.Verbose = al.Verbose
}
if !fl.lookup("insecure") {
c.Insecure = al.Insecure
}
if !fl.lookup("detach") {
c.Detach = al.Detach
}
c.Id = al.Name
c.TunnelType = al.TunnelType
srcl := AddressInputList{}
for _, src := range al.Source {
err := srcl.Set(src)
if err != nil {
return err
}
}
c.Source = srcl
dstl := AddressInputList{}
for _, dst := range al.Destination {
err := dstl.Set(dst)
if err != nil {
return err
}
}
c.Destination = dstl
srv := AddressInput{}
err := srv.Set(al.Server)
if err != nil {
return err
}
c.Server = srv
c.Key = al.Key
kai, err := time.ParseDuration(al.KeepAliveInterval)
if err != nil {
return err
}
c.KeepAliveInterval = kai
c.ConnectionRetries = al.ConnectionRetries
war, err := time.ParseDuration(al.WaitAndRetry)
if err != nil {
return err
}
c.WaitAndRetry = war
c.SshAgent = al.SshAgent
tim, err := time.ParseDuration(al.Timeout)
if err != nil {
return err
}
c.Timeout = tim
c.SshConfig = al.SshConfig
c.Rpc = al.Rpc
c.RpcAddress = al.RpcAddress
return nil
}
// ShowInstances returns the runtime information about all instances of mole
// running on the system with rpc enabled.
func ShowInstances() (*InstancesRuntime, error) {
ctx := context.Background()
data, err := rpc.ShowAll(ctx)
if err != nil {
return nil, err
}
var instances []Runtime
err = mapstructure.Decode(data, &instances)
if err != nil {
return nil, err
}
runtime := InstancesRuntime(instances)
if len(runtime) == 0 {
return nil, fmt.Errorf("no instances were found.")
}
return &runtime, nil
}
// ShowInstance returns the runtime information about an application instance
// from the given id or alias.
func ShowInstance(id string) (*Runtime, error) {
ctx := context.Background()
info, err := rpc.Show(ctx, id)
if err != nil {
return nil, err
}
var r Runtime
err = mapstructure.Decode(info, &r)
if err != nil {
return nil, err
}
return &r, nil
}
func startDaemonProcess(instanceConf *DetachedInstance) error {
args := appendIdArg(instanceConf.Id, os.Args)
cntxt := &daemon.Context{
PidFileName: instanceConf.PidFile,
PidFilePerm: 0644,
LogFileName: instanceConf.LogFile,
LogFilePerm: 0640,
Umask: 027,
Args: args,
}
d, err := cntxt.Reborn()
if err != nil {
return err
}
if d != nil {
err = os.Rename(instanceConf.PidFile, instanceConf.PidFile)
if err != nil {
return err
}
err = os.Rename(instanceConf.LogFile, instanceConf.LogFile)
if err != nil {
return err
}
log.Infof("execute \"mole stop %s\" if you like to stop it at any time", instanceConf.Id)
os.Exit(0)
}
defer func() {
err := cntxt.Release()
if err != nil {
log.WithFields(log.Fields{
"id": instanceConf.Id,
}).WithError(err).Error("error detaching the mole instance")
}
}()
return nil
}
type flags []string
func (fs flags) lookup(flag string) bool {
for _, f := range fs {
if flag == f {
return true
}
}
return false
}
func | createTunnel | identifier_name | |
mole.go | configuration.
type Client struct {
Conf *Configuration
Tunnel *tunnel.Tunnel
sigs chan os.Signal
}
// New initializes a new mole's client.
func New(conf *Configuration) *Client {
cli = &Client{
Conf: conf,
sigs: make(chan os.Signal, 1),
}
return cli
}
// Start kicks off mole's client, establishing the tunnel and its channels
// based on the client configuration attributes.
func (c *Client) Start() error {
// memguard is used to securely keep sensitive information in memory.
// This call makes sure all data will be destroy when the program exits.
defer memguard.Purge()
if c.Conf.Id == "" {
u, err := uuid.NewV4()
if err != nil {
return fmt.Errorf("could not auto generate app instance id: %v", err)
}
c.Conf.Id = u.String()[:8]
}
r, err := c.Running()
if err != nil {
log.WithFields(log.Fields{
"id": c.Conf.Id,
}).WithError(err).Error("error while checking for another instance using the same id")
return err
}
if r {
log.WithFields(log.Fields{
"id": c.Conf.Id,
}).Error("can't start. Another instance is already using the same id")
return fmt.Errorf("can't start. Another instance is already using the same id %s", c.Conf.Id)
}
log.Infof("instance identifier is %s", c.Conf.Id)
if c.Conf.Detach {
var err error
ic, err := NewDetachedInstance(c.Conf.Id)
if err != nil {
log.WithError(err).Errorf("error while creating directory to store mole instance related files")
return err
}
err = startDaemonProcess(ic)
if err != nil {
log.WithFields(log.Fields{
"id": c.Conf.Id,
}).WithError(err).Error("error starting ssh tunnel")
return err
}
} else {
go c.handleSignals()
}
if c.Conf.Verbose {
log.SetLevel(log.DebugLevel)
}
d, err := fsutils.CreateInstanceDir(c.Conf.Id)
if err != nil {
log.WithFields(log.Fields{
"id": c.Conf.Id,
}).WithError(err).Error("error creating directory for mole instance")
return err
}
if c.Conf.Rpc {
addr, err := rpc.Start(c.Conf.RpcAddress)
if err != nil {
return err
}
rd := filepath.Join(d.Dir, "rpc")
err = ioutil.WriteFile(rd, []byte(addr.String()), 0644)
if err != nil {
log.WithFields(log.Fields{
"id": c.Conf.Id,
}).WithError(err).Error("error creating file with rpc address")
return err
}
c.Conf.RpcAddress = addr.String()
log.Infof("rpc server address saved on %s", rd)
}
t, err := createTunnel(c.Conf)
if err != nil {
log.WithFields(log.Fields{
"id": c.Conf.Id,
}).WithError(err).Error("error creating tunnel")
return err
}
c.Tunnel = t
if err = c.Tunnel.Start(); err != nil {
log.WithFields(log.Fields{
"tunnel": c.Tunnel.String(),
}).WithError(err).Error("error while starting tunnel")
return err
}
return nil
}
// Stop shuts down a detached mole's application instance.
func (c *Client) Stop() error {
pfp, err := fsutils.GetPidFileLocation(c.Conf.Id)
if err != nil {
return fmt.Errorf("error getting information about aliases directory: %v", err)
}
if _, err := os.Stat(pfp); os.IsNotExist(err) {
return fmt.Errorf("no instance of mole with id %s is running", c.Conf.Id)
}
cntxt := &daemon.Context{
PidFileName: pfp,
}
d, err := cntxt.Search()
if err != nil {
return err
}
if c.Conf.Detach {
err = os.RemoveAll(pfp)
if err != nil {
return err
}
} else {
d, err := fsutils.InstanceDir(c.Conf.Id)
if err != nil {
return err
}
err = os.RemoveAll(d.Dir)
if err != nil {
return err
}
}
err = d.Kill()
if err != nil {
return err
}
return nil
}
func (c *Client) handleSignals() {
signal.Notify(c.sigs, syscall.SIGINT, syscall.SIGTERM, os.Interrupt)
sig := <-c.sigs
log.Debugf("process signal %s received", sig)
err := c.Stop()
if err != nil {
log.WithError(err).Error("instance not properly stopped")
}
}
// Merge overwrites Configuration from the given Alias.
//
// Certain attributes like Verbose, Insecure and Detach will be overwritten
// only if they are found on the givenFlags which should contain the name of
// all flags given by the user through UI (e.g. CLI).
func (c *Configuration) Merge(al *alias.Alias, givenFlags []string) error {
var fl flags = givenFlags
if !fl.lookup("verbose") {
c.Verbose = al.Verbose
}
if !fl.lookup("insecure") {
c.Insecure = al.Insecure
}
if !fl.lookup("detach") {
c.Detach = al.Detach
}
c.Id = al.Name
c.TunnelType = al.TunnelType
srcl := AddressInputList{}
for _, src := range al.Source {
err := srcl.Set(src)
if err != nil {
return err
}
}
c.Source = srcl
dstl := AddressInputList{}
for _, dst := range al.Destination {
err := dstl.Set(dst)
if err != nil {
return err
}
}
c.Destination = dstl
srv := AddressInput{}
err := srv.Set(al.Server)
if err != nil {
return err
}
c.Server = srv
c.Key = al.Key
kai, err := time.ParseDuration(al.KeepAliveInterval)
if err != nil {
return err
}
c.KeepAliveInterval = kai
c.ConnectionRetries = al.ConnectionRetries
war, err := time.ParseDuration(al.WaitAndRetry)
if err != nil {
return err
}
c.WaitAndRetry = war
c.SshAgent = al.SshAgent
tim, err := time.ParseDuration(al.Timeout)
if err != nil {
return err
}
c.Timeout = tim
c.SshConfig = al.SshConfig
c.Rpc = al.Rpc
c.RpcAddress = al.RpcAddress
return nil
}
// ShowInstances returns the runtime information about all instances of mole
// running on the system with rpc enabled.
func ShowInstances() (*InstancesRuntime, error) {
ctx := context.Background()
data, err := rpc.ShowAll(ctx)
if err != nil {
return nil, err
}
var instances []Runtime
err = mapstructure.Decode(data, &instances)
if err != nil {
return nil, err
}
runtime := InstancesRuntime(instances)
if len(runtime) == 0 {
return nil, fmt.Errorf("no instances were found.")
}
return &runtime, nil
}
// ShowInstance returns the runtime information about an application instance
// from the given id or alias.
func ShowInstance(id string) (*Runtime, error) {
ctx := context.Background()
info, err := rpc.Show(ctx, id)
if err != nil {
return nil, err
}
var r Runtime
err = mapstructure.Decode(info, &r)
if err != nil {
return nil, err
}
return &r, nil
}
func startDaemonProcess(instanceConf *DetachedInstance) error {
args := appendIdArg(instanceConf.Id, os.Args)
cntxt := &daemon.Context{
PidFileName: instanceConf.PidFile,
PidFilePerm: 0644,
LogFileName: instanceConf.LogFile,
LogFilePerm: 0640,
Umask: 027,
Args: args,
}
d, err := cntxt.Reborn()
if err != nil {
return err
}
if d != nil {
err = os.Rename(instanceConf.PidFile, instanceConf.PidFile)
if err != nil {
return err
}
err = os.Rename(instanceConf.LogFile, instanceConf.LogFile)
if err != nil {
return err
}
log.Infof("execute \"mole stop %s\" if you like to stop it at any time", instanceConf.Id)
os.Exit(0)
}
defer func() {
err := cntxt.Release()
if err != nil {
log.WithFields(log.Fields{
"id": instanceConf.Id,
}).WithError(err).Error("error detaching the mole instance")
}
}()
return nil
}
type flags []string
func (fs flags) lookup(flag string) bool | {
for _, f := range fs {
if flag == f {
return true
}
}
return false
} | identifier_body | |
mole.go | KeepAliveInterval time.Duration `json:"keep-alive-interval" mapstructure:"keep-alive-interva" toml:"keep-alive-interval"`
ConnectionRetries int `json:"connection-retries" mapstructure:"connection-retries" toml:"connection-retries"`
WaitAndRetry time.Duration `json:"wait-and-retry" mapstructure:"wait-and-retry" toml:"wait-and-retry"`
SshAgent string `json:"ssh-agent" mapstructure:"ssh-agent" toml:"ssh-agent"`
Timeout time.Duration `json:"timeout" mapstructure:"timeout" toml:"timeout"`
SshConfig string `json:"ssh-config" mapstructure:"ssh-config" toml:"ssh-config"`
Rpc bool `json:"rpc" mapstructure:"rpc" toml:"rpc"`
RpcAddress string `json:"rpc-address" mapstructure:"rpc-address" toml:"rpc-address"`
}
// ParseAlias translates a Configuration object to an Alias object.
func (c Configuration) ParseAlias(name string) *alias.Alias {
return &alias.Alias{
Name: name,
TunnelType: c.TunnelType,
Verbose: c.Verbose,
Insecure: c.Insecure,
Detach: c.Detach,
Source: c.Source.List(),
Destination: c.Destination.List(),
Server: c.Server.String(),
Key: c.Key,
KeepAliveInterval: c.KeepAliveInterval.String(),
ConnectionRetries: c.ConnectionRetries,
WaitAndRetry: c.WaitAndRetry.String(),
SshAgent: c.SshAgent,
Timeout: c.Timeout.String(),
SshConfig: c.SshConfig,
Rpc: c.Rpc,
RpcAddress: c.RpcAddress,
}
}
// Client manages the overall state of the application based on its configuration.
type Client struct {
Conf *Configuration
Tunnel *tunnel.Tunnel
sigs chan os.Signal
}
// New initializes a new mole's client.
func New(conf *Configuration) *Client {
cli = &Client{
Conf: conf,
sigs: make(chan os.Signal, 1),
}
return cli
}
// Start kicks off mole's client, establishing the tunnel and its channels
// based on the client configuration attributes.
func (c *Client) Start() error {
// memguard is used to securely keep sensitive information in memory.
// This call makes sure all data will be destroy when the program exits.
defer memguard.Purge()
if c.Conf.Id == "" {
u, err := uuid.NewV4()
if err != nil {
return fmt.Errorf("could not auto generate app instance id: %v", err)
}
c.Conf.Id = u.String()[:8]
}
r, err := c.Running()
if err != nil {
log.WithFields(log.Fields{
"id": c.Conf.Id,
}).WithError(err).Error("error while checking for another instance using the same id")
return err
}
if r {
log.WithFields(log.Fields{
"id": c.Conf.Id,
}).Error("can't start. Another instance is already using the same id")
return fmt.Errorf("can't start. Another instance is already using the same id %s", c.Conf.Id)
}
log.Infof("instance identifier is %s", c.Conf.Id)
if c.Conf.Detach {
var err error
ic, err := NewDetachedInstance(c.Conf.Id)
if err != nil {
log.WithError(err).Errorf("error while creating directory to store mole instance related files")
return err
}
err = startDaemonProcess(ic)
if err != nil {
log.WithFields(log.Fields{
"id": c.Conf.Id,
}).WithError(err).Error("error starting ssh tunnel")
return err
}
} else {
go c.handleSignals()
}
if c.Conf.Verbose {
log.SetLevel(log.DebugLevel)
}
d, err := fsutils.CreateInstanceDir(c.Conf.Id)
if err != nil {
log.WithFields(log.Fields{
"id": c.Conf.Id,
}).WithError(err).Error("error creating directory for mole instance")
return err
}
if c.Conf.Rpc {
addr, err := rpc.Start(c.Conf.RpcAddress)
if err != nil {
return err
}
rd := filepath.Join(d.Dir, "rpc")
err = ioutil.WriteFile(rd, []byte(addr.String()), 0644)
if err != nil {
log.WithFields(log.Fields{
"id": c.Conf.Id,
}).WithError(err).Error("error creating file with rpc address")
return err
}
c.Conf.RpcAddress = addr.String()
log.Infof("rpc server address saved on %s", rd)
}
t, err := createTunnel(c.Conf)
if err != nil {
log.WithFields(log.Fields{
"id": c.Conf.Id,
}).WithError(err).Error("error creating tunnel")
return err
}
c.Tunnel = t
if err = c.Tunnel.Start(); err != nil {
log.WithFields(log.Fields{
"tunnel": c.Tunnel.String(),
}).WithError(err).Error("error while starting tunnel")
return err
}
return nil
}
// Stop shuts down a detached mole's application instance.
func (c *Client) Stop() error {
pfp, err := fsutils.GetPidFileLocation(c.Conf.Id)
if err != nil {
return fmt.Errorf("error getting information about aliases directory: %v", err)
}
if _, err := os.Stat(pfp); os.IsNotExist(err) |
cntxt := &daemon.Context{
PidFileName: pfp,
}
d, err := cntxt.Search()
if err != nil {
return err
}
if c.Conf.Detach {
err = os.RemoveAll(pfp)
if err != nil {
return err
}
} else {
d, err := fsutils.InstanceDir(c.Conf.Id)
if err != nil {
return err
}
err = os.RemoveAll(d.Dir)
if err != nil {
return err
}
}
err = d.Kill()
if err != nil {
return err
}
return nil
}
func (c *Client) handleSignals() {
signal.Notify(c.sigs, syscall.SIGINT, syscall.SIGTERM, os.Interrupt)
sig := <-c.sigs
log.Debugf("process signal %s received", sig)
err := c.Stop()
if err != nil {
log.WithError(err).Error("instance not properly stopped")
}
}
// Merge overwrites Configuration from the given Alias.
//
// Certain attributes like Verbose, Insecure and Detach will be overwritten
// only if they are found on the givenFlags which should contain the name of
// all flags given by the user through UI (e.g. CLI).
func (c *Configuration) Merge(al *alias.Alias, givenFlags []string) error {
var fl flags = givenFlags
if !fl.lookup("verbose") {
c.Verbose = al.Verbose
}
if !fl.lookup("insecure") {
c.Insecure = al.Insecure
}
if !fl.lookup("detach") {
c.Detach = al.Detach
}
c.Id = al.Name
c.TunnelType = al.TunnelType
srcl := AddressInputList{}
for _, src := range al.Source {
err := srcl.Set(src)
if err != nil {
return err
}
}
c.Source = srcl
dstl := AddressInputList{}
for _, dst := range al.Destination {
err := dstl.Set(dst)
if err != nil {
return err
}
}
c.Destination = dstl
srv := AddressInput{}
err := srv.Set(al.Server)
if err != nil {
return err
}
c.Server = srv
c.Key = al.Key
kai, err := time.ParseDuration(al.KeepAliveInterval)
if err != nil {
return err
}
c.KeepAliveInterval = kai
c.ConnectionRetries = al.ConnectionRetries
war, err := time.ParseDuration(al.WaitAndRetry)
if err != nil {
return err
}
c.WaitAndRetry = war
c.SshAgent = al.SshAgent
tim, err := time.ParseDuration(al.Timeout)
if err != nil {
return err
}
c.Timeout = tim
c.SshConfig = al.SshConfig
c.Rpc = al.Rpc
c.RpcAddress = al.RpcAddress
return nil
}
// ShowInstances returns the runtime information about all instances of mole
// running on the system with rpc enabled.
func ShowInstances() (*InstancesRuntime, error) {
ctx := context.Background()
data, err := rpc.ShowAll(ctx)
if err != nil {
return nil, err
}
var instances []Runtime
err = mapstructure.Decode(data, &instances)
if err != nil {
return nil, err
}
runtime := InstancesRuntime(instances)
if len(runtime) == 0 {
return nil, | {
return fmt.Errorf("no instance of mole with id %s is running", c.Conf.Id)
} | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.