text
stringlengths
1
1.05M
/******************************************************************************
 * Copyright 2011 Kitware Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *****************************************************************************/

#include "ButtonDelegate.h"
#include "ButtonLineEdit.h"
#include "ButtonEditUI.h"

#include <QBoxLayout>
#include <QPushButton>
#include <QLineEdit>

// Item delegate whose in-place editor is a ButtonLineEdit bound to a
// MIDAS item/field pair.
ButtonDelegate::ButtonDelegate(QObject* parent) : QItemDelegate(parent)
{
  m_EditUI = NULL;
  m_Item = NULL;
  // NOTE(review): m_Field is left uninitialized; callers appear to be
  // expected to call setField() before an editor is created — confirm, or
  // default it to a sentinel value.
}

ButtonDelegate::~ButtonDelegate()
{
}

// Set the tree item whose field this delegate edits.
void ButtonDelegate::setItem(MidasItemTreeItem* item)
{
  m_Item = item;
}

// Set which MIDAS field the editor operates on.
void ButtonDelegate::setField(MIDASFields field)
{
  m_Field = field;
}

// Set the edit dialog launched by the editor's button.
void ButtonDelegate::setEditUI(ButtonEditUI* editUI)
{
  m_EditUI = editUI;
}

// Create the in-place editor widget for a cell.
QWidget * ButtonDelegate::createEditor(QWidget* parent,
                                       const QStyleOptionViewItem& option,
                                       const QModelIndex& index) const
{
  (void)option;
  (void)index;
  return new ButtonLineEdit(m_Item, m_Field, m_EditUI, parent);
}

// Copy the model's display text into the editor.
void ButtonDelegate::setEditorData(QWidget* editor, const QModelIndex& index) const
{
  ButtonLineEdit* edit = static_cast<ButtonLineEdit *>(editor);
  std::string value = index.model()->data(
    index, Qt::DisplayRole).toString().toStdString();
  edit->setData(value.c_str() );
}

// Write the editor's text back into the model.
// Fix: removed the stray `(void)index;` — `index` IS used on the next line,
// so the unused-parameter suppression was stale and misleading.
void ButtonDelegate::setModelData(QWidget* editor, QAbstractItemModel* model,
                                  const QModelIndex& index) const
{
  ButtonLineEdit* edit = static_cast<ButtonLineEdit *>(editor);
  model->setData(index, edit->getData(), Qt::EditRole);
}

// Resize the editor to fill the cell rectangle.
void ButtonDelegate::updateEditorGeometry(QWidget* editor,
                                          const QStyleOptionViewItem & option,
                                          const QModelIndex& index) const
{
  (void)index;
  editor->setGeometry(option.rect);
}
import { NgModule } from '@angular/core'; import { Routes, RouterModule } from '@angular/router'; import { StageModule } from './stage/stage.module'; import { BrowserModule } from '@angular/platform-browser'; import { BrowserAnimationsModule } from '@angular/platform-browser/animations'; const routes: Routes = [ { path: '', loadChildren: () => import('./stage/stage.module').then(mod => mod.StageModule)}, // { path: 'sample', component: SampleDemoComponent }, // { path: 'user', loadChildren: () => import('./user/user.module').then(mod => mod.UserModule)}, ]; @NgModule({ imports: [ RouterModule.forRoot(routes) ], exports: [RouterModule] }) export class AppRoutingModule { }
import numpy as np


def backward_mapping(backward_img, output):
    """Append ``backward_img`` to ``output`` as a new leading-axis slice.

    ``backward_img`` gains a length-1 leading axis and is concatenated onto
    ``output`` along axis 0, so the result holds one more slice than
    ``output`` and its last slice equals ``backward_img``.
    """
    expanded = backward_img[np.newaxis, ...]
    return np.concatenate((output, expanded), axis=0)


def bw_mapping(bw_map, image, device):
    """Perform the backward mapping operation on ``image`` using ``bw_map``.

    NOTE(review): ``device`` is accepted but never used — presumably a
    leftover from a GPU/torch variant; confirm before removing it.
    """
    return backward_mapping(bw_map, image)
package ca.nova.gestion.controller;

import ca.nova.gestion.model.Maintenance;
import ca.nova.gestion.services.MaintenanceService;
import org.springframework.http.MediaType;
import org.springframework.validation.annotation.Validated;
import org.springframework.web.bind.annotation.*;

import java.util.Map;

/**
 * REST controller exposing CRUD endpoints for {@link Maintenance} records.
 * All responses are serialized as JSON.
 */
@RestController
@RequestMapping(produces = MediaType.APPLICATION_JSON_VALUE)
public class MaintenanceController {

    // Service layer handling persistence of maintenance records.
    private final MaintenanceService maintenanceService;

    public MaintenanceController(MaintenanceService maintenanceService) {
        this.maintenanceService = maintenanceService;
    }

    /**
     * POST /v1/maintenance
     * Creates a maintenance record in the database.
     * @param maintenance the maintenance payload to persist
     * @return a map with key "idMaintenance" holding the id of the inserted record
     */
    @PostMapping("/v1/maintenance")
    public Map<String, Integer> insertMaintenance(@RequestBody @Validated Maintenance maintenance) {
        return Map.of("idMaintenance", maintenanceService.insertMaintenance(maintenance));
    }

    /**
     * PUT /v1/maintenance
     * Updates an existing maintenance record in the database.
     * @param maintenance the maintenance payload to update
     */
    @PutMapping("/v1/maintenance")
    public void updateMaintenance(@RequestBody @Validated Maintenance maintenance) {
        maintenanceService.updateMaintenance(maintenance);
    }

    /**
     * DELETE /v1/maintenance/{idMaintenance}
     * Deletes the maintenance record with the given id from the database.
     * @param idMaintenance id of the record to delete
     */
    @DeleteMapping("/v1/maintenance/{idMaintenance}")
    public void deleteMaintenance(@PathVariable @Validated int idMaintenance) {
        maintenanceService.deleteMaintenance(idMaintenance);
    }
}
#####################################################
# Source code https://github.com/end222/pacmenu
# Updated by Afiniel for crypto use...
#####################################################

source /etc/functions.sh

RESULT=$(dialog --stdout --nocancel --default-item 1 --title "Afiniel Yiimp installer" --menu "Choose one" -1 60 16 \
  ' ' "- Installation or upgrade -" \
  1 "Install YiiMP" \
  ' ' "- Upgrade Yiimp -" \
  2 "YiiMP Stratum Upgrade" \
  ' ' "- Daemon Wallet Builder -" \
  3 "Daemonbuilder" \
  4 Exit)

# Redisplay the menu when a separator row (blank tag) was chosen or dialog
# returned nothing. Fix: the original `if [ $RESULT = ]` only "worked" by
# accident — with an empty/blank $RESULT the unquoted expansion left `[ = ]`,
# which test treats as a non-empty string, i.e. true.
if [ -z "${RESULT// /}" ]; then
  exec bash "$0"
fi

# Dispatch on the selected menu tag.
case "$RESULT" in
  1)
    clear
    cd "$HOME/multipool/install"
    source bootstrap_single.sh
    ;;
  2)
    clear
    cd "$HOME/multipool/install"
    source bootstrap_upgrade.sh
    ;;
  3)
    clear
    cd "$HOME/multipool/install"
    source bootstrap_coin.sh
    ;;
  4)
    clear
    exit
    ;;
esac
-- Fetch all New York customers ordered by age.
-- Fix: `INDEX BY city` is not valid SQL — indexes are created separately
-- (e.g. `CREATE INDEX idx_customers_city ON customers (city);`), never as
-- part of a SELECT statement.
SELECT *
FROM customers
WHERE city = 'New York'
ORDER BY age;
// Re-export everything from the built React entry point.
export * from './dist/react'
from django.conf.urls import include
from django.conf.urls import url
from django.views.generic import TemplateView
from rest_framework.documentation import include_docs_urls
from authmultitoken.views import (
    manage_tokens, create_token, manage_token, add_restriction
)
from .views import login_and_agree, nginx_auth_probe, download, download_schema
from smartcambridge.decorator import smartcambridge_valid_user

api_description = '''
Programmatic access to data held by the Smartcambridge project.

See [the main API documentation](/api/) for important information about
using this API, **in particular about the need for authentication**.
'''

# These are the URL patterns for which documentation should
# be generated
docpatterns = [
    url(r'^api/v1/parking/', include('parking.api.urls')),
    url(r'^api/v1/traffic/', include('traffic.api.urls')),
    url(r'^api/v1/aq/', include('aq.api.urls')),
    # Import transport views previously served under /transport/api/
    url(r'^api/v1/transport/', include('transport.api.urls')),
]

# These are all the URLs. The near-duplication of the URLs above
# is regretted
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name="api/index.html"),
        name="api_home"),
    url(r'^program/$', TemplateView.as_view(template_name="api/program_api.html"),
        name="program_api"),
    # NOTE(review): the two download patterns below lack a leading '^' anchor,
    # so they match any URL *ending* in "download/" — confirm whether that is
    # intentional before anchoring them.
    url(r'download/$', download, name='download_api'),
    # NOTE(review): the name "downlod_schema" looks like a typo of
    # "download_schema", but renaming would break any reverse() lookups that
    # use the current spelling — confirm callers before fixing.
    url(r'download/(?P<feed>[-\w]+)-schema/$', download_schema,
        name="downlod_schema"),
    url(r'login-and-agree/', login_and_agree, name="login-and-agree"),
    # Define these here, rather then via include('authmultitoken.html_urls',
    # so we can wrap them with smartcambridge_valid_user()
    url(r'^tokens/', smartcambridge_valid_user(manage_tokens),
        name='manage_tokens'),
    url(r'^create-token/', smartcambridge_valid_user(create_token),
        name='create_token'),
    url(r'^manage-token/(?P<token_id>[-\w]+)',
        smartcambridge_valid_user(manage_token), name='manage_token'),
    url(r'^add-restriction/(?P<token_id>[-\w]+)',
        smartcambridge_valid_user(add_restriction), name='add_restriction'),
    url(r'^nginx-auth-probe/', nginx_auth_probe, name='nginx_auth_probe'),
    url(r'^auth/', include('api.auth_urls')),
    url(r'^docs/', include_docs_urls(
        title='SmartCambridge API',
        description=api_description,
        patterns=docpatterns)),
    url(r'^v1/parking/', include('parking.api.urls')),
    url(r'^v1/traffic/', include('traffic.api.urls')),
    url(r'^v1/aq/', include('aq.api.urls')),
    # Import transport views previously served under /transport/api/
    url(r'^v1/transport/', include('transport.api.urls')),
]
// surveyJS object options, can be customized for a specific questionnaire by id
export default {
  // Options applied to every questionnaire unless overridden by id below.
  "default": {
    showQuestionNumbers: 'off',        // hide automatic question numbering
    completeText: 'Submit',
    clearInvisibleValues: 'onHidden',  // drop answers when their question is hidden
    requiredText: '',
    completedHtml: '<h3>The screening is complete.</h3><h3>You may now close the window.</h3>'
  },
  // Overrides for the WHO AUDIT alcohol-screening questionnaire.
  "WHOAUDIT": {
    title: "Alcohol Screening",
    logo: "favicon.png",
    logoWidth: 70,
    logoHeight: 60
  }
};
#pragma once

namespace glow::vector
{
// Text alignment expressed as bit flags: one horizontal bit (left/center/right)
// OR-ed with one vertical bit (top/middle/bottom/baseline). The named
// combinations below cover the common pairings.
enum class text_align
{
    // horizontal
    left = 1 << 0,
    center = 1 << 1,
    right = 1 << 2,

    // vertical
    top = 1 << 3,
    middle = 1 << 4,
    bottom = 1 << 5,
    baseline = 1 << 6,

    // special (pre-combined horizontal | vertical values)
    top_left = top | left,
    top_center = top | center,
    top_right = top | right,

    middle_left = middle | left,
    middle_center = middle | center,
    middle_right = middle | right,

    bottom_left = bottom | left,
    bottom_center = bottom | center,
    bottom_right = bottom | right,

    baseline_left = baseline | left,
    baseline_center = baseline | center,
    baseline_right = baseline | right,
};
}
import pandas as pd
import numpy as np

# Load the data
ratings_df = pd.read_csv('ratings.csv')

# Build the user x item rating matrix once and reuse it.
# Fix: the original built the identical pivot table twice (item_matrix and
# user_item_matrix were the same computation).
item_matrix = ratings_df.pivot_table(index='user_id', columns='item_id', values='rating')

# Item-item proximity: pairwise Pearson correlation between item columns.
item_prox_matrix = item_matrix.corr(method='pearson')

# Score every item for every user.
user_item_matrix = np.array(item_matrix)
predictions = np.dot(user_item_matrix, item_prox_matrix)
predictions = pd.DataFrame(predictions, columns=item_matrix.columns, index=item_matrix.index)

# Identify personalized recommendations: top 20 scored items per user.
# Fix: DataFrame.append() was removed in pandas 2.0; collect the per-user
# frames and concatenate once at the end (also avoids O(n^2) copying).
frames = []
for user_id in ratings_df.user_id.unique():
    user_recommendations = predictions.loc[user_id].sort_values(ascending=False).head(20)
    frames.append(pd.DataFrame({
        'user_id': [user_id] * len(user_recommendations),
        'item_id': user_recommendations.index,
        'prediction': user_recommendations.values,
    }))
if frames:
    recommendations = pd.concat(frames, ignore_index=True)
else:
    recommendations = pd.DataFrame(columns=['user_id', 'item_id', 'prediction'])

# Print the personalized recommendations
print(recommendations)
from fastapi import FastAPI, HTTPException
import requests
import datetime
from datetime import timedelta
from starlette.responses import RedirectResponse
from urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter

# Shared session that retries transient server errors.
s = requests.Session()
retries = Retry(total=5, backoff_factor=0.1,
                status_forcelist=[500, 502, 503, 504])
s.mount('https://', HTTPAdapter(max_retries=retries))

app = FastAPI()

# cache[platform][version] -> {'ts': datetime, 'url': str, 'build_number': int}
cache = {}


@app.get("/api/micromamba/{platform}/{version}")
def get_micromamba(platform, version='latest'):
    """Redirect to the newest micromamba download for platform/version.

    Results fetched from the Anaconda.org API are cached for 30 minutes;
    a stale cache entry is still served if the API refresh fails.
    Raises HTTPException(404) when nothing is known for the request.
    """
    cache_ts = cache.get(platform, {}).get(version, {}).get('ts', None)
    if cache_ts:
        if (datetime.datetime.now() - cache_ts) <= timedelta(minutes=30):
            url = cache[platform][version]['url']
            print(f"Cache hit: {int((datetime.datetime.now() - cache_ts).seconds / 60)}: {url}")
            return RedirectResponse(url)

    url = f"https://api.anaconda.org/release/conda-forge/micromamba/{version}"
    print("Getting Anaconda.org API")
    r = s.get(url, timeout=10)
    if r.ok:
        rj = r.json()
        tmp_cache = {}
        for d in rj["distributions"]:
            dplat = d["attrs"]["subdir"]
            cache_dict = {
                'ts': datetime.datetime.now(),
                'url': d["download_url"],
                'build_number': d.get("attrs", {}).get("build_number", 0)
            }
            if dplat in tmp_cache:
                # Keep only the highest build number per platform.
                if cache_dict['build_number'] > tmp_cache[dplat][version]['build_number']:
                    tmp_cache[dplat][version] = cache_dict
            else:
                tmp_cache[dplat] = {version: cache_dict}
        for plat in tmp_cache:
            # Fix: the original tested `version in cache`, i.e. it looked for
            # a version string among the *platform* keys of the top-level
            # cache, so existing platform entries were clobbered instead of
            # merged. The correct membership test is on the platform key.
            if plat in cache:
                cache[plat][version] = tmp_cache[plat][version]
            else:
                cache[plat] = tmp_cache[plat]

    # Check if we still have something in our cache (possibly stale).
    url = cache.get(platform, {}).get(version, {}).get('url')
    if url:
        ts = datetime.datetime.now() - cache[platform][version]['ts']
        print(f"Returning {platform}/{version}. Serving {url}. Age: {ts.seconds // 60}mn")
        return RedirectResponse(url=url)
    raise HTTPException(status_code=404, detail=f"No version found for {platform}/{version}")
import type { v1Connector } from './v1Connector';

/** Response payload for the v1 "get connector" API endpoint. */
export declare type v1GetConnectorResponse = {
  /** The requested connector, when one was found. */
  connector?: v1Connector;
};
#!/bin/bash
# Build the attr_value unit test.
# Fix: with GNU ld, libraries named with -l must appear AFTER the sources or
# objects that reference them, otherwise their symbols may not be resolved.
g++ -std=c++11 -I.. -I../.. attr_value_test.cc -o xx -L../lib -lopenmi_idl
#!/bin/bash

# set initial values
SUBDIR="end"
NO_INIT=0
CLEAN=0
FLAGS=""

# read options
# Fix: -f/--flags is handled in the case statement below, but "f:" was
# missing from the short-option string, so `-f value` failed to parse.
# Also abort when getopt itself rejects the arguments.
TEMP=$(getopt -o s:f:ncl: --long subdir:,flags:,no-init,clean,lab: -n "$0" -- "$@")
if [ $? -ne 0 ]; then
    echo "Failed to parse options" >&2
    exit 1
fi
eval set -- "$TEMP"

# extract options and their arguments
while true ; do
    case "$1" in
        -s|--subdir)
            case "$2" in
                "") shift 2 ;;
                *) SUBDIR=$2 ; shift 2 ;;
            esac ;;
        -f|--flags)
            case "$2" in
                "") shift 2 ;;
                *) FLAGS=$2 ; shift 2 ;;
            esac ;;
        -n|--no-init) NO_INIT=1 ; shift ;;
        -c|--clean) CLEAN=1 ; shift ;;
        -l|--lab)
            case "$2" in
                "") shift 2 ;;
                *) LAB=$2 ; shift 2 ;;
            esac ;;
        --) shift ; break ;;
        *) echo "Internal error!" ; exit 1 ;;
    esac
done

# --clean removes every generated lab copy and exits.
if [ "$CLEAN" -eq 1 ]; then
    rm -rf xv6-public-*
    exit 0
fi

if [ -z "${LAB:-}" ]; then
    echo "usage: $0 --lab lab [--subdir subdir] [--no-init] [--clean]"
    exit 1
fi

DST=xv6-public-$(basename "$LAB")
mkdir -p "$DST"

# Unless --no-init was given, start from a pristine copy of xv6-public.
if [ $NO_INIT -eq 0 ]; then
    rm -rf "$DST"
    cp -r xv6-public "$DST"
fi

# Overlay the lab's files, then build and boot in QEMU.
cp -r "$LAB"/"$SUBDIR"/* "$DST"
cd "$DST"
make SCHEDPOLICY="$FLAGS"
make qemu-nox
exit 0
import numpy as np
import matplotlib.pyplot as plt

# Parameters
N = 1000     # Total population size
I0 = 1       # Initial number of infected individuals
S0 = N - I0  # Initial number of susceptible individuals
R0 = 0       # Initial number of recovered individuals
beta = 0.3   # Transmission rate
gamma = 0.1  # Recovery rate
t_max = 200  # Simulation time (number of unit time steps)

# SIR model equations: returns the per-step derivatives (dS, dI, dR).
# NOTE(review): reads the global N rather than taking it as a parameter, and
# the R argument is unused (dR depends only on I) — intentional for the
# standard SIR form, but worth confirming.
def sir_model(S, I, R, beta, gamma):
    dSdt = -beta * S * I / N
    dIdt = beta * S * I / N - gamma * I
    dRdt = gamma * I
    return dSdt, dIdt, dRdt

# Simulation: forward-Euler integration with step size 1.
S, I, R = S0, I0, R0
susceptible, infected, recovered = [S], [I], [R]
for t in range(t_max):
    dS, dI, dR = sir_model(S, I, R, beta, gamma)
    S += dS
    I += dI
    R += dR
    susceptible.append(S)
    infected.append(I)
    recovered.append(R)

# Plotting the three compartments over time.
plt.plot(susceptible, label='Susceptible')
plt.plot(infected, label='Infected')
plt.plot(recovered, label='Recovered')
plt.xlabel('Time')
plt.ylabel('Population')
plt.legend()
plt.title('SIR Model Simulation\n' + r'$\beta$=' + f'{beta}' + r' | $\gamma$=' + f'{gamma} | ' + r'$I_{0}=$' + f'{I0}' + r' | $S_{0}=$' + f'{S0}')
plt.savefig('SIR.png')
plt.show()
// Fetch a JSON resource with a bearer token.
//
// Fix: the original body called `request(options)`, but `request` resolves to
// this very wrapper (the const shadows any library of the same name), so the
// function recursed into itself forever instead of performing an HTTP call.
// Reimplemented on the built-in `https` module, keeping the same
// (url, token) => Promise<parsed JSON body> contract.
const https = require('node:https');

const request = (url, token) => {
  return new Promise((resolve, reject) => {
    const options = {
      headers: {
        Authorization: `Bearer ${token}`
      }
    };
    https
      .get(url, options, (resp) => {
        let body = '';
        resp.on('data', (chunk) => { body += chunk; });
        resp.on('end', () => {
          try {
            resolve(JSON.parse(body));
          } catch (err) {
            reject(err);
          }
        });
      })
      .on('error', reject);
  });
};
import { ChangeDetectionStrategy, Component } from '@angular/core';

// Account page of the user-management feature; all content lives in the
// template, so the class body is intentionally empty.
@Component({
  selector: 'app-user-management-account',
  templateUrl: './user-management-account.component.html',
  styleUrls: ['./user-management-account.component.scss'],
  changeDetection: ChangeDetectionStrategy.OnPush,
})
export class UserManagementAccountComponent {}
#ifndef NITROGEN_UTIL_H
#define NITROGEN_UTIL_H

#include <cstdio>
#include <cstdlib>
#include <cstring>

#define TEMP_BUF_SIZE 256

// Print a red error message to stderr and terminate the process.
// Fix: wrapped in do { } while (0) so the macro expands to a single
// statement. The original expanded to four bare statements, which silently
// breaks when used as the body of an unbraced if/else (only the first
// fprintf would be conditional; exit(1) would always run).
#define error(...) \
    do { \
        fprintf(stderr, "\033[1;31m"); \
        fprintf(stderr, __VA_ARGS__); \
        fprintf(stderr, "\033[0m"); \
        exit(1); \
    } while (0)

namespace Nitrogen {
    // Static utility helpers for file, number and buffer handling.
    class Util {
    public:
        // Files
        // Read the whole file at `path`; `length` receives the byte count.
        static char* readFile(const char* path, int* length);

        // Numbers
        static bool isNumber(const char* str);
        static int toNum(const char* str, int base);

        // Buffers
        // Allocate / reset a TEMP_BUF_SIZE scratch buffer.
        static char* createTempBuffer();
        static void clearTempBuffer(char* buf);
    };
}

#endif // NITROGEN_UTIL_H
import re

# Matches an optional "doi:" scheme prefix at the start of a string.
_DOI_PREFIX = re.compile(r'^doi:')


def norm_doi(doi):
    """Normalize a DOI string.

    Trims surrounding whitespace, lowercases, and strips a single leading
    'doi:' prefix if present.
    """
    cleaned = doi.strip().lower()
    return _DOI_PREFIX.sub('', cleaned)
#!/bin/bash
#
# ======= TEMPLATE GAMS-CPLEX Header ========
# No printf parameters
#
# Simple BASH script to run and time a series of GAMS jobs to compare the run
# time of binary vs clustered unit commitment both with and without capacity
# expansion decisions
#
# To actually submit the job use:
#   qsub SCRIPT_NAME
#
# Version History
#  Ver  Date       Time   Who            What
#  ---  ---------- -----  -------------- ---------------------------------
#    1  2011-10-08 04:20  bpalmintier    Adapted from pbs_time1.sh v4
#    2  2011-10-08 21:00  bpalmintier    Implemented use of scratch space

#========= Setup Job Queue Parameters ==========
# IMPORTANT: The lines beginning #PBS set various queuing parameters, they are not simple comments
#
# name of submitted job, also name of output file unless specified.
# The default job name is the name of this script, so here we suppress the job
# naming so we get unique names for all of our jobs
##PBS -N matlab_pbs
#
# Ask for 1 node with 8 processors. This may or may not give exclusive access
# to a machine, but typically the queueing system will assign the 8 core
# machines first.
#
# By requiring 20GB we ensure we get one of the machines with 24GB (or maybe a 12 core unit)
#PBS -l nodes=1:ppn=8,mem=20gb
#
# This option merges any error messages into output file
#PBS -j oe
#
# Select the queue based on maximum run times. Options are:
#    short    2hr
#    medium   8hr
#    long    24hr
#    xlong   48hr, extendable to 168hr using -l walltime= option below
#PBS -q long
# And up the run time to the maximum of a full week (168 hrs)
##PBS -l walltime=168:00:00

echo "Node list:"
cat $PBS_NODEFILE
echo "Disk usage:"
df -h

#Set things up to load modules
source /etc/profile.d/modules.sh
#Load recent version of GAMS
module load gams/23.6.3
#Set path to gams in environment variable so MATLAB can read it
GAMS=`which gams`
export GAMS
#And load CPLEX
module load cplex

#Establish a working directory in scratch
#Will give error if it already exists, but script continues anyway
mkdir /scratch/b_p
#Clean anything out of our scratch folder (Assumes exclusive machine usage)
rm -r /scratch/b_p/*
#Make a new subfolder for this job
SCRATCH="/scratch/b_p/${PBS_JOBID}"
mkdir $SCRATCH

#Establish our model directory
MODEL_DIR="${HOME}/projects/advpower/models/capplan/"

#----------------------------
# Setup gams options
#----------------------------
DATE_TIME=`date +%y%m%d-%H%M`
ADVPOWER_REPO_VER=`svnversion ~/projects/advpower`
echo "Date & Time:" ${DATE_TIME}
echo "SVN Repository Version:" ${ADVPOWER_REPO_VER}

GAMS_MODEL="StaticCapPlan"

#=== END HEADER ===

#======= Shared Setup =======
OUT_DIR="${HOME}/projects/advpower/results/gams/"
#Make sure output directory exists.
#Fix: use -p so an already-existing directory is not an error.
mkdir -p ${OUT_DIR}

# Default GAMS options to:
#   errmsg:   enable in-line description of errors in list file
#   lf & lo:  store the solver log (normally printed to screen) in $OUT_DIR
#   o:        rename the list file and store in $OUT_DIR
#   inputdir: Look for $include and $batinclude files in $WORK_DIR
# And Advanced Power Model options to:
#   out_dir:    specify directory for CSV output files
#   out_prefix: add a unique run_id to all output files
#   memo:       encode some helpful run information in the summary file
COMMON_OPTIONS=" -errmsg=1 -lo=2 -inputdir=${MODEL_DIR} --out_dir=${OUT_DIR} --no_nse=1 --sys=mftest_sys.inc --gens=gens_all_new.inc --rps=0.2 --co2cost=80 --demscale=0.2"
LONG_OPTS=" --par_threads=2 --lp_method=6 --max_solve_time=36000 "

# Run one GAMS case in the background.
#   $1 = RUN_CODE  unique id used in work-dir, log, list and output names
#   $2 = RUN_OPTS  full GAMS/model option string for this case
# The four run sections below were copy-pasted variants of the same 15 lines;
# factoring them into this helper keeps each case to a single call.
run_gams_case() {
    local RUN_CODE="$1"
    local RUN_OPTS="$2"

    #Make a temporary run directory in scratch
    local WORK_DIR="${SCRATCH}/tmp_${RUN_CODE}/"
    mkdir ${WORK_DIR}
    cp ${MODEL_DIR}${GAMS_MODEL}.gms ${WORK_DIR}
    cd ${WORK_DIR}
    echo "${GAMS_MODEL} copied to temporary ${WORK_DIR}"
    pwd

    # Setup run specific options (intentionally unquoted below so GAMS sees
    # each option as a separate argument)
    local GAMS_OPTIONS=" -lf=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.log -o=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.lst ${RUN_OPTS}"

    #Now run GAMS-CPLEX in the background
    echo "Running ${GAMS_MODEL} using GAMS"
    echo " Options: ${GAMS_OPTIONS}"
    echo .
    gams ${GAMS_MODEL} ${GAMS_OPTIONS} --out_prefix=${RUN_CODE}_ --memo=${RUN_CODE}_v${ADVPOWER_REPO_VER}_${DATE_TIME} &
    echo "GAMS Done (${RUN_CODE})"
    echo .
    cd ${MODEL_DIR}
    pwd
}

#======= Reserves with UC Ops =======
run_gams_case "TX07_yr_x0_2_rps20_rsrv_uc" "${COMMON_OPTIONS} --rsrv=flex --unit_commit=1"

#======= Ramp + Reserves UC(minout) Ops =======
run_gams_case "TX07_yr_x0_2_rps20_rsrv_ramp_uc" "${COMMON_OPTIONS} ${LONG_OPTS} --ramp=1 --unit_commit=1 --rsrv=flex"

#======= Minout-only (UC) Ops =======
run_gams_case "TX07_yr_x0_2_rps20_uc" "${COMMON_OPTIONS} --unit_commit=1"

#======= Max Startup =======
run_gams_case "TX07_yr_x0_2_rps20_maxstart" "${COMMON_OPTIONS} ${LONG_OPTS} --max_start=1 --startup=1"

#=== Footer Template ====
# No printf parameters
#
# Version History
#  Ver  Date       Time   Who            What
#  ---  ---------- -----  -------------- ---------------------------------
#    1  2011-10-08 04:20  bpalmintier    Adapted from pbs_time1.sh v4
#    2  2011-10-08 21:00  bpalmintier    Implemented use of scratch space

#Wait until all background jobs are complete
wait

#See how much disk space we used
df -h

#Clean-up scratch space
echo "Cleaning up our Scratch Space"
cd
rm -r /scratch/b_p/*
df -h

echo "Script Complete ${PBS_JOBID}"
#!/usr/bin/env bash
# https://gist.github.com/mrbar42/ae111731906f958b396f30906004b3fa#file-create-vod-hls-sh
# https://docs.peer5.com/guides/production-ready-hls-vod/
set -e

# Usage create-vod-hls.sh SOURCE_FILE [OUTPUT_NAME]
[[ ! "${1}" ]] && echo "Usage: create-vod-hls.sh SOURCE_FILE [OUTPUT_NAME]" && exit 1

# comment/add lines here to control which renditions would be created
renditions=(
# resolution  bitrate  audio-rate
# "426x240 400k 64k"
  "300x300 800k 96k"
  "400x400 1400k 128k"
  "400x400 2200k 192k"
  "400x400 3000k 192k"
)

segment_target_duration=4       # try to create a new segment every X seconds
max_bitrate_ratio=1.07          # maximum accepted bitrate fluctuations
rate_monitor_buffer_ratio=1.5   # maximum buffer size between bitrate conformance checks

#########################################################################

source="${1}"
target="${2}"
if [[ ! "${target}" ]]; then
  target="${source##*/}" # leave only last component of path
  target="${target%.*}"  # strip extension
fi
mkdir -p "${target}"

# key_frames_interval="$(echo `ffprobe ${source} 2>&1 | grep -oE '[[:digit:]]+(.[[:digit:]]+)? fps' | grep -oE '[[:digit:]]+(.[[:digit:]]+)?'`*2 | bc || echo '')"
# https://stackoverflow.com/questions/51080992/ffmpeg-converting-webm-videos-generated-by-chrome-is-slow
# force to 30
key_frames_interval=30

key_frames_interval=${key_frames_interval:-50}
key_frames_interval=$(echo `printf "%.1f\n" $(bc -l <<<"$key_frames_interval/10")`*10 | bc) # round
key_frames_interval=${key_frames_interval%.*} # truncate to integer

# static parameters that are similar for all renditions
static_params=""
static_params+=" -c:a aac -ar 48000 -c:v h264 -profile:v main -pix_fmt yuv420p -crf 20 -sc_threshold 0"
static_params+=" -g ${key_frames_interval} -keyint_min ${key_frames_interval} -hls_time ${segment_target_duration}"
static_params+=" -hls_playlist_type vod"

# misc params
misc_params="-hide_banner -y"

master_playlist="#EXTM3U
#EXT-X-VERSION:3
"
cmd=""
for rendition in "${renditions[@]}"; do
  # rendition fields.
  # Fix: the original used `${rendition/[[:space:]]+/ }` to collapse runs of
  # whitespace, but `+` has no repeat meaning in glob patterns, so nothing was
  # collapsed and the `cut -d ' '`-based extraction picked up empty fields
  # whenever a rendition entry was aligned with multiple spaces. `read -r`
  # splits on arbitrary whitespace and is immune to alignment.
  read -r resolution bitrate audiorate <<< "${rendition}"

  # calculated fields
  width="$(echo ${resolution} | grep -oE '^[[:digit:]]+')"
  height="$(echo ${resolution} | grep -oE '[[:digit:]]+$')"
  maxrate="$(echo "`echo ${bitrate} | grep -oE '[[:digit:]]+'`*${max_bitrate_ratio}" | bc)"
  bufsize="$(echo "`echo ${bitrate} | grep -oE '[[:digit:]]+'`*${rate_monitor_buffer_ratio}" | bc)"
  bandwidth="$(echo ${bitrate} | grep -oE '[[:digit:]]+')000"
  name="${height}p_${bitrate}"

  cmd+=" ${static_params} -vf scale=-2:${height},crop=${width}:${height}"
  cmd+=" -b:v ${bitrate} -maxrate ${maxrate%.*}k -bufsize ${bufsize%.*}k -b:a ${audiorate}"
  cmd+=" -hls_segment_filename ${target}/${name}_%05d.ts ${target}/${name}.m3u8"

  # add rendition entry in the master playlist
  master_playlist+="#EXT-X-STREAM-INF:BANDWIDTH=${bandwidth},RESOLUTION=${resolution}\n${name}.m3u8\n"
done

# start conversion
echo -e "Executing command:\nffmpeg ${misc_params} -i ${source} -r 30 ${cmd}"
ffmpeg ${misc_params} -i ${source} -r 30 ${cmd}

# create master playlist file
echo -e "${master_playlist}" > ${target}/playlist.m3u8

echo "Done - encoded HLS is at ${target}/"
#!/bin/bash

# Remove the KF5 install prefix. Refuses to run when $KF5 is unset so the
# script never expands to a bare `rm -Rf` of the wrong path.
if [ -z "$KF5" ]; then
    echo "KF5 env var not set"
    exit 1
fi

rm -Rf "$KF5"
#!/bin/bash

## Assumptions:
## * The repository containing this script is on the revision we want to release.
## * The changelog is up to date and committed.

set -euo pipefail

if [ $# -ne 1 ]; then
    echo "Usage: ./prepare-release.sh 19.06.1"
    exit 1
fi

# Parse version parameter
VERSION=$1
if [[ ! "$VERSION" =~ ^[1-9][0-9]\.[0-9][0-9]\.[0-9]+$ ]]; then
    echo "Unrecognized version number '$VERSION'. Expected the format YY.MM.x (e.g. 19.06.0)."
    exit 1
fi
YEAR=${VERSION:0:2}
MONTH=${VERSION:3:2}
MAJOR="$YEAR.$MONTH"
MINOR=${VERSION##$MAJOR.}
BRANCH="release-$MAJOR"
TAG="release-$VERSION"
# The first release of a cycle (x.y.0) is presented as just "x.y".
if [[ $MINOR = 0 ]]; then
    PRETTY_VERSION="$MAJOR"
else
    PRETTY_VERSION="$VERSION"
fi

# Set directories (quoted so paths with spaces survive the dirname calls)
SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
REPODIR="$(dirname "$(dirname "$SCRIPTDIR")")"

pushd "$REPODIR"

# Verify that repository is clean
if [[ $(hg st) ]]; then
    echo "Repository is dirty. Please commit before preparing a release."
    exit 1
fi

# Verify that DOWNWARD_CONTAINER_REPO exists
# Fix: corrected "envronment" typo in the user-facing message.
if [ ! -d "$DOWNWARD_CONTAINER_REPO/.git" ]; then
    echo "Please set the environment variable DOWNWARD_CONTAINER_REPO to the path of a local clone of https://github.com/aibasel/downward."
    exit 1
fi

# Render $SCRIPTDIR/templates/$TEMPLATE with $PARAMETER replaced by $VALUE.
function fill_template {
    TEMPLATE=$1
    PARAMETER=$2
    VALUE=$3
    sed -e "s/$PARAMETER/$VALUE/g" $SCRIPTDIR/templates/$TEMPLATE
}

# Write driver/version.py from its template and commit the change.
function set_and_commit_version {
    LOCALVERSION=$1
    fill_template _version.tpl VERSION "$LOCALVERSION" > $REPODIR/driver/version.py
    hg commit -m "Update version number to $LOCALVERSION."
}

# Render a container recipe for $MAJOR and point the "latest" symlink at it.
# NOTE(review): this helper is never called below — the recipe files are
# generated inline in the container-repo section. Confirm whether it can be
# deleted.
function create_recipe_and_link_latest {
    CONTAINERTYPE=$1
    PARAMETER=$2
    VALUE=$3
    fill_template "_$CONTAINERTYPE.tpl" "$PARAMETER" "$VALUE" > $MAJOR/$CONTAINERTYPE.$MAJOR
    ln -fs $MAJOR/$CONTAINERTYPE.$MAJOR latest/$CONTAINERTYPE
}

set -x

# Create the branch if it doesn't exist already.
if [[ $(hg branches | grep "^$BRANCH ") ]]; then
    if [[ $MINOR = 0 ]]; then
        echo "The version number '$VERSION' implies that this is the first release in branch '$BRANCH' but the branch already exists."
        exit 1
    fi
    if [[ "$(hg branch)" != "$BRANCH" ]]; then
        echo "It looks like we want to do a bugfix release, but we are not on the branch '$BRANCH'. Update to the branch head first."
        exit 1
    fi
else
    if [[ $MINOR != 0 ]]; then
        echo "The version number '$VERSION' implies a bugfix release but there is no branch '$BRANCH' yet."
        exit 1
    fi
    hg branch "$BRANCH"
    hg commit -m "Create branch $BRANCH."
fi

# Update version number.
set_and_commit_version "$PRETTY_VERSION"

# Tag release.
hg tag $TAG -m "Create tag $TAG."

# Back on the default branch, update version number.
if [[ $MINOR = 0 ]]; then
    hg update default
    set_and_commit_version "${MAJOR}+"
fi

# Create tarball.
hg archive -r $TAG -X .hg_archival.txt -X .hgignore \
    -X .hgtags -X .uncrustify.cfg -X bitbucket-pipelines.yml \
    -X experiments/ -X misc/ --type tgz \
    fast-downward-$PRETTY_VERSION.tar.gz

# Generate the different recipe files for Docker, Singularity and Vagrant.
pushd "$DOWNWARD_CONTAINER_REPO"
mkdir -p $MAJOR
fill_template "_Dockerfile.tpl" "TAG" "$TAG" > $MAJOR/Dockerfile.$MAJOR
fill_template "_Singularity.tpl" "MAJOR" "$MAJOR" > $MAJOR/Singularity.$MAJOR
fill_template "_Vagrantfile.tpl" "TAG" "$TAG" > $MAJOR/Vagrantfile.$MAJOR
git add $MAJOR
mkdir -p latest
ln -fs ../$MAJOR/Dockerfile.$MAJOR latest/Dockerfile
ln -fs ../$MAJOR/Singularity.$MAJOR latest/Singularity
ln -fs ../$MAJOR/Vagrantfile.$MAJOR latest/Vagrantfile
git add latest
git commit -m "Add recipe files for release $VERSION."
popd

popd

cat << EOF
Successfully prepared tag $TAG.

Please take the following steps to verify the release:
 * Check that fast-downward-$PRETTY_VERSION.tar.gz contains the correct files
 * Check that the branches and tags were created as intended
 * Check that $DOWNWARD_CONTAINER_REPO has a commit with the correct container recipe files.

Once you are satisfied with everything, execute the following commands to publish the build:

  cd $REPODIR
  hg push
  misc/release/push-docker.sh $MAJOR
  git -C $DOWNWARD_CONTAINER_REPO push
EOF
// Jest configuration for the Redwood "web" side: jsdom-like environment,
// module aliasing so tests share single copies of react/apollo, and mocks
// for the router and static assets.
const path = require('path')
const { getPaths } = require('@redwoodjs/internal')

const rwjsPaths = getPaths()
const NODE_MODULES_PATH = path.join(rwjsPaths.base, 'node_modules')

module.exports = {
  roots: ['<rootDir>/src/'],
  testEnvironment: path.join(__dirname, './RedwoodWebJestEnv.js'),
  displayName: {
    color: 'blueBright',
    name: 'web',
  },
  // Build-time globals normally injected by the Redwood toolchain.
  globals: {
    __REDWOOD_API_URL: '',
    __REDWOOD_API_GRAPHQL_SERVER_PATH: '/',
    __REDWOOD__APP_TITLE: 'Redwood App',
  },
  setupFilesAfterEach: undefined, // (not used; see setupFilesAfterEnv below)
  setupFilesAfterEnv: [path.resolve(__dirname, './jest.setup.js')],
  moduleNameMapper: {
    /**
     * Make sure modules that require different versions of these
     * dependencies end up using the same one.
     */
    '^react$': path.join(NODE_MODULES_PATH, 'react'),
    '^react-dom$': path.join(NODE_MODULES_PATH, 'react-dom'),
    '^@apollo/client/react$': path.join(
      NODE_MODULES_PATH,
      '@apollo/client/react'
    ),
    // We replace imports to "@redwoodjs/router" with our own "mock" implementation.
    '^@redwoodjs/router$': path.join(
      NODE_MODULES_PATH,
      '@redwoodjs/testing/dist/web/MockRouter.js'
    ),
    '^@redwoodjs/web$': path.join(NODE_MODULES_PATH, '@redwoodjs/web'),
    // @NOTE: Import @redwoodjs/testing in web tests, and it automatically remaps to the web side only
    // This is to prevent web stuff leaking into api, and vice versa
    '^@redwoodjs/testing$': path.join(
      NODE_MODULES_PATH,
      '@redwoodjs/testing/web'
    ),
    '~__REDWOOD__USER_ROUTES_FOR_MOCK': rwjsPaths.web.routes,
    /**
     * Mock out files that aren't particularly useful in tests. See fileMock.js for more info.
     */
    '\\.(jpg|jpeg|png|gif|eot|otf|webp|svg|ttf|woff|woff2|mp4|webm|wav|mp3|m4a|aac|oga|css)$':
      '@redwoodjs/testing/dist/web/fileMock.js',
  },
  transform: {
    '\\.[jt]sx?$': [
      'babel-jest',
      // When jest runs tests in parallel, it serializes the config before passing down options to babel
      // that's why these must be serializable. Passing the reference to a config instead.
      {
        configFile: path.resolve(__dirname, './webBabelConfig.js'),
      },
    ],
  },
}
<filename>mtp_api/apps/credit/tests/test_views/test_credit_list/test_security_credit_list/test_credit_list_amount_pattern.py import random from credit.tests.test_views.test_credit_list.test_security_credit_list import SecurityCreditListTestCase class AmountPatternCreditListTestCase(SecurityCreditListTestCase): def test_exclude_amount_pattern_filter_endswith_multiple(self): self._test_response_with_filters({ 'exclude_amount__endswith': ['000', '500'], }) def test_exclude_amount_pattern_filter_regex(self): self._test_response_with_filters({ 'exclude_amount__regex': '^.*000$', }) def test_amount_pattern_filter_endswith(self): self._test_response_with_filters({ 'amount__endswith': '000', }) def test_amount_pattern_filter_endswith_multiple(self): self._test_response_with_filters({ 'amount__endswith': ['000', '500'], }) def test_amount_pattern_filter_regex(self): self._test_response_with_filters({ 'amount__regex': '^.*000$', }) def test_amount_pattern_filter_less_than_regex(self): self._test_response_with_filters({ 'amount__lte': 5000, 'amount__regex': '^.*00$', }) def test_amount_pattern_filter_range(self): self._test_response_with_filters({ 'amount__gte': 5000, 'amount__lte': 10000, }) def test_amount_pattern_filter_exact(self): random_amount = random.choice(self.credits).amount self._test_response_with_filters({ 'amount': random_amount, })
<gh_stars>1-10 import React from 'react'; import { useCookies } from 'react-cookie'; import { Route, Redirect } from 'react-router-dom'; export default function AuthenticationRoute({ component: Component, ...rest }) { const [cookie] = useCookies(['StartupTrajectoryPredictor']); return ( <Route {...rest} render={props => // authentication conditional // COOKIE CHECK cookie['StartupTrajectoryPredictor'] ? ( <Component {...props} /> ) : ( <Redirect to='/' /> ) } /> ); }
'use strict'; const defaultConditions = { location: { lat: 0, lng: 0 }, maxDistance: 100, limit: 20, query: {}, }; module.exports = options => { const find = conditions => new Promise((resolve, reject) => { const { location, maxDistance, limit, query } = Object.assign({}, defaultConditions, conditions); options.model.aggregate([{ $geoNear: { near: { type: 'Point', coordinates: [location.lng, location.lat] }, distanceField: 'distanceFromLocation', maxDistance, limit, query, spherical: true, } }], (err, results) => { return (err) ? reject(err) : resolve(results); }); }); return { find }; };
#!/bin/bash # LICENSE UPL 1.0 # # Copyright (c) 1982-2016 Oracle and/or its affiliates. All rights reserved. # # Since: November, 2016 # Author: gerald.venzl@oracle.com, eric.clement@juxta.fr (11.2.0.4 version) # Description: Creates an Oracle Database based on following parameters: # $ORACLE_SID: The Oracle SID name # $ORACLE_PWD: The Oracle password # $ORACLE_MEM: The Oracle total memory # # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. # echo -e "\033[32mstarting bash script $0\033[0m" set -e # Check whether ORACLE_SID is passed on export ORACLE_SID=${1:-ORCL} echo "ORACLE_SID=$ORACLE_SID" # Auto generate ORACLE PWD if not passed on export ORACLE_PWD=${2:-"`openssl rand -base64 8`1"} echo -e "ORACLE PASSWORD FOR SYS, SYSTEM : \033[0;31m$ORACLE_PWD\033[0m"; # If there is greater than 8 CPUs default back to dbca memory calculations # dbca will automatically pick 40% of available memory for Oracle DB # The minimum of 2G is for small environments to guarantee that Oracle has enough memory to function # However, bigger environment can and should use more of the available memory # This is due to Github Issue #307 if [ `nproc` -gt 8 ]; then DBCA_TOTAL_MEMORY="-totalMemory 2048" echo "Instance memory forced to 2048 Mb, nproc=`nproc`" elif [ -z "$3" ]; then DBCA_TOTAL_MEMORY="" echo "Instance memory not defined, set to 2048 Mb" else DBCA_TOTAL_MEMORY="-totalMemory $3" echo "Instance memory set to $3 Mb" fi; # Create network related config files (sqlnet.ora, tnsnames.ora, listener.ora) mkdir -p $ORACLE_HOME/network/admin echo "NAME.DIRECTORY_PATH= (TNSNAMES, EZCONNECT, HOSTNAME)" > $ORACLE_HOME/network/admin/sqlnet.ora # Listener.ora echo "LISTENER = (DESCRIPTION_LIST = (DESCRIPTION = (ADDRESS = (PROTOCOL = IPC)(KEY = EXTPROC1)) (ADDRESS = (PROTOCOL = TCP)(HOST = 0.0.0.0)(PORT = 1521)) ) ) DEDICATED_THROUGH_BROKER_LISTENER=ON DIAG_ADR_ENABLED = off " > $ORACLE_HOME/network/admin/listener.ora echo "DBCONTROL=$DBCONTROL" if [ $DBCONTROL == "true" ]; then 
EM_CONFIGURATION=LOCAL else EM_CONFIGURATION=NONE fi # Start LISTENER and run DBCA lsnrctl start && dbca -silent -createDatabase -templateName General_Purpose.dbc -gdbname ${ORACLE_SID} -sid ${ORACLE_SID} -responseFile NO_VALUE -characterSet $ORACLE_CHARACTERSET $DBCA_TOTAL_MEMORY -emConfiguration ${EM_CONFIGURATION} -dbsnmpPassword ${ORACLE_PWD} -sysmanPassword ${ORACLE_PWD} -sysPassword ${ORACLE_PWD} -systemPassword ${ORACLE_PWD} -initparams java_jit_enabled=FALSE,audit_trail=NONE,audit_sys_operations=FALSE,nls_language="FRENCH",nls_territory="FRANCE",processes=300,sessions=335 -sampleSchema false|| cat /opt/oracle/cfgtoollogs/dbca/$ORACLE_SID/$ORACLE_SID.log || cat /opt/oracle/cfgtoollogs/dbca/$ORACLE_SID.log #### echo "$ORACLE_SID=localhost:1521/$ORACLE_SID" > $ORACLE_HOME/network/admin/tnsnames.ora echo "$ORACLE_SID= (DESCRIPTION = (ADDRESS = (PROTOCOL = TCP)(HOST = 0.0.0.0)(PORT = 1521)) (CONNECT_DATA = (SERVER = DEDICATED) (SERVICE_NAME = $ORACLE_SID) ) )" >> $ORACLE_HOME/network/admin/tnsnames.ora #echo -e "ALTER SYSTEM SET LOCAL_LISTENER='(ADDRESS = (PROTOCOL = TCP)(HOST = $(hostname))(PORT = 1521))' SCOPE=BOTH;\n ALTER SYSTEM REGISTER;\n EXIT" | ${ORACLE_HOME}/bin/sqlplus -s -l / as sysdba # Remove second control file, fix local_listener, enable EM global port sqlplus / as sysdba << EOF ALTER SYSTEM SET control_files='$ORACLE_BASE/oradata/$ORACLE_SID/control01.ctl' scope=spfile; --EXEC DBMS_XDB_CONFIG.SETGLOBALPORTENABLED (TRUE); exit; EOF
import { BaseAbility, BaseModifier, registerAbility, registerModifier } from "../../../lib/dota_ts_adapter" interface SaiInnate extends CDOTABaseAbility { ApplyDebuff(target: CDOTA_BaseNPC): void; } interface extra { rat_eid?: EntityIndex; enemy_eid?: EntityIndex; } interface rat extends CDOTA_BaseNPC { emitted_sound?: boolean } @registerAbility() export class sai_rat_reconnaissance extends BaseAbility { Precache(context: CScriptPrecacheContext): void{ PrecacheResource("particle", "particles/units/heroes/sai/sai_rat_reconnaissance_cast.vpcf", context); PrecacheResource("particle", "particles/units/heroes/sai/sai_rat_reconnaissance_impact.vpcf", context); PrecacheResource("soundfile", "soundevents/heroes/sai/game_sounds_sai.vsndevts", context); PrecacheResource("soundfile", "soundevents/heroes/sai/sai_rats_talking.vsndevts", context); //PrecacheResource("soundfile", "soundevents/heroes/sai/game_sounds_vo_sai.vsndevts", context); } /****************************************/ OnAbilityPhaseStart(): boolean { EmitSoundOn("Hero_Sai.RatReconnaissance.PreCast", this.GetCaster()); EmitSoundOn("sai_rats_talking", this.GetCaster()); return true } /****************************************/ OnSpellStart(): void { let caster = this.GetCaster(); let origin = caster.GetAbsOrigin(); let target_count = this.GetSpecialValueFor("max_targets") + caster.FindTalentValue("special_bonus_sai_3"); let spawn_pos = origin + caster.GetForwardVector() * 40 as Vector let rat_projectile: CreateTrackingProjectileOptions = { Ability: this, EffectName: "", Target: undefined, bDodgeable: false, bDrawsOnMinimap: false, bProvidesVision: true, iMoveSpeed: this.GetSpecialValueFor("speed"), iVisionRadius: 25, iVisionTeamNumber: caster.GetTeamNumber(), vSourceLoc: spawn_pos, ExtraData: { rat_eid: -1, enemy_eid: -1 } } let enemies = FindUnitsInRadius( caster.GetTeamNumber(), origin, undefined, this.GetEffectiveCastRange(origin, caster), UnitTargetTeam.ENEMY, UnitTargetType.HERO, UnitTargetFlags.NO_INVIS, 
FindOrder.CLOSEST, false ) let cast_fx = ParticleManager.CreateParticle("particles/units/heroes/sai/sai_rat_reconnaissance_cast.vpcf", ParticleAttachment.CUSTOMORIGIN, undefined); ParticleManager.SetParticleControl(cast_fx, 0, origin + caster.GetForwardVector() * 75 as Vector); ParticleManager.SetParticleControlForward(cast_fx, 0, caster.GetForwardVector()); ParticleManager.ReleaseParticleIndex(cast_fx); EmitSoundOn("Hero_Sai.RatReconnaissance.Cast", caster); for (let [k, enemy] of Object.entries(enemies)) { CreateUnitByNameAsync("npc_dota_sai_rat", spawn_pos, false, undefined, undefined, caster.GetTeamNumber(), (unit: rat) => { unit.AddNewModifier(caster, this, "modifier_sai_super_beast_drawing_rat", {duration: -1}); unit.SetForwardVector(-(enemy.GetAbsOrigin() - caster.GetAbsOrigin() as Vector).Normalized() as Vector); unit.StartGesture(GameActivity.DOTA_RUN); unit.emitted_sound = false; rat_projectile.Target = enemy; (rat_projectile.ExtraData! as extra).rat_eid = unit.entindex(); (rat_projectile.ExtraData! as extra).enemy_eid = enemy.entindex(); ProjectileManager.CreateTrackingProjectile(rat_projectile); target_count--; }); if (target_count <= 0) break; } } /****************************************/ OnProjectileThink_ExtraData(location: Vector, extraData: extra): void { let rat = EntIndexToHScript(extraData.rat_eid!) as rat; rat?.SetAbsOrigin(GetGroundPosition(location, rat)); if (!rat.emitted_sound) { rat.emitted_sound = true; EmitSoundOn("Hero_Sai.RatReconnaissance.Rat", rat); } let enemy = EntIndexToHScript(extraData.enemy_eid!) 
as CDOTA_BaseNPC; if (enemy && !enemy.IsNull()) { rat.SetForwardVector(-(enemy.GetAbsOrigin() - rat.GetAbsOrigin() as Vector).Normalized() as Vector); } } /****************************************/ OnProjectileHit_ExtraData(target: CDOTA_BaseNPC | undefined, location: Vector, extraData: extra): boolean | void { if (!target) { this.KillRat(extraData.rat_eid!); return true; } let caster = this.GetCaster(); let innate = caster.FindAbilityByName("sai_innate_passive") as SaiInnate; let vision_duration = this.GetSpecialValueFor("vision_duration") + caster.FindTalentValue("special_bonus_sai_1"); this.KillRat(extraData.rat_eid!); if (innate) innate.ApplyDebuff(target); ApplyDamage({ attacker: caster, victim: target, damage: this.GetSpecialValueFor("damage"), damage_type: this.GetAbilityDamageType(), ability: this }) target.AddNewModifier(caster, this, "modifier_sai_rat_reconnaissance", {duration: vision_duration}); target.AddNewModifier(caster, this, "modifier_sai_rat_reconnaissance_slow", {duration: this.GetSpecialValueFor("slow_duration") * (1 - target.GetStatusResistance())}); EmitSoundOn("Hero_Sai.RatReconnaissance.Impact", target); } /****************************************/ KillRat(id: EntityIndex) { let rat = EntIndexToHScript(id); if (rat) { let impact_fx = ParticleManager.CreateParticle("particles/units/heroes/sai/sai_rat_reconnaissance_impact.vpcf", ParticleAttachment.CUSTOMORIGIN, undefined); ParticleManager.SetParticleControl(impact_fx, 0, rat.GetAbsOrigin()); ParticleManager.ReleaseParticleIndex(impact_fx); StopSoundOn("Hero_Sai.RatReconnaissance.Rat", rat) UTIL_Remove(rat) } } } @registerModifier() export class modifier_sai_rat_reconnaissance extends BaseModifier { DeclareFunctions(){ return [ ModifierFunction.PROVIDES_FOW_POSITION, ]} /****************************************/ GetModifierProvidesFOWVision(): 0 | 1 { return 1; } } @registerModifier() export class modifier_sai_rat_reconnaissance_slow extends BaseModifier { move_slow?: number; 
/****************************************/ OnCreated(params: object): void { let ability = this.GetAbility(); this.move_slow = -ability!.GetSpecialValueFor("move_slow"); } /****************************************/ DeclareFunctions(){ return [ ModifierFunction.MOVESPEED_BONUS_PERCENTAGE ]} /****************************************/ GetModifierMoveSpeedBonus_Percentage(): number { return this.move_slow!; } } @registerModifier() export class modifier_sai_super_beast_drawing_rat extends BaseModifier { CheckState(): Partial<Record<ModifierState, boolean>> { return { [ModifierState.NO_HEALTH_BAR]: true, [ModifierState.DISARMED]: true, [ModifierState.NOT_ON_MINIMAP]: true, [ModifierState.OUT_OF_GAME]: true, [ModifierState.INVULNERABLE]: true, }; } }
#ifndef _BASIC_CONSOLE_H_ #define _BASIC_CONSOLE_H_ #include <3ds.h> inline void init_console(void) { gfxInitDefault(); consoleInit(GFX_TOP, NULL); } #endif // _BASIC_CONSOLE_H_
public class FibonacciSequence { public static void main(String[] args) { int n = 10; // create 10 threads for (int i=0; i<n; i++) { new Thread(new FibonacciThread(i)).start(); } } } class FibonacciThread implements Runnable { private int threadId; public FibonacciThread(int threadId) { this.threadId = threadId; } @Override public void run() { int n=10, t1=0, t2=1; System.out.println("Thread : " + this.threadId); System.out.printf("First %d terms: ",n); for (int i=1; i<=n; ++i) { if (i%2==0) { System.out.print(t2+" "); t1=t1+t2; } else { System.out.print(t1+" "); t2=t1+t2; } } } }
python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/512+512+512-HPMI/model --tokenizer_name model-configs/1536-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/512+512+512-HPMI/512+512+512-SS-N-VB-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function shuffle_sentences_remove_all_but_nouns_and_verbs_first_third_sixth --eval_function last_sixth_eval
package com.yin.springboot.mybatis.controller; import com.github.pagehelper.PageHelper; import com.yin.springboot.mybatis.domain.OmsOrder; import com.yin.springboot.mybatis.mapper.OmsOrderMapper; import org.apache.ibatis.cursor.Cursor; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.RestController; import java.util.List; /** * Created by IntelliJ IDEA. * User: Administrator * Date:2019/7/14 * Time: 11:38 * To change this template use File | Settings | File Templates. */ @RestController public class TestPagehelperCtl { @Autowired private OmsOrderMapper orderMapper; @GetMapping("/") public String page() { // PageHelper.startPage(1, 2); List<OmsOrder> omsOrder = orderMapper.selectAll(); Cursor<OmsOrder> orders= orderMapper.selectCursor(); return "ok"; } }
import * as core from "@actions/core"; import { CocoapodsInstaller } from "./installer"; import { getVersionFromPodfile } from "./podfile-parser"; const run = async (): Promise<void> => { try { if (process.platform !== "darwin" && process.platform !== "linux") { throw new Error(`This task is intended for macOS and linux platforms. It can't be run on '${process.platform}' platform`); } let versionSpec = core.getInput("version", { required: false }); const podfilePath = core.getInput("podfile-path", { required: false }); if (!!versionSpec === !!podfilePath) { throw new Error("Invalid input parameters usage. Either 'version' or 'podfile-path' should be specified. Not the both ones."); } if (!versionSpec) { core.debug("Reading Podfile to determine the version of Cocoapods..."); versionSpec = getVersionFromPodfile(podfilePath); core.info(`Podfile points to the Cocoapods ${versionSpec}`); } await CocoapodsInstaller.install(versionSpec); } catch (error) { core.setFailed(error.message); } }; run();
#include "TileLayer.h" namespace Syndicate { namespace Graphics { TileLayer::TileLayer() { } TileLayer::TileLayer(Shader* shader) : Layer(synnew BatchRenderer2D(), shader, glm::ortho(-16.0f, 16.0f, -9.0f, 9.0f)) { } void TileLayer::SetProjectionMatrix() { Layer::SetProjectionMatrix(glm::ortho(-16.0f, 16.0f, -9.0f, 9.0f)); } void TileLayer::SetProjectionMatrix(const glm::mat4& prMatrix) { Layer::SetProjectionMatrix(prMatrix); } TileLayer::~TileLayer() { } }}
var _ = require('underscore'); var camshaftReference = require('builder/data/camshaft-reference'); var BaseAnalysisFormModel = require('./base-analysis-form-model'); var ARRAY_SPLIT_COMBO = ','; /** * A fallback form model in case the type is not supported (yet). */ module.exports = BaseAnalysisFormModel.extend({ initialize: function () { BaseAnalysisFormModel.prototype.initialize.apply(this, arguments); this.listenTo(this._analysisSourceOptionsModel, 'change:fetching', this._updateSchema); this._updateSchema(); }, getTemplate: function () { return undefined; }, getTemplateData: function () { return {}; }, /** * @override {BaseAnalysisFormModel._formatAttrs} */ _formatAttrs: function () { var attrs = BaseAnalysisFormModel.prototype._formatAttrs.apply(this, arguments); var params = camshaftReference.paramsForType(this.get('type')); for (var name in params) { var param = params[name]; if (param.type === 'array') { if (!_.isString(attrs[name]) || attrs[name].trim() === '') { attrs[name] = null; } else { attrs[name] = attrs[name] .split(ARRAY_SPLIT_COMBO) .map(function (val) { return val.trim(); }); } } } return attrs; }, _updateSchema: function () { var schema = { type: { type: 'Text', title: 'Type', editorAttrs: {disabled: true} } }; var params = camshaftReference.paramsForType(this.get('type')); var sourceCount = _.reduce(Object.keys(params), function (memo, name) { if (params[name].type === 'node') { memo++; } return memo; }, 0); Object.keys(params) .forEach(function (name) { var param = params[name]; var label = name + ' (' + param.type + ')'; var validators = []; var isRequired = !param.optional; if (isRequired) { label += '*'; validators.push('required'); } switch (param.type) { case 'node': schema[name] = sourceCount === 1 ? 
{ type: 'Select', text: label, options: [ this.get('source') ], dialogMode: 'float', editorAttrs: { disabled: true } } : { type: 'Select', dialogMode: 'float', title: label, options: this._getSourceOptionsForSource({ sourceAttrName: 'target', ignorePrimarySource: true }) }; break; case 'string': var fieldDef; // If is meant to represent a column try to get columns if possible if (/column/.test(name) && sourceCount === 1) { var nodeDefModel = this._layerDefinitionModel.findAnalysisDefinitionNodeModel(this.get('source')); if (nodeDefModel && nodeDefModel.querySchemaModel.columnsCollection.length > 0) { fieldDef = { type: 'Text', title: label, validators: validators }; } } if (!fieldDef) { fieldDef = { type: 'Text', title: label, validators: validators }; } schema[name] = fieldDef; break; case 'enum': schema[name] = { type: 'Select', title: label, options: param.values.map(function (val) { return { val: val, label: val }; }), dialogMode: 'float', validators: validators }; break; case 'number': schema[name] = { type: 'Text', label: label, validators: validators }; break; case 'boolean': schema[name] = { type: 'Radio', text: label, options: [ {val: 'true', label: 'true'}, {val: 'false', label: 'false'} ], validators: validators }; break; case 'array': schema[name] = { type: 'Text', title: label, validators: validators, help: 'Separate values by "' + ARRAY_SPLIT_COMBO + '"' }; break; default: return null; } }, this); this._setSchema(schema); } });
import React from "react" import SEO from "../components/Common/seo" import About from "../components/About/About"; const AboutPage = () => ( <> <SEO title="About" /> <About/> </> ) export default AboutPage
#!/usr/bin/env python ############################################################################### # litenav by dgelessus # A simplified version of the original filenav. Only supports basic folder # listing and navigation. ############################################################################### import os # used to navigate the file structure import sys # for sys.argv import ui # duh class FileDataSource(object): # ui.TableView data source that generates a directory listing def __init__(self, path=os.getcwd()): # init self.path = full_path(path) self.refresh() self.lists = [self.folders, self.files] def refresh(self): # Refresh the list of files and folders self.folders = [] self.files = [] for f in os.listdir(self.path): if os.path.isdir(os.path.join(self.path, f)): self.folders.append(f) else: self.files.append(f) def tableview_number_of_sections(self, tableview): # Return the number of sections return len(self.lists) def tableview_number_of_rows(self, tableview, section): # Return the number of rows in the section return len(self.lists[section]) def tableview_cell_for_row(self, tableview, section, row): # Create and return a cell for the given section/row cell = ui.TableViewCell() cell.text_label.text = os.path.basename(os.path.join(self.path, self.lists[section][row])) if section == 0: cell.accessory_type = "disclosure_indicator" return cell def tableview_title_for_header(self, tableview, section): # Return a title for the given section. 
if section == 0: return "Folders" elif section == 1: return "Files" else: return "" def tableview_did_select(self, tableview, section, row): # Called when the user selects a row if section == 0: nav.push_view(make_file_list(os.path.join(self.path, self.folders[row]))) def close_proxy(): def _close(sender): nav.close() return _close def full_path(path): # Return absolute path with expanded ~s, input path assumed relative to cwd return os.path.abspath(os.path.join(os.getcwd(), os.path.expanduser(path))) def make_file_list(path): # Create a ui.TableView containing a directory listing of path path = full_path(path) lst = ui.TableView(flex="WH") # allow multiple selection when editing, single selection otherwise lst.allows_selection = True lst.allows_multiple_selection = False lst.background_color = 1.0 lst.data_source = lst.delegate = FileDataSource(path) lst.name = os.path.basename(path) current_list = lst return lst if __name__ == "__main__": lst = make_file_list("~") lst.left_button_items = ui.ButtonItem(image=ui.Image.named("ionicons-close-24"), action=close_proxy()), nav = ui.NavigationView(lst) nav.navigation_bar_hidden = False nav.name = "LiteNav" nav.flex = "WH" nav.height = 1000 nav.present("popover", hide_title_bar=True)
<reponame>feserm/BioDWH2<filename>biodwh2-core/src/main/java/de/unibi/agbi/biodwh2/core/Factory.java package de.unibi.agbi.biodwh2.core; import java.io.File; import java.io.IOException; import java.lang.management.ManagementFactory; import java.util.*; import java.util.jar.JarEntry; import java.util.jar.JarFile; public final class Factory { private static final List<String> IGNORED_JARS = Arrays.asList("rt.jar", "idea_rt.jar", "aws-java-sdk-ec2", "proto-", "google-cloud-", "google-api-", "openstack4j-core", "selenium-", "google-api-client", "jackson-", "guava", "jetty", "netty-", "junit-"); private static Factory instance; private final Map<String, List<Class<?>>> interfaceToImplementationsMap; private final Map<String, List<Class<?>>> baseClassToImplementationsMap; private final Set<String> allClassPaths; private Factory() { interfaceToImplementationsMap = new HashMap<>(); baseClassToImplementationsMap = new HashMap<>(); allClassPaths = new HashSet<>(); collectAllClassPaths(); loadAllClasses(); } public static Factory getInstance() { return instance != null ? 
instance : (instance = new Factory()); } private void loadAllClasses() { ClassLoader classLoader = ClassLoader.getSystemClassLoader(); for (String classPath : allClassPaths) loadClassPath(classLoader, classPath); } private void collectAllClassPaths() { String runtimeClassPath = ManagementFactory.getRuntimeMXBean().getClassPath(); for (String classPath : runtimeClassPath.split(File.pathSeparator)) { File file = new File(classPath); if (file.isDirectory()) iterateFileSystem(file, file.toURI().toString()); else if (isValidJarFile(file)) iterateJarFile(file); } } private static boolean isValidJarFile(File file) { String fileName = file.getName().toLowerCase(Locale.US); return file.isFile() && fileName.endsWith(".jar") && IGNORED_JARS.stream().noneMatch(fileName::contains); } private void iterateFileSystem(File directory, String rootPath) { File[] files = directory.listFiles(); if (files != null) { for (File file : files) { if (file.isDirectory()) iterateFileSystem(file, rootPath); else if (file.isFile()) addUriIfValidClassPath(file.toURI().toString().substring(rootPath.length())); } } } private void addUriIfValidClassPath(String uri) { if (isUriClassInBioDWH(uri)) allClassPaths.add(getClassPathFromUri(uri)); } private void iterateJarFile(File file) { Enumeration<JarEntry> je = tryGetJarFileEntries(file); while (je.hasMoreElements()) { JarEntry j = je.nextElement(); if (!j.isDirectory()) addUriIfValidClassPath(j.getName()); } } private Enumeration<JarEntry> tryGetJarFileEntries(File file) { try { return new JarFile(file).entries(); } catch (IOException e) { e.printStackTrace(); return Collections.emptyEnumeration(); } } private static boolean isUriClassInBioDWH(String uri) { return uri.endsWith(".class") && uri.contains("de/unibi/agbi/biodwh2"); } private static String getClassPathFromUri(String uri) { return uri.replace("/", ".").replace(".class", ""); } private void loadClassPath(ClassLoader classLoader, String classPath) { Class<?> c = tryLoadClass(classLoader, 
classPath); if (c != null) { linkClassToParentInterfaces(c); linkClassToSuperclass(c); } } private static Class<?> tryLoadClass(ClassLoader classLoader, String classPath) { try { return classLoader.loadClass(classPath); } catch (ClassNotFoundException e) { e.printStackTrace(); } return null; } private void linkClassToParentInterfaces(Class<?> c) { for (Class<?> classInterface : c.getInterfaces()) linkClassToParentInterface(c, classInterface.getName()); } private void linkClassToParentInterface(Class<?> c, String interfaceName) { if (!interfaceToImplementationsMap.containsKey(interfaceName)) interfaceToImplementationsMap.put(interfaceName, new ArrayList<>()); interfaceToImplementationsMap.get(interfaceName).add(c); } private void linkClassToSuperclass(Class<?> c) { if (c.getSuperclass() != null) { String superclassName = c.getSuperclass().getName(); if (!baseClassToImplementationsMap.containsKey(superclassName)) baseClassToImplementationsMap.put(superclassName, new ArrayList<>()); baseClassToImplementationsMap.get(superclassName).add(c); } } public <T> List<Class<T>> getImplementations(Class<T> type) { String typeName = type.getName(); if (interfaceToImplementationsMap.containsKey(typeName)) return mapImplementationsToType(interfaceToImplementationsMap.get(typeName)); if (baseClassToImplementationsMap.containsKey(typeName)) return mapImplementationsToType(baseClassToImplementationsMap.get(typeName)); return Collections.emptyList(); } private static <T> List<Class<T>> mapImplementationsToType(List<Class<?>> classes) { List<Class<T>> result = new ArrayList<>(); for (Class<?> class_ : classes) { //noinspection unchecked result.add((Class<T>) class_); } return result; } }
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.cassandrareaper.jmx; import io.cassandrareaper.ReaperException; import io.cassandrareaper.core.Cluster; import io.cassandrareaper.core.Snapshot; import io.cassandrareaper.core.Snapshot.Builder; import io.cassandrareaper.service.RingRange; import java.io.IOException; import java.lang.reflect.UndeclaredThrowableException; import java.math.BigInteger; import java.net.InetSocketAddress; import java.net.MalformedURLException; import java.net.UnknownHostException; import java.rmi.server.RMIClientSocketFactory; import java.rmi.server.RMISocketFactory; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; import javax.management.AttributeNotFoundException; import javax.management.InstanceNotFoundException; import javax.management.JMX; import javax.management.ListenerNotFoundException; import javax.management.MBeanException; import javax.management.MBeanServerConnection; import javax.management.MalformedObjectNameException; import 
javax.management.Notification; import javax.management.ObjectName; import javax.management.ReflectionException; import javax.management.openmbean.TabularData; import javax.management.remote.JMXConnector; import javax.management.remote.JMXConnectorFactory; import javax.management.remote.JMXServiceURL; import javax.rmi.ssl.SslRMIClientSocketFactory; import javax.validation.constraints.NotNull; import com.codahale.metrics.Gauge; import com.codahale.metrics.MetricRegistry; import com.datastax.driver.core.policies.EC2MultiRegionAddressTranslator; import com.google.common.base.Optional; import com.google.common.collect.BiMap; import com.google.common.collect.ImmutableBiMap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import org.apache.cassandra.db.ColumnFamilyStoreMBean; import org.apache.cassandra.db.compaction.CompactionManager; import org.apache.cassandra.db.compaction.CompactionManagerMBean; import org.apache.cassandra.gms.FailureDetector; import org.apache.cassandra.gms.FailureDetectorMBean; import org.apache.cassandra.locator.EndpointSnitchInfoMBean; import org.apache.cassandra.repair.RepairParallelism; import org.apache.cassandra.repair.messages.RepairOption; import org.apache.cassandra.service.ActiveRepairService; import org.apache.cassandra.service.StorageServiceMBean; import org.apache.cassandra.utils.progress.ProgressEventType; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static com.google.common.base.Preconditions.checkNotNull; final class JmxProxyImpl implements JmxProxy { private static final Logger LOG = LoggerFactory.getLogger(JmxProxy.class); private static final int JMX_PORT = 7199; private static final String JMX_URL = "service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi"; private static final String SS_OBJECT_NAME = "org.apache.cassandra.db:type=StorageService"; private static final String AES_OBJECT_NAME = 
"org.apache.cassandra.internal:type=AntiEntropySessions"; private static final String VALIDATION_ACTIVE_OBJECT_NAME = "org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=ValidationExecutor,name=ActiveTasks"; private static final String VALIDATION_PENDING_OBJECT_NAME = "org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=ValidationExecutor,name=PendingTasks"; private static final String COMP_OBJECT_NAME = "org.apache.cassandra.metrics:type=Compaction,name=PendingTasks"; private static final String VALUE_ATTRIBUTE = "Value"; private static final String FAILED_TO_CONNECT_TO_USING_JMX = "Failed to connect to {} using JMX"; private static final String ERROR_GETTING_ATTR_JMX = "Error getting attribute from JMX"; private static final long KB_FACTOR = 1000; private static final long KIB_FACTOR = 1024; private static final long MB_FACTOR = 1000 * KB_FACTOR; private static final long MIB_FACTOR = 1024 * KIB_FACTOR; private static final long GB_FACTOR = 1000 * MB_FACTOR; private static final long GIB_FACTOR = 1024 * MIB_FACTOR; private static final ExecutorService EXECUTOR = Executors.newCachedThreadPool(); private final JMXConnector jmxConnector; private final ObjectName ssMbeanName; private final MBeanServerConnection mbeanServer; private final CompactionManagerMBean cmProxy; private final EndpointSnitchInfoMBean endpointSnitchMbean; private final Object ssProxy; private final Object fdProxy; private final String host; private final String hostBeforeTranslation; private final JMXServiceURL jmxUrl; private final String clusterName; private final ConcurrentMap<Integer, RepairStatusHandler> repairStatusHandlers = Maps.newConcurrentMap(); private final MetricRegistry metricRegistry; private JmxProxyImpl( String host, String hostBeforeTranslation, JMXServiceURL jmxUrl, JMXConnector jmxConnector, Object ssProxy, ObjectName ssMbeanName, MBeanServerConnection mbeanServer, CompactionManagerMBean cmProxy, EndpointSnitchInfoMBean endpointSnitchMbean, 
FailureDetectorMBean fdProxy, MetricRegistry metricRegistry) { this.host = host; this.hostBeforeTranslation = hostBeforeTranslation; this.jmxUrl = jmxUrl; this.jmxConnector = jmxConnector; this.ssMbeanName = ssMbeanName; this.mbeanServer = mbeanServer; this.ssProxy = ssProxy; this.cmProxy = cmProxy; this.endpointSnitchMbean = endpointSnitchMbean; this.clusterName = Cluster.toSymbolicName(((StorageServiceMBean) ssProxy).getClusterName()); this.fdProxy = fdProxy; this.metricRegistry = metricRegistry; registerConnectionsGauge(); } /** * @see JmxProxy#connect(Optional, String, int, String, String, EC2MultiRegionAddressTranslator) */ static JmxProxy connect( String host, String username, String password, final EC2MultiRegionAddressTranslator addressTranslator, int connectionTimeout, MetricRegistry metricRegistry) throws ReaperException, InterruptedException { if (host == null) { throw new ReaperException("Null host given to JmxProxy.connect()"); } String[] parts = host.split(":"); if (parts.length == 2) { return connect( parts[0], Integer.valueOf(parts[1]), username, password, addressTranslator, connectionTimeout, metricRegistry); } else { return connect( host, JMX_PORT, username, password, addressTranslator, connectionTimeout, metricRegistry); } } /** * Connect to JMX interface on the given host and port. * * @param handler Implementation of {@link RepairStatusHandler} to process incoming notifications * of repair events. 
* @param host hostname or ip address of Cassandra node * @param port port number to use for JMX connection * @param username username to use for JMX authentication * @param password password to use for JMX authentication * @param addressTranslator if EC2MultiRegionAddressTranslator isn't null it will be used to * translate addresses */ private static JmxProxy connect( String originalHost, int port, String username, String password, final EC2MultiRegionAddressTranslator addressTranslator, int connectionTimeout, MetricRegistry metricRegistry) throws ReaperException, InterruptedException { ObjectName ssMbeanName; ObjectName cmMbeanName; ObjectName fdMbeanName; ObjectName endpointSnitchMbeanName; JMXServiceURL jmxUrl; String host = originalHost; if (addressTranslator != null) { host = addressTranslator.translate(new InetSocketAddress(host, port)).getAddress().getHostAddress(); LOG.debug("translated {} to {}", originalHost, host); } try { LOG.debug("Connecting to {}...", host); jmxUrl = new JMXServiceURL(String.format(JMX_URL, host, port)); ssMbeanName = new ObjectName(SS_OBJECT_NAME); cmMbeanName = new ObjectName(CompactionManager.MBEAN_OBJECT_NAME); fdMbeanName = new ObjectName(FailureDetector.MBEAN_NAME); endpointSnitchMbeanName = new ObjectName("org.apache.cassandra.db:type=EndpointSnitchInfo"); } catch (MalformedURLException | MalformedObjectNameException e) { LOG.error(String.format("Failed to prepare the JMX connection to %s:%s", host, port)); throw new ReaperException("Failure during preparations for JMX connection", e); } try { Map<String, Object> env = new HashMap<>(); if (username != null && password != null) { String[] creds = {username, password}; env.put(JMXConnector.CREDENTIALS, creds); } env.put("com.sun.jndi.rmi.factory.socket", getRmiClientSocketFactory()); JMXConnector jmxConn = connectWithTimeout(jmxUrl, connectionTimeout, TimeUnit.SECONDS, env); MBeanServerConnection mbeanServerConn = jmxConn.getMBeanServerConnection(); Object ssProxy = 
JMX.newMBeanProxy(mbeanServerConn, ssMbeanName, StorageServiceMBean.class); String cassandraVersion = ((StorageServiceMBean) ssProxy).getReleaseVersion(); if (cassandraVersion.startsWith("2.0") || cassandraVersion.startsWith("1.")) { ssProxy = JMX.newMBeanProxy(mbeanServerConn, ssMbeanName, StorageServiceMBean20.class); } CompactionManagerMBean cmProxy = JMX.newMBeanProxy(mbeanServerConn, cmMbeanName, CompactionManagerMBean.class); FailureDetectorMBean fdProxy = JMX.newMBeanProxy(mbeanServerConn, fdMbeanName, FailureDetectorMBean.class); EndpointSnitchInfoMBean endpointSnitchProxy = JMX.newMBeanProxy(mbeanServerConn, endpointSnitchMbeanName, EndpointSnitchInfoMBean.class); JmxProxy proxy = new JmxProxyImpl( host, originalHost, jmxUrl, jmxConn, ssProxy, ssMbeanName, mbeanServerConn, cmProxy, endpointSnitchProxy, fdProxy, metricRegistry); // registering a listener throws bunch of exceptions, so we do it here rather than in the // constructor mbeanServerConn.addNotificationListener(ssMbeanName, proxy, null, null); LOG.debug("JMX connection to {} properly connected: {}", host, jmxUrl.toString()); return proxy; } catch (IOException | ExecutionException | TimeoutException | InstanceNotFoundException e) { throw new ReaperException("Failure when establishing JMX connection to " + host + ":" + port, e); } catch (InterruptedException expected) { LOG.debug( "JMX connection to {}:{} was interrupted by Reaper. 
" + "Another JMX connection must have succeeded before this one.", host, port); throw expected; } } private static JMXConnector connectWithTimeout( JMXServiceURL url, long timeout, TimeUnit unit, Map<String, Object> env) throws InterruptedException, ExecutionException, TimeoutException { Future<JMXConnector> future = EXECUTOR.submit(() -> JMXConnectorFactory.connect(url, env)); return future.get(timeout, unit); } @Override public String getHost() { return host; } @Override public String getDataCenter() { return getDataCenter(hostBeforeTranslation); } @Override public String getDataCenter(String host) { try { return endpointSnitchMbean.getDatacenter(host); } catch (UnknownHostException ex) { throw new IllegalArgumentException(ex); } } @Override public List<BigInteger> getTokens() { checkNotNull(ssProxy, "Looks like the proxy is not connected"); return Lists.transform( Lists.newArrayList(((StorageServiceMBean) ssProxy).getTokenToEndpointMap().keySet()), s -> new BigInteger(s)); } @Override public Map<List<String>, List<String>> getRangeToEndpointMap(String keyspace) throws ReaperException { checkNotNull(ssProxy, "Looks like the proxy is not connected"); try { return ((StorageServiceMBean) ssProxy).getRangeToEndpointMap(keyspace); } catch (RuntimeException e) { LOG.error(e.getMessage()); throw new ReaperException(e.getMessage(), e); } } @Override public List<RingRange> getRangesForLocalEndpoint(String keyspace) throws ReaperException { checkNotNull(ssProxy, "Looks like the proxy is not connected"); List<RingRange> localRanges = Lists.newArrayList(); try { Map<List<String>, List<String>> ranges = ((StorageServiceMBean) ssProxy).getRangeToEndpointMap(keyspace); String localEndpoint = getLocalEndpoint(); // Filtering ranges for which the local node is a replica // For local mode ranges .entrySet() .stream() .forEach(entry -> { if (entry.getValue().contains(localEndpoint)) { localRanges.add( new RingRange(new BigInteger(entry.getKey().get(0)), new 
BigInteger(entry.getKey().get(1)))); } }); LOG.info("LOCAL RANGES {}", localRanges); return localRanges; } catch (RuntimeException e) { LOG.error(e.getMessage()); throw new ReaperException(e.getMessage(), e); } } public String getLocalEndpoint() throws ReaperException { String cassandraVersion = getCassandraVersion(); if (versionCompare(cassandraVersion, "2.1.10") >= 0) { return ((StorageServiceMBean) ssProxy) .getHostIdToEndpoint() .get(((StorageServiceMBean) ssProxy).getLocalHostId()); } else { // pre-2.1.10 compatibility BiMap<String, String> hostIdBiMap = ImmutableBiMap.copyOf(((StorageServiceMBean) ssProxy).getHostIdMap()); String localHostId = ((StorageServiceMBean) ssProxy).getLocalHostId(); return hostIdBiMap.inverse().get(localHostId); } } @NotNull @Override public List<String> tokenRangeToEndpoint(String keyspace, RingRange tokenRange) { checkNotNull(ssProxy, "Looks like the proxy is not connected"); Set<Map.Entry<List<String>, List<String>>> entries = ((StorageServiceMBean) ssProxy).getRangeToEndpointMap(keyspace).entrySet(); for (Map.Entry<List<String>, List<String>> entry : entries) { BigInteger rangeStart = new BigInteger(entry.getKey().get(0)); BigInteger rangeEnd = new BigInteger(entry.getKey().get(1)); if (new RingRange(rangeStart, rangeEnd).encloses(tokenRange)) { LOG.debug("[tokenRangeToEndpoint] Found replicas for token range {} : {}", tokenRange, entry.getValue()); return entry.getValue(); } } LOG.error("[tokenRangeToEndpoint] no replicas found for token range {}", tokenRange); LOG.debug("[tokenRangeToEndpoint] checked token ranges were {}", entries); return Lists.newArrayList(); } @NotNull @Override public Map<String, String> getEndpointToHostId() { checkNotNull(ssProxy, "Looks like the proxy is not connected"); Map<String, String> hosts; try { hosts = ((StorageServiceMBean) ssProxy).getEndpointToHostId(); } catch (UndeclaredThrowableException e) { hosts = ((StorageServiceMBean) ssProxy).getHostIdMap(); } return hosts; } @Override public 
String getPartitioner() { checkNotNull(ssProxy, "Looks like the proxy is not connected"); return ((StorageServiceMBean) ssProxy).getPartitionerName(); } @Override public String getClusterName() { checkNotNull(ssProxy, "Looks like the proxy is not connected"); return ((StorageServiceMBean) ssProxy).getClusterName(); } @Override public List<String> getKeyspaces() { checkNotNull(ssProxy, "Looks like the proxy is not connected"); return ((StorageServiceMBean) ssProxy).getKeyspaces(); } @Override public Set<String> getTableNamesForKeyspace(String keyspace) throws ReaperException { Set<String> tableNames = new HashSet<>(); Iterator<Map.Entry<String, ColumnFamilyStoreMBean>> proxies; try { proxies = ColumnFamilyStoreMBeanIterator.getColumnFamilyStoreMBeanProxies(mbeanServer); } catch (IOException | MalformedObjectNameException e) { throw new ReaperException("failed to get ColumnFamilyStoreMBean instances from JMX", e); } while (proxies.hasNext()) { Map.Entry<String, ColumnFamilyStoreMBean> proxyEntry = proxies.next(); String keyspaceName = proxyEntry.getKey(); if (keyspace.equalsIgnoreCase(keyspaceName)) { ColumnFamilyStoreMBean columnFamilyMBean = proxyEntry.getValue(); tableNames.add(columnFamilyMBean.getColumnFamilyName()); } } return tableNames; } @Override public int getPendingCompactions() throws MBeanException, AttributeNotFoundException, ReflectionException { checkNotNull(cmProxy, "Looks like the proxy is not connected"); try { ObjectName name = new ObjectName(COMP_OBJECT_NAME); int pendingCount = (int) mbeanServer.getAttribute(name, VALUE_ATTRIBUTE); return pendingCount; } catch (IOException ignored) { LOG.warn(FAILED_TO_CONNECT_TO_USING_JMX, host, ignored); } catch (MalformedObjectNameException ignored) { LOG.error("Internal error, malformed name", ignored); } catch (InstanceNotFoundException e) { // This happens if no repair has yet been run on the node // The AntiEntropySessions object is created on the first repair LOG.error("Error getting pending compactions 
attribute from JMX", e); return 0; } catch (RuntimeException e) { LOG.error(ERROR_GETTING_ATTR_JMX, e); } // If uncertain, assume it's running return 0; } @Override public boolean isRepairRunning() throws MBeanException, AttributeNotFoundException, ReflectionException { return isRepairRunningPre22() || isRepairRunningPost22() || isValidationCompactionRunning(); } /** * @return true if any repairs are running on the node. */ private boolean isRepairRunningPre22() throws MBeanException, AttributeNotFoundException, ReflectionException { // Check if AntiEntropySession is actually running on the node try { ObjectName name = new ObjectName(AES_OBJECT_NAME); int activeCount = (Integer) mbeanServer.getAttribute(name, "ActiveCount"); long pendingCount = (Long) mbeanServer.getAttribute(name, "PendingTasks"); return activeCount + pendingCount != 0; } catch (IOException ignored) { LOG.warn(FAILED_TO_CONNECT_TO_USING_JMX, host, ignored); } catch (MalformedObjectNameException ignored) { LOG.error("Internal error, malformed name", ignored); } catch (InstanceNotFoundException e) { // This happens if no repair has yet been run on the node // The AntiEntropySessions object is created on the first repair LOG.debug("No repair has run yet on the node. Ignoring exception.", e); return false; } catch (RuntimeException e) { LOG.error(ERROR_GETTING_ATTR_JMX, e); } // If uncertain, assume it's running return true; } /** * @return true if any repairs are running on the node. 
*/ private boolean isValidationCompactionRunning() throws MBeanException, AttributeNotFoundException, ReflectionException { // Check if AntiEntropySession is actually running on the node try { int activeCount = (Integer) mbeanServer.getAttribute(new ObjectName(VALIDATION_ACTIVE_OBJECT_NAME), VALUE_ATTRIBUTE); long pendingCount = (Long) mbeanServer.getAttribute(new ObjectName(VALIDATION_PENDING_OBJECT_NAME), VALUE_ATTRIBUTE); return activeCount + pendingCount != 0; } catch (IOException ignored) { LOG.warn(FAILED_TO_CONNECT_TO_USING_JMX, host, ignored); } catch (MalformedObjectNameException ignored) { LOG.error("Internal error, malformed name", ignored); } catch (InstanceNotFoundException e) { LOG.error("Error getting pending/active validation compaction attributes from JMX", e); return false; } catch (RuntimeException e) { LOG.error(ERROR_GETTING_ATTR_JMX, e); } // If uncertain, assume it's not running return false; } /** * New way of determining if a repair is running after C* 2.2 * * @return true if any repairs are running on the node. */ private boolean isRepairRunningPost22() { try { // list all mbeans in search of one with the name Repair#?? 
// This is the replacement for AntiEntropySessions since Cassandra 2.2 Set beanSet = mbeanServer.queryNames(new ObjectName("org.apache.cassandra.internal:*"), null); for (Object bean : beanSet) { ObjectName objName = (ObjectName) bean; if (objName.getCanonicalName().contains("Repair#")) { return true; } } return false; } catch (IOException ignored) { LOG.warn(FAILED_TO_CONNECT_TO_USING_JMX, host, ignored); } catch (MalformedObjectNameException ignored) { LOG.error("Internal error, malformed name", ignored); } catch (RuntimeException e) { LOG.error(ERROR_GETTING_ATTR_JMX, e); } // If uncertain, assume it's running return true; } @Override public void cancelAllRepairs() { checkNotNull(ssProxy, "Looks like the proxy is not connected"); try { ((StorageServiceMBean) ssProxy).forceTerminateAllRepairSessions(); } catch (RuntimeException e) { // This can happen if the node is down (UndeclaredThrowableException), // in which case repairs will be cancelled anyway... LOG.warn("Failed to terminate all repair sessions; node down?", e); } } @Override public Map<String, List<String>> listTablesByKeyspace() { Map<String, List<String>> tablesByKeyspace = Maps.newHashMap(); try { Set<ObjectName> beanSet = mbeanServer.queryNames( new ObjectName( "org.apache.cassandra.db:type=ColumnFamilies,keyspace=*,columnfamily=*"), null); tablesByKeyspace = beanSet .stream() .map( bean -> new JmxColumnFamily( bean.getKeyProperty("keyspace"), bean.getKeyProperty("columnfamily"))) .collect( Collectors.groupingBy( JmxColumnFamily::getKeyspace, Collectors.mapping(JmxColumnFamily::getColumnFamily, Collectors.toList()))); } catch (MalformedObjectNameException | IOException e) { LOG.warn("Couldn't get a list of tables through JMX", e); } return Collections.unmodifiableMap(tablesByKeyspace); } @Override public String getCassandraVersion() { return ((StorageServiceMBean) ssProxy).getReleaseVersion(); } @Override public int triggerRepair( BigInteger beginToken, BigInteger endToken, String keyspace, 
RepairParallelism repairParallelism, Collection<String> columnFamilies, boolean fullRepair, Collection<String> datacenters, RepairStatusHandler repairStatusHandler) throws ReaperException { checkNotNull(ssProxy, "Looks like the proxy is not connected"); String cassandraVersion = getCassandraVersion(); boolean canUseDatacenterAware = false; try { canUseDatacenterAware = versionCompare(cassandraVersion, "2.0.12") >= 0; } catch (ReaperException e) { LOG.warn("failed on version comparison, not using dc aware repairs by default", e); } String msg = String.format( "Triggering repair of range (%s,%s] for keyspace \"%s\" on " + "host %s, with repair parallelism %s, in cluster with Cassandra " + "version '%s' (can use DATACENTER_AWARE '%s'), " + "for column families: %s", beginToken.toString(), endToken.toString(), keyspace, this.host, repairParallelism, cassandraVersion, canUseDatacenterAware, columnFamilies); LOG.info(msg); if (repairParallelism.equals(RepairParallelism.DATACENTER_AWARE) && !canUseDatacenterAware) { LOG.info( "Cannot use DATACENTER_AWARE repair policy for Cassandra cluster with version {}," + " falling back to SEQUENTIAL repair.", cassandraVersion); repairParallelism = RepairParallelism.SEQUENTIAL; } try { if (cassandraVersion.startsWith("2.0") || cassandraVersion.startsWith("1.")) { return triggerRepairPre2dot1( repairParallelism, keyspace, columnFamilies, beginToken, endToken, datacenters.size() > 0 ? datacenters : null, repairStatusHandler); } else if (cassandraVersion.startsWith("2.1")) { return triggerRepair2dot1( fullRepair, repairParallelism, keyspace, columnFamilies, beginToken, endToken, cassandraVersion, datacenters.size() > 0 ? 
datacenters : null, repairStatusHandler); } else { return triggerRepairPost2dot2( fullRepair, repairParallelism, keyspace, columnFamilies, beginToken, endToken, cassandraVersion, datacenters, repairStatusHandler); } } catch (RuntimeException e) { LOG.error("Segment repair failed", e); throw new ReaperException(e); } } private int triggerRepairPost2dot2( boolean fullRepair, RepairParallelism repairParallelism, String keyspace, Collection<String> columnFamilies, BigInteger beginToken, BigInteger endToken, String cassandraVersion, Collection<String> datacenters, RepairStatusHandler repairStatusHandler) { Map<String, String> options = new HashMap<>(); options.put(RepairOption.PARALLELISM_KEY, repairParallelism.getName()); // options.put(RepairOption.PRIMARY_RANGE_KEY, Boolean.toString(primaryRange)); options.put(RepairOption.INCREMENTAL_KEY, Boolean.toString(!fullRepair)); options.put(RepairOption.JOB_THREADS_KEY, Integer.toString(1)); options.put(RepairOption.TRACE_KEY, Boolean.toString(Boolean.FALSE)); options.put(RepairOption.COLUMNFAMILIES_KEY, StringUtils.join(columnFamilies, ",")); // options.put(RepairOption.PULL_REPAIR_KEY, Boolean.FALSE); if (fullRepair) { options.put(RepairOption.RANGES_KEY, beginToken.toString() + ":" + endToken.toString()); } options.put(RepairOption.DATACENTERS_KEY, StringUtils.join(datacenters, ",")); // options.put(RepairOption.HOSTS_KEY, StringUtils.join(specificHosts, ",")); int commandId = ((StorageServiceMBean) ssProxy).repairAsync(keyspace, options); repairStatusHandlers.putIfAbsent(commandId, repairStatusHandler); return commandId; } private int triggerRepair2dot1( boolean fullRepair, RepairParallelism repairParallelism, String keyspace, Collection<String> columnFamilies, BigInteger beginToken, BigInteger endToken, String cassandraVersion, Collection<String> datacenters, RepairStatusHandler repairStatusHandler) { if (fullRepair) { // full repair if (repairParallelism.equals(RepairParallelism.DATACENTER_AWARE)) { int commandId = 
((StorageServiceMBean) ssProxy) .forceRepairRangeAsync( beginToken.toString(), endToken.toString(), keyspace, repairParallelism.ordinal(), datacenters, cassandraVersion.startsWith("2.2") ? new HashSet<String>() : null, fullRepair, columnFamilies.toArray(new String[columnFamilies.size()])); repairStatusHandlers.putIfAbsent(commandId, repairStatusHandler); return commandId; } boolean snapshotRepair = repairParallelism.equals(RepairParallelism.SEQUENTIAL); int commandId = ((StorageServiceMBean) ssProxy) .forceRepairRangeAsync( beginToken.toString(), endToken.toString(), keyspace, snapshotRepair ? RepairParallelism.SEQUENTIAL.ordinal() : RepairParallelism.PARALLEL.ordinal(), datacenters, cassandraVersion.startsWith("2.2") ? new HashSet<String>() : null, fullRepair, columnFamilies.toArray(new String[columnFamilies.size()])); repairStatusHandlers.putIfAbsent(commandId, repairStatusHandler); return commandId; } // incremental repair int commandId = ((StorageServiceMBean) ssProxy) .forceRepairAsync( keyspace, Boolean.FALSE, Boolean.FALSE, Boolean.FALSE, fullRepair, columnFamilies.toArray(new String[columnFamilies.size()])); repairStatusHandlers.putIfAbsent(commandId, repairStatusHandler); return commandId; } private int triggerRepairPre2dot1( RepairParallelism repairParallelism, String keyspace, Collection<String> columnFamilies, BigInteger beginToken, BigInteger endToken, Collection<String> datacenters, RepairStatusHandler repairStatusHandler) { // Cassandra 1.2 and 2.0 compatibility if (repairParallelism.equals(RepairParallelism.DATACENTER_AWARE)) { int commandId = ((StorageServiceMBean20) ssProxy) .forceRepairRangeAsync( beginToken.toString(), endToken.toString(), keyspace, repairParallelism.ordinal(), datacenters, null, columnFamilies.toArray(new String[columnFamilies.size()])); repairStatusHandlers.putIfAbsent(commandId, repairStatusHandler); return commandId; } boolean snapshotRepair = repairParallelism.equals(RepairParallelism.SEQUENTIAL); int commandId = 
((StorageServiceMBean20) ssProxy) .forceRepairRangeAsync( beginToken.toString(), endToken.toString(), keyspace, snapshotRepair, false, columnFamilies.toArray(new String[columnFamilies.size()])); repairStatusHandlers.putIfAbsent(commandId, repairStatusHandler); return commandId; } @Override public String getAllEndpointsState() { return ((FailureDetectorMBean) fdProxy).getAllEndpointStates(); } @Override public Map<String, String> getSimpleStates() { return ((FailureDetectorMBean) fdProxy).getSimpleStates(); } /** * Invoked when the MBean this class listens to publishes an event. We're only interested in repair-related events. * Their format is explained at {@link org.apache.cassandra.service.StorageServiceMBean#forceRepairAsync} The forma * is: notification type: "repair" notification userData: int array of length 2 where [0] = command number [1] = * ordinal of AntiEntropyService.Status */ @Override public void handleNotification(Notification notification, Object handback) { Thread.currentThread().setName(clusterName); // we're interested in "repair" String type = notification.getType(); LOG.debug("Received notification: {} with type {}", notification, type); if (("repair").equals(type)) { processOldApiNotification(notification); } if (("progress").equals(type)) { processNewApiNotification(notification); } } /** * Handles notifications from the old repair API (forceRepairAsync) */ private void processOldApiNotification(Notification notification) { try { int[] data = (int[]) notification.getUserData(); // get the repair sequence number int repairNo = data[0]; // get the repair status ActiveRepairService.Status status = ActiveRepairService.Status.values()[data[1]]; // this is some text message like "Starting repair...", "Finished repair...", etc. 
String message = notification.getMessage(); // let the handler process the even if (repairStatusHandlers.containsKey(repairNo)) { LOG.debug( "Handling notification: {} with repair handler {}", notification, repairStatusHandlers.containsKey(repairNo)); repairStatusHandlers .get(repairNo) .handle(repairNo, Optional.of(status), Optional.absent(), message, this); } } catch (RuntimeException e) { LOG.error("Error while processing JMX notification", e); } } /** * Handles notifications from the new repair API (repairAsync) */ private void processNewApiNotification(Notification notification) { Map<String, Integer> data = (Map<String, Integer>) notification.getUserData(); try { // get the repair sequence number int repairNo = Integer.parseInt(((String) notification.getSource()).split(":")[1]); // get the progress status ProgressEventType progress = ProgressEventType.values()[data.get("type")]; // this is some text message like "Starting repair...", "Finished repair...", etc. String message = notification.getMessage(); // let the handler process the even if (repairStatusHandlers.containsKey(repairNo)) { LOG.debug( "Handling notification: {} with repair handler {}", notification, repairStatusHandlers.containsKey(repairNo)); repairStatusHandlers .get(repairNo) .handle(repairNo, Optional.absent(), Optional.of(progress), message, this); } } catch (RuntimeException e) { LOG.error("Error while processing JMX notification", e); } } private String getConnectionId() throws IOException { return jmxConnector.getConnectionId(); } @Override public boolean isConnectionAlive() { try { String connectionId = getConnectionId(); return null != connectionId && connectionId.length() > 0; } catch (IOException e) { LOG.debug("Couldn't get Connection Id", e); return false; } } @Override public void removeRepairStatusHandler(int commandId) { repairStatusHandlers.remove(commandId); } /** Cleanly shut down by un-registering the listener and closing the JMX connection. 
*/ @Override public void close() { try { mbeanServer.removeNotificationListener(ssMbeanName, this); LOG.debug("Successfully removed notification listener for '{}': {}", host, jmxUrl); } catch (InstanceNotFoundException | ListenerNotFoundException | IOException e) { LOG.debug("failed on removing notification listener", e); } try { jmxConnector.close(); } catch (IOException e) { LOG.warn("failed closing a JMX connection", e); } } /** * NOTICE: This code is loosely based on StackOverflow answer: * http://stackoverflow.com/questions/6701948/efficient-way-to-compare-version-strings-in-java * * <p> * Compares two version strings. * * <p> * Use this instead of String.compareTo() for a non-lexicographical comparison that works for version strings. e.g. * "1.10".compareTo("1.6"). * * @param str1 a string of ordinal numbers separated by decimal points. * @param str2 a string of ordinal numbers separated by decimal points. * @return The result is a negative integer if str1 is _numerically_ less than str2. The result is a positive integer * if str1 is _numerically_ greater than str2. The result is zero if the strings are _numerically_ equal. It does * not work if "1.10" is supposed to be equal to "1.10.0". 
*/ static Integer versionCompare(String str1, String str2) throws ReaperException { try { String cleanedUpStr1 = str1.split(" ")[0].replaceAll("[-_~]", "."); String cleanedUpStr2 = str2.split(" ")[0].replaceAll("[-_~]", "."); String[] parts1 = cleanedUpStr1.split("\\."); String[] parts2 = cleanedUpStr2.split("\\."); int idx = 0; // set index to first non-equal ordinal or length of shortest version string while (idx < parts1.length && idx < parts2.length) { try { Integer.parseInt(parts1[idx]); Integer.parseInt(parts2[idx]); } catch (NumberFormatException ex) { if (idx == 0) { throw ex; // just comparing two non-version strings should fail } // first non integer part, so let's just stop comparison here and ignore the res idx--; break; } if (parts1[idx].equals(parts2[idx])) { idx++; continue; } break; } // compare first non-equal ordinal number if (idx < parts1.length && idx < parts2.length) { int diff = Integer.valueOf(parts1[idx]).compareTo(Integer.valueOf(parts2[idx])); return Integer.signum(diff); } else { // the strings are equal or one string is a substring of the other // e.g. 
"1.2.3" = "1.2.3" or "1.2.3" < "1.2.3.4" return Integer.signum(parts1.length - parts2.length); } } catch (RuntimeException ex) { LOG.error("failed comparing strings for versions: '{}' '{}'", str1, str2); throw new ReaperException(ex); } } @Override public void clearSnapshot(String repairId, String keyspaceName) throws ReaperException { if (repairId == null || ("").equals(repairId)) { // Passing in null or empty string will clear all snapshots on the hos throw new IllegalArgumentException("repairId cannot be null or empty string"); } try { ((StorageServiceMBean) ssProxy).clearSnapshot(repairId, keyspaceName); } catch (IOException e) { throw new ReaperException(e); } } @Override public void clearSnapshot(String snapshotName) throws ReaperException { if (snapshotName == null || ("").equals(snapshotName)) { // Passing in null or empty string will clear all snapshots on the hos throw new IllegalArgumentException("snapshotName cannot be null or empty string"); } try { ((StorageServiceMBean) ssProxy).clearSnapshot(snapshotName); } catch (IOException e) { throw new ReaperException(e); } } @Override public void clearAllSnapshots() throws ReaperException { try { ((StorageServiceMBean) ssProxy).clearSnapshot(""); } catch (IOException e) { throw new ReaperException(e); } } @Override public List<String> getLiveNodes() throws ReaperException { checkNotNull(ssProxy, "Looks like the proxy is not connected"); try { return ((StorageServiceMBean) ssProxy).getLiveNodes(); } catch (RuntimeException e) { LOG.error(e.getMessage()); throw new ReaperException(e.getMessage(), e); } } private static RMIClientSocketFactory getRmiClientSocketFactory() { return Boolean.parseBoolean(System.getProperty("ssl.enable")) ? 
new SslRMIClientSocketFactory() : RMISocketFactory.getDefaultSocketFactory(); } private static final class JmxColumnFamily { private final String keyspace; private final String columnFamily; JmxColumnFamily(String keyspace, String columnFamily) { super(); this.keyspace = keyspace; this.columnFamily = columnFamily; } public String getKeyspace() { return keyspace; } public String getColumnFamily() { return columnFamily; } } private void registerConnectionsGauge() { try { if (!this.metricRegistry .getGauges() .containsKey( MetricRegistry.name( JmxProxyImpl.class, this.clusterName, this.host, "repairStatusHandlers"))) { this.metricRegistry.register( MetricRegistry.name( JmxProxyImpl.class, this.clusterName, this.host, "repairStatusHandlers"), (Gauge<Integer>) () -> this.repairStatusHandlers.size()); } } catch (IllegalArgumentException e) { LOG.warn("Cannot create connection gauge for node {}", this.host, e); } } @Override public List<Snapshot> listSnapshots() { List<Snapshot> snapshots = Lists.newArrayList(); final Map<String, TabularData> snapshotDetails = ((StorageServiceMBean) ssProxy).getSnapshotDetails(); if (snapshotDetails.isEmpty()) { LOG.debug("There are no snapshots on host {}", this.host); return snapshots; } final String clusterName = ((StorageServiceMBean) ssProxy).getClusterName(); final long trueSnapshotsSize = ((StorageServiceMBean) ssProxy).trueSnapshotsSize(); // display column names only once final List<String> indexNames = snapshotDetails.entrySet().iterator().next().getValue().getTabularType().getIndexNames(); for (final Map.Entry<String, TabularData> snapshotDetail : snapshotDetails.entrySet()) { Set<?> values = snapshotDetail.getValue().keySet(); for (Object eachValue : values) { int index = 0; Builder snapshotBuilder = Snapshot.builder().withHost(this.getHost()); final List<?> valueList = (List<?>) eachValue; for (Object value : valueList) { switch (indexNames.get(index)) { case "Snapshot name": snapshotBuilder.withName((String) value); break; 
case "Keyspace name": snapshotBuilder.withKeyspace((String) value); break; case "Column family name": snapshotBuilder.withTable((String) value); break; case "True size": snapshotBuilder.withTrueSize(parseHumanReadableSize((String) value)); break; case "Size on disk": snapshotBuilder.withSizeOnDisk(parseHumanReadableSize((String) value)); break; default: break; } index++; } snapshots.add(snapshotBuilder.withClusterName(clusterName).build()); } } return snapshots; } public static double parseHumanReadableSize(String readableSize) { int spaceNdx = readableSize.indexOf(" "); double ret = readableSize.contains(".") ? Double.parseDouble(readableSize.substring(0, spaceNdx)) : Double.parseDouble(readableSize.substring(0, spaceNdx).replace(",", ".")); switch (readableSize.substring(spaceNdx + 1)) { case "GB": return ret * GB_FACTOR; case "GiB": return ret * GIB_FACTOR; case "MB": return ret * MB_FACTOR; case "MiB": return ret * MIB_FACTOR; case "KB": return ret * KB_FACTOR; case "KiB": return ret * KIB_FACTOR; default: return 0; } } @Override public String takeSnapshot(String snapshotName, String... keyspaceNames) throws ReaperException { try { ((StorageServiceMBean) ssProxy).takeSnapshot(snapshotName, keyspaceNames); return snapshotName; } catch (IOException e) { throw new ReaperException(e); } } @Override public void takeColumnFamilySnapshot( String keyspaceName, String columnFamilyName, String snapshotName) throws ReaperException { try { ((StorageServiceMBean) ssProxy) .takeColumnFamilySnapshot(keyspaceName, columnFamilyName, snapshotName); } catch (IOException e) { throw new ReaperException(e); } } }
//
//  LoadingTableCell.h
//  newsyc
//
//  Created by <NAME> on 5/28/11.
//  Copyright 2011 __MyCompanyName__. All rights reserved.
//

#import "LoadingIndicatorView.h"
#import "ABTableViewCell.h"

// Table-view cell holding a single LoadingIndicatorView, used as a
// placeholder row while content is being fetched.
// NOTE(review): layout/spinner start-stop behavior lives in the .m file,
// which is not visible here.
@interface LoadingTableCell : ABTableViewCell {
    // Spinner displayed while the row's content loads.
    LoadingIndicatorView *indicator;
}

@end
package GraphCreator; import org.graphstream.graph.Edge; import org.graphstream.graph.Graph; import org.graphstream.graph.Node; import org.graphstream.graph.implementations.SingleGraph; public class AstridsTestGraph { public static void main(String[] args) { Graph graph = new SingleGraph("Tutorial 1"); graph.addNode("A"); graph.addNode("B"); graph.addNode("C"); graph.addNode("D"); graph.addNode("E"); graph.addNode("F"); graph.addEdge("AB", "A", "B"); graph.addEdge("BC", "B", "C"); graph.addEdge("BD", "B", "D"); graph.addEdge("CE", "C", "E"); graph.addEdge("DE", "D", "E"); graph.addEdge("EF", "E", "F"); graph.getEdge("AB").setAttribute("layout.weight", 20); graph.getEdge("BC").setAttribute("layout.weight", 30); graph.getEdge("BD").setAttribute("layout.weight", 30); graph.getEdge("CE").setAttribute("layout.weight", 30); graph.getEdge("DE").setAttribute("layout.weight", 30); graph.getEdge("EF").setAttribute("layout.weight", 20); for (Node n : graph) n.setAttribute("ui.label", n.getId()); for (Edge e : graph.getEdgeSet()) e.setAttribute("ui.label", e.getAttribute("layout.weight").toString()); //setting the distances between all nodes in the graph Utility.setDistances(graph); //graph.display(); Utility.saveCompleteGraph("AstridsTestGraph", "data/graphs", graph); } }
import pandas as pd
import numpy as np

# Sparse samples: there is no row for x = 2.
df = pd.DataFrame({
    'x': [0, 1, 3, 4, 5],
    'y': [1, 2, 4, 8, 16],
})

# Dense grid of x positions (0.0 .. 5.0) the data is re-sampled onto.
new_x = np.linspace(0, 5, 6)

# interpolate the missing data:
# index by x, align onto the dense grid (missing rows become NaN),
# then fill the NaNs by linear interpolation.
indexed = df.set_index('x')
aligned = indexed.reindex(new_x)
df = aligned.interpolate().reset_index()
print(df)
# NOTE(review): `ucnhash_CAPI` is never used below — this import looks like an
# editor auto-import artifact and is a candidate for removal.
from unicodedata import ucnhash_CAPI
from instances import app_state, CO_TWO, TEMPERATURE
import time
from slack_sdk.web.client import WebClient


def invariant() -> bool:
    """Monitoring invariant hook; currently always satisfied."""
    return True


def iteration():
    """One polling step: read both sensors and, at most once per hour, post
    the current CO2/temperature readings to Slack.

    Uses app_state.FLOAT_0 as the timestamp of the last message sent —
    presumably initialised in the `instances` module; verify there.
    """
    co2 = CO_TWO.read()
    temp = TEMPERATURE.read()
    tm = unchecked_get_time()
    diff = tm - app_state.FLOAT_0
    # 3600 s = at most one Slack notification per hour.
    if diff > 3600:
        unchecked_send_message(f"The current CO2 level is: {co2} ppm\nThe current room temperature is: {temp}°C")
        app_state.FLOAT_0 = tm


def unchecked_get_time() -> float:
    """Current wall-clock time in seconds since the epoch."""
    return time.time()


def unchecked_send_message(msg: str) -> None:
    """Post `msg` to the #info-inn319 Slack channel.

    NOTE(review): the token is a hard-coded placeholder ("<KEY>"); the real
    credential should come from configuration/environment, not source.
    """
    token = "<KEY>"
    slack_client = WebClient(token=token)
    slack_client.chat_postMessage(channel="info-inn319", text=msg)
#!/bin/bash

# Resolve this launcher's absolute location so Eclipse can be started from
# any working directory.
MYSELF=`readlink -f "$0"`
echo "MYSELF=$MYSELF"
CDIR=`dirname "$MYSELF"`
echo "CDIR=$CDIR"

#Gtk3 forced:
export SWT_GTK3=1
#Gtk2 forced:
#export SWT_GTK3=0

# Bug fix for Ubuntu menu proxy
export UBUNTU_MENUPROXY=0

# Bug fix for overlay scrollbars
export LIBOVERLAY_SCROLLBAR=0

# Remove /usr/lib/jni from LD_LIBRARY_PATH to avoid loading from Eclipse
export LD_LIBRARY_PATH=`echo "$LD_LIBRARY_PATH" | perl -p -e 's!(/usr/lib/jni:)|(:?/usr/lib/jni)!!'`

# Add Ubuntu JNI paths
#export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib/jni"
#echo "Native Library Path: $LD_LIBRARY_PATH"

# Source the user's run-control file if present; ~/.eclipse/eclipserc takes
# precedence over ~/.eclipserc (only one of the two is loaded).
if [ -r "$HOME/.eclipse/eclipserc" ]
then
    echo "Loading user configuration"
    . "$HOME/.eclipse/eclipserc"
elif [ -r "$HOME/.eclipserc" ]
then
    echo "Loading user configuration"
    . "$HOME/.eclipserc"
fi

# NOTE(review): ${project.version} is presumably substituted by Maven resource
# filtering at build time — confirm this script is filtered.
echo "Launching Eclipse-DSL for SARL Developers ${project.version}"

# $VMARGS is left unquoted so it can expand to multiple JVM options
# (it may be set by the sourced eclipserc above).
exec "$CDIR/eclipse" $VMARGS "$@"
package evilcraft.api.entities.tileentitites;

import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import net.minecraft.entity.player.EntityPlayer;
import net.minecraft.inventory.ISidedInventory;
import net.minecraft.item.ItemStack;
import net.minecraft.nbt.NBTTagCompound;
import net.minecraftforge.common.util.ForgeDirection;

import evilcraft.api.Helpers;
import evilcraft.api.inventory.SimpleInventory;

/**
 * A TileEntity with an internal inventory.
 * @author rubensworks
 *
 */
public abstract class InventoryTileEntity extends EvilCraftTileEntity implements ISidedInventory{

    protected SimpleInventory inventory;
    // The backing arrays are allocated at full inventory size; only the first
    // slotSidesSize.get(side) entries of each array are meaningful.
    protected Map<ForgeDirection, int[]> slotSides;
    protected Map<ForgeDirection, Integer> slotSidesSize;
    protected boolean sendUpdateOnInventoryChanged = false;

    /**
     * Make new tile with an inventory.
     * @param inventorySize Amount of slots in the inventory.
     * @param inventoryName Internal name of the inventory.
     * @param stackSize The maximum stacksize each slot can have
     */
    public InventoryTileEntity(int inventorySize, String inventoryName, int stackSize) {
        inventory = new SimpleInventory(inventorySize, inventoryName, stackSize);
        slotSides = new HashMap<ForgeDirection, int[]>();
        slotSidesSize = new HashMap<ForgeDirection, Integer>();
        for(ForgeDirection side : Helpers.DIRECTIONS) {
            // Init each side so it can theoretically hold all possible slots.
            // Plain int arrays are used instead of Integer lists so the getter
            // methods below need no autoboxing.
            slotSides.put(side, new int[inventorySize]);
            slotSidesSize.put(side, 0);
        }
    }

    /**
     * Make new tile with an inventory and the default stack limit of 64.
     * @param inventorySize Amount of slots in the inventory.
     * @param inventoryName Internal name of the inventory.
     */
    public InventoryTileEntity(int inventorySize, String inventoryName) {
        this(inventorySize, inventoryName, 64);
    }

    /**
     * Add mappings to slots to a certain (normalized) side of this TileEntity.
     * @param side The side to map this slots to.
     * @param slots The numerical representations of the slots to map.
     */
    protected void addSlotsToSide(ForgeDirection side, Collection<Integer> slots) {
        int[] currentSlots = slotSides.get(side);
        int offset = slotSidesSize.get(side);
        int i = 0;
        for(int slot : slots) {
            currentSlots[offset + i] = slot;
            i++;
        }
        slotSidesSize.put(side, offset + i);
    }

    /**
     * Get the internal inventory.
     * @return The SimpleInventory.
     */
    public SimpleInventory getInventory() {
        return inventory;
    }

    @Override
    public int getSizeInventory() {
        return inventory.getSizeInventory();
    }

    @Override
    public ItemStack getStackInSlot(int slotId) {
        // Guard against out-of-range slot ids instead of delegating blindly.
        if(slotId >= getSizeInventory() || slotId < 0)
            return null;
        return inventory.getStackInSlot(slotId);
    }

    @Override
    public ItemStack decrStackSize(int slotId, int count) {
        ItemStack itemStack = inventory.decrStackSize(slotId, count);
        if(isSendUpdateOnInventoryChanged())
            sendUpdate();
        return itemStack;
    }

    @Override
    public ItemStack getStackInSlotOnClosing(int slotId) {
        return inventory.getStackInSlotOnClosing(slotId);
    }

    @Override
    public void setInventorySlotContents(int slotId, ItemStack itemstack) {
        inventory.setInventorySlotContents(slotId, itemstack);
        if(isSendUpdateOnInventoryChanged())
            sendUpdate();
    }

    @Override
    public String getInventoryName() {
        return inventory.getInventoryName();
    }

    @Override
    public boolean hasCustomInventoryName() {
        return inventory.hasCustomInventoryName();
    }

    @Override
    public int getInventoryStackLimit() {
        return inventory.getInventoryStackLimit();
    }

    @Override
    public boolean isUseableByPlayer(EntityPlayer entityPlayer) {
        // Usable only while this tile is still the one in the world and the
        // player is within 8 blocks (64 = 8^2, squared distance).
        return worldObj.getTileEntity(xCoord, yCoord, zCoord) == this
                && entityPlayer.getDistanceSq(xCoord + 0.5D, yCoord + 0.5D, zCoord + 0.5D) <= 64.0D;
    }

    @Override
    public void openInventory() {

    }

    @Override
    public void closeInventory() {

    }

    @Override
    public void readFromNBT(NBTTagCompound data) {
        super.readFromNBT(data);
        inventory.readFromNBT(data);
    }

    @Override
    public void writeToNBT(NBTTagCompound data) {
        super.writeToNBT(data);
        inventory.writeToNBT(data);
    }

    @Override
    public int[] getAccessibleSlotsFromSide(int side) {
        ForgeDirection direction = ForgeDirection.getOrientation(side);
        int[] backing = slotSides.get(direction);
        if(backing == null) {
            // Direction not mapped (e.g. UNKNOWN): nothing is accessible.
            return new int[0];
        }
        // BUG FIX: previously the full backing array was returned, so the
        // unused trailing zero entries made slot 0 appear accessible from
        // every side. Only the mapped prefix is valid. Returning a copy also
        // prevents callers from mutating internal state.
        return Arrays.copyOf(backing, slotSidesSize.get(direction));
    }

    /**
     * Check whether the given slot is mapped to the given side.
     */
    private boolean canAccess(int slot, int side) {
        for(int slotAccess : getAccessibleSlotsFromSide(side)) {
            if(slotAccess == slot)
                return true;
        }
        return false;
    }

    @Override
    public boolean canInsertItem(int slot, ItemStack itemStack, int side) {
        return canAccess(slot, side) && this.isItemValidForSlot(slot, itemStack);
    }

    @Override
    public boolean canExtractItem(int slot, ItemStack itemStack, int side) {
        return canAccess(slot, side);
    }

    /**
     * If this tile should send block updates when the inventory has changed.
     * @return If it should send block updates.
     */
    public boolean isSendUpdateOnInventoryChanged() {
        return sendUpdateOnInventoryChanged;
    }

    /**
     * If this tile should send block updates when the inventory has changed.
     * @param sendUpdateOnInventoryChanged If it should send block updates.
     */
    public void setSendUpdateOnInventoryChanged(
            boolean sendUpdateOnInventoryChanged) {
        this.sendUpdateOnInventoryChanged = sendUpdateOnInventoryChanged;
    }

}
def synchronize_packages(packages):
    """Return the packages ordered so each package precedes its dependencies.

    `packages` maps a package name to the list of packages it depends on.
    The result is a reversed depth-first post-order over the dependency
    graph; dependencies that are not keys of `packages` are treated as
    having no dependencies of their own.
    """
    ordered = []
    seen = set()

    def visit(pkg):
        # Expand each package at most once.
        if pkg in seen:
            return
        seen.add(pkg)
        for dep in packages.get(pkg, ()):
            visit(dep)
        # Post-order: a package is emitted only after everything it needs.
        ordered.append(pkg)

    for pkg in packages:
        visit(pkg)

    # Reversing the post-order puts each package before its dependencies.
    return ordered[::-1]
'use strict';

const Products = require('./products-schema');

/**
 * Thin repository layer over the `Products` Mongoose model.
 * Every method returns a promise (Mongoose queries are thenable).
 */
class ProductsRepository {
  /** Fetch every product record. */
  getAll() {
    return Products.find();
  }

  /**
   * Fetch a single record by its ObjectId string, or resolve to null when
   * the id is malformed (instead of letting Mongoose throw a CastError).
   */
  get(_id) {
    // A valid ObjectId string is exactly 24 hex characters.
    // BUG FIX: the original called Products.resolve(null), which is not a
    // function on a Mongoose model — Promise.resolve(null) was intended.
    // Also tightened [0-9a-z] to [0-9a-f]: ObjectIds are hexadecimal.
    if (!/^[0-9a-f]{24}$/i.test(_id)) {
      return Promise.resolve(null);
    }
    return Products.findOne({
      _id: _id,
    });
  }

  /** Create and persist a new record. */
  post(record) {
    let mongoCategory = new Products(record);
    return mongoCategory.save();
  }

  /**
   * Update a record by id.
   * BUG FIX: the old callback form discarded the result and returned
   * undefined; the query promise is now returned so callers can await it.
   */
  put(_id, record) {
    return Products.findByIdAndUpdate(_id, record);
  }

  /**
   * Delete a record by id; resolves with the removed document.
   * BUG FIX: same as put — the callback form returned undefined.
   */
  delete(_id) {
    return Products.findByIdAndDelete(_id);
  }
}

module.exports = ProductsRepository;
#!/bin/bash

# Run tempest: SSH into the `tempest` Vagrant box and execute the test
# wrapper installed in the box, as root.
# NOTE(review): the meaning of the -s flag is defined by run_tests.sh,
# which is not visible here — confirm before changing.
vagrant ssh tempest -c "sudo /var/lib/tempest/run_tests.sh -s"
<reponame>ujjwalguptaofficial/infinity<filename>dist/ts/helpers/parse_cookie.d.ts
/**
 * Parses a raw `Cookie` header string into a name → value map.
 * (Declaration only; the implementation lives in the compiled JS.)
 */
export declare const parseCookie: (cookie: string) => {
    [key: string]: string;
};
<gh_stars>0
'''
Author: yangzuo
Date: 2021-04-20 15:48:37
Email: <EMAIL>
LastEditors: yangzuo
LastEditTime: 2021-04-20 15:51:18
FilePath: /leetcode_solution/solutions/Tree/104.py
'''
'''
LeetCode 104. Maximum Depth of Binary Tree.

Given a binary tree, return its maximum depth: the number of nodes along
the longest path from the root node down to the farthest leaf node.

Note: a leaf is a node with no children.

Example: given the binary tree [3,9,20,null,null,15,7]

    3
   / \
  9  20
    /  \
   15   7

its maximum depth is 3.
'''


# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right


class Solution:
    # NOTE(review): the TreeNode annotation requires TreeNode to be defined by
    # the judge environment; importing this module standalone raises NameError.
    def maxDepth(self, root: TreeNode) -> int:
        # An empty subtree has depth 0.
        if root is None:
            return 0
        # Depth of a node is 1 plus the deeper of its two subtrees.
        return max(self.maxDepth(root.left), self.maxDepth(root.right)) + 1
// main_module.rs mod from_plist; pub mod ops; mod plist; mod region; mod stretch; mod to_plist; pub mod font { pub use super::from_plist::FromPlist; pub use super::plist::Plist; pub use super::region::Region; pub use super::stretch::stretch; pub use super::to_plist::ToPlist; // Define other font-related components here pub struct Component; pub struct Font; pub struct Glyph; pub struct Layer; pub struct Node; pub struct NodeType; pub struct Path; } pub use font::{Component, Font, Glyph, Layer, Node, NodeType, Path}; pub use from_plist::FromPlist; pub use plist::Plist; pub use region::Region; pub use stretch::stretch; pub use to_plist::ToPlist;
<!-- DOCTYPE added: without it browsers render in quirks mode. -->
<!DOCTYPE html>
<html lang="en">
<head>
    <!-- Declare the encoding explicitly so text is not mis-decoded. -->
    <meta charset="utf-8">
    <title>My HTML Page</title>
</head>
<body>
    <h1>My HTML Page</h1>
    <article>
        Hello world!
    </article>
</body>
</html>
<gh_stars>10-100
import React from "react";

// Stateless presentational component: renders a Bootstrap-style card whose
// body holds placeholder lorem-ipsum copy. Takes no props.
function Card() {
  return (
    <div className="card">
      <div className="card-body">
        <p className="card-text">
          Aliquip dolore commodo nostrud minim. Cillum do enim non ullamco.
          Commodo magna eu ex mollit sunt amet fugiat. In irure eu enim id ea
          sit nostrud incididunt ad adipisicing.Aliquip dolore commodo nostrud
          minim. Cillum do enim non ullamco. Commodo magna eu ex mollit sunt
          amet fugiat. In irure eu enim id ea sit nostrud incididunt ad
          adipisicing.
        </p>
      </div>
    </div>
  );
}

export default Card;
(function($) {

    /**
     * Analog clock renderer. Draws a clock-face image onto `canvas` and
     * overlays hour/minute/second hands, redrawing once per second.
     *
     * @param clock   URL of the clock-face image.
     * @param canvas  A <canvas> DOM element.
     * @param options Overrides merged (deep) into jClock.defaults.
     */
    var jClock = window.jClock = function(clock, canvas, options) {
        var ctx, img;

        // Canvas isn't supported, abort
        if(!(ctx = canvas.getContext('2d'))) return;

        options = $.extend(true, {}, jClock.defaults, options);

        img = new Image();
        img.src = clock;

        // Need to wait until after the image is loaded
        img.onload = function() {
            tick();
            setInterval(tick, 1000);
        };

        // The ticker, draws the clock upon each tick
        function tick() {
            var now = new Date(),
                sec = now.getSeconds(),
                min = now.getMinutes(),
                hour = now.getHours();

            // Fold 24h time onto the 12h dial.
            // NOTE(review): hour 12 is left as 12 rather than 0 — harmless,
            // since 12 and 0 map to the same hand angle.
            if(hour > 12) hour = hour % 12;

            // do the clock
            drawClock();

            // do the second hand (Math.PI/30 radians per second)
            if(options.secondHand === true)
                drawHand(sec * Math.PI/30, options.second);

            // do the minute hand (seconds contribute a fractional minute)
            drawHand((min + sec/60) * Math.PI/30, options.minute);

            // do the hour hand (minutes/seconds contribute a fractional hour)
            drawHand((hour + sec/3600 + min/60) * Math.PI/6, options.hour);
        }

        // Clears the canvas and repaints the clock-face image.
        function drawClock() {
            // NOTE(review): clearRect is passed (height, width) while drawImage
            // gets (width, height) — only harmless because the defaults are
            // square; confirm before allowing non-square sizes.
            ctx.clearRect(0, 0, options.height, options.width);
            ctx.drawImage(img, 0, 0, options.width, options.height);
            ctx.save();
        }

        /**
         * Draws one hand at the given angle.
         * @param radians Angle measured clockwise from 12 o'clock (after the
         *                -90° correction below).
         * @param opts    Hand style: color, width, start, end, alpha.
         */
        function drawHand(radians, opts) {
            radians -= 90 * Math.PI/180; // fix orientation

            ctx.save();
            ctx.beginPath();
            // Move the origin to the clock centre before rotating.
            ctx.translate(options.height/2, options.width/2);

            // Set hand styles
            ctx.strokeStyle = opts.color;
            ctx.lineWidth = opts.width;
            ctx.globalAlpha = opts.alpha;

            if (options.shadow === true) {
                ctx.shadowOffsetX = 2;
                ctx.shadowOffsetY = 2;
                ctx.shadowBlur = 1;
                ctx.shadowColor = 'rgba(0, 0, 0, 0.5)';
            }

            // A negative `start` extends the hand past the centre (tail).
            ctx.rotate(radians);
            ctx.moveTo(opts.start, 0);
            ctx.lineTo(opts.end, 0);
            ctx.stroke();
            ctx.restore();
        }
    };

    // Default options
    jClock.defaults = {
        height: 125,
        width: 125,
        secondHand: true,
        shadow: true,
        second: {
            color: '#f00',
            width: 2,
            start: -10,
            end: 35,
            alpha: 1
        },
        minute: {
            color: '#fff',
            width: 3,
            start: -7,
            end: 30,
            alpha: 1
        },
        hour: {
            color: '#fff',
            width: 4,
            start: -7,
            end: 20,
            alpha: 1
        }
    };

})(jQuery);
#!/bin/sh
# Build the stylesheet: run PostCSS over resources/styles.css and write the
# processed output to the public directory served by the app.
# NODE_ENV=dev selects the development PostCSS configuration — presumably
# consumed by postcss.config.js, which is not visible here.
NODE_ENV=dev npx postcss resources/styles.css -o resources/public/tictactoe.css
<gh_stars>0 -- phpMyAdmin SQL Dump -- version 4.5.4.1deb2ubuntu2 -- http://www.phpmyadmin.net -- -- Host: localhost -- Generation Time: Jun 20, 2018 at 11:55 AM -- Server version: 5.7.21-0ubuntu0.16.04.1 -- PHP Version: 7.0.28-0ubuntu0.16.04.1 SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO"; SET time_zone = "+00:00"; /*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; /*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; /*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; /*!40101 SET NAMES utf8mb4 */; -- -- Database: `pssdb` -- -- -------------------------------------------------------- -- -- Table structure for table `absents` -- CREATE TABLE `absents` ( `id` int(10) UNSIGNED NOT NULL, `employee_id` int(11) NOT NULL, `day` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `month_year` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -------------------------------------------------------- -- -- Table structure for table `add_to_carts` -- CREATE TABLE `add_to_carts` ( `id` int(10) UNSIGNED NOT NULL, `user_id` int(11) NOT NULL, `category` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `brand` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `model` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `color` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `price` int(11) NOT NULL, `quantity` int(11) NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -------------------------------------------------------- -- -- Table structure for table `categories` -- CREATE TABLE `categories` ( `id` int(10) UNSIGNED NOT NULL, `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB 
DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `categories` -- INSERT INTO `categories` (`id`, `name`, `created_at`, `updated_at`) VALUES (1, 'Smartphone', '2018-06-05 10:49:09', '2018-06-05 10:49:09'), (2, 'Keypad', '2018-06-05 10:49:25', '2018-06-05 10:49:25'), (3, 'Tablet', '2018-06-05 10:49:34', '2018-06-05 10:49:34'), (4, 'Battery', '2018-06-05 12:23:37', '2018-06-05 12:23:37'), (6, 'Charger', '2018-06-13 23:05:51', '2018-06-13 23:05:51'), (7, 'Power Bank', '2018-06-14 09:11:54', '2018-06-14 09:11:54'), (8, 'Smartwatch', '2018-06-16 07:07:56', '2018-06-16 07:07:56'), (9, 'Gearfit', '2018-06-16 23:19:20', '2018-06-16 23:19:20'), (11, 'Earphone', '2018-06-19 02:29:02', '2018-06-19 02:29:02'), (12, 'Speaker', '2018-06-19 12:06:01', '2018-06-19 12:06:01'), (13, 'LCD', '2018-06-19 12:06:05', '2018-06-19 12:06:05'), (14, 'Bluetooth Speaker', '2018-06-19 20:26:39', '2018-06-19 20:26:39'); -- -------------------------------------------------------- -- -- Table structure for table `category_types` -- CREATE TABLE `category_types` ( `id` int(10) UNSIGNED NOT NULL, `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `category_types` -- INSERT INTO `category_types` (`id`, `name`, `created_at`, `updated_at`) VALUES (1, 'Smartphone', '2018-06-12 12:54:52', '2018-06-12 12:59:46'), (2, 'Keypad', '2018-06-19 11:43:03', '2018-06-19 11:43:03'), (3, 'Accessory', '2018-06-12 12:55:04', '2018-06-12 12:55:04'), (4, 'Feature', '2018-06-12 12:55:10', '2018-06-12 12:55:10'), (5, 'Tablet', '2018-06-12 22:52:08', '2018-06-12 22:52:08'); -- -------------------------------------------------------- -- -- Table structure for table `costs` -- CREATE TABLE `costs` ( `id` int(10) UNSIGNED NOT NULL, `category_type` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `category` 
varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `brand` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `model` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `color` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `quantity` int(11) NOT NULL, `price` int(11) NOT NULL, `cost` int(11) NOT NULL, `sale_or_service` tinyint(1) NOT NULL, `day` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `month_year` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `image` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `costs` -- INSERT INTO `costs` (`id`, `category_type`, `category`, `brand`, `model`, `color`, `quantity`, `price`, `cost`, `sale_or_service`, `day`, `month_year`, `image`, `created_at`, `updated_at`) VALUES (1, 'Smartphone', 'Smartphone', 'MI', 'Note 4x', 'Rose Gold', 3, 200000, 600000, 0, '20', 'June 2018', '5b293e4fe5dd8_mi1.png', '2018-06-19 11:03:03', '2018-06-19 11:03:03'), (2, 'Smartphone', 'Smartphone', 'Honor', 'H7', 'Blue', 3, 300000, 900000, 0, '20', 'June 2018', '5b293f8e1b862_hua.png', '2018-06-19 11:08:22', '2018-06-19 11:08:22'), (3, 'Smartphone', 'Smartphone', 'Oppo', 'Neo 5', 'Black', 3, 150000, 450000, 0, '20', 'June 2018', '5b293ff46acd3_yoyo_5-500x500.png', '2018-06-19 11:10:04', '2018-06-19 11:10:04'), (4, 'Smartphone', 'Smartphone', 'Samsung', 'Galaxy J7', 'Blue', 3, 250000, 750000, 0, '20', 'June 2018', '5b29414293556_Samsung-C7-Pro-Front-Back.png', '2018-06-19 11:15:38', '2018-06-19 11:15:38'), (5, 'Accessory', 'Battery', 'MI', 'Battery 1', 'Black', 3, 5000, 15000, 1, '20', 'June 2018', '5b2943c1c6120_b16.jpg', '2018-06-19 11:26:17', '2018-06-19 11:26:17'), (6, 'Keypad', 'Keypad', 'Samsung', 'samsung2a', 'Red', 3, 50000, 150000, 0, '20', 'June 2018', '5b294905cb425_redf1.png', '2018-06-19 11:48:45', '2018-06-19 11:48:45'), (7, 'Keypad', 'Keypad', 'Nokia', 
'nokia2n', 'Grey', 3, 50000, 150000, 0, '20', 'June 2018', '5b2949d1ddbca_nokia.png', '2018-06-19 11:52:09', '2018-06-19 11:52:09'), (8, 'Keypad', 'Keypad', 'ZTE', 'zte2z', 'Black', 3, 50000, 150000, 0, '20', 'June 2018', '5b294a4817fa2_kf1.png', '2018-06-19 11:54:08', '2018-06-19 11:54:08'), (9, 'Keypad', 'Keypad', 'JCB', 'jcb2j', 'White', 3, 50000, 150000, 0, '20', 'June 2018', '5b294adf718bf_kf3.png', '2018-06-19 11:56:39', '2018-06-19 11:56:39'), (10, 'Accessory', 'LCD', 'MI', 'mi2lcd', 'Black', 3, 50000, 150000, 1, '20', 'June 2018', '5b294e0e65239_lcd.jpg', '2018-06-19 12:10:14', '2018-06-19 12:10:14'), (11, 'Accessory', 'Battery', 'Huawei', 'huawei2battery', 'Black', 3, 7000, 21000, 1, '20', 'June 2018', '5b294ec25cc1f_b8.jpg', '2018-06-19 12:13:14', '2018-06-19 12:13:14'), (12, 'Accessory', 'Speaker', 'Samsung', 'samsung2speaker', 'Black', 3, 8000, 24000, 1, '20', 'June 2018', '5b294f2b6c664_speaker1.jpg', '2018-06-19 12:14:59', '2018-06-19 12:14:59'), (17, 'Accessory', 'Power Bank', 'MI', 'mi2pb', 'Brown', 3, 10000, 30000, 0, '20', 'June 2018', '5b29c139cf889_p8.jpeg', '2018-06-19 20:21:37', '2018-06-19 20:21:37'), (18, 'Accessory', 'Power Bank', 'Huawei', 'huawei2pb', 'Green', 3, 10000, 30000, 0, '20', 'June 2018', '5b29c18d80d28_p6.jpeg', '2018-06-19 20:23:01', '2018-06-19 20:23:01'), (19, 'Accessory', 'Power Bank', 'Samsung', 'samsung2pb', 'Blue', 3, 10000, 30000, 0, '20', 'June 2018', '5b29c1e9e148f_p9.jpeg', '2018-06-19 20:24:33', '2018-06-19 20:24:33'), (21, 'Accessory', 'Bluetooth Speaker', 'MI', 'mi2bs', 'Purple', 3, 8000, 24000, 0, '20', 'June 2018', '5b29c34bed8a1_bs9.jpeg', '2018-06-19 20:30:27', '2018-06-19 20:30:27'), (22, 'Accessory', 'Bluetooth Speaker', 'Huawei', 'huawei2bs', 'Red', 3, 9000, 27000, 0, '20', 'June 2018', '5b29c38a7de80_bs6.jpeg', '2018-06-19 20:31:30', '2018-06-19 20:31:30'), (23, 'Accessory', 'Bluetooth Speaker', 'Samsung', 'samsung2bs', 'Black', 3, 10000, 30000, 0, '20', 'June 2018', '5b29c3d27609f_bs.jpeg', '2018-06-19 
20:32:42', '2018-06-19 20:32:42'), (24, 'Feature', 'Smartwatch', 'MI', 'mi2sw', 'Black', 3, 50000, 150000, 0, '20', 'June 2018', '5b29c72b43ee9_smw2.jpg', '2018-06-19 20:46:59', '2018-06-19 20:46:59'), (25, 'Feature', 'Smartwatch', 'Huawei', 'huawei2sw', 'Green', 3, 40000, 120000, 0, '20', 'June 2018', '5b29c76d84b9b_smw1.png', '2018-06-19 20:48:05', '2018-06-19 20:48:05'), (26, 'Feature', 'Smartwatch', 'Samsung', 'samsung2sw', 'Brown', 3, 60000, 180000, 0, '20', 'June 2018', '5b29c7ba83d30_516cEX6KWPL.jpg', '2018-06-19 20:49:22', '2018-06-19 20:49:22'), (27, 'Feature', 'Gearfit', 'MI', 'mi2gf', 'Pink', 3, 50000, 150000, 0, '20', 'June 2018', '5b29c8547b251_Huawei-Honor-Z1-Smart-Watches-SDL348689156-1-6ed54-500x500.png', '2018-06-19 20:51:56', '2018-06-19 20:51:56'), (28, 'Feature', 'Gearfit', 'Huawei', 'huawei2gf', 'Brown', 3, 50000, 150000, 0, '20', 'June 2018', '5b29c8982b3fc_sgearf1.jpg', '2018-06-19 20:53:04', '2018-06-19 20:53:04'), (29, 'Feature', 'Gearfit', 'Samsung', 'samsung2gf', 'Red', 3, 50000, 150000, 0, '20', 'June 2018', '5b29c8e0e3e0c_GalleryImageR_01.jpeg', '2018-06-19 20:54:16', '2018-06-19 20:54:16'), (30, 'Tablet', 'Tablet', 'MI', 'mi2tablet', 'Rose Gold', 3, 100000, 300000, 0, '20', 'June 2018', '5b29d111305fd_Xiaomi-Mi-Pad-3-I.jpg', '2018-06-19 21:29:13', '2018-06-19 21:29:13'), (31, 'Tablet', 'Tablet', 'Huawei', 'huawei2tablet', 'Grey', 3, 80000, 240000, 0, '20', 'June 2018', '5b29d1948af2d_huaweimediam3lite10_tablet_white.jpg', '2018-06-19 21:31:24', '2018-06-19 21:31:24'), (32, 'Tablet', 'Tablet', 'Samsung', 'samsung2tablet', 'White', 3, 90000, 270000, 0, '20', 'June 2018', '5b29d1e24990b_0fbea5afdca363c4abf9c871b1fd07da.jpg', '2018-06-19 21:32:42', '2018-06-19 21:32:42'), (33, 'Smartphone', 'Smartphone', 'Meizu', 'meizu2m', 'Black', 3, 250000, 750000, 0, '21', 'May 2018', '5b010a8ee336c_768135-meizu-m3-max-3d.png', '2018-05-19 23:11:34', '2018-05-19 23:11:34'); -- -------------------------------------------------------- -- -- Table 
structure for table `customer_services` -- CREATE TABLE `customer_services` ( `id` int(10) UNSIGNED NOT NULL, `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `brand_id` int(10) UNSIGNED NOT NULL, `model_id` int(10) UNSIGNED NOT NULL, `error` text COLLATE utf8mb4_unicode_ci NOT NULL, `accessory_name` json NOT NULL, `accessory_model_no` json NOT NULL, `day` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `month_year` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `phone_no` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `price` int(11) NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `customer_services` -- INSERT INTO `customer_services` (`id`, `name`, `brand_id`, `model_id`, `error`, `accessory_name`, `accessory_model_no`, `day`, `month_year`, `phone_no`, `price`, `created_at`, `updated_at`) VALUES (1, '<NAME>', 1, 1, 'Battery', '["1"]', '["1"]', '19', 'June 2018', '09452337012', 7000, '2018-06-19 11:29:15', '2018-06-19 11:29:15'), (2, '<NAME>', 2, 10, 'Battery', '["1"]', '["3"]', '19', 'June 2018', '09452337010', 9000, '2018-06-19 12:18:19', '2018-06-19 12:18:19'), (3, '<NAME>', 1, 1, 'LCD', '["2"]', '["2"]', '19', 'June 2018', '09452337018', 70000, '2018-06-19 12:20:10', '2018-06-19 12:20:10'); -- -------------------------------------------------------- -- -- Table structure for table `departments` -- CREATE TABLE `departments` ( `id` int(10) UNSIGNED NOT NULL, `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `departments` -- INSERT INTO `departments` (`id`, `name`, `created_at`, `updated_at`) VALUES (1, 'Sale Department', '2018-06-09 21:05:03', '2018-06-09 21:17:23'), (2, 'Technical Department', '2018-06-09 23:33:37', 
'2018-06-09 23:33:37'); -- -------------------------------------------------------- -- -- Table structure for table `done_sales` -- CREATE TABLE `done_sales` ( `id` int(10) UNSIGNED NOT NULL, `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `email` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `category_type` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `category` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `brand` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `model` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `color` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `quantity` int(11) NOT NULL, `price` int(11) NOT NULL, `image` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `day` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `month_year` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `done_sales` -- INSERT INTO `done_sales` (`id`, `name`, `email`, `category_type`, `category`, `brand`, `model`, `color`, `quantity`, `price`, `image`, `day`, `month_year`, `created_at`, `updated_at`) VALUES (1, 'Admin', '<EMAIL>', 'Smartphone', 'Smartphone', 'Samsung', 'Galaxy J7', 'Blue', 1, 280000, '5b29414293556_Samsung-C7-Pro-Front-Back.png', '20', 'June 2018', '2018-06-19 22:40:11', '2018-06-19 22:40:11'), (2, '<NAME>', '<EMAIL>', 'Smartphone', 'Smartphone', 'Meizu', 'meizu2m', 'Black', 1, 300000, '5b010a8ee336c_768135-meizu-m3-max-3d.png', '20', 'May 2018', '2018-05-20 00:04:10', '2018-05-20 00:04:10'); -- -------------------------------------------------------- -- -- Table structure for table `employees` -- CREATE TABLE `employees` ( `id` int(10) UNSIGNED NOT NULL, `image` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `nrc` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `account_no` varchar(255) 
COLLATE utf8mb4_unicode_ci NOT NULL, `department_id` int(10) UNSIGNED NOT NULL, `status_id` int(10) UNSIGNED NOT NULL, `gender` tinyint(1) NOT NULL, `dob` date NOT NULL, `email` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `phone_no` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `address` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `start_date` date NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `employees` -- INSERT INTO `employees` (`id`, `image`, `name`, `nrc`, `account_no`, `department_id`, `status_id`, `gender`, `dob`, `email`, `phone_no`, `address`, `start_date`, `created_at`, `updated_at`) VALUES (5, '5b28b940ca868_user8-128x128.jpg', '<NAME>', '7/YTN(N)-129691', '2345567823454567', 2, 2, 0, '2016-01-03', '<EMAIL>', '09452337012', 'Pyay', '2017-01-01', '2018-06-19 01:35:20', '2018-06-19 01:35:20'); -- -------------------------------------------------------- -- -- Table structure for table `employee_salaries` -- CREATE TABLE `employee_salaries` ( `id` int(10) UNSIGNED NOT NULL, `employee_id` int(10) UNSIGNED NOT NULL, `employee_name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `department_id` int(10) UNSIGNED NOT NULL, `status_id` int(10) UNSIGNED NOT NULL, `salary` int(11) NOT NULL, `month_year` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -------------------------------------------------------- -- -- Table structure for table `featured_details` -- CREATE TABLE `featured_details` ( `id` int(10) UNSIGNED NOT NULL, `category` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `brand` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `model` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `image` varchar(255) COLLATE utf8mb4_unicode_ci NOT 
NULL, `color` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `price` int(11) NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `featured_details` -- INSERT INTO `featured_details` (`id`, `category`, `brand`, `model`, `image`, `color`, `price`, `created_at`, `updated_at`) VALUES (1, 'Keypad', 'Samsung', 'samsung2a', '5b294945b8546_redf1.png', 'Red', 70000, '2018-06-19 11:49:49', '2018-06-19 11:49:49'), (2, 'Keypad', 'Nokia', 'nokia2n', '5b2949ef92138_nokia.png', 'Grey', 70000, '2018-06-19 11:52:39', '2018-06-19 11:52:39'), (3, 'Keypad', 'ZTE', 'zte2z', '5b294a60e2dbf_kf1.png', 'Black', 70000, '2018-06-19 11:54:32', '2018-06-19 11:54:32'), (4, 'Keypad', 'JCB', 'jcb2j', '5b294b2289c32_kf3.png', 'White', 70000, '2018-06-19 11:57:46', '2018-06-19 11:57:46'), (8, 'Power Bank', 'MI', 'mi2pb', '5b29c152a1ef6_p8.jpeg', 'Brown', 13000, '2018-06-19 20:22:02', '2018-06-19 20:22:02'), (9, 'Power Bank', 'Huawei', 'huawei2pb', '5b29c1a59b916_p6.jpeg', 'Green', 13000, '2018-06-19 20:23:25', '2018-06-19 20:23:25'), (10, 'Power Bank', 'Samsung', 'samsung2pb', '5b29c20590c20_p9.jpeg', 'Blue', 13000, '2018-06-19 20:25:01', '2018-06-19 20:25:01'), (11, 'Bluetooth Speaker', 'MI', 'mi2bs', '5b29c36779831_bs9.jpeg', 'Purple', 10000, '2018-06-19 20:30:55', '2018-06-19 20:30:55'), (12, 'Bluetooth Speaker', 'Huawei', 'huawei2bs', '5b29c3a4564fa_bs6.jpeg', 'Red', 12000, '2018-06-19 20:31:56', '2018-06-19 20:31:56'), (13, 'Bluetooth Speaker', 'Samsung', 'samsung2bs', '5b29c3edb5ab7_bs.jpeg', 'Black', 13000, '2018-06-19 20:33:09', '2018-06-19 20:33:09'), (14, 'Smartwatch', 'MI', 'mi2sw', '5b29c74385533_smw2.jpg', 'Black', 55000, '2018-06-19 20:47:23', '2018-06-19 20:47:23'), (15, 'Smartwatch', 'Huawei', 'huawei2sw', '5b29c78620e8a_smw1.png', 'Green', 45000, '2018-06-19 20:48:30', '2018-06-19 20:48:30'), (16, 'Smartwatch', 'Samsung', 'samsung2sw', 
'5b29c7dd17ba4_516cEX6KWPL.jpg', 'Brown', 65000, '2018-06-19 20:49:57', '2018-06-19 20:49:57'), (17, 'Gearfit', 'MI', 'mi2gf', '5b29c86d56ef0_Huawei-Honor-Z1-Smart-Watches-SDL348689156-1-6ed54-500x500.png', 'Pink', 55000, '2018-06-19 20:52:21', '2018-06-19 20:52:21'), (18, 'Gearfit', 'Huawei', 'huawei2gf', '5b29c8af94f5a_sgearf1.jpg', 'Brown', 55000, '2018-06-19 20:53:27', '2018-06-19 20:53:27'), (19, 'Gearfit', 'Samsung', 'samsung2gf', '5b29c8f9a7de2_GalleryImageR_01.jpeg', 'Red', 55000, '2018-06-19 20:54:41', '2018-06-19 20:54:41'), (20, 'Tablet', 'Huawei', 'huawei2tablet', '5b29d1b00dfad_huaweimediam3lite10_tablet_white.jpg', 'Grey', 100000, '2018-06-19 21:31:52', '2018-06-19 21:31:52'); -- -------------------------------------------------------- -- -- Table structure for table `migrations` -- CREATE TABLE `migrations` ( `id` int(10) UNSIGNED NOT NULL, `migration` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `batch` int(11) NOT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `migrations` -- INSERT INTO `migrations` (`id`, `migration`, `batch`) VALUES (103, '2014_10_12_100000_create_password_resets_table', 1), (105, '2018_04_24_181134_create_phone_brands_table', 1), (132, '2018_05_16_072129_create_phone_brands_table', 2), (165, '2018_05_21_182936_create_phone_brands_table', 4), (166, '2018_05_16_072328_create_phone_models_table', 5), (167, '2018_05_16_072832_create_phone_details_table', 6), (168, '2018_05_16_082254_create_service_models_table', 7), (170, '2018_05_16_082658_create_customer_services_table', 9), (194, '2018_06_02_063708_create_phone_details_table', 13), (205, '2018_06_02_064521_create_costs_table', 15), (207, '2018_06_02_070134_create_sale_products_table', 15), (209, '2018_06_02_162841_create_costs_table', 16), (228, '2014_10_12_000000_create_users_table', 17), (229, '2018_04_21_190138_create_roles_table', 17), (230, '2018_05_02_100113_create_role_users_table', 17), (231, 
'2018_05_22_110605_create_password_resets_table', 17), (232, '2018_06_05_164547_create_categories_table', 18), (233, '2018_06_05_164923_create_phone_brands_table', 19), (234, '2018_06_05_165015_create_sale_models_table', 20), (235, '2018_06_05_165407_create_service_models_table', 21), (236, '2018_06_05_165506_create_phone_services_table', 22), (237, '2018_06_05_171008_create_customer_services_table', 23), (238, '2018_06_05_171129_create_other_costs_table', 24), (239, '2018_06_05_171255_create_service_products_table', 25), (240, '2018_06_05_171701_create_phone_details_table', 26), (241, '2018_06_05_171739_create_sale_products_table', 27), (242, '2018_06_05_171840_create_costs_table', 28), (243, '2018_06_05_172742_create_phone_models_table', 29), (244, '2018_06_05_192214_create_service_products_table', 30), (247, '2018_06_08_084513_create_add_to_carts_table', 31), (248, '2018_06_10_030157_create_departments_table', 32), (249, '2018_06_10_030353_create_statuses_table', 33), (250, '2018_06_10_030618_create_employees_table', 34), (251, '2018_06_10_030718_create_salaries_table', 35), (252, '2018_06_10_031203_create_employee_salaries_table', 36), (254, '2018_06_10_062404_create_absents_table', 37), (255, '2018_06_11_071456_create_sale_products_table', 38), (256, '2018_06_12_183959_create_category_types_table', 39), (257, '2018_06_12_184108_create_sale_products_table', 40), (258, '2018_06_12_184314_create_costs_table', 40), (259, '2018_06_13_110019_create_featured_details_table', 41), (260, '2018_06_14_055656_create_employee_salaries_table', 42), (263, '2018_06_14_100941_create_done_sales_table', 43), (266, '2018_06_18_092116_create_permissions_table', 44), (267, '2018_06_20_044219_create_done_sales_table', 45); -- -------------------------------------------------------- -- -- Table structure for table `other_costs` -- CREATE TABLE `other_costs` ( `id` int(10) UNSIGNED NOT NULL, `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `price` int(11) NOT NULL, `start_day` 
varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `start_month_year` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `expire_day` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `expire_month_year` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `other_costs` -- INSERT INTO `other_costs` (`id`, `name`, `price`, `start_day`, `start_month_year`, `expire_day`, `expire_month_year`, `created_at`, `updated_at`) VALUES (1, 'Seat', 10000, '20', 'May 2018', '20', 'May 2018', '2018-05-20 00:12:45', '2018-05-20 00:12:45'); -- -------------------------------------------------------- -- -- Table structure for table `password_resets` -- CREATE TABLE `password_resets` ( `email` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `token` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `created_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -------------------------------------------------------- -- -- Table structure for table `permissions` -- CREATE TABLE `permissions` ( `id` int(10) UNSIGNED NOT NULL, `permissions` json NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `permissions` -- INSERT INTO `permissions` (`id`, `permissions`, `created_at`, `updated_at`) VALUES (3, '{"show-salary": true, "show-status": true, "delete-salary": true, "delete-status": true, "show-employee": true, "update-salary": true, "update-status": true, "create-employee": true, "delete-employee": true, "show-department": true, "update-employee": true, "show-doneservice": true, "delete-department": true, "show-employeelist": true, "show-phoneservice": true, "update-department": true, "delete-doneservice": true, 
"update-doneservice": true, "delete-employeelist": true, "delete-phoneservice": true, "show-employeesalary": true, "show-serviceproduct": true, "update-employeelist": true, "update-phoneservice": true, "delete-employeesalary": true, "delete-serviceproduct": true, "update-employeesalary": true, "update-serviceproduct": true}', '2018-06-18 18:33:22', '2018-06-18 18:33:22'); -- -------------------------------------------------------- -- -- Table structure for table `phone_brands` -- CREATE TABLE `phone_brands` ( `id` int(10) UNSIGNED NOT NULL, `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `phone_brands` -- INSERT INTO `phone_brands` (`id`, `name`, `created_at`, `updated_at`) VALUES (1, 'MI', '2018-06-05 10:50:20', '2018-06-05 10:50:20'), (2, 'Huawei', '2018-06-05 12:02:05', '2018-06-05 12:02:05'), (8, 'Samsung', '2018-06-07 10:45:47', '2018-06-07 10:45:47'), (9, 'Oppo', '2018-06-11 00:55:03', '2018-06-11 00:55:03'), (11, 'Nokia', '2018-06-15 22:10:16', '2018-06-15 22:10:16'), (14, 'JCB', '2018-06-15 23:27:49', '2018-06-15 23:27:49'), (15, 'ZTE', '2018-06-19 01:59:01', '2018-06-19 01:59:01'), (16, 'Honor', '2018-06-19 10:54:36', '2018-06-19 10:54:36'), (17, 'Meizu', '2018-05-19 23:09:59', '2018-05-19 23:09:59'); -- -------------------------------------------------------- -- -- Table structure for table `phone_details` -- CREATE TABLE `phone_details` ( `id` int(10) UNSIGNED NOT NULL, `brand` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `category` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `image` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `model` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `display` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `network` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `connection` varchar(255) COLLATE 
utf8mb4_unicode_ci NOT NULL, `front_camera` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `back_camera` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `android_version` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `color` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `storage` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `RAM` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `price` int(11) NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `phone_details` -- INSERT INTO `phone_details` (`id`, `brand`, `category`, `image`, `model`, `display`, `network`, `connection`, `front_camera`, `back_camera`, `android_version`, `color`, `storage`, `RAM`, `price`, `created_at`, `updated_at`) VALUES (1, 'MI', 'Smartphone', '5b293eb14d3cb_mi1.png', 'Note 4x', '100x50', '4G', 'Dual', '4MP', '2MP', '6.0.0', 'Rose Gold', '32GB', '4GB', 250000, '2018-06-19 11:04:41', '2018-06-19 11:04:41'), (2, 'Honor', 'Smartphone', '5b29407d811ee_hua.png', 'H7', '100x50', '4G', 'Dual', '4MP', '2MP', '6.0.0', 'Blue', '32GB', '4GB', 350000, '2018-06-19 11:12:21', '2018-06-19 11:12:21'), (3, 'Oppo', 'Smartphone', '5b2940c6ef8cd_yoyo_5-500x500.png', 'Neo 5', '100x50', '4G', 'Dual', '4MP', '2MP', '6.0.0', 'Black', '32GB', '4GB', 170000, '2018-06-19 11:13:34', '2018-06-19 11:13:34'), (4, 'Samsung', 'Smartphone', '5b29416b387a8_Samsung-C7-Pro-Front-Back.png', 'Galaxy J7', '100x50', '4G', 'Dual', '4MP', '2MP', '6.0.0', 'Blue', '32GB', '4GB', 280000, '2018-06-19 11:16:19', '2018-06-19 11:16:19'), (5, 'MI', 'Tablet', '5b29d16f0dabd_Xiaomi-Mi-Pad-3-I.jpg', 'mi2tablet', '200x100', '4G', 'Dual', '4MP', '2MP', '6.0.0', 'Rose Gold', '32GB', '4GB', 130000, '2018-06-19 21:30:47', '2018-06-19 21:30:47'), (6, 'Samsung', 'Tablet', '5b29d225a7175_0fbea5afdca363c4abf9c871b1fd07da.jpg', 'samsung2tablet', '200x100', '4G', 'Dual', '4MP', '2MP', '6.0.0', 'White', 
'32GB', '4GB', 120000, '2018-06-19 21:33:49', '2018-06-19 21:33:49'), (7, 'Meizu', 'Smartphone', '5b010ac512f70_768135-meizu-m3-max-3d.png', 'meizu2m', '100x50', '4G', 'Dual', '4MP', '2MP', '6.0.0', 'Black', '32GB', '4GB', 300000, '2018-05-19 23:12:29', '2018-05-19 23:12:29'); -- -------------------------------------------------------- -- -- Table structure for table `phone_models` -- CREATE TABLE `phone_models` ( `id` int(10) UNSIGNED NOT NULL, `category_id` int(10) UNSIGNED NOT NULL, `brand_id` int(10) UNSIGNED NOT NULL, `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `phone_models` -- INSERT INTO `phone_models` (`id`, `category_id`, `brand_id`, `name`, `created_at`, `updated_at`) VALUES (1, 1, 1, 'Note 4x', '2018-06-19 11:01:55', '2018-06-19 11:01:55'), (2, 1, 16, 'H7', '2018-06-19 11:07:15', '2018-06-19 11:07:15'), (3, 1, 9, 'Neo 5', '2018-06-19 11:08:58', '2018-06-19 11:08:58'), (4, 1, 8, 'Galaxy S8', '2018-06-19 11:11:01', '2018-06-19 11:11:01'), (5, 1, 8, 'Galaxy J7', '2018-06-19 11:14:39', '2018-06-19 11:14:39'), (6, 2, 8, 'samsung2a', '2018-06-19 11:47:09', '2018-06-19 11:47:09'), (7, 2, 11, 'nokia2n', '2018-06-19 11:50:36', '2018-06-19 11:50:36'), (8, 2, 15, 'zte2z', '2018-06-19 11:53:15', '2018-06-19 11:53:15'), (9, 2, 14, 'jcb2j', '2018-06-19 11:55:18', '2018-06-19 11:55:18'), (10, 13, 1, 'mi2lcd', '2018-06-19 12:07:55', '2018-06-19 12:07:55'), (11, 4, 2, 'huawei2battery', '2018-06-19 12:12:06', '2018-06-19 12:12:06'), (12, 12, 8, 'samsung2speaker', '2018-06-19 12:13:59', '2018-06-19 12:13:59'), (13, 7, 1, 'mi2pb', '2018-06-19 20:09:26', '2018-06-19 20:09:26'), (14, 7, 2, 'huawei2pb', '2018-06-19 20:11:28', '2018-06-19 20:11:28'), (15, 7, 8, 'samsung2pb', '2018-06-19 20:13:50', '2018-06-19 20:13:50'), (16, 14, 1, 'mi2bs', '2018-06-19 20:27:39', '2018-06-19 20:27:39'), 
(17, 14, 2, 'huawei2bs', '2018-06-19 20:27:53', '2018-06-19 20:27:53'), (18, 14, 8, 'samsung2bs', '2018-06-19 20:28:03', '2018-06-19 20:28:03'), (19, 8, 1, 'mi2sw', '2018-06-19 20:44:43', '2018-06-19 20:44:43'), (20, 8, 2, 'huawei2sw', '2018-06-19 20:45:12', '2018-06-19 20:45:12'), (21, 8, 8, 'samsung2sw', '2018-06-19 20:45:36', '2018-06-19 20:45:36'), (22, 9, 1, 'mi2gf', '2018-06-19 20:50:21', '2018-06-19 20:50:21'), (23, 9, 2, 'huawei2gf', '2018-06-19 20:50:45', '2018-06-19 20:50:45'), (24, 9, 8, 'samsung2gf', '2018-06-19 20:51:07', '2018-06-19 20:51:07'), (25, 3, 1, 'mi2tablet', '2018-06-19 21:25:51', '2018-06-19 21:25:51'), (26, 3, 2, 'huawei2tablet', '2018-06-19 21:26:52', '2018-06-19 21:26:52'), (27, 3, 8, 'samsung2tablet', '2018-06-19 21:27:36', '2018-06-19 21:27:36'), (28, 1, 17, 'meizu2m', '2018-05-19 23:10:28', '2018-05-19 23:10:28'); -- -------------------------------------------------------- -- -- Table structure for table `phone_services` -- CREATE TABLE `phone_services` ( `id` int(10) UNSIGNED NOT NULL, `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `brand_id` int(10) UNSIGNED NOT NULL, `model_id` int(10) UNSIGNED NOT NULL, `error` text COLLATE utf8mb4_unicode_ci NOT NULL, `accessory_name` json NOT NULL, `accessory_model_no` json NOT NULL, `start_date` date NOT NULL, `expire_date` date NOT NULL, `phone_no` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `price` int(11) NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -------------------------------------------------------- -- -- Table structure for table `roles` -- CREATE TABLE `roles` ( `id` int(10) UNSIGNED NOT NULL, `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `slug` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `permissions` json NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT 
CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `roles` -- INSERT INTO `roles` (`id`, `name`, `slug`, `permissions`, `created_at`, `updated_at`) VALUES (1, 'Human Resource', 'human resource', '{"show-salary": "true", "show-status": "true", "delete-salary": "true", "delete-status": "true", "show-employee": "true", "update-salary": "true", "update-status": "true", "delete-employee": "true", "show-department": "true", "update-employee": "true", "delete-department": "true", "show-employeelist": "true", "update-department": "true", "delete-employeelist": "true", "show-employeesalary": "true", "update-employeelist": "true", "delete-employeesalary": "true", "update-employeesalary": "true"}', '2018-06-18 08:01:00', '2018-06-18 08:01:00'), (2, 'Service Provider', 'service provider', '{"show-doneservice": "true", "show-phoneservice": "true", "delete-doneservice": "true", "update-doneservice": "true", "delete-phoneservice": "true", "show-serviceproduct": "true", "update-phoneservice": "true", "delete-serviceproduct": "true", "update-serviceproduct": "true"}', '2018-06-18 08:03:36', '2018-06-18 08:03:36'), (3, 'Admin', 'admin', '{"show-salary": "true", "show-status": "true", "delete-salary": "true", "delete-status": "true", "show-employee": "true", "update-salary": "true", "update-status": "true", "create-employee": "true", "delete-employee": "true", "show-department": "true", "update-employee": "true", "show-doneservice": "true", "delete-department": "true", "show-employeelist": "true", "show-phoneservice": "true", "update-department": "true", "delete-doneservice": "true", "update-doneservice": "true", "delete-employeelist": "true", "delete-phoneservice": "true", "show-employeesalary": "true", "show-serviceproduct": "true", "update-employeelist": "true", "update-phoneservice": "true", "delete-employeesalary": "true", "delete-serviceproduct": "true", "update-employeesalary": "true", "update-serviceproduct": "true"}', '2018-06-18 17:02:41', '2018-06-18 
17:02:41'); -- -------------------------------------------------------- -- -- Table structure for table `role_users` -- CREATE TABLE `role_users` ( `id` int(10) UNSIGNED NOT NULL, `user_id` int(10) UNSIGNED NOT NULL, `role_id` int(10) UNSIGNED NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `role_users` -- INSERT INTO `role_users` (`id`, `user_id`, `role_id`, `created_at`, `updated_at`) VALUES (1, 1, 2, NULL, NULL), (2, 3, 1, NULL, NULL), (4, 2, 3, NULL, NULL); -- -------------------------------------------------------- -- -- Table structure for table `salaries` -- CREATE TABLE `salaries` ( `id` int(10) UNSIGNED NOT NULL, `department_id` int(10) UNSIGNED NOT NULL, `status_id` int(10) UNSIGNED NOT NULL, `salary` int(11) NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `salaries` -- INSERT INTO `salaries` (`id`, `department_id`, `status_id`, `salary`, `created_at`, `updated_at`) VALUES (1, 2, 2, 200000, '2018-06-19 01:36:14', '2018-06-19 01:36:14'); -- -------------------------------------------------------- -- -- Table structure for table `sale_products` -- CREATE TABLE `sale_products` ( `id` int(10) UNSIGNED NOT NULL, `category_type` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `category` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `brand` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `model` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `color` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `quantity` int(11) NOT NULL, `image` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table 
`sale_products` -- INSERT INTO `sale_products` (`id`, `category_type`, `category`, `brand`, `model`, `color`, `quantity`, `image`, `created_at`, `updated_at`) VALUES (1, 'Smartphone', 'Smartphone', 'MI', 'Note 4x', 'Rose Gold', 3, '5b293e4fe5dd8_mi1.png', '2018-06-19 11:03:04', '2018-06-19 11:03:04'), (2, 'Smartphone', 'Smartphone', 'Honor', 'H7', 'Blue', 3, '5b293f8e1b862_hua.png', '2018-06-19 11:08:22', '2018-06-19 11:22:04'), (3, 'Smartphone', 'Smartphone', 'Oppo', 'Neo 5', 'Black', 3, '5b293ff46acd3_yoyo_5-500x500.png', '2018-06-19 11:10:04', '2018-06-19 11:10:04'), (4, 'Smartphone', 'Smartphone', 'Samsung', 'Galaxy J7', 'Blue', 2, '5b29414293556_Samsung-C7-Pro-Front-Back.png', '2018-06-19 11:15:38', '2018-06-19 22:40:11'), (5, 'Keypad', 'Keypad', 'Samsung', 'samsung2a', 'Red', 3, '5b294905cb425_redf1.png', '2018-06-19 11:48:45', '2018-06-19 11:58:47'), (6, 'Keypad', 'Keypad', 'Nokia', 'nokia2n', 'Grey', 3, '5b2949d1ddbca_nokia.png', '2018-06-19 11:52:09', '2018-06-19 11:52:09'), (7, 'Keypad', 'Keypad', 'ZTE', 'zte2z', 'Black', 3, '5b294a4817fa2_kf1.png', '2018-06-19 11:54:08', '2018-06-19 11:54:08'), (8, 'Keypad', 'Keypad', 'JCB', 'jcb2j', 'White', 3, '5b294adf718bf_kf3.png', '2018-06-19 11:56:39', '2018-06-19 11:56:39'), (13, 'Accessory', 'Power Bank', 'MI', 'mi2pb', 'Brown', 3, '5b29c139cf889_p8.jpeg', '2018-06-19 20:21:37', '2018-06-19 20:21:37'), (14, 'Accessory', 'Power Bank', 'Huawei', 'huawei2pb', 'Green', 3, '5b29c18d80d28_p6.jpeg', '2018-06-19 20:23:01', '2018-06-19 20:23:01'), (15, 'Accessory', 'Power Bank', 'Samsung', 'samsung2pb', 'Blue', 3, '5b29c1e9e148f_p9.jpeg', '2018-06-19 20:24:33', '2018-06-19 20:24:33'), (17, 'Accessory', 'Bluetooth Speaker', 'MI', 'mi2bs', 'Purple', 3, '5b29c34bed8a1_bs9.jpeg', '2018-06-19 20:30:28', '2018-06-19 20:30:28'), (18, 'Accessory', 'Bluetooth Speaker', 'Huawei', 'huawei2bs', 'Red', 3, '5b29c38a7de80_bs6.jpeg', '2018-06-19 20:31:30', '2018-06-19 20:31:30'), (19, 'Accessory', 'Bluetooth Speaker', 'Samsung', 
'samsung2bs', 'Black', 3, '5b29c3d27609f_bs.jpeg', '2018-06-19 20:32:42', '2018-06-19 20:32:42'), (20, 'Feature', 'Smartwatch', 'MI', 'mi2sw', 'Black', 3, '5b29c72b43ee9_smw2.jpg', '2018-06-19 20:46:59', '2018-06-19 20:46:59'), (21, 'Feature', 'Smartwatch', 'Huawei', 'huawei2sw', 'Green', 3, '5b29c76d84b9b_smw1.png', '2018-06-19 20:48:05', '2018-06-19 20:48:05'), (22, 'Feature', 'Smartwatch', 'Samsung', 'samsung2sw', 'Brown', 3, '5b29c7ba83d30_516cEX6KWPL.jpg', '2018-06-19 20:49:22', '2018-06-19 20:49:22'), (23, 'Feature', 'Gearfit', 'MI', 'mi2gf', 'Pink', 3, '5b29c8547b251_Huawei-Honor-Z1-Smart-Watches-SDL348689156-1-6ed54-500x500.png', '2018-06-19 20:51:56', '2018-06-19 20:51:56'), (24, 'Feature', 'Gearfit', 'Huawei', 'huawei2gf', 'Brown', 3, '5b29c8982b3fc_sgearf1.jpg', '2018-06-19 20:53:04', '2018-06-19 20:53:04'), (25, 'Feature', 'Gearfit', 'Samsung', 'samsung2gf', 'Red', 3, '5b29c8e0e3e0c_GalleryImageR_01.jpeg', '2018-06-19 20:54:17', '2018-06-19 20:54:17'), (26, 'Tablet', 'Tablet', 'MI', 'mi2tablet', 'Rose Gold', 3, '5b29d111305fd_Xiaomi-Mi-Pad-3-I.jpg', '2018-06-19 21:29:13', '2018-06-19 21:29:13'), (27, 'Tablet', 'Tablet', 'Huawei', 'huawei2tablet', 'Grey', 3, '5b29d1948af2d_huaweimediam3lite10_tablet_white.jpg', '2018-06-19 21:31:24', '2018-06-19 21:31:24'), (28, 'Tablet', 'Tablet', 'Samsung', 'samsung2tablet', 'White', 3, '5b29d1e24990b_0fbea5afdca363c4abf9c871b1fd07da.jpg', '2018-06-19 21:32:42', '2018-06-19 21:32:42'), (29, 'Smartphone', 'Smartphone', 'Meizu', 'meizu2m', 'Black', 2, '5b010a8ee336c_768135-meizu-m3-max-3d.png', '2018-05-19 23:11:35', '2018-05-20 00:04:07'); -- -------------------------------------------------------- -- -- Table structure for table `service_models` -- CREATE TABLE `service_models` ( `id` int(10) UNSIGNED NOT NULL, `brand_id` int(10) UNSIGNED NOT NULL, `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT 
CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `service_models` -- INSERT INTO `service_models` (`id`, `brand_id`, `name`, `created_at`, `updated_at`) VALUES (1, 1, 'Note 4x', NULL, NULL), (5, 1, 'Battery 1', NULL, NULL), (8, 2, 'Battery 2', '2018-06-19 00:33:44', '2018-06-19 00:33:44'), (9, 9, 'Neo 5', '2018-06-19 00:48:41', '2018-06-19 00:48:41'), (10, 2, 'P9', '2018-06-19 00:55:07', '2018-06-19 00:55:07'), (11, 8, 'Galaxy J7', '2018-06-19 00:59:12', '2018-06-19 00:59:12'), (12, 11, 'Nokia 1', '2018-06-19 02:00:36', '2018-06-19 02:00:36'), (13, 11, 'Nokia 2', '2018-06-19 02:00:48', '2018-06-19 02:00:48'), (14, 11, 'Nokia 3', '2018-06-19 02:00:57', '2018-06-19 02:00:57'), (15, 15, 'ZTE 1', '2018-06-19 02:01:49', '2018-06-19 02:01:49'), (16, 15, 'ZTE 2', '2018-06-19 02:01:57', '2018-06-19 02:01:57'), (17, 14, 'JCB 1', '2018-06-19 02:02:56', '2018-06-19 02:02:56'), (18, 14, 'JCB 2', '2018-06-19 02:03:08', '2018-06-19 02:03:08'), (19, 1, 'MI Earphone 1', '2018-06-19 02:31:08', '2018-06-19 02:31:08'), (20, 1, 'MI Earphone 2', '2018-06-19 02:32:25', '2018-06-19 02:32:25'), (21, 2, 'Huawei Earphone 1', '2018-06-19 02:35:20', '2018-06-19 02:35:20'), (22, 2, 'Huawei Earphone 2', '2018-06-19 02:35:36', '2018-06-19 02:35:36'), (23, 1, 'MI Smartwatch 1', '2018-06-19 02:45:49', '2018-06-19 02:45:49'), (24, 2, 'Huawei Smartwatch 1', '2018-06-19 02:46:46', '2018-06-19 02:46:46'), (25, 8, 'Samsung Smartwatch 1', '2018-06-19 02:47:34', '2018-06-19 02:47:34'), (26, 1, 'MI Tablet 1', '2018-06-19 03:23:55', '2018-06-19 03:23:55'), (27, 2, 'Huawei Tablet 1', '2018-06-19 03:27:24', '2018-06-19 03:27:24'), (28, 8, 'Samsung Tablet 1', '2018-06-19 03:29:54', '2018-06-19 03:29:54'), (29, 16, 'H7', '2018-06-19 10:55:01', '2018-06-19 10:55:01'), (30, 8, 'samsung2a', '2018-06-19 11:45:40', '2018-06-19 11:45:40'), (31, 11, 'nokia2n', '2018-06-19 11:50:19', '2018-06-19 11:50:19'), (32, 15, 'zte2z', '2018-06-19 11:53:00', '2018-06-19 11:53:00'), (33, 14, 'jcb2j', 
'2018-06-19 11:54:55', '2018-06-19 11:54:55'), (34, 1, 'mi2lcd', '2018-06-19 12:07:37', '2018-06-19 12:07:37'), (35, 1, 'mi2lcd', '2018-06-19 12:07:37', '2018-06-19 12:07:37'), (36, 2, 'huawei2battery', '2018-06-19 12:10:49', '2018-06-19 12:10:49'), (37, 8, 'samsung2speaker', '2018-06-19 12:13:38', '2018-06-19 12:13:38'), (38, 1, 'mi2pb', '2018-06-19 20:09:12', '2018-06-19 20:09:12'), (39, 2, 'huawei2pb', '2018-06-19 20:11:13', '2018-06-19 20:11:13'), (40, 8, 'samsung2pb', '2018-06-19 20:13:35', '2018-06-19 20:13:35'), (41, 1, 'mi2bs', '2018-06-19 20:25:44', '2018-06-19 20:25:44'), (42, 2, 'huawei2bs', '2018-06-19 20:27:11', '2018-06-19 20:27:11'), (43, 8, 'samsung2bs', '2018-06-19 20:27:24', '2018-06-19 20:27:24'), (44, 1, 'mi2sw', '2018-06-19 20:44:27', '2018-06-19 20:44:27'), (45, 2, 'huawei2sw', '2018-06-19 20:44:58', '2018-06-19 20:44:58'), (46, 8, 'samsung2sw', '2018-06-19 20:45:47', '2018-06-19 20:45:47'), (47, 1, 'mi2gf', '2018-06-19 20:50:10', '2018-06-19 20:50:10'), (48, 2, 'huawei2gf', '2018-06-19 20:50:31', '2018-06-19 20:50:31'), (49, 8, 'samsung2gf', '2018-06-19 20:50:55', '2018-06-19 20:50:55'), (50, 1, 'mi2tablet', '2018-06-19 21:25:25', '2018-06-19 21:25:25'), (51, 2, 'huawei2tablet', '2018-06-19 21:26:37', '2018-06-19 21:26:37'), (52, 8, 'samsung2tablet', '2018-06-19 21:27:15', '2018-06-19 21:27:15'), (53, 17, 'meizu2m', '2018-05-19 23:10:13', '2018-05-19 23:10:13'); -- -------------------------------------------------------- -- -- Table structure for table `service_products` -- CREATE TABLE `service_products` ( `id` int(10) UNSIGNED NOT NULL, `category` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `brand` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `model` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `color` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `quantity` int(11) NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 
COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `service_products` -- INSERT INTO `service_products` (`id`, `category`, `brand`, `model`, `color`, `quantity`, `created_at`, `updated_at`) VALUES (1, 'Battery', 'MI', 'Battery 1', 'Black', 2, '2018-06-19 11:26:17', '2018-06-19 11:29:15'), (2, 'LCD', 'MI', 'mi2lcd', 'Black', 2, '2018-06-19 12:10:14', '2018-06-19 12:20:10'), (3, 'Battery', 'Huawei', 'huawei2battery', 'Black', 2, '2018-06-19 12:13:14', '2018-06-19 12:18:19'), (4, 'Speaker', 'Samsung', 'samsung2speaker', 'Black', 3, '2018-06-19 12:14:59', '2018-06-19 12:14:59'); -- -------------------------------------------------------- -- -- Table structure for table `statuses` -- CREATE TABLE `statuses` ( `id` int(10) UNSIGNED NOT NULL, `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `statuses` -- INSERT INTO `statuses` (`id`, `name`, `created_at`, `updated_at`) VALUES (1, 'Sale Staff', '2018-06-09 21:22:11', '2018-06-09 21:33:11'), (2, 'Technician', '2018-06-09 23:34:33', '2018-06-09 23:34:33'); -- -------------------------------------------------------- -- -- Table structure for table `users` -- CREATE TABLE `users` ( `id` int(10) UNSIGNED NOT NULL, `name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `email` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `password` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL, `is_admin` tinyint(1) NOT NULL DEFAULT '0', `remember_token` varchar(100) COLLATE utf8mb4_unicode_ci DEFAULT NULL, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; -- -- Dumping data for table `users` -- INSERT INTO `users` (`id`, `name`, `email`, `password`, `is_admin`, `remember_token`, `created_at`, `updated_at`) VALUES (1, 'Service Provider', 
'<EMAIL>', <PASSWORD>', 0, 'ZT5GNVTIBacbuZKQGlVOyHXpd7SDRt0NtjII467PWuC67K3KqSZcgySROeoU', '2018-06-06 03:43:39', '2018-06-06 03:43:39'), (2, 'Admin', '<EMAIL>', <PASSWORD>iV9OUD2Q8mToZfkGsvH4uEaOSowm/zMvVeEgBJXKn5WkD8E1DYk.', 1, 'fhhw2ca2BRXR9O4fWCfk6WZ4Krowl5b4bF1GwvTtEGdyXzT1Rt3QyRUH7ukm', '2018-06-18 00:22:10', '2018-06-18 00:22:10'), (3, 'Human Resource', '<EMAIL>', <PASSWORD>59s1IRkoF1ZZh6p7Sv.J3m6nI2G0z4t4.ynRN1U720gpYh7LHW', 0, '3qvOv9Zf4StmFeemelbfkpVEzdkaz2B5TbmkXRY9B2Z8PoTFUTqvb2tjeH15', '2018-06-18 11:11:20', '2018-06-18 11:11:20'), (4, '<NAME>', '<EMAIL>', <PASSWORD>$ABFld1<PASSWORD>IhoE.zdEFi<PASSWORD>O2Ejmv7lmcaSXBBsCv12Yju<PASSWORD>Qpq', 0, 'y7Qyj37xB5P27EM3yalb2CosakkCHWQw8FU7QmC7y21cSoNgUWhGjf7u3TwJ', '2018-06-19 01:05:12', '2018-06-19 01:05:12'); -- -- Indexes for dumped tables -- -- -- Indexes for table `absents` -- ALTER TABLE `absents` ADD PRIMARY KEY (`id`); -- -- Indexes for table `add_to_carts` -- ALTER TABLE `add_to_carts` ADD PRIMARY KEY (`id`); -- -- Indexes for table `categories` -- ALTER TABLE `categories` ADD PRIMARY KEY (`id`); -- -- Indexes for table `category_types` -- ALTER TABLE `category_types` ADD PRIMARY KEY (`id`); -- -- Indexes for table `costs` -- ALTER TABLE `costs` ADD PRIMARY KEY (`id`); -- -- Indexes for table `customer_services` -- ALTER TABLE `customer_services` ADD PRIMARY KEY (`id`), ADD KEY `customer_services_brand_id_foreign` (`brand_id`), ADD KEY `customer_services_model_id_foreign` (`model_id`); -- -- Indexes for table `departments` -- ALTER TABLE `departments` ADD PRIMARY KEY (`id`); -- -- Indexes for table `done_sales` -- ALTER TABLE `done_sales` ADD PRIMARY KEY (`id`); -- -- Indexes for table `employees` -- ALTER TABLE `employees` ADD PRIMARY KEY (`id`), ADD KEY `employees_department_id_foreign` (`department_id`), ADD KEY `employees_status_id_foreign` (`status_id`); -- -- Indexes for table `employee_salaries` -- ALTER TABLE `employee_salaries` ADD PRIMARY KEY (`id`), ADD KEY 
`employee_salaries_employee_id_foreign` (`employee_id`), ADD KEY `employee_salaries_department_id_foreign` (`department_id`), ADD KEY `employee_salaries_status_id_foreign` (`status_id`); -- -- Indexes for table `featured_details` -- ALTER TABLE `featured_details` ADD PRIMARY KEY (`id`); -- -- Indexes for table `migrations` -- ALTER TABLE `migrations` ADD PRIMARY KEY (`id`); -- -- Indexes for table `other_costs` -- ALTER TABLE `other_costs` ADD PRIMARY KEY (`id`); -- -- Indexes for table `password_resets` -- ALTER TABLE `password_resets` ADD KEY `password_resets_email_index` (`email`); -- -- Indexes for table `permissions` -- ALTER TABLE `permissions` ADD PRIMARY KEY (`id`); -- -- Indexes for table `phone_brands` -- ALTER TABLE `phone_brands` ADD PRIMARY KEY (`id`); -- -- Indexes for table `phone_details` -- ALTER TABLE `phone_details` ADD PRIMARY KEY (`id`); -- -- Indexes for table `phone_models` -- ALTER TABLE `phone_models` ADD PRIMARY KEY (`id`), ADD KEY `phone_models_category_id_foreign` (`category_id`), ADD KEY `phone_models_brand_id_foreign` (`brand_id`); -- -- Indexes for table `phone_services` -- ALTER TABLE `phone_services` ADD PRIMARY KEY (`id`), ADD KEY `phone_services_brand_id_foreign` (`brand_id`), ADD KEY `phone_services_model_id_foreign` (`model_id`); -- -- Indexes for table `roles` -- ALTER TABLE `roles` ADD PRIMARY KEY (`id`), ADD UNIQUE KEY `roles_slug_unique` (`slug`); -- -- Indexes for table `role_users` -- ALTER TABLE `role_users` ADD PRIMARY KEY (`id`), ADD UNIQUE KEY `role_users_user_id_role_id_unique` (`user_id`,`role_id`), ADD KEY `role_users_role_id_foreign` (`role_id`); -- -- Indexes for table `salaries` -- ALTER TABLE `salaries` ADD PRIMARY KEY (`id`), ADD KEY `salaries_department_id_foreign` (`department_id`), ADD KEY `salaries_status_id_foreign` (`status_id`); -- -- Indexes for table `sale_products` -- ALTER TABLE `sale_products` ADD PRIMARY KEY (`id`); -- -- Indexes for table `service_models` -- ALTER TABLE `service_models` ADD 
PRIMARY KEY (`id`), ADD KEY `service_models_brand_id_foreign` (`brand_id`); -- -- Indexes for table `service_products` -- ALTER TABLE `service_products` ADD PRIMARY KEY (`id`); -- -- Indexes for table `statuses` -- ALTER TABLE `statuses` ADD PRIMARY KEY (`id`); -- -- Indexes for table `users` -- ALTER TABLE `users` ADD PRIMARY KEY (`id`), ADD UNIQUE KEY `users_email_unique` (`email`); -- -- AUTO_INCREMENT for dumped tables -- -- -- AUTO_INCREMENT for table `absents` -- ALTER TABLE `absents` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT; -- -- AUTO_INCREMENT for table `add_to_carts` -- ALTER TABLE `add_to_carts` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT; -- -- AUTO_INCREMENT for table `categories` -- ALTER TABLE `categories` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=15; -- -- AUTO_INCREMENT for table `category_types` -- ALTER TABLE `category_types` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=6; -- -- AUTO_INCREMENT for table `costs` -- ALTER TABLE `costs` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=34; -- -- AUTO_INCREMENT for table `customer_services` -- ALTER TABLE `customer_services` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=4; -- -- AUTO_INCREMENT for table `departments` -- ALTER TABLE `departments` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=3; -- -- AUTO_INCREMENT for table `done_sales` -- ALTER TABLE `done_sales` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=3; -- -- AUTO_INCREMENT for table `employees` -- ALTER TABLE `employees` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=6; -- -- AUTO_INCREMENT for table `employee_salaries` -- ALTER TABLE `employee_salaries` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT; -- -- AUTO_INCREMENT for table `featured_details` -- ALTER TABLE `featured_details` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=21; 
-- -- AUTO_INCREMENT for table `migrations` -- ALTER TABLE `migrations` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=268; -- -- AUTO_INCREMENT for table `other_costs` -- ALTER TABLE `other_costs` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=2; -- -- AUTO_INCREMENT for table `permissions` -- ALTER TABLE `permissions` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=4; -- -- AUTO_INCREMENT for table `phone_brands` -- ALTER TABLE `phone_brands` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=18; -- -- AUTO_INCREMENT for table `phone_details` -- ALTER TABLE `phone_details` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=8; -- -- AUTO_INCREMENT for table `phone_models` -- ALTER TABLE `phone_models` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=29; -- -- AUTO_INCREMENT for table `phone_services` -- ALTER TABLE `phone_services` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT; -- -- AUTO_INCREMENT for table `roles` -- ALTER TABLE `roles` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=4; -- -- AUTO_INCREMENT for table `role_users` -- ALTER TABLE `role_users` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=5; -- -- AUTO_INCREMENT for table `salaries` -- ALTER TABLE `salaries` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=2; -- -- AUTO_INCREMENT for table `sale_products` -- ALTER TABLE `sale_products` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=30; -- -- AUTO_INCREMENT for table `service_models` -- ALTER TABLE `service_models` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=54; -- -- AUTO_INCREMENT for table `service_products` -- ALTER TABLE `service_products` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=5; -- -- AUTO_INCREMENT for table `statuses` -- ALTER TABLE `statuses` MODIFY `id` int(10) UNSIGNED NOT 
NULL AUTO_INCREMENT, AUTO_INCREMENT=3; -- -- AUTO_INCREMENT for table `users` -- ALTER TABLE `users` MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=5; -- -- Constraints for dumped tables -- -- -- Constraints for table `customer_services` -- ALTER TABLE `customer_services` ADD CONSTRAINT `customer_services_brand_id_foreign` FOREIGN KEY (`brand_id`) REFERENCES `phone_brands` (`id`), ADD CONSTRAINT `customer_services_model_id_foreign` FOREIGN KEY (`model_id`) REFERENCES `service_models` (`id`); -- -- Constraints for table `employees` -- ALTER TABLE `employees` ADD CONSTRAINT `employees_department_id_foreign` FOREIGN KEY (`department_id`) REFERENCES `departments` (`id`) ON DELETE CASCADE, ADD CONSTRAINT `employees_status_id_foreign` FOREIGN KEY (`status_id`) REFERENCES `statuses` (`id`) ON DELETE CASCADE; -- -- Constraints for table `employee_salaries` -- ALTER TABLE `employee_salaries` ADD CONSTRAINT `employee_salaries_department_id_foreign` FOREIGN KEY (`department_id`) REFERENCES `departments` (`id`) ON DELETE CASCADE, ADD CONSTRAINT `employee_salaries_employee_id_foreign` FOREIGN KEY (`employee_id`) REFERENCES `employees` (`id`) ON DELETE CASCADE, ADD CONSTRAINT `employee_salaries_status_id_foreign` FOREIGN KEY (`status_id`) REFERENCES `statuses` (`id`) ON DELETE CASCADE; -- -- Constraints for table `phone_models` -- ALTER TABLE `phone_models` ADD CONSTRAINT `phone_models_brand_id_foreign` FOREIGN KEY (`brand_id`) REFERENCES `phone_brands` (`id`) ON DELETE CASCADE, ADD CONSTRAINT `phone_models_category_id_foreign` FOREIGN KEY (`category_id`) REFERENCES `categories` (`id`) ON DELETE CASCADE; -- -- Constraints for table `phone_services` -- ALTER TABLE `phone_services` ADD CONSTRAINT `phone_services_brand_id_foreign` FOREIGN KEY (`brand_id`) REFERENCES `phone_brands` (`id`), ADD CONSTRAINT `phone_services_model_id_foreign` FOREIGN KEY (`model_id`) REFERENCES `service_models` (`id`); -- -- Constraints for table `role_users` -- ALTER TABLE 
`role_users` ADD CONSTRAINT `role_users_role_id_foreign` FOREIGN KEY (`role_id`) REFERENCES `roles` (`id`) ON DELETE CASCADE, ADD CONSTRAINT `role_users_user_id_foreign` FOREIGN KEY (`user_id`) REFERENCES `users` (`id`) ON DELETE CASCADE; -- -- Constraints for table `salaries` -- ALTER TABLE `salaries` ADD CONSTRAINT `salaries_department_id_foreign` FOREIGN KEY (`department_id`) REFERENCES `departments` (`id`) ON DELETE CASCADE, ADD CONSTRAINT `salaries_status_id_foreign` FOREIGN KEY (`status_id`) REFERENCES `statuses` (`id`) ON DELETE CASCADE; -- -- Constraints for table `service_models` -- ALTER TABLE `service_models` ADD CONSTRAINT `service_models_brand_id_foreign` FOREIGN KEY (`brand_id`) REFERENCES `phone_brands` (`id`) ON DELETE CASCADE; /*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; /*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
<reponame>unixing/springboot_chowder<gh_stars>10-100 package com.oven.config; import com.oven.wsdl.UserPortType; import org.apache.cxf.jaxws.JaxWsProxyFactoryBean; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; @Configuration public class CxfClientConfig { private final static String SERVICE_ADDRESS = "http://localhost:8080/ws/user"; @Bean("cxfProxy") public UserPortType createUserPortTypeProxy() { JaxWsProxyFactoryBean jaxWsProxyFactoryBean = new JaxWsProxyFactoryBean(); jaxWsProxyFactoryBean.setServiceClass(UserPortType.class); jaxWsProxyFactoryBean.setAddress(SERVICE_ADDRESS); return (UserPortType) jaxWsProxyFactoryBean.create(); } }
package com.github.aravinda;

/**
 * Enumerations describing the environment a driver instance targets.
 */
class DriverSet {

    /** Operating systems the driver can run on. */
    enum OS {
        WINDOWS, LINUX, MAC
    }

    /** Initial window sizing strategies. */
    enum WINDOW_SIZE {
        FULLSCREEN, MAXIMIZE, DEFAULT
    }
}
import BaseTemplate from './default-template';

// Template variant used when the supplied file has an unsupported type.
const invalidFileTypeTemplate = BaseTemplate.extend({
  type: 'invalid-file-type',
});

export default invalidFileTypeTemplate;
-- Users with a payment_time within the last 12 hours, newest first.
SELECT * FROM users WHERE payment_time > DATE_SUB(NOW(), INTERVAL 12 HOUR) ORDER BY payment_time DESC;
import { types } from "taggr-shared";
import { sendToFrontendType } from "../../message-bus";
import { Type as DatabaseType } from "../../database";
import { Type as imageServiceType } from "../../services/image";

type Deps = {
  db: DatabaseType;
  imageService: imageServiceType;
  sendToFrontend: sendToFrontendType;
};

/**
 * Builds a handler that applies the given filters to the stored images and
 * pushes the filtered set to the frontend.
 */
const filterImages = ({ db, imageService, sendToFrontend }: Deps) => (
  filters: types.Filters
) => {
  const filteredImages = imageService.filterImages({
    imageMap: db.get("allImages"),
    currentImageHashes: db.get("currentImageHashes"),
    filters,
  });

  sendToFrontend({ type: "frontend_set-images", payload: filteredImages });
};

export default filterImages;
/// Smoke-test driver for the `Wallet` type (defined elsewhere): exercises a
/// deposit, a successful withdrawal, and an over-draw that must be refused.
fn main() {
    // Create a new wallet
    let mut wallet = Wallet::new();
    // Deposit 100 units into the wallet
    wallet.deposit(100);
    // Check the current balance
    assert_eq!(wallet.balance(), 100);
    // Withdraw 50 units from the wallet; `true` signals success
    assert_eq!(wallet.withdraw(50), true);
    // Check the current balance
    assert_eq!(wallet.balance(), 50);
    // Withdraw 100 units from the wallet (insufficient funds): must return
    // `false` and leave the balance untouched
    assert_eq!(wallet.withdraw(100), false);
    // Check the current balance
    assert_eq!(wallet.balance(), 50);
}
// Minimal peer abstraction used by p2p_server. Method bodies are stubs
// returning fixed values; real transport logic is not present in this file.
class CPeer {
public:
    // Reports whether this peer currently has a live connection.
    bool connected() {
        // Implementation for checking if the peer is connected
        return false;
    }

    // Attempts to establish a connection to this peer.
    bool connect_peer() {
        // Implementation for connecting the peer
        return true;
    }
};

// Skeleton of a peer-to-peer server. All operations report success/failure
// via bool; bodies here are placeholders.
class p2p_server {
public:
    // Connects `peer` unless it already reports being connected;
    // returns false for an already-connected peer.
    bool connect_to_peer(CPeer* peer) {
        if (peer->connected())
            return false;
        return peer->connect_peer();
    }

    // Stops the server. NOTE(review): with the current stub body the try
    // block cannot throw, so the catch is unreachable — confirm once the
    // real teardown logic is filled in.
    bool stop_server() {
        try {
            // Implementation for stopping the server and releasing resources
            return true;
        }
        catch (...) {
            // Handle any exceptions during server stop
            return false;
        }
    }

    // Starts the server; returns false if initialization throws.
    bool start_server() {
        try {
            // Implementation for starting the server and initializing resources
            return true;
        }
        catch (...) {
            // Handle any initialization errors
            return false;
        }
    }

    // Disconnects the specified peer.
    bool disconnect_peer(CPeer* peer) {
        // Implementation for disconnecting the specified peer
        return true;
    }

    // Sends `message` to every connected peer.
    bool broadcast_message(const std::string& message) {
        // Implementation for broadcasting the message to all connected peers
        return true;
    }
};
import * as styles from "../../styles"; export function Submit({ loading, text, dataTestid, style, className }) { if (loading) { return <div className="spinner-border mt-1" role="status" />; } return ( <button data-testid={dataTestid} type="submit" className={styles.classes("btn btn-success w-100 w-md-auto", className)} style={styles.combine(styles.success, style, { whiteSpace: "nowrap", })} > {text} </button> ); }
#!/usr/bin/env bash cosmovisor start >> sifnode.log 2>&1 & sleep 10 yes Y | sifnodecli tx gov submit-proposal software-upgrade release-20210414000000 --from sif --deposit 100000000stake --upgrade-height 20 --title release-20210414000000 --description release-20210414000000 sleep 5 yes Y | sifnodecli tx gov vote 1 yes --from sif --keyring-backend test --chain-id localnet clear sleep 5 sifnodecli query gov proposal 1 #--info '{"binaries":{"linux/amd64":"https://srv-store2.gofile.io/download/K9xJtY/sifnoded.zip?checksum=sha256:8630d1e36017ca680d572926d6a4fc7fe9a24901c52f48c70523b7d44ad0cfb2"}}'
// Copyright 2018 The Prizem Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package postgres import ( "database/sql" "encoding/json" "github.com/jmoiron/sqlx" "github.com/pkg/errors" "github.com/prizem-io/api/v1" ) func (s *Store) SetService(service api.Service) (index int64, err error) { var tx *sqlx.Tx tx, err = s.db.Beginx() if err != nil { err = errors.Wrap(err, "Could not begin transaction") return } defer s.handleDeferTx(tx, err) index = 1 err = tx.Get(&index, "SELECT index FROM source WHERE name = 'routing' FOR UPDATE") if err != nil { if err == sql.ErrNoRows { err = nil } else { err = errors.Wrap(err, "could not select current routing index") return } } var data []byte data, err = json.Marshal(&service) if err != nil { err = errors.Wrapf(err, "Could not marshal service %s", service.Name) return } var result sql.Result result, err = tx.Exec("UPDATE service SET config = $1 WHERE service_name = $2", json.RawMessage(data), service.Name) if err != nil { err = errors.Wrapf(err, "Could not update service %s", service.Name) return } var affected int64 affected, err = result.RowsAffected() if err != nil { err = errors.Wrapf(err, "Could not determine if service %s was updated", service.Name) return } if affected == 0 { _, err = tx.Exec("INSERT INTO service (service_name, config) VALUES ($1, $2)", service.Name, json.RawMessage(data)) if err != nil { err = errors.Wrapf(err, "Could not update insert service %s", service.Name) return } } index++ if index == 0 { index++ } err = updateSource(tx, index, "routing") if err != nil { return } return }
class hayvanlar:
    """Simple record pairing an animal species with its given name."""

    def __init__(self, hayvan, hayvanismi):
        # hayvan: species label (e.g. "Kedi"); hayvanismi: the pet's name.
        self.hayvan = hayvan
        self.hayvanismi = hayvanismi

    def hayvanbilgisi(self):
        """Return a human-readable summary line for this animal."""
        return "Hayvan ismi:" + str(self.hayvanismi) + "\nHayvan: " + str(self.hayvan)


def main():
    """Interactively register animals until the user declines to continue."""
    devam = True
    hayvanlistesi = ["Köpek", "Kedi", "Kuş", "Hamster"]
    kayitlihayvanlar = []
    while devam:
        sec = ""
        try:
            sec = int(input("""
 1-Köpek
 2-Kedi
 3-Kuş
 4-Hamster
 Hangi hayvan (sayı girin lütfen)
 """))
        except ValueError:
            print("Arkadaşım sen anlamıyor musun? Sayı gireceksin bak yazdık oraya!")
            exit()
        # Guard against menu choices outside 1-4: the original code raised
        # IndexError for sec > 4 and silently picked the wrong animal for
        # sec <= 0 (Python negative indexing).
        if not 1 <= sec <= 4:
            print("Arkadaşım sen anlamıyor musun? Sayı gireceksin bak yazdık oraya!")
            exit()
        isim = input(" Adı ne olsun?\n ")
        hayvankaydi = hayvanlar(hayvanlistesi[sec - 1], isim)
        kayitlihayvanlar.append(hayvankaydi.hayvanbilgisi() + "\n\n\n")
        print("=" * 30)
        print("HAYVAN(LAR)")
        print("=" * 30)
        for kayit in kayitlihayvanlar:
            print(kayit)
        devamsoru = input("Bir hayvan daha tanımlamak ister misiniz? (e/h)")
        if devamsoru == "h":
            devam = False
    print("İyi günler.")


if __name__ == "__main__":
    main()
package controllers

import play.api.Logger
import scalikejdbc._
import scalikejdbc.config._
import models._
import java.util.Date

/**
 * Read-only data-access layer over the FIAS address database.
 * All queries run in a ReadOnlyAutoSession; filter text is normalized by
 * the inherited `tt` helper before being used in LIKE clauses.
 */
object DbConnect extends DbService{
  DBs.setupAll()

  /** Looks up the user owning the given API key, if any. */
  def findUser(apikey: String): Option[User] = {
    implicit val session: DBSession = ReadOnlyAutoSession
    sql"select userid, apikey from fiasuser where apikey = ${apikey}".map(rs => User.fromRs(rs)).single.apply()
  }

  /** Lists live top-level regions (aolevel 1) whose formal name matches the filter. */
  def listRegion(formalName: Option[String]): List[AddrObjRsp] = {
    implicit val session: DBSession = ReadOnlyAutoSession
    val aolevel = 1
    val stext: String = tt(formalName)
    sql"""select regioncode, postalcode, shortname, offname, aolevel, aoguid from addressobject where livestatus = 1 and aolevel = ${aolevel} and lower(formalname) like ${stext} order by formalname""".map(rs => AddrObjRsp.fromRs(rs)).list.apply()
  }

  /** Lists live child address objects of `parent` matching the name filter. */
  def listChild(parent: Option[String], formalName: Option[String]): List[AddrObjRsp] = {
    implicit val session: DBSession = ReadOnlyAutoSession
    val parentguid = checkParentGuid(parent)
    val stext: String = tt(formalName)
    sql"""select regioncode, postalcode, shortname, offname, aolevel, aoguid from addressobject where parentguid = ${parentguid} and livestatus = 1 and lower(formalname) like ${stext} order by formalname""".map(rs => AddrObjRsp.fromRs(rs)).list.apply()
  }

  /** Lists houses under `parentguid` still valid at `date`, matching `stext` against house/building/structure numbers. */
  def listHouseOnly(parentguid: String, stext: String, date: Date): List[HouseRsp] = {
    implicit val session: DBSession = ReadOnlyAutoSession
    sql"""select aoguid, houseguid, postalcode, housenum, eststatus, buildnum, strucnum, strstatus from house where aoguid = ${parentguid} and enddate > ${date} and (lower(housenum) like ${stext} or lower(buildnum) like ${stext} or lower(strucnum) like ${stext}) order by housenum""".map(rs => HouseRsp.fromRs(rs)).list.apply()
  }

  /**
   * Lists house-number intervals under `parentguid` valid at `date`.
   * With a concrete `housenum`, only intervals covering that number and of a
   * compatible parity are returned (intstatus values presumably encode
   * even/odd-side intervals — confirm against the FIAS schema).
   */
  def listHouseInt(parentguid: String, housenum: Option[Int], date: Date): List[HouseInt] = {
    implicit val session: DBSession = ReadOnlyAutoSession
    housenum match {
      case Some(hn) =>
        // Exclude intervals of the opposite parity to hn.
        val even: Int = if(hn%2==0) 3 else 2
        sql"""select aoguid, intguid, postalcode, intstart, intend, intstatus from houseinterval where aoguid = ${parentguid} and enddate > ${date} and intend >= ${hn} and intstatus <> ${even}""".map(rs => HouseInt.fromRs(rs)).list.apply()
      case None =>
        sql"""select aoguid, intguid, postalcode, intstart, intend, intstatus from houseinterval where aoguid = ${parentguid} and enddate > ${date}""".map(rs => HouseInt.fromRs(rs)).list.apply()
    }
  }

  /** Lists live localities (aolevel 4 or 6) in `regioncode`, joined with their parent object. */
  def listLocality(regioncode: Option[String], formalName: Option[String]): List[AddrObjRsp] = {
    implicit val session: DBSession = ReadOnlyAutoSession
    val stext: String = tt(formalName)
    val region: String = checkRegion(regioncode)
    sql"""select a.regioncode regioncode, a.postalcode postalcode, a.shortname shortname, a.offname offname, a.aolevel aolevel, a.aoguid aoguid, p.shortname pshortname, p.offname poffname, p.aoguid paoguid from addressobject a left join addressobject p on a.parentguid = p.aoguid where p.livestatus = 1 and a.livestatus = 1 and a.regioncode = ${region} and a.aolevel in (4,6) and lower(a.formalname) like ${stext} order by a.formalname""".map(rs => AddrObjRsp.fromRsWithParent(rs)).list.apply()
  }

  /** Lists live streets (aolevel 7) under `parent` matching the name filter. */
  def listStreet(parent: Option[String], formalName: Option[String]): List[AddrObjRsp] = {
    implicit val session: DBSession = ReadOnlyAutoSession
    val parentguid = checkParentGuid(parent)
    val stext: String = tt(formalName)
    sql"""select regioncode, postalcode, shortname, offname, aolevel, aoguid from addressobject where parentguid = ${parentguid} and aolevel = 7 and livestatus = 1 and lower(formalname) like ${stext} order by formalname""".map(rs => AddrObjRsp.fromRs(rs)).list.apply()
  }
}
# Regenerate the Javadoc API documentation for the `minet` sources.
# Wipe any previously generated docs first.
rm -rf doc/*
# Run javadoc over every .java file under minet/, with jblas on the classpath.
javadoc -classpath lib/jblas-1.2.5.jar:. -d doc $(find minet -name "*.java")
def has_unique_characters(str):
    """Return True if every character in ``str`` occurs at most once.

    The empty string is considered unique.  Runs in O(n) time / O(n) space.
    (The parameter name shadows the built-in ``str``; it is kept unchanged
    for interface compatibility with existing keyword-argument callers.)
    """
    # A set discards duplicates, so the characters are all unique exactly
    # when the set of characters has the same size as the string itself.
    return len(set(str)) == len(str)
#!/bin/bash # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. set -euo pipefail . $IMPALA_HOME/bin/report_build_error.sh setup_report_build_error HIVE_SERVER_PORT=10000 export HIVE_SERVER2_THRIFT_PORT=11050 HIVE_METASTORE_PORT=9083 LOGDIR=${IMPALA_CLUSTER_LOGS_DIR}/hive HIVES2_TRANSPORT="plain_sasl" METASTORE_TRANSPORT="buffered" ONLY_METASTORE=0 ENABLE_RANGER_AUTH=0 CLUSTER_BIN=${IMPALA_HOME}/testdata/bin if ${CLUSTER_DIR}/admin is_kerberized; then # Making a kerberized cluster... set some more environment variables. . ${MINIKDC_ENV} HIVES2_TRANSPORT="kerberos" # The metastore isn't kerberized yet: # METASTORE_TRANSPORT="kerberos" fi mkdir -p ${LOGDIR} while [ -n "$*" ] do case $1 in -only_metastore) ONLY_METASTORE=1 ;; -with_ranger) ENABLE_RANGER_AUTH=1 echo "Starting Hive with Ranger authorization." ;; -help|-h|*) echo "run-hive-server.sh : Starts the hive server and the metastore." echo "[-only_metastore] : Only starts the hive metastore." echo "[-with_ranger] : Starts with Ranger authorization (only for Hive 3)." exit 1; ;; esac shift; done # TODO: We should have a retry loop for every service we start. # Kill for a clean start. 
${CLUSTER_BIN}/kill-hive-server.sh &> /dev/null export HIVE_METASTORE_HADOOP_OPTS="-Xdebug -Xrunjdwp:transport=dt_socket,server=y,\ suspend=n,address=30010" # If this is CDP Hive we need to manually add the sentry jars in the classpath since # CDH Hive metastore scripts do not do so. This is currently to make sure that we can run # all the tests including sentry tests # TODO: This can be removed when we move to Ranger completely if [[ -n "$SENTRY_HOME" ]]; then for f in ${SENTRY_HOME}/lib/sentry-binding-hive*.jar; do FILE_NAME=$(basename $f) # exclude all the hive jars from being included in the classpath since Sentry # depends on Hive 2.1.1 if [[ ! $FILE_NAME == hive* ]]; then export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:${f} fi done fi # Add Ranger dependencies if we are starting with Ranger authorization enabled. if [[ $ENABLE_RANGER_AUTH -eq 1 ]]; then export HIVE_CONF_DIR="$HADOOP_CONF_DIR/hive-site-ranger-auth/" for f in "$RANGER_HOME"/ews/webapp/WEB-INF/classes/ranger-plugins/hive/ranger-*.jar \ "$RANGER_HOME"/ews/webapp/WEB-INF/lib/*.jar \ "$RANGER_HOME"/ews/lib/ranger-*.jar; do FILE_NAME=$(basename $f) # Exclude unneccessary jars. if [[ ! $FILE_NAME == hive* && ! $FILE_NAME == hadoop* && ! $FILE_NAME == hbase* \ && ! $FILE_NAME == zookeeper* ]]; then export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:${f} fi done fi # For Hive 3, we use Tez for execution. We have to add it to the classpath. # NOTE: it would seem like this would only be necessary on the HS2 classpath, # but compactions are initiated from the HMS in Hive 3. This may change at # some point in the future, in which case we can add this to only the # HS2 classpath. export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:${TEZ_HOME}/* # This is a little hacky, but Tez bundles a bunch of junk into lib/, such # as extra copies of the hadoop libraries, etc, and we want to avoid conflicts. # So, we'll be a bit choosy about what we add to the classpath here. 
for jar in $TEZ_HOME/lib/* ; do case $(basename $jar) in commons-*|RoaringBitmap*) export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$jar ;; esac done # Add kudu-hive.jar to the Hive Metastore classpath, so that Kudu's HMS # plugin can be loaded. for file in ${IMPALA_KUDU_JAVA_HOME}/*kudu-hive*jar; do export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:${file} done # Default to skip validation on Kudu tables if KUDU_SKIP_HMS_PLUGIN_VALIDATION # is unset. export KUDU_SKIP_HMS_PLUGIN_VALIDATION=${KUDU_SKIP_HMS_PLUGIN_VALIDATION:-1} # Starts a Hive Metastore Server on the specified port. # To debug log4j2 loading issues, add to HADOOP_CLIENT_OPTS: # -Dorg.apache.logging.log4j.simplelog.StatusLogger.level=TRACE HADOOP_CLIENT_OPTS="-Xmx2024m -Dhive.log.file=hive-metastore.log" hive \ --service metastore -p $HIVE_METASTORE_PORT > ${LOGDIR}/hive-metastore.out 2>&1 & # Wait for the Metastore to come up because HiveServer2 relies on it being live. ${CLUSTER_BIN}/wait-for-metastore.py --transport=${METASTORE_TRANSPORT} if [ ${ONLY_METASTORE} -eq 0 ]; then # Starts a HiveServer2 instance on the port specified by the HIVE_SERVER2_THRIFT_PORT # environment variable. HADOOP_HEAPSIZE should be set to at least 2048 to avoid OOM # when loading ORC tables like widerow. HADOOP_CLIENT_OPTS="-Xmx2048m -Dhive.log.file=hive-server2.log" hive \ --service hiveserver2 > ${LOGDIR}/hive-server2.out 2>&1 & # Wait for the HiveServer2 service to come up because callers of this script # may rely on it being available. ${CLUSTER_BIN}/wait-for-hiveserver2.py --transport=${HIVES2_TRANSPORT} fi
/* For license: see LICENSE file at top-level */ #ifdef HAVE_CONFIG_H # include "config.h" #endif /* HAVE_CONFIG_H */ #include "thispe.h" #include "shmemu.h" #include "state.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/types.h> #include <pmix.h> #include <ucp/api/ucp.h> /* -------------------------------------------------------------- */ /* * Make local info avaialable to PMIx */ static const char *wrkr_exch_fmt = "wrkr:%d"; /* pe */ void shmemc_pmi_publish_worker(void) { pmix_status_t ps; pmix_info_t pi; pmix_byte_object_t *bop; /* shortcut */ PMIX_INFO_CONSTRUCT(&pi); /* everyone publishes their info */ snprintf(pi.key, PMIX_MAX_KEYLEN, wrkr_exch_fmt, proc.rank); pi.value.type = PMIX_BYTE_OBJECT; bop = &pi.value.data.bo; bop->bytes = (char *) proc.comms.xchg_wrkr_info[proc.rank].addr; bop->size = proc.comms.xchg_wrkr_info[proc.rank].len; ps = PMIx_Publish(&pi, 1); shmemu_assert(ps == PMIX_SUCCESS, "can't publish worker blob"); } static const char *rkey_exch_fmt = "rkey:%lu:%d"; /* region, pe */ #ifndef ENABLE_ALIGNED_ADDRESSES static const char *region_base_fmt = "base:%lu:%d"; /* region, pe */ static const char *region_size_fmt = "size:%lu:%d"; /* region, pe */ #endif /* ! ENABLE_ALIGNED_ADDRESSES */ void shmemc_pmi_publish_rkeys_and_heaps(void) { pmix_status_t ps; pmix_info_t pi; #ifndef ENABLE_ALIGNED_ADDRESSES pmix_info_t *hi; #endif /* ! ENABLE_ALIGNED_ADDRESSES */ void *packed_rkey; size_t rkey_len; size_t r; PMIX_INFO_CONSTRUCT(&pi); #ifndef ENABLE_ALIGNED_ADDRESSES PMIX_INFO_CREATE(hi, 2); #endif /* ! 
ENABLE_ALIGNED_ADDRESSES */ for (r = 0; r < proc.comms.nregions; r += 1) { pmix_byte_object_t *bop = &pi.value.data.bo; const ucs_status_t s = ucp_rkey_pack(proc.comms.ucx_ctxt, proc.comms.regions[r].minfo[proc.rank].racc.mh, &packed_rkey, &rkey_len ); shmemu_assert(s == UCS_OK, "can't unpack rkey"); snprintf(pi.key, PMIX_MAX_KEYLEN, rkey_exch_fmt, r, proc.rank); pi.value.type = PMIX_BYTE_OBJECT; bop->bytes = (char *) packed_rkey; bop->size = rkey_len; ps = PMIx_Publish(&pi, 1); shmemu_assert(ps == PMIX_SUCCESS, "can't publish rkey"); ucp_rkey_buffer_release(packed_rkey); #ifndef ENABLE_ALIGNED_ADDRESSES if (shmemu_likely(r > 0)) { snprintf(hi[0].key, PMIX_MAX_KEYLEN, region_base_fmt, r, proc.rank); hi[0].value.type = PMIX_UINT64; hi[0].value.data.uint64 = (uint64_t) proc.comms.regions[r].minfo[proc.rank].base; snprintf(hi[1].key, PMIX_MAX_KEYLEN, region_size_fmt, r, proc.rank); hi[1].value.type = PMIX_SIZE; hi[1].value.data.size = proc.comms.regions[r].minfo[proc.rank].len; /* newer PMIx should be able to do this at one go */ #if PMIX_VERSION_MAJOR > 2 ps = PMIx_Publish(hi, 2); shmemu_assert(ps == PMIX_SUCCESS, "can't publish heap base/size"); #else /* PMIX_VERSION_MAJOR */ ps = PMIx_Publish(&hi[0], 1); shmemu_assert(ps == PMIX_SUCCESS, "can't publish heap base"); ps = PMIx_Publish(&hi[1], 1); shmemu_assert(ps == PMIX_SUCCESS, "can't publish heap size"); #endif /* PMIX_VERSION_MAJOR */ } #endif /* ! ENABLE_ALIGNED_ADDRESSES */ } #ifndef ENABLE_ALIGNED_ADDRESSES PMIX_INFO_FREE(hi, 2); #endif /* ! 
ENABLE_ALIGNED_ADDRESSES */ } /* -------------------------------------------------------------- */ /* * Get remote info out of PMIx */ void shmemc_pmi_exchange_workers(void) { pmix_status_t ps; pmix_pdata_t fetch; pmix_info_t waiter; int all = 0; int pe; PMIX_INFO_CONSTRUCT(&waiter); PMIX_INFO_LOAD(&waiter, PMIX_WAIT, &all, PMIX_INT); PMIX_PDATA_CONSTRUCT(&fetch); for (pe = 0; pe < proc.nranks; pe += 1) { const pmix_byte_object_t *bop = &fetch.value.data.bo; const int i = (pe + proc.rank) % proc.nranks; snprintf(fetch.key, PMIX_MAX_KEYLEN, wrkr_exch_fmt, i); ps = PMIx_Lookup(&fetch, 1, &waiter, 1); shmemu_assert(ps == PMIX_SUCCESS, "can't find remote worker blob"); /* save published worker */ proc.comms.xchg_wrkr_info[i].buf = (char *) malloc(bop->size); shmemu_assert(proc.comms.xchg_wrkr_info[i].buf != NULL, "can't allocate memory for remote workers"); memcpy(proc.comms.xchg_wrkr_info[i].buf, bop->bytes, bop->size); } } void shmemc_pmi_exchange_rkeys_and_heaps(void) { pmix_status_t ps; pmix_pdata_t rd; #ifndef ENABLE_ALIGNED_ADDRESSES pmix_pdata_t *hd; #endif /* ! ENABLE_ALIGNED_ADDRESSES */ pmix_info_t waiter; int all = 0; int pe; size_t r; PMIX_INFO_CONSTRUCT(&waiter); PMIX_INFO_LOAD(&waiter, PMIX_WAIT, &all, PMIX_INT); PMIX_PDATA_CONSTRUCT(&rd); #ifndef ENABLE_ALIGNED_ADDRESSES PMIX_PDATA_CREATE(hd, 2); #endif /* ! 
ENABLE_ALIGNED_ADDRESSES */ for (r = 0; r < proc.comms.nregions; r += 1) { for (pe = 0; pe < proc.nranks; pe += 1) { const pmix_byte_object_t *bop = &rd.value.data.bo; const int i = (pe + proc.rank) % proc.nranks; ucs_status_t s; /* rkey first */ snprintf(rd.key, PMIX_MAX_KEYLEN, rkey_exch_fmt, r, i); ps = PMIx_Lookup(&rd, 1, &waiter, 1); shmemu_assert(ps == PMIX_SUCCESS, "can't fetch remote rkey"); proc.comms.regions[r].minfo[i].racc.rkey = (ucp_rkey_h) malloc(bop->size); shmemu_assert(proc.comms.regions[r].minfo[i].racc.rkey != NULL, "can't allocate memory for remote rkey"); s = ucp_ep_rkey_unpack(proc.comms.eps[i], bop->bytes, &proc.comms.regions[r].minfo[i].racc.rkey ); shmemu_assert(s == UCS_OK, "can't unpack remote rkey"); #ifndef ENABLE_ALIGNED_ADDRESSES /* now heaps, but skip globals */ if (shmemu_likely(r > 0)) { snprintf(hd[0].key, PMIX_MAX_KEYLEN, region_base_fmt, r, i); snprintf(hd[1].key, PMIX_MAX_KEYLEN, region_size_fmt, r, i); #if PMIX_VERSION_MAJOR > 2 ps = PMIx_Lookup(hd, 2, &waiter, 1); shmemu_assert(ps == PMIX_SUCCESS, "can't fetch heap base/size"); #else /* PMIX_VERSION_MAJOR */ ps = PMIx_Lookup(&hd[0], 1, &waiter, 1); shmemu_assert(ps == PMIX_SUCCESS, "can't fetch heap base"); ps = PMIx_Lookup(&hd[1], 1, &waiter, 1); shmemu_assert(ps == PMIX_SUCCESS, "can't fetch heap size"); #endif /* PMIX_VERSION_MAJOR */ proc.comms.regions[r].minfo[i].base = hd[0].value.data.uint64; proc.comms.regions[r].minfo[i].len = hd[1].value.data.size; /* slightly redundant storage, but useful */ proc.comms.regions[r].minfo[i].end = proc.comms.regions[r].minfo[i].base + hd[1].value.data.size; } #endif /* ! ENABLE_ALIGNED_ADDRESSES */ } } #ifndef ENABLE_ALIGNED_ADDRESSES PMIX_PDATA_FREE(hd, 2); #endif /* ! 
ENABLE_ALIGNED_ADDRESSES */ } /* -------------------------------------------------------------- */ /* * read out the peer PE numbers */ inline static void parse_peers(char *peerstr) { size_t i = 0; char *next; const char *sep = ","; /* parse the PE #s out of the string */ proc.peers = (int *) calloc(proc.npeers, sizeof(*proc.peers)); /* free at end */ shmemu_assert(proc.peers != NULL, "can't allocate memory for peer list"); next = strtok(peerstr, sep); while (next != NULL) { proc.peers[i] = (int) strtol(next, NULL, 10); i += 1; next = strtok(NULL, sep); } } /* -------------------------------------------------------------- */ /* * this barrier is purely for internal use with PMIx, nothing to do * with SHMEM/UCX */ void shmemc_pmi_barrier_all(void) { PMIx_Fence(NULL, 0, NULL, 0); } /* * handle the different init/fini APIs */ inline static pmix_status_t pmix_init_wrapper(pmix_proc_t *pp) { pmix_status_t ps; #ifdef HAVE_PMIX_NO_INIT_HINTS ps = PMIx_Init(pp); #else ps = PMIx_Init(pp, NULL, 0); #endif /* HAVE_PMIX_NO_INIT_HINTS */ return ps; } inline static pmix_status_t pmix_finalize_wrapper(void) { pmix_status_t ps; #ifdef HAVE_PMIX_NO_INIT_HINTS ps = PMIx_Finalize(); #else ps = PMIx_Finalize(NULL, 0); #endif /* HAVE_PMIX_NO_INIT_HINTS */ return ps; } /* * get the PMIx client-side up and running */ void shmemc_pmi_client_init(void) { pmix_proc_t my_proc; /* about me */ pmix_proc_t wc_proc; /* wildcard lookups */ pmix_value_t v; pmix_value_t *vp = &v; /* holds things we get from PMIx */ pmix_status_t ps; ps = pmix_init_wrapper(&my_proc); shmemu_assert(ps == PMIX_SUCCESS, "PMIx can't initialize (%s)", PMIx_Error_string(ps)); /* we can get our own rank immediately */ proc.rank = (int) my_proc.rank; shmemu_assert(proc.rank >= 0, "PMIx PE rank %d is not valid (%s)", proc.rank, PMIx_Error_string(ps)); /* make a new proc to query things not linked to a specific rank */ PMIX_PROC_CONSTRUCT(&wc_proc); strncpy(wc_proc.nspace, my_proc.nspace, PMIX_MAX_NSLEN + 1); wc_proc.rank = 
PMIX_RANK_WILDCARD; ps = PMIx_Get(&wc_proc, PMIX_JOB_SIZE, NULL, 0, &vp); shmemu_assert(ps == PMIX_SUCCESS, "PMIx can't get program size (%s)", PMIx_Error_string(ps)); proc.nranks = (int) vp->data.uint32; /* number of ranks/PEs */ ps = PMIx_Get(&wc_proc, PMIX_UNIV_SIZE, NULL, 0, &vp); shmemu_assert(ps == PMIX_SUCCESS, "PMIx can't get universe size (%s)", PMIx_Error_string(ps)); proc.maxranks = (int) vp->data.uint32; /* total ranks available */ /* is the world a sane size? */ shmemu_assert(proc.nranks > 0, "PMIx count of PE ranks %d is not valid", proc.nranks); shmemu_assert(proc.maxranks > 0, "PMIx PE universe size %d is not valid", proc.maxranks); shmemu_assert(IS_VALID_PE_NUMBER(proc.rank), "PMIx PE rank %d is not valid", proc.rank); /* what's on this node? */ ps = PMIx_Get(&wc_proc, PMIX_LOCAL_SIZE, NULL, 0, &vp); shmemu_assert(ps == PMIX_SUCCESS, "PMIx can't find PE's peers (%s)", PMIx_Error_string(ps)); proc.npeers = (int) vp->data.uint32; /* how's the 'hood look? */ shmemu_assert(proc.npeers >= 0, "PMIx PE's peer count %d must be >= 0", proc.npeers); shmemu_assert(proc.npeers <= proc.nranks, "PMIx PE's peer count %d bigger than program %d", proc.npeers, proc.nranks); ps = PMIx_Get(&wc_proc, PMIX_LOCAL_PEERS, NULL, 0, &vp); shmemu_assert(ps == PMIX_SUCCESS, "PMIx can't find PE's peer list (s)", PMIx_Error_string(ps)); parse_peers(vp->data.string); } /* * shut down PMIx client-side */ void shmemc_pmi_client_finalize(void) { pmix_status_t ps; int pe; ps = pmix_finalize_wrapper(); shmemu_assert(ps == PMIX_SUCCESS, "PMIx can't finalize (%s)", PMIx_Error_string(ps)); for (pe = 0; pe < proc.nranks; pe += 1) { size_t r; /* clean up allocations for exchanged buffers */ free(proc.comms.xchg_wrkr_info[pe].buf); for (r = 0; r < proc.comms.nregions; r += 1) { ucp_rkey_destroy(proc.comms.regions[r].minfo[pe].racc.rkey); } } /* clean up memory recording peer PEs */ free(proc.peers); } /* * crunch out if fatal error */ void shmemc_pmi_client_abort(const char *msg, int 
status) { pmix_status_t ps; ps = PMIx_Abort(status, msg, NULL, 0); shmemu_assert(ps == PMIX_SUCCESS, "PMIx can't abort (%s)", PMIx_Error_string(ps)); }
require "rails_helper"

# Spec for EndCoverageRequest.from_form: raw cancel/terminate form params
# must be normalized into a flat request hash.
describe EndCoverageRequest, "from form" do
  subject { EndCoverageRequest }

  # Raw form submission; only member "0" has include_selected == "1".
  let(:form_params) do
    {
      cancel_terminate: {
        operation: "terminate",
        reason: "termination_of_benefits",
        benefit_end_date: "01/16/2015",
        people_attributes: {
          "0" => {include_selected: "1", m_id: "1983410", name: "<NAME>", role: "self"},
          "1" => {include_selected: "0", m_id: "1983470", name: "<NAME>", role: "spouse"},
          "2" => {include_selected: "0", m_id: "1984090", name: "<NAME>", role: "child"}
        }
      },
      id: "4301"
    }
  end

  # NOTE(review): the expected request lists all three member ids even though
  # only the subscriber ("self") is selected above -- presumably terminating
  # the subscriber ends coverage for every enrollee; confirm against
  # from_form's implementation.
  let(:request) do
    {
      policy_id: "4301",
      affected_enrollee_ids: ["1983410", "1983470", "1984090"],
      coverage_end: "01/16/2015",
      operation: "terminate",
      reason: "termination_of_benefits",
      transmit: false,
      current_user: current_user
    }
  end

  let(:current_user) { '<EMAIL>' }

  it "should create a request for all members to be terminated " do
    expect(subject.from_form(form_params, current_user)).to eq request
  end
end
"use strict";
// Auto-generated icon definition (react-icons-kit style): SVG viewBox and
// path data for the Material "local pharmacy" outline glyph. Do not edit
// the path data by hand.

Object.defineProperty(exports, "__esModule", { value: true });
exports.ic_local_pharmacy_outline = void 0;
var ic_local_pharmacy_outline = { "viewBox": "0 0 24 24", "children": [{ "name": "path", "attribs": { "d": "M0 0h24v24H0V0z", "fill": "none" }, "children": [] }, { "name": "path", "attribs": { "d": "M21 5h-2.64l1.14-3.14L17.15 1l-1.46 4H3v2l2 6-2 6v2h18v-2l-2-6 2-6V5zm-3.9 8.63L18.89 19H5.11l1.79-5.37.21-.63-.21-.63L5.11 7h13.78l-1.79 5.37-.21.63.21.63zM13 9h-2v3H8v2h3v3h2v-3h3v-2h-3z" }, "children": [] }] };
exports.ic_local_pharmacy_outline = ic_local_pharmacy_outline;
import os
import pandas as pd
import matplotlib.pyplot as plt


class DataProcessor:
    """Compares ground-truth vs. detected corner counts and saves a pie chart."""

    def __init__(self, created_directory_path, df, path):
        # Directory to be created by create_directory().
        self.created_directory_path = created_directory_path
        # DataFrame; compare_data() reads its 'gt_corners' and 'rb_corners' columns.
        self.df = df
        # Directory the chart image is written into.
        self.path = path
        # Paths of the images produced so far.
        self.direction_list = []

    def create_directory(self):
        """Create the output directory, reporting whether it already existed."""
        try:
            os.mkdir(self.created_directory_path)
        except OSError:
            print("Directory already exists")
        else:
            print("Successfully created the directory")

    def compare_data(self):
        """Plot matching vs. non-matching corner counts as a pie chart and save it."""
        self.df['check'] = (self.df['gt_corners'] == self.df['rb_corners'])
        check_df = self.df['check'].value_counts()
        axes = check_df.plot.pie(figsize=(5, 5), colors=['green', 'red'], ylabel="")
        # User-facing label text intentionally left unchanged.
        axes.set_xlabel("Количество совпадающих значений к не совпадающим")
        # BUG FIX: paths were built with hard-coded '\\' separators, which only
        # worked on Windows; os.path.join is portable. The same path was also
        # formatted twice -- build it once and reuse it.
        output_file = os.path.join(self.path, 'valid.png')
        plt.savefig(output_file)
        # Release the figure so repeated calls don't accumulate open figures.
        plt.close()
        self.direction_list.append(output_file)


# Example usage
data = {'gt_corners': [True, False, True, False, True],
        'rb_corners': [True, True, False, False, True]}
df = pd.DataFrame(data)
processor = DataProcessor("output_directory", df, "output_path")
processor.create_directory()
processor.compare_data()
# Demonstration of the DatasetManager API: registration, listing, lookup,
# and removal of named datasets.
manager = DatasetManager()

# Registering a dataset succeeds once; a duplicate name is rejected.
for name in ('BaselineDataset', 'RefinementDataset', 'BaselineDataset'):
    print(manager.add_dataset(name))  # True, True, False (already exists)

# Enumerate everything currently registered.
print(manager.list_datasets())  # ['BaselineDataset', 'RefinementDataset']

# Membership checks.
print(manager.dataset_exists('BaselineDataset'))  # True
print(manager.dataset_exists('NewDataset'))  # False

# Removal succeeds only for names the manager knows about.
print(manager.remove_dataset('RefinementDataset'))  # True
print(manager.remove_dataset('NonExistentDataset'))  # False

print(manager.list_datasets())  # ['BaselineDataset']
// Import the modal factory from modal.js
import Modal from "./modal.js";

const modal = Modal();

// Grab the modal's heading, description and confirm-button elements.
const modalTitle = document.querySelector(".modal h2");
const modalDescription = document.querySelector(".modal p");
const modalButton = document.querySelector(".modal button");

// Every <a class="check"> inside an element with class "actions".
const checkButtons = document.querySelectorAll(".actions a.check");

checkButtons.forEach((button) => {
  // BUG FIX: the handler was registered as ("click", handleClick) -- a comma
  // expression that only worked by accident (it evaluates to handleClick).
  // Register the handler directly.
  button.addEventListener("click", handleClick);
});

// Every <a class="delete"> inside an element with class "actions".
// (Renamed from "deleteButton": querySelectorAll returns a list.)
const deleteButtons = document.querySelectorAll(".actions a.delete");

deleteButtons.forEach((button) => {
  button.addEventListener("click", (event) => handleClick(event, false));
});

/**
 * Shared click handler for the "check" and "delete" actions.
 * Sets the modal form's action URL and the modal texts for the chosen
 * operation, then opens the modal.
 * `check` is true for "mark as read" and false for "delete".
 */
function handleClick(event, check = true) {
  // Keep the anchor from navigating (no '#' appended to the page URL).
  event.preventDefault();

  // Build the form action from the room/question ids held in data attributes.
  const slug = check ? "check" : "delete";
  const questionId = event.target.dataset.id;
  const roomId = document.querySelector("#room-id").dataset.id;
  const form = document.querySelector(".modal form");
  form.setAttribute("action", `/question/${roomId}/${questionId}/${slug}`);

  // Update the modal's HTML for the chosen action
  // (user-facing strings intentionally kept in Portuguese).
  const text = check ? "Marcar como lida" : "Excluir";
  modalTitle.innerHTML = `${text} esta pergunta`;
  modalDescription.innerHTML = `Tem certeza que deseja ${text.toLowerCase()} essa pergunta?`;
  modalButton.innerHTML = `Sim, ${text.toLowerCase()}`;

  // Red confirm button only for the destructive (delete) action.
  check
    ? modalButton.classList.remove("red")
    : modalButton.classList.add("red");

  // Show the modal.
  modal.open();
}
public extension Comparable {
    /**
     Constrains a value to a closed interval.

     - parameter lower: The lowest value allowed in the closed interval
     - parameter upper: The highest value allowed in the closed interval
     - returns: A value within the closed interval, as close to the original value as possible
     */
    func generateValueWithinBounds(lower: Self, upper: Self) -> Self {
        // Below the interval: snap up to the lower bound.
        guard self >= lower else { return lower }
        // Above the interval: snap down to the upper bound.
        guard self <= upper else { return upper }
        // Already inside the interval: return unchanged.
        return self
    }
}
<reponame>Chexxo/Server<filename>src/server/ChexxoServer.ts<gh_stars>0
import { APIProvider } from "../api/APIProvider";
import { CertificateProvider } from "../certificate/CertificateProvider";
import { Logger } from "../shared/logger/Logger";

/**
 * Represents the chexxo server. Holds all server
 * components and initializes objects needed.
 */
export class ChexxoServer {
  public constructor(
    /**
     * The {@link APIProvider} that will be used
     * in order to expose the server functionality
     * to the user.
     */
    private apiProvider: APIProvider,
    /**
     * The {@link CertificateProvider} which will
     * be used to get the certificate from a
     * requested domain.
     */
    private certificateProvider: CertificateProvider,
    /**
     * The {@link Logger} which will
     * be used to log events.
     */
    private logger: Logger
  ) {}

  /**
   * Initializes the {@link APIProvider}, handing it the certificate
   * provider and logger. In case of express this starts the express
   * instance.
   */
  public init(): void {
    this.apiProvider.init(this.certificateProvider, this.logger);
  }
}
#!/bin/bash
# BUG FIX: shebang was "#!/bin/sh", but this script uses bash-only features
# ([[ ]] tests and the "function" keyword), which fail under a POSIX sh
# such as dash.
#
# Wrapper script to run the OSG Flocking report
# Example: ./topoppusage_run.sh weekly
# This assumes you're running the reports from a virtualenv

TOPDIR=/home/sbhat/gracc-reporting
LOGFILE=/var/log/gracc-reporting/topoppusage_run.log    # Ideally should be in /var/log/gracc-reporting
VENVDIR=gracc_venv

# Print usage information and exit.
function usage {
    echo "Usage: ./topoppusage_run.sh <time period>"
    echo ""
    echo "Time periods are: daily, weekly, bimonthly, monthly, yearly"
    exit
}

# Compute the report start time for the given period. Sets the global
# $starttime and also echoes it (kept for backward-compatible output).
function set_dates {
    case $1 in
        "daily") starttime=`date --date='1 day ago' +"%F %T"`;;
        "weekly") starttime=`date --date='1 week ago' +"%F %T"`;;
        "bimonthly") starttime=`date --date='2 month ago' +"%F %T"`;;
        "monthly") starttime=`date --date='1 month ago' +"%F %T"`;;
        "yearly") starttime=`date --date='1 year ago' +"%F %T"`;;
        *) echo "Error: unknown period $1. Use weekly, monthly or yearly"
           exit 1;;
    esac
    echo "$starttime"
}

# Initialize everything
# Check arguments
if [[ $# -ne 1 ]] || [[ $1 == "-h" ]] || [[ $1 == "--help" ]] ; then
    usage
fi

set_dates "$1"
endtime=`date +"%F %T"`

# Activate the virtualenv
cd "$TOPDIR"
source "$VENVDIR/bin/activate"

# Run the report
echo "START" `date` >> "$LOGFILE"
osgtopoppusagereport -s "$starttime" -e "$endtime" -N 10

# Error handling ($? is the exit status of osgtopoppusagereport)
if [ $? -ne 0 ]
then
    echo "Error sending report. Please investigate" >> "$LOGFILE"
else
    echo "Sent report" >> "$LOGFILE"
fi

echo "END" `date` >> "$LOGFILE"
public class ProgEx9_6 {

    /**
     * Reads the real and imaginary parts from the first two command-line
     * arguments and prints the resulting complex number.
     */
    public static void main(String args[]) {
        MyComplex z = new MyComplex();
        z.real = Double.parseDouble(args[0]);
        z.imag = Double.parseDouble(args[1]);
        display(z);
    }

    /**
     * Prints z in the form "a + ib" / "a - ib", choosing the sign from the
     * imaginary part and printing its absolute value.
     */
    static void display(MyComplex z) {
        char sign;
        if (z.imag < 0) {
            sign = '-';
        } else {
            sign = '+';
        }
        System.out.printf("%f %c i%f\n", z.real, sign, Math.abs(z.imag));
    }
}
<filename>core/src/mindustry/entities/effect/GroundEffectEntity.java
package mindustry.entities.effect;

import arc.math.Mathf;
import arc.util.Time;
import mindustry.Vars;
import mindustry.entities.Effects;
import mindustry.entities.Effects.Effect;
import mindustry.entities.Effects.EffectRenderer;
import mindustry.entities.type.EffectEntity;
import mindustry.world.Tile;

/**
 * A ground effect contains an effect that is rendered on the ground layer as opposed to the top layer.
 */
public class GroundEffectEntity extends EffectEntity{
    /** True once a static effect's initial animation has finished and it has
     * entered its lying-on-the-ground phase (the clock is restarted then). */
    private boolean once;

    @Override
    public void update(){
        GroundEffect effect = (GroundEffect)this.effect;

        if(effect.isStatic){
            time += Time.delta();
            time = Mathf.clamp(time, 0, effect.staticLife);

            if(!once && time >= lifetime()){
                // Initial animation finished: reuse `time` to count the
                // static (on-ground) phase from zero.
                once = true;
                time = 0f;
                // Remove immediately if the effect landed on a liquid tile.
                Tile tile = Vars.world.tileWorld(x, y);
                if(tile != null && tile.floor().isLiquid){
                    remove();
                }
            }else if(once && time >= effect.staticLife){
                // Static phase expired.
                remove();
            }
        }else{
            super.update();
        }
    }

    @Override
    public void draw(){
        GroundEffect effect = (GroundEffect)this.effect;

        // In the static phase, render the frame at full lifetime (the final
        // animation frame); otherwise render the normal time-based frame.
        if(once && effect.isStatic)
            Effects.renderEffect(id, effect, color, lifetime(), rotation, x, y, data);
        else
            Effects.renderEffect(id, effect, color, time, rotation, x, y, data);
    }

    @Override
    public void reset(){
        super.reset();
        once = false;
    }

    /**
     * An effect that is rendered on the ground layer as opposed to the top layer.
     */
    public static class GroundEffect extends Effect{
        /**
         * How long this effect stays on the ground when static.
         */
        public final float staticLife;
        /**
         * If true, this effect will stop and lie on the ground for a specific duration,
         * after its initial lifetime is over.
         */
        public final boolean isStatic;

        // Static effect with an explicit on-ground duration.
        public GroundEffect(float life, float staticLife, EffectRenderer draw){
            super(life, draw);
            this.staticLife = staticLife;
            this.isStatic = true;
        }

        // Explicitly static or non-static, with no on-ground duration.
        public GroundEffect(boolean isStatic, float life, EffectRenderer draw){
            super(life, draw);
            this.staticLife = 0f;
            this.isStatic = isStatic;
        }

        // Non-static ground effect.
        public GroundEffect(float life, EffectRenderer draw){
            super(life, draw);
            this.staticLife = 0f;
            this.isStatic = false;
        }
    }
}
def classify_odd_even(input_list):
    """Split input_list into (odd_numbers, even_numbers), preserving order."""
    # An element is even exactly when it has no remainder modulo 2.
    odd_nums = [n for n in input_list if n % 2 != 0]
    even_nums = [n for n in input_list if n % 2 == 0]
    return odd_nums, even_nums


input_list = [1, 2, 3, 4, 5]
odd_nums, even_nums = classify_odd_even(input_list)
print("Odd Numbers: ", odd_nums)
print("Even Numbers: ", even_nums)
from collections import deque


def find_shortest_path(graph, start, end):
    """Return the shortest path from start to end as a list of nodes.

    Breadth-first search over an adjacency-list graph (dict mapping a node
    to a list of its neighbors). Returns None when no path exists.
    """
    # PERF FIX: list.pop(0) is O(n) per dequeue; deque.popleft() is O(1).
    queue = deque([[start]])
    # ROBUSTNESS: a visited set prevents re-enqueueing nodes already reached,
    # avoiding exponential path duplication on graphs with many routes.
    visited = {start}

    while queue:
        path = queue.popleft()
        node = path[-1]
        # BFS invariant: the first time the target is dequeued, the path
        # leading to it is a shortest one.
        if node == end:
            return path
        for child in graph[node]:
            if child not in visited:
                visited.add(child)
                queue.append(path + [child])

    return None  # no path between start and end


# Create a graph
graph = {
    'A': ['B', 'C', 'E'],
    'B': ['F', 'D'],
    'C': ['G'],
    'D': [],
    'E': [],
    'F': ['D'],
    'G': []
}

# Find the shortest path
print(find_shortest_path(graph, 'A', 'D'))
#!/bin/bash
# BUG FIX: shebang was "#/bin/bash" (missing '!'), so the kernel would not
# select bash as the interpreter when the script is executed directly.

echo "Starting server"

# Build without running tests, then launch the Ninja framework dev server.
mvn clean install -DskipTests=true
mvn ninja:run

# NOTE(review): "mvn ninja:run" normally blocks in the foreground, so this
# curl only runs after the server process exits -- confirm this ordering is
# what was intended.
curl -X POST 127.0.0.1:8080/initialize
<reponame>paullewallencom/spring-978-1-7883-9242-6
package com.packtpub.yummy.reverseproxy;

import com.netflix.zuul.ZuulFilter;
import com.netflix.zuul.context.RequestContext;
import com.netflix.zuul.exception.ZuulException;
import org.slf4j.MDC;
import org.springframework.stereotype.Component;

import java.util.UUID;

import static org.springframework.cloud.netflix.zuul.filters.support.FilterConstants.PRE_TYPE;
import static org.springframework.integration.IntegrationMessageHeaderAccessor.CORRELATION_ID;

/**
 * Zuul "pre" filter that tags every proxied request with a freshly generated
 * correlation id: once as a forwarded request header and once in the SLF4J
 * MDC, so log lines belonging to one request can be tied together.
 */
@Component
public class CorrelationIdZuulFilter extends ZuulFilter {

    /** Runs in the "pre" (before routing) phase. */
    @Override
    public String filterType() {
        return PRE_TYPE;
    }

    /** Order 0: runs first among the pre filters. */
    @Override
    public int filterOrder() {
        return 0;
    }

    /** Applies to every request. */
    @Override
    public boolean shouldFilter() {
        return true;
    }

    @Override
    public Object run() throws ZuulException {
        // One random UUID per request, used for both the header and the MDC.
        final String correlationId = UUID.randomUUID().toString();
        RequestContext.getCurrentContext().addZuulRequestHeader(CORRELATION_ID, correlationId);
        MDC.put(CORRELATION_ID, correlationId);
        return null;
    }
}
#!/bin/bash
set -e

# Parse command line arguments.
# BUG FIX: the original combined "for key in \"$@\"" with "key=\"$1\"" and
# shift, so the loop variable and the shifts could fall out of step. The
# conventional while-loop over $1 below consumes recognized flags and stops
# at the first unrecognized argument; whatever remains is forwarded via
# "$@" to install-common.sh at the bottom, as before.
shared=false
cuda=false
while [[ $# -gt 0 ]]; do
    case "$1" in
        --shared)
            shared=true
            shift
            ;;
        --cuda)
            cuda=true
            shift
            ;;
        *)
            break
            ;;
    esac
done

# add repository with recent versions of compilers
apt-get -y update
apt-get -y install software-properties-common
add-apt-repository -y ppa:ubuntu-toolchain-r/test
apt-get -y clean

# install requirements
apt-get -y update
apt-get -y install \
    build-essential \
    curl \
    git \
    cmake \
    unzip \
    autoconf \
    autogen \
    libtool \
    mlocate \
    zlib1g-dev \
    g++-6 \
    python \
    python3-numpy \
    python3-dev \
    python3-pip \
    python3-wheel \
    wget

if $shared; then
    # install bazel for the shared library version
    echo "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list
    curl https://bazel.build/bazel-release.pub.gpg | apt-key add -
    apt-get -y update
    apt-get -y install openjdk-8-jdk bazel
fi

if $cuda; then
    # install libcupti
    apt-get -y install cuda-command-line-tools-9-1
fi

apt-get -y clean

# when building TF with Intel MKL support, `locate` database needs to exist
updatedb

# build and install tensorflow_cc (remaining CLI arguments are forwarded)
./tensorflow_cc/Dockerfiles/install-common.sh "$@"
import axios from "axios";

// Action types for the quote-fetch lifecycle.
export const FETCH_QUOTE_START = "FETCH_QUOTE_START";
export const FETCH_QUOTE_SUCCESS = "FETCH_QUOTE_SUCCESS";
export const FETCH_QUOTE_FAIL = "FETCH_QUOTE_FAIL";

/**
 * Thunk action creator: fetches a random quote from the quotable API.
 * Dispatches FETCH_QUOTE_START, then FETCH_QUOTE_SUCCESS with the response
 * data as payload, or FETCH_QUOTE_FAIL with the error as payload.
 */
export const getQuote = () => dispatch => {
  dispatch({ type: FETCH_QUOTE_START });

  axios
    .get("https://api.quotable.io/random")
    // CLEANUP: removed a stray console.log debug statement from the
    // success path.
    .then(res => dispatch({ type: FETCH_QUOTE_SUCCESS, payload: res.data }))
    .catch(err => dispatch({ type: FETCH_QUOTE_FAIL, payload: err }));
};
#!/bin/sh
# Check reading directories matching non pure ascii idents
# See bug #5715 (utf-8 working on macos X and linux)
# Windows is still not compliant
a=$(uname)
if [ "$a" = "Darwin" ] || [ "$a" = "Linux" ]; then
    rm -f misc/deps/théorèmes/*.v
    # CLEANUP: removed an unused "tmpoutput=$(mktemp /tmp/coqcheck.XXXXXX)";
    # the temp file was never written to, read, or removed, so every run
    # leaked a file in /tmp.
    $coqc -R misc/deps AlphaBêta misc/deps/αβ/γδ.v
    R=$?
    $coqtop -R misc/deps AlphaBêta -load-vernac-source misc/deps/αβ/εζ.v
    S=$?
    # Succeed only if both the compiler and the toplevel runs succeeded.
    if [ $R = 0 ] && [ $S = 0 ]; then
        exit 0
    else
        exit 1
    fi
fi
// Runtime environment configuration; apiUrl is filled in per deployment.
// FIX: renamed the misspelled identifier "envimorent" -> "environment".
// The module's default export is unchanged, so importers are unaffected.
const environment = {
  apiUrl: ''
}

export default environment
<filename>src/components/Highlight/index.js
// Barrel file: re-exports the Highlight component so consumers can import
// it from the directory path ("components/Highlight") directly.
import Highlight from './Highlight'

export default Highlight
#!/bin/bash
# Builds the "local-production-local" kustomize overlay from the
# "local-production" template, substituting the operator's admin email and
# domain name into the copied config files.
# NOTE(review): "prompt" is not a standard command -- presumably a helper
# function provided by the calling/sourcing environment; confirm before
# running this script standalone.

ARCUS_ADMIN_EMAIL=${ARCUS_ADMIN_EMAIL:-me@example.com}
# Ask interactively only while the placeholder default is still in place.
if [ "$ARCUS_ADMIN_EMAIL" = "me@example.com" ]; then
    prompt ARCUS_ADMIN_EMAIL "Please enter your admin email address (or set ARCUS_ADMIN_EMAIL): "
fi

# Copy the template overlay, then patch in the real admin email.
mkdir -p overlays/local-production-local
cp -r overlays/local-production/* overlays/local-production-local
sed -i "s/me@example.com/$ARCUS_ADMIN_EMAIL/" overlays/local-production-local/cert-provider.yaml

ARCUS_DOMAIN_NAME=${ARCUS_DOMAIN_NAME:-example.com}
if [ "$ARCUS_DOMAIN_NAME" = "example.com" ]; then
    prompt ARCUS_DOMAIN_NAME "Please enter your domain name (or set ARCUS_DOMAIN_NAME): "
fi

# Replace the upstream domain with the operator's domain in the shared
# config and the UI service ingress definition.
cp config/shared-config/config.yml overlays/local-production-local/shared-config.yaml
sed -i "s/arcussmarthome.com/$ARCUS_DOMAIN_NAME/" overlays/local-production-local/shared-config.yaml

cp config/service/ui-service-ingress.yml overlays/local-production-local/ui-service-ingress.yml
sed -i "s/arcussmarthome.com/$ARCUS_DOMAIN_NAME/" overlays/local-production-local/ui-service-ingress.yml
<gh_stars>100-1000
//===------------------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// Also available under a BSD-style license. See LICENSE.
//
//===----------------------------------------------------------------------===//
//
// Declarations of the TorchConversion pipeline builders and pass factory
// functions (implementations live in the TorchConversion transforms).

#ifndef TORCHMLIR_DIALECT_TORCHCONVERSION_TRANSFORMS_PASSES_H
#define TORCHMLIR_DIALECT_TORCHCONVERSION_TRANSFORMS_PASSES_H

#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"
#include "torch-mlir/Dialect/Torch/Transforms/Passes.h"
#include <memory>

namespace mlir {
class ModuleOp;

namespace torch {
namespace TorchConversion {

/// Creates a pipeline that lowers from the torch backend contract to the
/// linalg-on-tensors backend contract.
void createTorchBackendToLinalgOnTensorsBackendPipeline(
    OpPassManager &pm,
    const torch::Torch::TorchLoweringPipelineOptions &options);

/// Creates a pipeline that lowers from the torch backend contract to the
/// TOSA backend contract.
void createTorchBackendToTosaBackendPipeline(
    OpPassManager &pm,
    const torch::Torch::TorchLoweringPipelineOptions &options);

/// Factory for the verify-invariants-before-backend-lowering pass
/// (operates on ModuleOp).
std::unique_ptr<OperationPass<ModuleOp>>
createVerifyInvariantsBeforeBackendLoweringPass();

/// Factory for the func backend type conversion pass (operates on ModuleOp).
std::unique_ptr<OperationPass<ModuleOp>> createFuncBackendTypeConversionPass();

/// Factory for the finalizing backend type conversion pass (operates on
/// func::FuncOp).
std::unique_ptr<OperationPass<func::FuncOp>>
createFinalizingBackendTypeConversionPass();

/// Factory for the linalg-on-tensors backend contract verification pass
/// (operates on ModuleOp).
std::unique_ptr<OperationPass<ModuleOp>>
createVerifyLinalgOnTensorsBackendContractPass();

/// Factory for the TOSA backend contract verification pass (operates on
/// ModuleOp).
std::unique_ptr<OperationPass<ModuleOp>> createVerifyTosaBackendContractPass();

} // namespace TorchConversion

/// Registers all Torch transformation passes.
void registerTorchConversionPasses();

} // namespace torch
} // namespace mlir

#endif // TORCHMLIR_DIALECT_TORCHCONVERSION_TRANSFORMS_PASSES_H
def removeVowels(list):
    """Return the words from `list` that contain no lowercase vowels.

    NOTE(review): despite the name, this keeps vowel-free words rather than
    stripping vowels out of words, and only lowercase vowels are considered
    -- confirm this is the intended contract before renaming.
    """
    vowels = ("a", "e", "i", "o", "u")
    # Keep a word only when none of the vowels occurs in it.
    return [word for word in list
            if not any(letter in word for letter in vowels)]
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) # NOTE: spack-completion.bash is auto-generated by: # # $ spack commands --aliases --format=bash # --header=bash/spack-completion.in --update=spack-completion.bash # # Please do not manually modify this file. # The following global variables are set by Bash programmable completion: # # COMP_CWORD: An index into ${COMP_WORDS} of the word containing the # current cursor position # COMP_KEY: The key (or final key of a key sequence) used to invoke # the current completion function # COMP_LINE: The current command line # COMP_POINT: The index of the current cursor position relative to the # beginning of the current command # COMP_TYPE: Set to an integer value corresponding to the type of # completion attempted that caused a completion function # to be called # COMP_WORDBREAKS: The set of characters that the readline library treats # as word separators when performing word completion # COMP_WORDS: An array variable consisting of the individual words in # the current command line # # The following global variable is used by Bash programmable completion: # # COMPREPLY: An array variable from which bash reads the possible # completions generated by a shell function invoked by the # programmable completion facility # # See `man bash` for more details. # Bash programmable completion for Spack _bash_completion_spack() { # In all following examples, let the cursor be denoted by brackets, i.e. [] # For our purposes, flags should not affect tab completion. For instance, # `spack install []` and `spack -d install --jobs 8 []` should both give the same # possible completions. Therefore, we need to ignore any flags in COMP_WORDS. 
local COMP_WORDS_NO_FLAGS=() local index=0 while [[ "$index" -lt "$COMP_CWORD" ]] do if [[ "${COMP_WORDS[$index]}" == [a-z]* ]] then COMP_WORDS_NO_FLAGS+=("${COMP_WORDS[$index]}") fi let index++ done # Options will be listed by a subfunction named after non-flag arguments. # For example, `spack -d install []` will call _spack_install # and `spack compiler add []` will call _spack_compiler_add local subfunction=$(IFS='_'; echo "_${COMP_WORDS_NO_FLAGS[*]}") # Translate dashes to underscores, as dashes are not permitted in # compatibility mode. See https://github.com/spack/spack/pull/4079 subfunction=${subfunction//-/_} # However, the word containing the current cursor position needs to be # added regardless of whether or not it is a flag. This allows us to # complete something like `spack install --keep-st[]` COMP_WORDS_NO_FLAGS+=("${COMP_WORDS[$COMP_CWORD]}") # Since we have removed all words after COMP_CWORD, we can safely assume # that COMP_CWORD_NO_FLAGS is simply the index of the last element local COMP_CWORD_NO_FLAGS=$((${#COMP_WORDS_NO_FLAGS[@]} - 1)) # There is no guarantee that the cursor is at the end of the command line # when tab completion is envoked. For example, in the following situation: # `spack -d [] install` # if the user presses the TAB key, a list of valid flags should be listed. # Note that we cannot simply ignore everything after the cursor. In the # previous scenario, the user should expect to see a list of flags, but # not of other subcommands. Obviously, `spack -d list install` would be # invalid syntax. To accomplish this, we use the variable list_options # which is true if the current word starts with '-' or if the cursor is # not at the end of the line. local list_options=false if [[ "${COMP_WORDS[$COMP_CWORD]}" == -* || "$COMP_POINT" -ne "${#COMP_LINE}" ]] then list_options=true fi # In general, when envoking tab completion, the user is not expecting to # see optional flags mixed in with subcommands or package names. 
Tab # completion is used by those who are either lazy or just bad at spelling. # If someone doesn't remember what flag to use, seeing single letter flags # in their results won't help them, and they should instead consult the # documentation. However, if the user explicitly declares that they are # looking for a flag, we can certainly help them out. # `spack install -[]` # and # `spack install --[]` # should list all flags and long flags, respectively. Furthermore, if a # subcommand has no non-flag completions, such as `spack arch []`, it # should list flag completions. local cur=${COMP_WORDS_NO_FLAGS[$COMP_CWORD_NO_FLAGS]} # If the cursor is in the middle of the line, like: # `spack -d [] install` # COMP_WORDS will not contain the empty character, so we have to add it. if [[ "${COMP_LINE:$COMP_POINT:1}" == " " ]] then cur="" fi # Uncomment this line to enable logging #_test_vars >> temp # Make sure function exists before calling it if [[ "$(type -t $subfunction)" == "function" ]] then $subfunction COMPREPLY=($(compgen -W "$SPACK_COMPREPLY" -- "$cur")) fi } # Helper functions for subcommands # Results of each query are cached via environment variables _subcommands() { if [[ -z "${SPACK_SUBCOMMANDS:-}" ]] then SPACK_SUBCOMMANDS="$(spack commands)" fi SPACK_COMPREPLY="$SPACK_SUBCOMMANDS" } _all_packages() { if [[ -z "${SPACK_ALL_PACKAGES:-}" ]] then SPACK_ALL_PACKAGES="$(spack list)" fi SPACK_COMPREPLY="$SPACK_ALL_PACKAGES" } _all_resource_hashes() { if [[ -z "${SPACK_ALL_RESOURCES_HASHES:-}" ]] then SPACK_ALL_RESOURCE_HASHES="$(spack resource list --only-hashes)" fi SPACK_COMPREPLY="$SPACK_ALL_RESOURCE_HASHES" } _installed_packages() { if [[ -z "${SPACK_INSTALLED_PACKAGES:-}" ]] then SPACK_INSTALLED_PACKAGES="$(spack --color=never find --no-groups)" fi SPACK_COMPREPLY="$SPACK_INSTALLED_PACKAGES" } _installed_compilers() { if [[ -z "${SPACK_INSTALLED_COMPILERS:-}" ]] then SPACK_INSTALLED_COMPILERS="$(spack compilers | egrep -v "^(-|=)")" fi 
SPACK_COMPREPLY="$SPACK_INSTALLED_COMPILERS" } _providers() { if [[ -z "${SPACK_PROVIDERS:-}" ]] then SPACK_PROVIDERS="$(spack providers)" fi SPACK_COMPREPLY="$SPACK_PROVIDERS" } _mirrors() { if [[ -z "${SPACK_MIRRORS:-}" ]] then SPACK_MIRRORS="$(spack mirror list | awk '{print $1}')" fi SPACK_COMPREPLY="$SPACK_MIRRORS" } _repos() { if [[ -z "${SPACK_REPOS:-}" ]] then SPACK_REPOS="$(spack repo list | awk '{print $1}')" fi SPACK_COMPREPLY="$SPACK_REPOS" } _tests() { if [[ -z "${SPACK_TESTS:-}" ]] then SPACK_TESTS="$(spack test -l)" fi SPACK_COMPREPLY="$SPACK_TESTS" } _environments() { if [[ -z "${SPACK_ENVIRONMENTS:-}" ]] then SPACK_ENVIRONMENTS="$(spack env list)" fi SPACK_COMPREPLY="$SPACK_ENVIRONMENTS" } _keys() { if [[ -z "${SPACK_KEYS:-}" ]] then SPACK_KEYS="$(spack gpg list)" fi SPACK_COMPREPLY="$SPACK_KEYS" } _config_sections() { if [[ -z "${SPACK_CONFIG_SECTIONS:-}" ]] then SPACK_CONFIG_SECTIONS="$(spack config list)" fi SPACK_COMPREPLY="$SPACK_CONFIG_SECTIONS" } _extensions() { if [[ -z "${SPACK_EXTENSIONS:-}" ]] then SPACK_EXTENSIONS="$(spack extensions)" fi SPACK_COMPREPLY="$SPACK_EXTENSIONS" } # Testing functions # Function for unit testing tab completion # Syntax: _spack_completions spack install py- _spack_completions() { local COMP_CWORD COMP_KEY COMP_LINE COMP_POINT COMP_TYPE COMP_WORDS COMPREPLY # Set each variable the way bash would COMP_LINE="$*" COMP_POINT=${#COMP_LINE} COMP_WORDS=("$@") if [[ ${COMP_LINE: -1} == ' ' ]] then COMP_WORDS+=('') fi COMP_CWORD=$((${#COMP_WORDS[@]} - 1)) COMP_KEY=9 # ASCII 09: Horizontal Tab COMP_TYPE=64 # ASCII 64: '@', to list completions if the word is not unmodified # Run Spack's tab completion function _bash_completion_spack # Return the result echo "${COMPREPLY[@]:-}" } # Log the environment variables used # Syntax: _test_vars >> temp _test_vars() { echo "-----------------------------------------------------" echo "Variables set by bash:" echo echo "COMP_LINE: '$COMP_LINE'" echo "# COMP_LINE: '${#COMP_LINE}'" 
echo "COMP_WORDS: $(_pretty_print COMP_WORDS[@])" echo "# COMP_WORDS: '${#COMP_WORDS[@]}'" echo "COMP_CWORD: '$COMP_CWORD'" echo "COMP_KEY: '$COMP_KEY'" echo "COMP_POINT: '$COMP_POINT'" echo "COMP_TYPE: '$COMP_TYPE'" echo "COMP_WORDBREAKS: '$COMP_WORDBREAKS'" echo echo "Intermediate variables:" echo echo "COMP_WORDS_NO_FLAGS: $(_pretty_print COMP_WORDS_NO_FLAGS[@])" echo "# COMP_WORDS_NO_FLAGS: '${#COMP_WORDS_NO_FLAGS[@]}'" echo "COMP_CWORD_NO_FLAGS: '$COMP_CWORD_NO_FLAGS'" echo echo "Subfunction: '$subfunction'" if $list_options then echo "List options: 'True'" else echo "List options: 'False'" fi echo "Current word: '$cur'" } # Pretty-prints one or more arrays # Syntax: _pretty_print array1[@] ... _pretty_print() { for arg in $@ do local array=("${!arg}") printf "$arg: [" printf "'%s'" "${array[0]}" printf ", '%s'" "${array[@]:1}" echo "]" done } complete -o bashdefault -o default -F _bash_completion_spack spack # Completion for spacktivate complete -o bashdefault -o default -F _bash_completion_spack spacktivate _spacktivate() { _spack_env_activate } # Spack commands # # Everything below here is auto-generated. 
_spack() { if $list_options then SPACK_COMPREPLY="-h --help -H --all-help --color -C --config-scope -d --debug --timestamp --pdb -e --env -D --env-dir -E --no-env --use-env-repo -k --insecure -l --enable-locks -L --disable-locks -m --mock -p --profile --sorted-profile --lines -v --verbose --stacktrace -V --version --print-shell-vars" else SPACK_COMPREPLY="activate add arch blame build-env buildcache cd checksum ci clean clone commands compiler compilers concretize config containerize create deactivate debug dependencies dependents deprecate dev-build docs edit env extensions external fetch find flake8 gc gpg graph help info install license list load location log-parse maintainers mirror module patch pkg providers pydoc python reindex remove rm repo resource restage setup spec stage test uninstall unload url verify versions view" fi } _spack_activate() { if $list_options then SPACK_COMPREPLY="-h --help -f --force -v --view" else _installed_packages fi } _spack_add() { if $list_options then SPACK_COMPREPLY="-h --help -l --list-name" else _all_packages fi } _spack_arch() { SPACK_COMPREPLY="-h --help --known-targets -p --platform -o --operating-system -t --target -f --frontend -b --backend" } _spack_blame() { if $list_options then SPACK_COMPREPLY="-h --help -t --time -p --percent -g --git" else _all_packages fi } _spack_build_env() { if $list_options then SPACK_COMPREPLY="-h --help --clean --dirty --dump --pickle" else _all_packages fi } _spack_buildcache() { if $list_options then SPACK_COMPREPLY="-h --help" else SPACK_COMPREPLY="create install list keys preview check download get-buildcache-name save-yaml copy update-index" fi } _spack_buildcache_create() { if $list_options then SPACK_COMPREPLY="-h --help -r --rel -f --force -u --unsigned -a --allow-root -k --key -d --directory -m --mirror-name --mirror-url --rebuild-index -y --spec-yaml --only" else _all_packages fi } _spack_buildcache_install() { if $list_options then SPACK_COMPREPLY="-h --help -f --force -m 
--multiple -a --allow-root -u --unsigned -o --otherarch" else _all_packages fi } _spack_buildcache_list() { if $list_options then SPACK_COMPREPLY="-h --help -l --long -L --very-long -v --variants -a --allarch" else _all_packages fi } _spack_buildcache_keys() { SPACK_COMPREPLY="-h --help -i --install -t --trust -f --force" } _spack_buildcache_preview() { if $list_options then SPACK_COMPREPLY="-h --help" else _installed_packages fi } _spack_buildcache_check() { SPACK_COMPREPLY="-h --help -m --mirror-url -o --output-file --scope -s --spec -y --spec-yaml --rebuild-on-error" } _spack_buildcache_download() { SPACK_COMPREPLY="-h --help -s --spec -y --spec-yaml -p --path -c --require-cdashid" } _spack_buildcache_get_buildcache_name() { SPACK_COMPREPLY="-h --help -s --spec -y --spec-yaml" } _spack_buildcache_save_yaml() { SPACK_COMPREPLY="-h --help --root-spec --root-spec-yaml -s --specs -y --yaml-dir" } _spack_buildcache_copy() { SPACK_COMPREPLY="-h --help --base-dir --spec-yaml --destination-url" } _spack_buildcache_update_index() { SPACK_COMPREPLY="-h --help -d --mirror-url" } _spack_cd() { if $list_options then SPACK_COMPREPLY="-h --help -m --module-dir -r --spack-root -i --install-dir -p --package-dir -P --packages -s --stage-dir -S --stages -b --build-dir -e --env" else _all_packages fi } _spack_checksum() { if $list_options then SPACK_COMPREPLY="-h --help --keep-stage -b --batch" else _all_packages fi } _spack_ci() { if $list_options then SPACK_COMPREPLY="-h --help" else SPACK_COMPREPLY="generate rebuild" fi } _spack_ci_generate() { SPACK_COMPREPLY="-h --help --output-file --copy-to --spack-repo --spack-ref --optimize --dependencies" } _spack_ci_rebuild() { SPACK_COMPREPLY="-h --help" } _spack_clean() { if $list_options then SPACK_COMPREPLY="-h --help -s --stage -d --downloads -f --failures -m --misc-cache -p --python-cache -a --all" else _all_packages fi } _spack_clone() { if $list_options then SPACK_COMPREPLY="-h --help -r --remote" else SPACK_COMPREPLY="" fi } 
# spack shell-completion reply tables.
# Each _spack_<cmd> function fills SPACK_COMPREPLY: with the subcommand's flag
# list when $list_options evaluates true (cursor is on a dash-option), or with
# positional-argument candidates otherwise (literal subcommand names, or a
# helper such as _all_packages / _installed_compilers that emits them).
_spack_commands() { if $list_options; then SPACK_COMPREPLY="-h --help --update-completion -a --aliases --format --header --update"; else SPACK_COMPREPLY=""; fi; }
_spack_compiler() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY="find add remove rm list info"; fi; }
_spack_compiler_find() { if $list_options; then SPACK_COMPREPLY="-h --help --scope"; else SPACK_COMPREPLY=""; fi; }
_spack_compiler_add() { if $list_options; then SPACK_COMPREPLY="-h --help --scope"; else SPACK_COMPREPLY=""; fi; }
_spack_compiler_remove() { if $list_options; then SPACK_COMPREPLY="-h --help -a --all --scope"; else _installed_compilers; fi; }
_spack_compiler_rm() { if $list_options; then SPACK_COMPREPLY="-h --help -a --all --scope"; else _installed_compilers; fi; }
_spack_compiler_list() { SPACK_COMPREPLY="-h --help --scope"; }
_spack_compiler_info() { if $list_options; then SPACK_COMPREPLY="-h --help --scope"; else _installed_compilers; fi; }
_spack_compilers() { SPACK_COMPREPLY="-h --help --scope"; }
_spack_concretize() { SPACK_COMPREPLY="-h --help -f --force"; }
_spack_config() { if $list_options; then SPACK_COMPREPLY="-h --help --scope"; else SPACK_COMPREPLY="get blame edit list add remove rm update revert"; fi; }
_spack_config_get() { if $list_options; then SPACK_COMPREPLY="-h --help"; else _config_sections; fi; }
_spack_config_blame() { if $list_options; then SPACK_COMPREPLY="-h --help"; else _config_sections; fi; }
_spack_config_edit() { if $list_options; then SPACK_COMPREPLY="-h --help --print-file"; else _config_sections; fi; }
_spack_config_list() { SPACK_COMPREPLY="-h --help"; }
_spack_config_add() { if $list_options; then SPACK_COMPREPLY="-h --help -f --file"; else SPACK_COMPREPLY=""; fi; }
_spack_config_remove() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY=""; fi; }
_spack_config_rm() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY=""; fi; }
_spack_config_update() { if $list_options; then SPACK_COMPREPLY="-h --help -y --yes-to-all"; else _config_sections; fi; }
_spack_config_revert() { if $list_options; then SPACK_COMPREPLY="-h --help -y --yes-to-all"; else _config_sections; fi; }
_spack_containerize() { SPACK_COMPREPLY="-h --help"; }
_spack_create() { if $list_options; then SPACK_COMPREPLY="-h --help --keep-stage -n --name -t --template -r --repo -N --namespace -f --force --skip-editor -b --batch"; else SPACK_COMPREPLY=""; fi; }
_spack_deactivate() { if $list_options; then SPACK_COMPREPLY="-h --help -f --force -v --view -a --all"; else _installed_packages; fi; }
_spack_debug() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY="create-db-tarball report"; fi; }
_spack_debug_create_db_tarball() { SPACK_COMPREPLY="-h --help"; }
_spack_debug_report() { SPACK_COMPREPLY="-h --help"; }
_spack_dependencies() { if $list_options; then SPACK_COMPREPLY="-h --help -i --installed -t --transitive --deptype -V --no-expand-virtuals"; else _all_packages; fi; }
_spack_dependents() { if $list_options; then SPACK_COMPREPLY="-h --help -i --installed -t --transitive"; else _all_packages; fi; }
_spack_deprecate() { if $list_options; then SPACK_COMPREPLY="-h --help -y --yes-to-all -d --dependencies -D --no-dependencies -i --install-deprecator -I --no-install-deprecator -l --link-type"; else _all_packages; fi; }
_spack_dev_build() { if $list_options; then SPACK_COMPREPLY="-h --help -j --jobs -d --source-path -i --ignore-dependencies -n --no-checksum --keep-prefix --skip-patch -q --quiet --drop-in -b --before -u --until --clean --dirty"; else _all_packages; fi; }
_spack_docs() { SPACK_COMPREPLY="-h --help"; }
_spack_edit() { if $list_options; then SPACK_COMPREPLY="-h --help -b --build-system -c --command -d --docs -t --test -m --module -r --repo -N --namespace"; else _all_packages; fi; }
_spack_env() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY="activate deactivate create remove rm list ls status st loads view update revert"; fi; }
_spack_env_activate() { if $list_options; then SPACK_COMPREPLY="-h --help --sh --csh --fish -v --with-view -V --without-view -d --dir -p --prompt"; else _environments; fi; }
_spack_env_deactivate() { SPACK_COMPREPLY="-h --help --sh --csh --fish"; }
_spack_env_create() { if $list_options; then SPACK_COMPREPLY="-h --help -d --dir --without-view --with-view"; else _environments; fi; }
_spack_env_remove() { if $list_options; then SPACK_COMPREPLY="-h --help -y --yes-to-all"; else _environments; fi; }
_spack_env_rm() { if $list_options; then SPACK_COMPREPLY="-h --help -y --yes-to-all"; else _environments; fi; }
_spack_env_list() { SPACK_COMPREPLY="-h --help"; }
_spack_env_ls() { SPACK_COMPREPLY="-h --help"; }
_spack_env_status() { SPACK_COMPREPLY="-h --help"; }
_spack_env_st() { SPACK_COMPREPLY="-h --help"; }
_spack_env_loads() { if $list_options; then SPACK_COMPREPLY="-h --help -m --module-type --input-only -p --prefix -x --exclude -r --dependencies"; else _environments; fi; }
_spack_env_view() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY=""; fi; }
_spack_env_update() { if $list_options; then SPACK_COMPREPLY="-h --help -y --yes-to-all"; else _environments; fi; }
_spack_env_revert() { if $list_options; then SPACK_COMPREPLY="-h --help -y --yes-to-all"; else _environments; fi; }
_spack_extensions() { if $list_options; then SPACK_COMPREPLY="-h --help -l --long -L --very-long -d --deps -p --paths -s --show -v --view"; else _extensions; fi; }
_spack_external() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY="find list"; fi; }
_spack_external_find() { if $list_options; then SPACK_COMPREPLY="-h --help --not-buildable --scope"; else _all_packages; fi; }
_spack_external_list() { SPACK_COMPREPLY="-h --help"; }
_spack_fetch() { if $list_options; then SPACK_COMPREPLY="-h --help -n --no-checksum -m --missing -D --dependencies"; else _all_packages; fi; }
_spack_find() { if $list_options; then SPACK_COMPREPLY="-h --help --format --json -d --deps -p --paths --groups --no-groups -l --long -L --very-long -t --tags -c --show-concretized -f --show-flags --show-full-compiler -x --explicit -X --implicit -u --unknown -m --missing -v --variants --loaded -M --only-missing --deprecated --only-deprecated -N --namespace --start-date --end-date"; else _installed_packages; fi; }
_spack_flake8() { if $list_options; then SPACK_COMPREPLY="-h --help -b --base -k --keep-temp -a --all -o --output -r --root-relative -U --no-untracked"; else SPACK_COMPREPLY=""; fi; }
_spack_gc() { SPACK_COMPREPLY="-h --help -y --yes-to-all"; }
_spack_gpg() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY="verify trust untrust sign create list init export"; fi; }
_spack_gpg_verify() { if $list_options; then SPACK_COMPREPLY="-h --help"; else _installed_packages; fi; }
_spack_gpg_trust() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY=""; fi; }
_spack_gpg_untrust() { if $list_options; then SPACK_COMPREPLY="-h --help --signing"; else _keys; fi; }
_spack_gpg_sign() { if $list_options; then SPACK_COMPREPLY="-h --help --output --key --clearsign"; else _installed_packages; fi; }
_spack_gpg_create() { if $list_options; then SPACK_COMPREPLY="-h --help --comment --expires --export"; else SPACK_COMPREPLY=""; fi; }
_spack_gpg_list() { SPACK_COMPREPLY="-h --help --trusted --signing"; }
_spack_gpg_init() { SPACK_COMPREPLY="-h --help --from"; }
_spack_gpg_export() { if $list_options; then SPACK_COMPREPLY="-h --help"; else _keys; fi; }
_spack_graph() { if $list_options; then SPACK_COMPREPLY="-h --help -a --ascii -d --dot -s --static -i --installed --deptype"; else _all_packages; fi; }
_spack_help() { if $list_options; then SPACK_COMPREPLY="-h --help -a --all --spec"; else _subcommands; fi; }
_spack_info() { if $list_options; then SPACK_COMPREPLY="-h --help"; else _all_packages; fi; }
_spack_install() { if $list_options; then SPACK_COMPREPLY="-h --help --only -u --until -j --jobs --overwrite --fail-fast --keep-prefix --keep-stage --dont-restage --use-cache --no-cache --cache-only --no-check-signature --show-log-on-error --source -n --no-checksum -v --verbose --fake --only-concrete -f --file --clean --dirty --test --run-tests --log-format --log-file --help-cdash -y --yes-to-all --cdash-upload-url --cdash-build --cdash-site --cdash-track --cdash-buildstamp"; else _all_packages; fi; }
_spack_license() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY="list-files verify"; fi; }
_spack_license_list_files() { SPACK_COMPREPLY="-h --help"; }
_spack_license_verify() { SPACK_COMPREPLY="-h --help --root"; }
_spack_list() { if $list_options; then SPACK_COMPREPLY="-h --help -d --search-description --format --update -t --tags"; else _all_packages; fi; }
_spack_load() { if $list_options; then SPACK_COMPREPLY="-h --help -r --dependencies --sh --csh --fish --first --only"; else _installed_packages; fi; }
_spack_location() { if $list_options; then SPACK_COMPREPLY="-h --help -m --module-dir -r --spack-root -i --install-dir -p --package-dir -P --packages -s --stage-dir -S --stages -b --build-dir -e --env"; else _all_packages; fi; }
_spack_log_parse() { if $list_options; then SPACK_COMPREPLY="-h --help --show -c --context -p --profile -w --width -j --jobs"; else SPACK_COMPREPLY=""; fi; }
_spack_maintainers() { if $list_options; then SPACK_COMPREPLY="-h --help --maintained --unmaintained -a --all --by-user"; else _all_packages; fi; }
_spack_mirror() { if $list_options; then SPACK_COMPREPLY="-h --help -n --no-checksum"; else SPACK_COMPREPLY="create add remove rm set-url list"; fi; }
_spack_mirror_create() { if $list_options; then SPACK_COMPREPLY="-h --help -d --directory -a --all -f --file --exclude-file --exclude-specs --skip-unstable-versions -D --dependencies -n --versions-per-spec"; else _all_packages; fi; }
_spack_mirror_add() { if $list_options; then SPACK_COMPREPLY="-h --help --scope"; else _mirrors; fi; }
_spack_mirror_remove() { if $list_options; then SPACK_COMPREPLY="-h --help --scope"; else _mirrors; fi; }
_spack_mirror_rm() { if $list_options; then SPACK_COMPREPLY="-h --help --scope"; else _mirrors; fi; }
_spack_mirror_set_url() { if $list_options; then SPACK_COMPREPLY="-h --help --push --scope"; else _mirrors; fi; }
_spack_mirror_list() { SPACK_COMPREPLY="-h --help --scope"; }
_spack_module() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY="lmod tcl"; fi; }
_spack_module_lmod() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY="refresh find rm loads setdefault"; fi; }
_spack_module_lmod_refresh() { if $list_options; then SPACK_COMPREPLY="-h --help --delete-tree --upstream-modules -y --yes-to-all"; else _installed_packages; fi; }
_spack_module_lmod_find() { if $list_options; then SPACK_COMPREPLY="-h --help --full-path -r --dependencies"; else _installed_packages; fi; }
_spack_module_lmod_rm() { if $list_options; then SPACK_COMPREPLY="-h --help -y --yes-to-all"; else _installed_packages; fi; }
_spack_module_lmod_loads() { if $list_options; then SPACK_COMPREPLY="-h --help --input-only -p --prefix -x --exclude -r --dependencies"; else _installed_packages; fi; }
_spack_module_lmod_setdefault() { if $list_options; then SPACK_COMPREPLY="-h --help"; else _installed_packages; fi; }
_spack_module_tcl() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY="refresh find rm loads"; fi; }
_spack_module_tcl_refresh() { if $list_options; then SPACK_COMPREPLY="-h --help --delete-tree --upstream-modules -y --yes-to-all"; else _installed_packages; fi; }
_spack_module_tcl_find() { if $list_options; then SPACK_COMPREPLY="-h --help --full-path -r --dependencies"; else _installed_packages; fi; }
_spack_module_tcl_rm() { if $list_options; then SPACK_COMPREPLY="-h --help -y --yes-to-all"; else _installed_packages; fi; }
_spack_module_tcl_loads() { if $list_options; then SPACK_COMPREPLY="-h --help --input-only -p --prefix -x --exclude -r --dependencies"; else _installed_packages; fi; }
_spack_patch() { if $list_options; then SPACK_COMPREPLY="-h --help -n --no-checksum"; else _all_packages; fi; }
_spack_pkg() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY="add list diff added changed removed"; fi; }
_spack_pkg_add() { if $list_options; then SPACK_COMPREPLY="-h --help"; else _all_packages; fi; }
_spack_pkg_list() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY=""; fi; }
_spack_pkg_diff() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY=""; fi; }
_spack_pkg_added() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY=""; fi; }
_spack_pkg_changed() { if $list_options; then SPACK_COMPREPLY="-h --help -t --type"; else SPACK_COMPREPLY=""; fi; }
_spack_pkg_removed() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY=""; fi; }
_spack_providers() { if $list_options; then SPACK_COMPREPLY="-h --help"; else _providers; fi; }
_spack_pydoc() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY=""; fi; }
_spack_python() { if $list_options; then SPACK_COMPREPLY="-h --help -V --version -c -m"; else SPACK_COMPREPLY=""; fi; }
_spack_reindex() { SPACK_COMPREPLY="-h --help"; }
_spack_remove() { if $list_options; then SPACK_COMPREPLY="-h --help -a --all -l --list-name -f --force"; else _all_packages; fi; }
_spack_rm() { if $list_options; then SPACK_COMPREPLY="-h --help -a --all -l --list-name -f --force"; else _all_packages; fi; }
_spack_repo() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY="create list add remove rm"; fi; }
_spack_repo_create() { if $list_options; then SPACK_COMPREPLY="-h --help"; else _repos; fi; }
_spack_repo_list() { SPACK_COMPREPLY="-h --help --scope"; }
_spack_repo_add() { if $list_options; then SPACK_COMPREPLY="-h --help --scope"; else SPACK_COMPREPLY=""; fi; }
_spack_repo_remove() { if $list_options; then SPACK_COMPREPLY="-h --help --scope"; else _repos; fi; }
_spack_repo_rm() { if $list_options; then SPACK_COMPREPLY="-h --help --scope"; else _repos; fi; }
_spack_resource() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY="list show"; fi; }
_spack_resource_list() { SPACK_COMPREPLY="-h --help --only-hashes"; }
_spack_resource_show() { if $list_options; then SPACK_COMPREPLY="-h --help"; else _all_resource_hashes; fi; }
_spack_restage() { if $list_options; then SPACK_COMPREPLY="-h --help"; else _all_packages; fi; }
_spack_setup() { if $list_options; then SPACK_COMPREPLY="-h --help -i --ignore-dependencies -n --no-checksum -v --verbose --clean --dirty"; else _all_packages; fi; }
_spack_spec() { if $list_options; then SPACK_COMPREPLY="-h --help -l --long -L --very-long -I --install-status -y --yaml -j --json -c --cover -N --namespaces -t --types"; else _all_packages; fi; }
_spack_stage() { if $list_options; then SPACK_COMPREPLY="-h --help -n --no-checksum -p --path"; else _all_packages; fi; }
_spack_test() { if $list_options; then SPACK_COMPREPLY="-h --help -H --pytest-help -l --list -L --list-long -N --list-names --extension -s -k --showlocals"; else _tests; fi; }
_spack_uninstall() { if $list_options; then SPACK_COMPREPLY="-h --help -f --force -R --dependents -y --yes-to-all -a --all"; else _installed_packages; fi; }
_spack_unload() { if $list_options; then SPACK_COMPREPLY="-h --help --sh --csh --fish -a --all"; else _installed_packages; fi; }
_spack_url() { if $list_options; then SPACK_COMPREPLY="-h --help"; else SPACK_COMPREPLY="parse list summary stats"; fi; }
_spack_url_parse() { if $list_options; then SPACK_COMPREPLY="-h --help -s --spider"; else SPACK_COMPREPLY=""; fi; }
_spack_url_list() { SPACK_COMPREPLY="-h --help -c --color -e --extrapolation -n --incorrect-name -N --correct-name -v --incorrect-version -V --correct-version"; }
_spack_url_summary() { SPACK_COMPREPLY="-h --help"; }
_spack_url_stats() { SPACK_COMPREPLY="-h --help"; }
_spack_verify() { if $list_options; then SPACK_COMPREPLY="-h --help -l --local -j --json -a --all -s --specs -f --files"; else _all_packages; fi; }
_spack_versions() { if $list_options; then SPACK_COMPREPLY="-h --help -s --safe-only -c --concurrency"; else _all_packages; fi; }
_spack_view() { if $list_options; then SPACK_COMPREPLY="-h --help -v --verbose -e --exclude -d --dependencies"; else SPACK_COMPREPLY="symlink add soft hardlink hard copy relocate remove rm statlink status check"; fi; }
_spack_view_symlink() { if $list_options; then SPACK_COMPREPLY="-h --help --projection-file -i --ignore-conflicts"; else _all_packages; fi; }
_spack_view_add() { if $list_options; then SPACK_COMPREPLY="-h --help --projection-file -i --ignore-conflicts"; else _all_packages; fi; }
_spack_view_soft() { if $list_options; then SPACK_COMPREPLY="-h --help --projection-file -i --ignore-conflicts"; else _all_packages; fi; }
_spack_view_hardlink() { if $list_options; then SPACK_COMPREPLY="-h --help --projection-file -i --ignore-conflicts"; else _all_packages; fi; }
_spack_view_hard() { if $list_options; then SPACK_COMPREPLY="-h --help --projection-file -i --ignore-conflicts"; else _all_packages; fi; }
_spack_view_copy() { if $list_options; then SPACK_COMPREPLY="-h --help --projection-file -i --ignore-conflicts"; else _all_packages; fi; }
_spack_view_relocate() { if $list_options; then SPACK_COMPREPLY="-h --help --projection-file -i --ignore-conflicts"; else _all_packages; fi; }
_spack_view_remove() { if $list_options; then SPACK_COMPREPLY="-h --help --no-remove-dependents -a --all"; else _all_packages; fi; }
_spack_view_rm() { if $list_options; then SPACK_COMPREPLY="-h --help --no-remove-dependents -a --all"; else _all_packages; fi; }
_spack_view_statlink() { if $list_options; then SPACK_COMPREPLY="-h --help"; else _all_packages; fi; }
_spack_view_status() { if $list_options; then SPACK_COMPREPLY="-h --help"; else _all_packages; fi; }
_spack_view_check() { if $list_options; then SPACK_COMPREPLY="-h --help"; else _all_packages; fi; }
// frontend/src/router/routes/routes.js
//
// Route table for the frontend router. Each entry maps a path to a
// lazily-loaded view component (code-split via React.lazy + dynamic import).
import { lazy } from "react";

// Views are loaded on demand the first time their route is rendered.
const Home = lazy(() => import("../../views/home/home.jsx"));
const Main = lazy(() => import("../../views/main/main.jsx"));

// path: URL pattern; component: view to render; title: document/tab title;
// exact: require a full-path match; name: route identifier.
const Routes = [
  { path: "/", component: Home, title: "首页", exact: true, name: "home" },
  { path: "/home", component: Home, title: "首页", exact: true, name: "home" },
  { path: "/main", component: Main, title: "主页", exact: true, name: "main" },
];

export default Routes;
#!/bin/sh
#
# create_container.sh — prepare host-side files/directories for a dockerized
# Minecraft server and parse the container-creation options.
# Options: -m max memory, -n container name, -i docker image tag, -t server type.

#create missing files...
# -p makes directory creation idempotent; files are only written when absent,
# so existing server settings are never clobbered on re-runs.  (The previous
# version chained everything on `mkdir settings/` succeeding, so missing files
# were never recreated once the directory existed.)
mkdir -p settings/ world/ logs/ crash-reports/
[ -f settings/ops.json ]            || echo "[]" > settings/ops.json
touch settings/server.properties
[ -f settings/whitelist.json ]      || echo "[]" > settings/whitelist.json
[ -f settings/banned-ips.json ]     || echo "[]" > settings/banned-ips.json
[ -f settings/eula.txt ]            || echo "eula=true" > settings/eula.txt
[ -f settings/banned-players.json ] || echo "[]" > settings/banned-players.json

#must be the same as settings/server.properties.level-name...
LEVEL_NAME=world

while getopts "m:n:i:t:" opt; do
    case $opt in
        m ) MAX_MEMORY=$OPTARG ;;
        n ) NAME=$OPTARG ;;
        i ) IMAGE_TAG=$OPTARG ;;
        t ) TYPE=$OPTARG ;;
        * )
            echo "Usage: create_container [-m] (max memory) [-n] (container name) [-i] (docker image tag) [-t] (type)"
            # fixed example: the container name belongs to -n, not after -t's value
            echo "Example: create_container -m 3GB -n minecraft_test -t spigot -i minecraft-server:test"
            exit 1
            ;;
    esac
done

#set default values...
[ -z "$MAX_MEMORY" ] && MAX_MEMORY=2GB
if [ -z "$NAME" ]; then
    # nanosecond timestamp keeps auto-generated container names unique
    ID=$(date +%N)
    NAME=minecraft_server_$ID
fi
if [ -z "$IMAGE_TAG" ]; then
    # fixed typo ("iamge") and wrong script name ("build_image.sh") in messages
    echo "No docker image specified for create_container.sh!"
    echo "Usage: create_container.sh -i (docker image tag)"
    exit 1
fi
[ -z "$TYPE" ] && TYPE=vanilla

#create generic command...
#Assemble the generic `docker run` command (bind-mounting logs, world and settings), then append server-type-specific mounts (vanilla/forge/spigot/paper) and finally launch the container with the requested image tag.
CMD="docker run -d \ --name $NAME \ -p 25565:25565 \ -p 25575:25575 \ --memory $MAX_MEMORY \ --restart on-failure \ --mount type=bind,source=$PWD/logs,target=/minecraft/logs \ --mount type=bind,source=$PWD/world,target=/minecraft/$LEVEL_NAME \ --mount type=bind,source=$PWD/settings/eula.txt,target=/minecraft/eula.txt \ --mount type=bind,source=$PWD/settings/ops.json,target=/minecraft/ops.json \ --mount type=bind,source=$PWD/crash-reports/,target=/minecraft/crash-reports \ --mount type=bind,source=$PWD/settings/whitelist.json,target=/minecraft/whitelist.json \ --mount type=bind,source=$PWD/settings/banned-ips.json,target=/minecraft/banned-ips.json \ --mount type=bind,source=$PWD/settings/server.properties,target=/minecraft/server.properties \ --mount type=bind,source=$PWD/settings/banned-players.json,target=/minecraft/banned-players.json" #add the specific mounts... case "$TYPE" in vanilla ) ;; forge ) CMD="$CMD \ --mount type=bind,source=$PWD/mods,target=/minecraft/mods \ --mount type=bind,source=$PWD/settings,target=/minecraft/config" mkdir mods/ > /dev/null 2>&1 ;; spigot ) CMD="$CMD \ --mount type=bind,source=$PWD/settings/bukkit.yml,target=/minecraft/bukkit.yml \ --mount type=bind,source=$PWD/settings/spigot.yml,target=/minecraft/spigot.yml \ --mount type=bind,source=$PWD/settings/commands.yml,target=/minecraft/commands.yml \ --mount type=bind,source=$PWD/plugins,target=/minecraft/plugins" touch settings/bukkit.yml touch settings/spigot.yml touch settings/commands.yml mkdir plugins/ > /dev/null 2>&1 ;; paper ) CMD="$CMD \ --mount type=bind,source=$PWD/settings/paper.yml,target=/minecraft/paper.yml \ --mount type=bind,source=$PWD/settings/permissions.yml,target=/minecraft/permissions.yml \ --mount type=bind,source=$PWD/settings/help.yml,target=/minecraft/help.yml \ --mount type=bind,source=$PWD/settings/bukkit.yml,target=/minecraft/bukkit.yml \ --mount type=bind,source=$PWD/settings/spigot.yml,target=/minecraft/spigot.yml \ --mount
type=bind,source=$PWD/settings/commands.yml,target=/minecraft/commands.yml \ --mount type=bind,source=$PWD/plugins,target=/minecraft/plugins" touch settings/bukkit.yml touch settings/spigot.yml touch settings/commands.yml touch settings/paper.yml touch settings/permissions.yml touch settings/help.yml mkdir plugins/ > /dev/null 2>&1 ;; * ) echo "$TYPE is an unsupported server type!" exit 1 ;; esac #start the container... $CMD "$IMAGE_TAG" \ && echo "Your container's name is \e[1;31m$NAME\e[0m" \
/***************************************************************************** * Licensed to Qualys, Inc. (QUALYS) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * QUALYS licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ****************************************************************************/ /** * @file * @brief IronBee --- State Notification Implementation */ #include "ironbee_config_auto.h" #include <ironbee/state_notify.h> #include "state_notify_private.h" #include "engine_private.h" #include <ironbee/context.h> #include <ironbee/dso.h> #include <ironbee/engine.h> #include <ironbee/engine_state.h> #include <ironbee/field.h> #include <ironbee/flags.h> #include <ironbee/log.h> #include <ironbee/mm_mpool_lite.h> #include <assert.h> /** * Generate and log a message about a hook function returning an error. * * An error is any return value that is not IB_OK or IB_DECLINED. * * @param[in] ib The engine to log through. * @param[in] state The state being processed. * @param[in] hook_rc The return code signaling the failure. * @param[in] hook_fn A pointer to the callback hook. This will * be resolved to a symbol, if possible. 
* */ static void log_hook_failure( ib_engine_t *ib, ib_state_t state, ib_status_t hook_rc, void *hook_fn ) { const char *hook_file = NULL; const char *hook_symbol = NULL; ib_status_t rc; ib_mpool_lite_t *mp = NULL; ib_mm_t mm; /* Construct memory pool. */ rc = ib_mpool_lite_create(&mp); if (rc != IB_OK) { goto no_mm_log; } mm = ib_mm_mpool_lite(mp); rc = ib_dso_sym_name_find(&hook_file, &hook_symbol, mm, hook_fn); if (rc != IB_OK) { hook_file = "[unavailable]"; hook_symbol = "[unavailable]"; } if (hook_file == NULL) { hook_file = ""; } if (hook_symbol == NULL) { hook_symbol = ""; } ib_log_notice( ib, "Hook %s from %s failed during state %s: %s", hook_symbol, hook_file, ib_state_name(state), ib_status_to_string(rc) ); ib_mpool_lite_destroy(mp); return; no_mm_log: ib_log_notice( ib, "Hook failed during state %s: %s", ib_state_name(state), ib_status_to_string(rc) ); } static ib_status_t ib_state_notify_null( ib_engine_t *ib, ib_state_t state ) { assert(ib != NULL); const ib_list_node_t *node; ib_status_t rc = ib_hook_check(ib, state, IB_STATE_HOOK_NULL); if (rc != IB_OK) { ib_log_error(ib, "Error checking hook for \"%s\": %s", ib_state_name(state), ib_status_to_string(rc)); return rc; } ib_log_debug3(ib, "NULL EVENT: %s", ib_state_name(state)); IB_LIST_LOOP_CONST(ib->hooks[state], node) { const ib_hook_t *hook = (const ib_hook_t *)node->data; rc = hook->callback.null(ib, state, hook->cbdata); if (rc == IB_DECLINED) { ib_log_debug(ib, "Hook declined: %s", ib_state_name(state)); } else if (rc != IB_OK) { log_hook_failure(ib, state, rc, hook->callback.null); return rc; } } return IB_OK; } static ib_status_t ib_state_notify_context( ib_engine_t *ib, ib_context_t *ctx, ib_state_t state ) { assert(ib != NULL); assert(ctx != NULL); const ib_list_node_t *node; ib_status_t rc = ib_hook_check(ib, state, IB_STATE_HOOK_CTX); if (rc != IB_OK) { ib_log_error(ib, "Error checking hook for \"%s\": %s", ib_state_name(state), ib_status_to_string(rc)); return rc; } ib_log_debug3(ib, "CTX 
EVENT: %s", ib_state_name(state)); IB_LIST_LOOP_CONST(ib->hooks[state], node) { const ib_hook_t *hook = (const ib_hook_t *)node->data; rc = hook->callback.ctx(ib, ctx, state, hook->cbdata); if (rc == IB_DECLINED) { ib_log_debug(ib, "Hook declined: %s", ib_state_name(state)); } else if (rc != IB_OK) { log_hook_failure(ib, state, rc, hook->callback.ctx); return rc; } } return IB_OK; } static ib_status_t ib_state_notify_conn( ib_engine_t *ib, ib_conn_t *conn, ib_state_t state ) { assert(ib != NULL); assert(ib->cfg_state == CFG_FINISHED); assert(conn != NULL); const ib_list_node_t *node; ib_status_t rc = ib_hook_check(ib, state, IB_STATE_HOOK_CONN); if (rc != IB_OK) { ib_log_error(ib, "Error checking hook for \"%s\": %s", ib_state_name(state), ib_status_to_string(rc)); return rc; } ib_log_debug3(ib, "CONN EVENT: %s", ib_state_name(state)); if (conn->ctx == NULL) { ib_log_notice(ib, "Connection context is null."); } IB_LIST_LOOP_CONST(ib->hooks[state], node) { const ib_hook_t *hook = (const ib_hook_t *)node->data; rc = hook->callback.conn(ib, conn, state, hook->cbdata); if (rc == IB_DECLINED) { ib_log_debug(ib, "Hook declined: %s", ib_state_name(state)); } else if (rc != IB_OK) { log_hook_failure(ib, state, rc, hook->callback.conn); } } return IB_OK; } static ib_status_t ib_state_notify_req_line( ib_engine_t *ib, ib_tx_t *tx, ib_state_t state, ib_parsed_req_line_t *line ) { assert(ib != NULL); assert(ib->cfg_state == CFG_FINISHED); assert(tx != NULL); assert(line != NULL); assert(line->raw != NULL); assert(line->method != NULL); assert(line->uri != NULL); assert(line->protocol != NULL); const ib_list_node_t *node; ib_status_t rc; rc = ib_hook_check(ib, state, IB_STATE_HOOK_REQLINE); if (rc != IB_OK) { ib_log_error_tx(tx, "Error checking hook for \"%s\": %s", ib_state_name(state), ib_status_to_string(rc)); return rc; } /* Is this a HTTP/0.9 request (has no protocol specification)? 
*/ if (ib_bytestr_length(line->protocol) == 0) { ib_tx_flags_set(tx, IB_TX_FHTTP09); } tx->request_line = line; if (tx->ctx == NULL) { ib_log_notice_tx(tx, "Connection context is null."); } IB_LIST_LOOP_CONST(ib->hooks[state], node) { const ib_hook_t *hook = (const ib_hook_t *)node->data; rc = hook->callback.requestline(ib, tx, state, line, hook->cbdata); if (rc == IB_DECLINED) { ib_log_debug_tx(tx, "Hook declined: %s", ib_state_name(state)); } else if (rc != IB_OK) { log_hook_failure(ib, state, rc, hook->callback.requestline); } } return IB_OK; } static ib_status_t ib_state_notify_resp_line(ib_engine_t *ib, ib_tx_t *tx, ib_state_t state, ib_parsed_resp_line_t *line) { assert(ib != NULL); assert(ib->cfg_state == CFG_FINISHED); assert(tx != NULL); assert((line == NULL) || (line->raw != NULL)); assert((line == NULL) || (line->protocol != NULL)); assert((line == NULL) || (line->status != NULL)); assert((line == NULL) || (line->msg != NULL)); const ib_list_node_t *node; ib_status_t rc; rc = ib_hook_check(ib, state, IB_STATE_HOOK_RESPLINE); if (rc != IB_OK) { ib_log_error_tx(tx, "Error checking hook for \"%s\": %s", ib_state_name(state), ib_status_to_string(rc)); return rc; } /* Validate response line data. * * The response line may be NULL only for HTTP/0.9 requests * which contain neither a line nor headers. 
*/ if ((line == NULL) && !ib_flags_all(tx->flags, IB_TX_FHTTP09)) { ib_log_notice_tx(tx, "Invalid response line."); return IB_OK; } tx->response_line = line; if (tx->ctx == NULL) { ib_log_notice_tx(tx, "Connection context is null."); } IB_LIST_LOOP_CONST(ib->hooks[state], node) { const ib_hook_t *hook = (const ib_hook_t *)node->data; rc = hook->callback.responseline(ib, tx, state, line, hook->cbdata); if (rc == IB_DECLINED) { ib_log_debug_tx(tx, "Hook declined: %s", ib_state_name(state)); } else if (rc != IB_OK) { log_hook_failure(ib, state, rc, hook->callback.responseline); } } return IB_OK; } static ib_status_t ib_state_notify_tx(ib_engine_t *ib, ib_state_t state, ib_tx_t *tx) { assert(ib != NULL); assert(ib->cfg_state == CFG_FINISHED); assert(tx != NULL); const ib_list_node_t *node; ib_status_t rc = ib_hook_check(ib, state, IB_STATE_HOOK_TX); if (rc != IB_OK) { return rc; } ib_log_debug3_tx(tx, "TX EVENT: %s", ib_state_name(state)); /* This transaction is now the current (for pipelined). */ tx->conn->tx = tx; if (tx->ctx == NULL) { ib_log_notice_tx(tx, "Connection context is null."); } IB_LIST_LOOP_CONST(ib->hooks[state], node) { const ib_hook_t *hook = (const ib_hook_t *)node->data; rc = hook->callback.tx(ib, tx, state, hook->cbdata); if (rc == IB_DECLINED) { ib_log_debug_tx(tx, "Hook declined: %s", ib_state_name(state)); } else if (rc != IB_OK) { log_hook_failure(ib, state, rc, hook->callback.tx); } } return IB_OK; } ib_status_t ib_state_notify_request_started( ib_engine_t *ib, ib_tx_t *tx, ib_parsed_req_line_t *line) { assert(ib != NULL); assert(tx != NULL); ib_status_t rc; /* Validate. */ if (ib_flags_all(tx->flags, IB_TX_FREQ_STARTED)) { ib_log_error_tx(tx, "Attempted to notify previously notified state: %s", ib_state_name(request_started_state)); return IB_EINVAL; } /* Mark the time. 
*/ tx->t.request_started = ib_clock_get_time(); ib_tx_flags_set(tx, IB_TX_FREQ_STARTED); /* Notify everybody */ rc = ib_state_notify_tx(ib, tx_started_state, tx); if (rc != IB_OK) { return rc; } /* Notify the request line if it's present */ if (line == NULL) { ib_log_info_tx(tx, "Request started with no line."); } else if ( (line->raw == NULL) || (line->method == NULL) || (line->uri == NULL) || (line->protocol == NULL) ) { ib_log_error_tx(tx, "Request started with malformed line."); return IB_EINVAL; } else { ib_tx_flags_set(tx, IB_TX_FREQ_HAS_DATA); rc = ib_state_notify_req_line(ib, tx, request_started_state, line); if (rc != IB_OK) { return rc; } ib_tx_flags_set(tx, IB_TX_FREQ_LINE); } return IB_OK; } ib_status_t ib_state_notify_conn_opened(ib_engine_t *ib, ib_conn_t *conn) { assert(ib != NULL); assert(ib->cfg_state == CFG_FINISHED); assert(conn != NULL); ib_status_t rc; /* Validate. */ if (ib_flags_all(conn->flags, IB_CONN_FOPENED)) { ib_log_error(ib, "Attempted to notify previously notified state: %s", ib_state_name(conn_opened_state)); return IB_EINVAL; } ib_flags_set(conn->flags, IB_CONN_FOPENED); rc = ib_state_notify_conn(ib, conn, conn_started_state); if (rc != IB_OK) { return rc; } rc = ib_state_notify_conn(ib, conn, conn_opened_state); if (rc != IB_OK) { return rc; } /* Select the connection context to use. */ rc = ib_ctxsel_select_context(ib, conn, NULL, &conn->ctx); if (rc != IB_OK) { return rc; } rc = ib_state_notify_conn(ib, conn, handle_context_conn_state); if (rc != IB_OK) { return rc; } rc = ib_state_notify_conn(ib, conn, handle_connect_state); if (rc != IB_OK) { return rc; } return IB_OK; } ib_status_t ib_state_notify_conn_closed(ib_engine_t *ib, ib_conn_t *conn) { assert(ib != NULL); assert(ib->cfg_state == CFG_FINISHED); assert(conn != NULL); ib_status_t rc; /* Validate. */ if (! 
ib_flags_all(conn->flags, IB_CONN_FOPENED)) { ib_log_error(ib, "No connection opened: Ignoring %s", ib_state_name(conn_closed_state)); return IB_EINVAL; } if (ib_flags_all(conn->flags, IB_CONN_FCLOSED)) { ib_log_error(ib, "Attempted to notify previously notified state: %s", ib_state_name(conn_closed_state)); return IB_EINVAL; } /* Notify any pending transaction states on connection close state. */ if (conn->tx != NULL) { ib_tx_t *tx = conn->tx; if ( ib_flags_all(tx->flags, IB_TX_FREQ_STARTED) && !ib_flags_all(tx->flags, IB_TX_FREQ_FINISHED)) { ib_log_debug_tx(tx, "Automatically triggering %s", ib_state_name(request_finished_state)); ib_state_notify_request_finished(ib, tx); } if ( ib_flags_all(tx->flags, IB_TX_FREQ_STARTED) && !ib_flags_all(tx->flags, IB_TX_FRES_STARTED)) { ib_log_debug_tx(tx, "Automatically triggering %s", ib_state_name(response_started_state)); ib_state_notify_response_started(ib, tx, NULL); } if ( ib_flags_all(tx->flags, IB_TX_FRES_STARTED) && !ib_flags_all(tx->flags, IB_TX_FRES_FINISHED)) { ib_log_debug_tx(tx, "Automatically triggering %s", ib_state_name(response_finished_state)); ib_state_notify_response_finished(ib, tx); } if (!ib_flags_all(tx->flags, IB_TX_FPOSTPROCESS)) { ib_log_debug_tx(tx, "Automatically triggering %s", ib_state_name(handle_postprocess_state)); ib_state_notify_postprocess(ib, tx); } if (!ib_flags_all(tx->flags, IB_TX_FLOGGING)) { ib_log_debug_tx(tx, "Automatically triggering %s", ib_state_name(handle_logging_state)); ib_state_notify_logging(ib, tx); } } /* Mark the time. 
*/ conn->t.finished = ib_clock_get_time(); ib_flags_set(conn->flags, IB_CONN_FCLOSED); rc = ib_state_notify_conn(ib, conn, conn_closed_state); if (rc != IB_OK) { return rc; } rc = ib_state_notify_conn(ib, conn, handle_disconnect_state); if (rc != IB_OK) { return rc; } rc = ib_state_notify_conn(ib, conn, conn_finished_state); if (rc != IB_OK) { return rc; } return IB_OK; } static ib_status_t ib_state_notify_header_data(ib_engine_t *ib, ib_tx_t *tx, ib_state_t state, ib_parsed_headers_t *header) { assert(ib != NULL); assert(ib->cfg_state == CFG_FINISHED); assert(tx != NULL); assert(header != NULL); const ib_list_node_t *node; ib_status_t rc = ib_hook_check(ib, state, IB_STATE_HOOK_HEADER); if (rc != IB_OK) { ib_log_error_tx(tx, "Error checking hook for \"%s\": %s", ib_state_name(state), ib_status_to_string(rc)); return rc; } ib_log_debug3_tx(tx, "HEADER EVENT: %s", ib_state_name(state)); if (tx->ctx == NULL) { ib_log_notice_tx(tx, "Connection context is null."); } IB_LIST_LOOP_CONST(ib->hooks[state], node) { const ib_hook_t *hook = (const ib_hook_t *)node->data; rc = hook->callback.headerdata(ib, tx, state, header->head, hook->cbdata); if (rc == IB_DECLINED) { ib_log_debug_tx(tx, "Hook declined: %s", ib_state_name(state)); } else if (rc != IB_OK) { log_hook_failure(ib, state, rc, hook->callback.headerdata); } } return IB_OK; } static ib_status_t ib_state_notify_txdata(ib_engine_t *ib, ib_tx_t *tx, ib_state_t state, const char *data, size_t data_length) { assert(ib != NULL); assert(ib->cfg_state == CFG_FINISHED); assert(tx != NULL); assert(data != NULL); const ib_list_node_t *node; ib_status_t rc = ib_hook_check(ib, state, IB_STATE_HOOK_TXDATA); if (rc != IB_OK) { ib_log_error_tx(tx, "Error checking hook for \"%s\": %s", ib_state_name(state), ib_status_to_string(rc)); return rc; } if (ib_logger_level_get(ib_engine_logger_get(ib)) >= 9) { ib_log_debug3_tx(tx, "TX DATA EVENT: %s", ib_state_name(state)); } /* This transaction is now the current (for pipelined). 
*/ tx->conn->tx = tx; if (tx->ctx == NULL) { ib_log_notice_tx(tx, "Connection context is null."); } IB_LIST_LOOP_CONST(ib->hooks[state], node) { const ib_hook_t *hook = (const ib_hook_t *)node->data; rc = hook->callback.txdata(ib, tx, state, data, data_length, hook->cbdata); if (rc == IB_DECLINED) { ib_log_debug_tx(tx, "Hook declined: %s", ib_state_name(state)); } else if (rc != IB_OK) { log_hook_failure(ib, state, rc, hook->callback.txdata); } } return IB_OK; } ib_status_t ib_state_notify_request_header_data(ib_engine_t *ib, ib_tx_t *tx, ib_parsed_headers_t *header) { assert(ib != NULL); assert(tx != NULL); assert(header != NULL); ib_status_t rc; /* Validate. */ if (! ib_flags_all(tx->flags, IB_TX_FREQ_STARTED)) { ib_log_debug_tx(tx, "No request started: Ignoring %s", ib_state_name(request_header_data_state)); return IB_OK; } if (! ib_flags_all(tx->flags, IB_TX_FREQ_HAS_DATA)) { ib_log_debug_tx(tx, "No request data: Ignoring %s", ib_state_name(request_header_data_state)); return IB_OK; } /* Mark the time. */ if (tx->t.request_started == 0) { tx->t.request_started = ib_clock_get_time(); } if ( tx->request_header == NULL ) { tx->request_header = header; } else { rc = ib_parsed_headers_append(tx->request_header, header); if (rc != IB_OK) { return rc; } } /* Notify the engine and any callbacks of the data. */ rc = ib_state_notify_header_data(ib, tx, request_header_data_state, header); if (rc != IB_OK) { return rc; } return IB_OK; } ib_status_t ib_state_notify_request_header_finished(ib_engine_t *ib, ib_tx_t *tx) { assert(ib != NULL); assert(ib->cfg_state == CFG_FINISHED); assert(tx != NULL); ib_status_t rc; /* Validate. */ if (! ib_flags_all(tx->flags, IB_TX_FREQ_STARTED)) { ib_log_debug_tx(tx, "No request started: Ignoring %s", ib_state_name(request_header_finished_state)); return IB_OK; } if (! 
ib_flags_all(tx->flags, IB_TX_FREQ_HAS_DATA)) { ib_log_debug_tx(tx, "No request data: Ignoring %s", ib_state_name(request_header_finished_state)); return IB_OK; } if (ib_flags_all(tx->flags, IB_TX_FREQ_HEADER)) { ib_log_error_tx(tx, "Attempted to notify previously notified state: %s", ib_state_name(request_header_finished_state)); return IB_EINVAL; } if (!ib_flags_all(tx->flags, IB_TX_FREQ_STARTED)) { ib_log_debug_tx(tx, "Automatically triggering %s", ib_state_name(request_started_state)); rc = ib_state_notify_request_started(ib, tx, tx->request_line); if (rc != IB_OK) { return rc; } } /* Mark the time. */ tx->t.request_header = ib_clock_get_time(); /// @todo Seems this gets there too late. rc = ib_fctl_meta_add(tx->fctl, IB_STREAM_EOH); if (rc != IB_OK) { return rc; } ib_tx_flags_set(tx, IB_TX_FREQ_HEADER); rc = ib_state_notify_tx(ib, request_header_process_state, tx); if (rc != IB_OK) { return rc; } /* Select the transaction context to use. */ rc = ib_ctxsel_select_context(ib, tx->conn, tx, &tx->ctx); if (rc != IB_OK) { return rc; } rc = ib_state_notify_tx(ib, handle_context_tx_state, tx); if (rc != IB_OK) { return rc; } rc = ib_state_notify_tx(ib, request_header_finished_state, tx); if (rc != IB_OK) { return rc; } /* Notify the engine and any callbacks of the data. */ rc = ib_state_notify_tx(ib, handle_request_header_state, tx); if (rc != IB_OK) { return rc; } return IB_OK; } ib_status_t ib_state_notify_request_body_data(ib_engine_t *ib, ib_tx_t *tx, const char *data, size_t data_length) { assert(ib != NULL); assert(ib->cfg_state == CFG_FINISHED); assert(tx != NULL); assert(data != NULL); ib_status_t rc; /* Validate. */ if (! ib_flags_all(tx->flags, IB_TX_FREQ_STARTED)) { ib_log_debug_tx(tx, "No request started: Ignoring %s", ib_state_name(request_body_data_state)); return IB_OK; } if (! 
ib_flags_all(tx->flags, IB_TX_FREQ_HAS_DATA)) { ib_log_debug_tx(tx, "No request data: Ignoring %s", ib_state_name(request_body_data_state)); return IB_OK; } /* We should never get NULL data. */ if ( (data == NULL) || (data_length == 0) ) { ib_log_debug_tx(tx, "Request body data with no data. Ignoring."); return IB_OK; } if (! ib_flags_all(tx->flags, IB_TX_FREQ_LINE)) { if (tx->request_line == NULL) { ib_log_error_tx(tx, "Request has no request line."); return IB_EINVAL; } rc = ib_state_notify_request_started(ib, tx, tx->request_line); if (rc != IB_OK) { return rc; } rc = ib_state_notify_request_header_finished(ib, tx); if (rc != IB_OK) { return rc; } } /* Note that we have request data. */ ib_tx_flags_set(tx, IB_TX_FREQ_HAS_DATA); /* Validate. */ if (!ib_flags_all(tx->flags, IB_TX_FREQ_HEADER)) { ib_log_debug_tx(tx, "Automatically triggering %s", ib_state_name(request_header_finished_state)); ib_state_notify_request_header_finished(ib, tx); } /* On the first call, record the time and mark that there is a body. */ if (tx->t.request_body == 0) { tx->t.request_body = ib_clock_get_time(); ib_tx_flags_set(tx, IB_TX_FREQ_BODY); } /* Notify the engine and any callbacks of the data. */ rc = ib_state_notify_txdata(ib, tx, request_body_data_state, data, data_length); if (rc != IB_OK) { return rc; } return IB_OK; } ib_status_t ib_state_notify_request_finished(ib_engine_t *ib, ib_tx_t *tx) { assert(ib != NULL); assert(ib->cfg_state == CFG_FINISHED); assert(tx != NULL); ib_status_t rc; /* Validate. */ if (! ib_flags_all(tx->flags, IB_TX_FREQ_STARTED)) { ib_log_debug_tx(tx, "No request started: Ignoring %s", ib_state_name(request_finished_state)); return IB_OK; } if (! 
ib_flags_any(tx->flags, IB_TX_FREQ_STARTED)) { ib_log_debug_tx(tx, "No request started: Ignoring %s", ib_state_name(request_finished_state)); return IB_OK; } if (ib_flags_all(tx->flags, IB_TX_FREQ_FINISHED)) { ib_log_error_tx(tx, "Attempted to notify previously notified state: %s", ib_state_name(request_finished_state)); return IB_EINVAL; } if (!ib_flags_all(tx->flags, IB_TX_FREQ_HEADER)) { ib_log_debug_tx(tx, "Automatically triggering %s", ib_state_name(request_header_finished_state)); ib_state_notify_request_header_finished(ib, tx); } /* Mark the time. */ tx->t.request_finished = ib_clock_get_time(); /* Notify filters of the end-of-body (EOB) if there was a body. */ if (ib_flags_all(tx->flags, IB_TX_FREQ_BODY) != 0) { rc = ib_fctl_meta_add(tx->fctl, IB_STREAM_EOB); if (rc != IB_OK) { return rc; } } /* Notify filters of the end-of-stream (EOS). */ rc = ib_fctl_meta_add(tx->fctl, IB_STREAM_EOS); if (rc != IB_OK) { return rc; } ib_tx_flags_set(tx, IB_TX_FREQ_FINISHED); rc = ib_state_notify_tx(ib, request_finished_state, tx); if (rc != IB_OK) { return rc; } rc = ib_state_notify_tx(ib, handle_request_state, tx); if (rc != IB_OK) { return rc; } rc = ib_state_notify_tx(ib, tx_process_state, tx); if (rc != IB_OK) { return rc; } return IB_OK; } ib_status_t ib_state_notify_response_started(ib_engine_t *ib, ib_tx_t *tx, ib_parsed_resp_line_t *line) { assert(ib != NULL); assert(ib->cfg_state == CFG_FINISHED); assert(tx != NULL); ib_status_t rc; /* Validate. */ if (! ib_flags_all(tx->flags, IB_TX_FREQ_STARTED)) { ib_log_debug_tx(tx, "No request started: Ignoring %s", ib_state_name(response_started_state)); return IB_OK; } if (! ib_flags_all(tx->flags, IB_TX_FREQ_HAS_DATA)) { ib_log_debug_tx(tx, "No request data: Ignoring %s", ib_state_name(response_started_state)); return IB_OK; } tx->t.response_started = ib_clock_get_time(); /* Validate. 
*/ if (ib_flags_all(tx->flags, IB_TX_FRES_STARTED)) { ib_log_error_tx(tx, "Attempted to notify previously notified state: %s", ib_state_name(response_started_state)); return IB_EINVAL; } /* If the request was started, but not finished, notify it now */ if ( ib_flags_all(tx->flags, IB_TX_FREQ_STARTED) && !ib_flags_all(tx->flags, IB_TX_FREQ_FINISHED)) { ib_log_debug_tx(tx, "Automatically triggering %s", ib_state_name(request_finished_state)); ib_state_notify_request_finished(ib, tx); } /* Mark the time. */ tx->t.response_started = ib_clock_get_time(); ib_tx_flags_set(tx, IB_TX_FRES_STARTED); /* Notify the world about it */ rc = ib_state_notify_resp_line(ib, tx, response_started_state, line); if (rc != IB_OK) { return rc; } /* Record if we saw a line. */ if ( (line != NULL) && (line->raw != NULL) && (ib_bytestr_const_ptr(line->raw) != NULL) ) { ib_tx_flags_set(tx, IB_TX_FRES_HAS_DATA); ib_tx_flags_set(tx, IB_TX_FRES_LINE); } return IB_OK; } ib_status_t ib_state_notify_response_header_data(ib_engine_t *ib, ib_tx_t *tx, ib_parsed_headers_t *header) { assert(ib != NULL); assert(ib->cfg_state == CFG_FINISHED); assert(tx != NULL); assert(header != NULL); ib_status_t rc; /* Validate. */ if (! ib_flags_all(tx->flags, IB_TX_FREQ_HAS_DATA)) { ib_log_debug_tx(tx, "No request data: Ignoring %s", ib_state_name(response_header_data_state)); return IB_OK; } /* Mark the time. */ if (tx->t.response_started == 0) { tx->t.response_started = ib_clock_get_time(); } if ( tx->response_header == NULL ) { tx->response_header = header; } else { rc = ib_parsed_headers_append(tx->response_header, header); if (rc != IB_OK) { return rc; } } /* Notify the engine and any callbacks of the data. 
*/
    rc = ib_state_notify_header_data(
        ib,
        tx,
        response_header_data_state,
        header);
    if (rc != IB_OK) {
        return rc;
    }

    return IB_OK;
}

/* Public entry: the response header is complete.
 *
 * Synthesizes a missing response_started notification (normal for HTTP/0.9,
 * logged otherwise), rejects double notification with IB_EINVAL, then
 * timestamps, flags, and dispatches the header-finished and
 * handle-response-header states. */
ib_status_t ib_state_notify_response_header_finished(ib_engine_t *ib,
                                                     ib_tx_t *tx)
{
    assert(ib != NULL);
    assert(ib->cfg_state == CFG_FINISHED);
    assert(tx != NULL);

    ib_status_t rc;

    /* Check for data first. */
    if (!ib_flags_all(tx->flags, IB_TX_FREQ_HAS_DATA)) {
        ib_log_debug_tx(tx, "No request data: Ignoring %s",
                        ib_state_name(response_header_finished_state));
        return IB_OK;
    }

    /* Generate the response line state if it hasn't been seen */
    if (! ib_flags_all(tx->flags, IB_TX_FRES_STARTED)) {
        /* For HTTP/0.9 there is no response line, so this is normal, but
         * for others this is not normal and should be logged. */
        if (!ib_flags_all(tx->flags, IB_TX_FHTTP09)) {
            ib_log_debug_tx(tx, "Automatically triggering %s",
                            ib_state_name(response_started_state));
        }
        rc = ib_state_notify_response_started(ib, tx, NULL);
        if (rc != IB_OK) {
            return rc;
        }
    }

    /* Validate. */
    if (ib_flags_all(tx->flags, IB_TX_FRES_HEADER)) {
        ib_log_error_tx(tx,
                        "Attempted to notify previously notified state: %s",
                        ib_state_name(response_header_finished_state));
        return IB_EINVAL;
    }

    /* NOTE(review): this second !FRES_STARTED branch can only run when the
     * response_started call above returned IB_OK via one of its early
     * "ignore" paths without setting the flag; whether that is reachable in
     * practice, and why this retry passes tx->response_line while the first
     * attempt passed NULL, cannot be determined from this file — confirm
     * before simplifying. */
    if (!ib_flags_all(tx->flags, IB_TX_FRES_STARTED)) {
        /* For HTTP/0.9 there are no response headers, so this is normal, but
         * for others this is not normal and should be logged. */
        if (!ib_flags_all(tx->flags, IB_TX_FHTTP09)) {
            ib_log_debug_tx(tx, "Automatically triggering %s",
                            ib_state_name(response_started_state));
            if (tx->response_line == NULL) {
                ib_log_notice_tx(tx,
                                 "Attempted to notify response header finished"
                                 " before response started.");
                return IB_EINVAL;
            }
        }
        rc = ib_state_notify_response_started(ib, tx, tx->response_line);
        if (rc != IB_OK) {
            return rc;
        }
    }

    /* Mark the time. */
    tx->t.response_header = ib_clock_get_time();

    ib_tx_flags_set(tx, IB_TX_FRES_HEADER);

    rc = ib_state_notify_tx(ib, response_header_finished_state, tx);
    if (rc != IB_OK) {
        return rc;
    }

    /* Notify the engine and any callbacks of the data. */
    rc = ib_state_notify_tx(ib, handle_response_header_state, tx);
    if (rc != IB_OK) {
        return rc;
    }

    return IB_OK;
}

/* Public entry: notify a chunk of response body data.
 *
 * Synthesizes a missing response-header-finished notification first, then
 * on the first body chunk records the time and sets the HAS_DATA/BODY
 * flags before dispatching the txdata hooks. */
ib_status_t ib_state_notify_response_body_data(ib_engine_t *ib,
                                               ib_tx_t *tx,
                                               const char *data,
                                               size_t data_length)
{
    assert(ib != NULL);
    assert(ib->cfg_state == CFG_FINISHED);
    assert(tx != NULL);
    assert(data != NULL);

    ib_status_t rc;

    /* Check for data first. */
    if (! ib_flags_all(tx->flags, IB_TX_FREQ_HAS_DATA)) {
        ib_log_debug_tx(tx, "No request data: Ignoring %s",
                        ib_state_name(response_body_data_state));
        return IB_OK;
    }

    /* We should never get empty data */
    /* (data == NULL is redundant with the assert above in debug builds;
     * it serves as a release-build guard.) */
    if ( (data == NULL) || (data_length == 0) ) {
        ib_log_debug_tx(tx, "Response body data with no data. Ignoring.");
        return IB_OK;
    }

    /* Validate the header has already been seen. */
    if (! ib_flags_all(tx->flags, IB_TX_FRES_HEADER)) {
        /* For HTTP/0.9 there are no response headers, so this is normal, but
         * for others this is not normal and should be logged. */
        if (!ib_flags_all(tx->flags, IB_TX_FHTTP09)) {
            ib_log_debug_tx(tx, "Automatically triggering %s",
                            ib_state_name(response_header_finished_state));
            if (tx->response_line == NULL) {
                ib_log_notice_tx(tx,
                                 "Attempted to notify response body data"
                                 " before response started.");
                return IB_EINVAL;
            }
        }
        rc = ib_state_notify_response_header_finished(ib, tx);
        if (rc != IB_OK) {
            return rc;
        }
    }

    /* On the first call, record the time and mark that there is a body. */
    if (tx->t.response_body == 0) {
        tx->t.response_body = ib_clock_get_time();
        ib_tx_flags_set(tx, IB_TX_FRES_HAS_DATA);
        ib_tx_flags_set(tx, IB_TX_FRES_BODY);
    }

    /* Notify the engine and any callbacks of the data. */
    rc = ib_state_notify_txdata(ib, tx,
                                response_body_data_state,
                                data, data_length);
    if (rc != IB_OK) {
        return rc;
    }

    return IB_OK;
}

/* Public entry: the response is complete.
 *
 * After validation, dispatches response-finished and handle-response, then
 * runs postprocess and logging if they have not run yet. */
ib_status_t ib_state_notify_response_finished(ib_engine_t *ib,
                                              ib_tx_t *tx)
{
    assert(ib != NULL);
    assert(ib->cfg_state == CFG_FINISHED);
    assert(tx != NULL);

    ib_status_t rc;

    /* Check for response started first. */
    if (! ib_flags_all(tx->flags, IB_TX_FREQ_HAS_DATA)) {
        ib_log_debug_tx(tx, "No request data: Ignoring %s",
                        ib_state_name(response_finished_state));
        return IB_OK;
    }
    if (!ib_flags_any(tx->flags, IB_TX_FRES_STARTED)) {
        ib_log_debug_tx(tx, "No response started: Ignoring %s",
                        ib_state_name(response_finished_state));
        return IB_OK;
    }

    if (ib_flags_all(tx->flags, IB_TX_FRES_FINISHED)) {
        ib_log_error_tx(tx,
                        "Attempted to notify previously notified state: %s",
                        ib_state_name(response_finished_state));
        return IB_EINVAL;
    }

    if (!ib_flags_all(tx->flags, IB_TX_FRES_HEADER)) {
        ib_log_debug_tx(tx, "Automatically triggering %s",
                        ib_state_name(response_header_finished_state));
        /* NOTE(review): return code deliberately(?) ignored — best-effort,
         * consistent with the request-side auto-triggers in this file. */
        ib_state_notify_response_header_finished(ib, tx);
    }

    /* Mark the time. */
    tx->t.response_finished = ib_clock_get_time();

    ib_tx_flags_set(tx, IB_TX_FRES_FINISHED);

    rc = ib_state_notify_tx(ib, response_finished_state, tx);
    if (rc != IB_OK) {
        return rc;
    }

    rc = ib_state_notify_tx(ib, handle_response_state, tx);
    if (rc != IB_OK) {
        return rc;
    }

    if (! ib_flags_all(tx->flags, IB_TX_FPOSTPROCESS)) {
        rc = ib_state_notify_postprocess(ib, tx);
        if (rc != IB_OK) {
            return rc;
        }
    }

    if (! ib_flags_all(tx->flags, IB_TX_FLOGGING)) {
        rc = ib_state_notify_logging(ib, tx);
        if (rc != IB_OK) {
            return rc;
        }
    }

    /* Mark the time.
*/ tx->t.finished = ib_clock_get_time(); rc = ib_state_notify_tx(ib, tx_finished_state, tx); if (rc != IB_OK) { return rc; } return IB_OK; } ib_status_t ib_state_notify_postprocess(ib_engine_t *ib, ib_tx_t *tx) { assert(ib != NULL); assert(ib->cfg_state == CFG_FINISHED); assert(tx != NULL); ib_status_t rc; if (ib_flags_all(tx->flags, IB_TX_FPOSTPROCESS)) { ib_log_error_tx(tx, "Attempted to notify previously notified state: %s", ib_state_name(handle_postprocess_state)); return IB_EINVAL; } /* Mark time. */ tx->t.postprocess = ib_clock_get_time(); ib_tx_flags_set(tx, IB_TX_FPOSTPROCESS); rc = ib_state_notify_tx(ib, handle_postprocess_state, tx); if (rc != IB_OK) { return rc; } return IB_OK; } ib_status_t ib_state_notify_logging(ib_engine_t *ib, ib_tx_t *tx) { assert(ib != NULL); assert(ib->cfg_state == CFG_FINISHED); assert(tx != NULL); ib_status_t rc; if (ib_flags_all(tx->flags, IB_TX_FLOGGING)) { ib_log_error_tx(tx, "Attempted to notify previously notified state: %s", ib_state_name(handle_logging_state)); return IB_EINVAL; } ib_tx_flags_set(tx, IB_TX_FLOGGING); rc = ib_state_notify_tx(ib, handle_logging_state, tx); if (rc != IB_OK) { return rc; } return IB_OK; } ib_status_t ib_state_notify_logevent(ib_engine_t *ib, ib_tx_t *tx) { assert(ib != NULL); assert(ib->cfg_state == CFG_FINISHED); assert(tx != NULL); ib_status_t rc; rc = ib_state_notify_tx(ib, handle_logevent_state, tx); if (rc != IB_OK) { return rc; } return IB_OK; } ib_status_t ib_state_notify_context_open(ib_engine_t *ib, ib_context_t *ctx) { assert(ib != NULL); assert(ctx != NULL); ib_status_t rc; rc = ib_state_notify_context(ib, ctx, context_open_state); if (rc != IB_OK) { return rc; } return IB_OK; } ib_status_t ib_state_notify_context_close(ib_engine_t *ib, ib_context_t *ctx) { assert(ib != NULL); assert(ctx != NULL); ib_status_t rc; rc = ib_state_notify_context(ib, ctx, context_close_state); if (rc != IB_OK) { return rc; } return IB_OK; } ib_status_t ib_state_notify_context_destroy(ib_engine_t *ib, 
ib_context_t *ctx) { assert(ib != NULL); assert(ctx != NULL); ib_status_t rc; rc = ib_state_notify_context(ib, ctx, context_destroy_state); if (rc != IB_OK) { return rc; } return IB_OK; } ib_status_t ib_state_notify_engine_shutdown_initiated(ib_engine_t *ib) { assert(ib != NULL); ib_status_t rc; ib_log_info(ib, "IronBee engine shutdown requested."); rc = ib_state_notify_null(ib, engine_shutdown_initiated_state); if (rc != IB_OK) { return rc; } return IB_OK; }