text stringlengths 1 1.05M |
|---|
<filename>src/components/toolbarGroup/analysis/sectionAnalysis.ts
/*
* @Date: 2021-11-05 20:44:03
* @LastEditors: huangzh873
* @LastEditTime: 2022-03-28 19:48:46
* @FilePath: /cesium-web-vue/src/components/toolbarGroup/analysis/sectionAnalysis.ts
*/
import * as echarts from 'echarts/core';
import {
GridComponent,
ToolboxComponent,
TooltipComponent,
DatasetComponent,
} from 'echarts/components';
import { LineChart } from 'echarts/charts';
import { UniversalTransition } from 'echarts/features';
import { CanvasRenderer } from 'echarts/renderers';
import { DrawPolyline, polylineOptions } from '@/utils/vue-utils/draw/drawUtils';
import { CESIUM_3D_TILE, TERRAIN } from '@/constant/index';
import { Math as CMath, HeightReference, Viewer, Cartesian3, sampleTerrainMostDetailed, Cartographic, createWorldTerrain, EllipsoidGeodesic, Entity, VerticalOrigin, NearFarScalar, ConstantPositionProperty } from 'cesium';
echarts.use([GridComponent, LineChart, CanvasRenderer, UniversalTransition, ToolboxComponent, TooltipComponent, DatasetComponent]);
/**
 * Section (elevation profile) analysis tool.
 * Lets the user draw a polyline on the globe, samples heights along each
 * segment (world terrain or 3D Tiles, depending on `drawType`), and renders
 * the resulting distance/height profile in an ECharts line chart whose
 * tooltip is linked back to a marker on the map.
 */
class sectionAnalysis {
  viewer: Viewer;
  // Polyline drawing helper that drives the click/right-click workflow.
  DrawPolylineIns: DrawPolyline;
  // First clicked point; chart distances are measured from here.
  firstPoint: Cartesian3 = new Cartesian3();
  // Endpoints of the segment currently being sampled.
  start: Cartesian3 = new Cartesian3();
  end: Cartesian3 = new Cartesian3();
  // Accumulated profile samples; doubles as the ECharts dataset source.
  echartDataGroup: {
    distance: number,
    height: number,
    lng: number,
    lat: number
  }[] = []
  echartOptions: any = {}
  echartIns: echarts.ECharts | undefined = undefined
  // Map marker that tracks the point hovered in the chart tooltip.
  tipGraphic: Entity | undefined = undefined
  // Sampling mode: TERRAIN (sampleTerrainMostDetailed) or CESIUM_3D_TILE
  // (scene.clampToHeightMostDetailed).
  drawType = TERRAIN
  constructor(viewer: Viewer, options?: polylineOptions) {
    this.viewer = viewer;
    this.DrawPolylineIns = new DrawPolyline(viewer, options);
    options && this.initOptions(options)
    this.echartOptions = {
      tooltip: {
        trigger: 'axis',
        textStyle: {
          align: 'left',
        },
        // Pin the tooltip near the top of the chart, following the cursor's x.
        position: function (pt) {
          return [pt[0], '10%'];
        },
        // Arrow function so `this` stays bound to the sectionAnalysis instance.
        formatter: (params) => {
          let inhtml = "";
          if (params.length === 0) {
            // hideTipMarker();
            return inhtml;
          }
          const height = params[0].value.height; // altitude of the hovered sample
          const lng = params[0].value.lng; // longitude (degrees)
          const lat = params[0].value.lat; // latitude (degrees)
          const len = params[0].value.distance; // distance from the start point (metres)
          // const hbgdStr = haoutil.str.formatLength(Number(params[0].value));
          inhtml = `当前位置<br />
            距起点:${len.toFixed(2)}米<br />
            海拔:<span style='color:${params[0].color};'>${height.toFixed(2)}米</span><br />
            经度:${lng.toFixed(2)}<br />
            纬度:${lat.toFixed(2)}`;
          // Keep the map marker in sync with the hovered chart point.
          this.showTipMarker(height, lng, lat, inhtml);
          return inhtml;
        },
      },
      dataset: {
        source: this.echartDataGroup,
      },
      xAxis: {
        type: 'category',
        boundaryGap: false,
        axisLabel: {
          color: "#FFF",
          // formatter: '{value}米'
          // Round the distance label to 2 decimal places and append "米" (metres).
          formatter: (value) => {
            const value2 = Math.round(value*100)
            return value2/100 + '米'
          }
        },
        axisLine: {
          lineStyle: {
            color: "#ccc"
          }
        },
        splitLine: {
          show: true,
          lineStyle: {
            color: "#ccc"
          }
        }
      },
      yAxis: {
        axisLabel: {
          color: "#FFF",
          formatter: "{value} 米"
        },
      },
      grid: {
        width: 'auto',
        height: 'auto',
        left: '3%',
        right: '3%',
        top: '20%',
        bottom: '0%',
        containLabel: true,
        show: true
      },
      series: [
        {
          type: 'line',
          showSymbol: false,
          lineStyle: {
            color: '#ff4683'
          },
          areaStyle: {
            opacity: 0.8,
            // Vertical orange-to-pink gradient fill under the profile line.
            color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [
              {
                offset: 0,
                color: 'rgb(255, 158, 68)'
              },
              {
                offset: 1,
                color: 'rgb(255, 70, 131)'
              }
            ])
          }
        }
      ]
    };
  }
  // Copy every option key straight onto the instance.
  // NOTE(review): this overwrites arbitrary fields (e.g. drawType) with no
  // validation — confirm polylineOptions keys are meant to mirror class fields.
  initOptions(options: polylineOptions) {
    Object.keys(options).forEach(item => {
      this[item] = options[item];
    })
  }
  /**
   * Chart-tooltip <-> map interaction: add (once) or move a marker entity at
   * the profile point currently hovered in the chart.
   * @param height altitude in metres
   * @param lng longitude in degrees
   * @param lat latitude in degrees
   * @param inhtml tooltip HTML (currently unused here; popup binding is commented out)
   * @return { void }
   */
  showTipMarker(height, lng, lat, inhtml): void {
    // Position for the marker.
    const drawPosition = Cartesian3.fromDegrees(lng, lat, height);
    if (!this.tipGraphic) {
      // Lazily create the marker the first time the tooltip fires.
      this.tipGraphic = this.viewer.entities.add({
        name: "当前点",
        position: drawPosition,
        billboard: {
          image: 'image/map-marker.png',
          scale: 0.2,
          // Clamp to ground only in terrain mode; in 3D-Tiles mode the sampled
          // height is already the tile surface height.
          heightReference: this.drawType === TERRAIN ? HeightReference.CLAMP_TO_GROUND : HeightReference.NONE,
          verticalOrigin: VerticalOrigin.BOTTOM,
          // Shrink the marker as the camera moves away.
          scaleByDistance: new NearFarScalar(10000, 1.0, 500000, 0.2)
        }
      })
      // this.tipGraphic._setPositionsToCallback();
    }
    if(drawPosition) {
      // Move the existing marker to the newly hovered point.
      this.tipGraphic.position = new ConstantPositionProperty(drawPosition);
    }
    // this.tipGraphic.bindPopup(inthtml).openPopup();
  }
  /**
   * Draw the section polyline on the terrain via DrawPolyline; each added
   * vertex triggers sampling of the segment it closes.
   * @param {*}
   * @return {*}
   */
  drawSectionsOnMap() {
    let sampleIndex = 0;
    this.DrawPolylineIns.startCreate(
      positions => {
        if (sampleIndex) {
          // A new vertex closes the segment [previous, current]: sample it.
          this.start = positions[sampleIndex - 1];
          this.end = positions[sampleIndex];
          this.getSampledData()
        } else {
          // First click of a new drawing: reset accumulated data.
          this.echartDataGroup = [];
          this.firstPoint = positions[sampleIndex];
        }
        sampleIndex++;
      },
      () => {
        // Right-click ends the line; reset index so the next drawing starts fresh.
        sampleIndex = 0;
        // Create the chart container on first completion only.
        if(!this.echartIns) {
          this.echartIns = this.createEchartContainer();
        }
      }
    );
  }
  /**
   * Sample ~100 points between the two segment endpoints. Called per click;
   * data is fetched and merged asynchronously to keep the UI responsive.
   * @param {*}
   * @return {*}
   */
  getSampledData() {
    const sampledPositions = [Cartographic.fromCartesian(this.start)];
    // Parallel cartesian list used for sampling against 3D Tiles.
    const sampledPositions_3dtiles = [this.start]
    const COUNT = 100;
    // Linear interpolation between the endpoints (COUNT-1 interior points).
    for (let i = 1; i < COUNT; i++) {
      const cart = Cartesian3.lerp(this.start, this.end, i / COUNT, new Cartesian3());
      sampledPositions.push(Cartographic.fromCartesian(cart));
      sampledPositions_3dtiles.push(cart);
    }
    sampledPositions.push(Cartographic.fromCartesian(this.end));
    sampledPositions_3dtiles.push(this.end);
    /* TODO(review): the two branches below are near-duplicates — refactor. */
    if(this.drawType === CESIUM_3D_TILE) {
      // Clamp each sample to the most detailed 3D-Tiles height.
      this.viewer.scene.clampToHeightMostDetailed(sampledPositions_3dtiles).then((res) => {
        const dataGroup = res.map(_item => {
          const item = Cartographic.fromCartesian(_item)
          // Geodesic surface distance from the very first drawn point.
          const geodesic = new EllipsoidGeodesic();
          geodesic.setEndPoints(Cartographic.fromCartesian(this.firstPoint), item);
          const distance = geodesic.surfaceDistance;
          return {
            distance: distance,
            height: item.height,
            lng: CMath.toDegrees(item.longitude),
            lat: CMath.toDegrees(item.latitude)
          }
        })
        this.echartDataGroup = this.echartDataGroup.concat(dataGroup)
        // Push the merged samples into the chart asynchronously.
        if (this.echartOptions.dataset) {
          this.echartOptions.dataset.source = this.echartDataGroup
        }
        this.echartIns && this.echartIns.setOption(this.echartOptions)
      })
    } else {
      // Terrain mode: query heights from the world terrain provider.
      sampleTerrainMostDetailed(createWorldTerrain(), sampledPositions).then((res) => {
        const dataGroup = res.map(item => {
          const geodesic = new EllipsoidGeodesic();
          geodesic.setEndPoints(Cartographic.fromCartesian(this.firstPoint), item);
          const distance = geodesic.surfaceDistance;
          return {
            distance: distance,
            height: item.height,
            lng: CMath.toDegrees(item.longitude),
            lat: CMath.toDegrees(item.latitude)
          }
        })
        this.echartDataGroup = this.echartDataGroup.concat(dataGroup)
        // Push the merged samples into the chart asynchronously.
        if (this.echartOptions.dataset) {
          this.echartOptions.dataset.source = this.echartDataGroup
        }
        this.echartIns && this.echartIns.setOption(this.echartOptions)
      })
    }
  }
  /**
   * Create the chart container div and mount it under the top-level
   * #mapContainer element; the CSS classes are styled in the host view.
   * @param {*}
   * @return {echarts.ECharts} myChart
   */
  createEchartContainer(): echarts.ECharts {
    const chart_panel = document.createElement('div');
    const chart_container = document.createElement('div');
    const indexDom = document.getElementById('mapContainer');
    if (!indexDom) {
      throw new Error("echart初始化失败")
    }
    chart_panel.setAttribute('class', 'chart_panel')
    chart_container.setAttribute('class', 'chart_container')
    indexDom.appendChild(chart_panel);
    chart_panel.appendChild(chart_container)
    const myChart = echarts.init(chart_container);
    myChart.setOption(this.echartOptions)
    return myChart
  }
  // Tear down the chart DOM, the tip marker and the drawn polylines.
  stopDrawing() {
    const echartDiv = document.getElementsByClassName('chart_panel')[0];
    if(echartDiv) {
      echartDiv.parentNode?.removeChild(echartDiv);
    }
    if(this.tipGraphic) {
      this.viewer.entities.remove(this.tipGraphic);
    }
    this.DrawPolylineIns.polylineGroup.forEach(item => {
      this.viewer.entities.remove(item);
    })
    this.DrawPolylineIns.stopDrawing();
  }
}
export { sectionAnalysis } |
#!/bin/sh
# CocoaPods "Copy Pods Resources" build phase script.
# Compiles storyboards/xibs/data models and collects plain resources into a
# manifest file, then rsyncs everything into the app bundle.
set -e
mkdir -p "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
# Manifest of plain resources to copy at the end (truncated on every run).
RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt
> "$RESOURCES_TO_COPY"
# Dispatch a single resource path by extension: compile what needs compiling,
# queue everything else in the manifest.
install_resource()
{
  case $1 in
    *.storyboard)
      echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
      ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
      ;;
    *.xib)
      echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
      ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
      ;;
    *.framework)
      echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
      mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
      echo "rsync -av ${PODS_ROOT}/$1 ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
      rsync -av "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
      ;;
    *.xcdatamodel)
      # NOTE(review): the echoed command drops the ".xcdatamodel" suffix-strip
      # that the real command below performs — the log is misleading; confirm.
      echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1"`.mom\""
      xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodel`.mom"
      ;;
    *.xcdatamodeld)
      echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd\""
      xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd"
      ;;
    *.xcassets)
      # Asset catalogs are handled by the actool pass at the bottom.
      ;;
    /*)
      # Absolute path: queue as-is.
      echo "$1"
      echo "$1" >> "$RESOURCES_TO_COPY"
      ;;
    *)
      # Relative path: queue relative to the Pods root.
      echo "${PODS_ROOT}/$1"
      echo "${PODS_ROOT}/$1" >> "$RESOURCES_TO_COPY"
      ;;
  esac
}
install_resource "SVProgressHUD/SVProgressHUD/SVProgressHUD.bundle"
# Copy all queued plain resources into the built product (and, when archiving,
# into the install directory as well).
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
if [[ "${ACTION}" == "install" ]]; then
  rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
rm -f "$RESOURCES_TO_COPY"
# Compile any asset catalogs for the target device family.
if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ `find . -name '*.xcassets' | wc -l` -ne 0 ]
then
  case "${TARGETED_DEVICE_FAMILY}" in
    1,2)
      TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone"
      ;;
    1)
      TARGET_DEVICE_ARGS="--target-device iphone"
      ;;
    2)
      TARGET_DEVICE_ARGS="--target-device ipad"
      ;;
    *)
      TARGET_DEVICE_ARGS="--target-device mac"
      ;;
  esac
  find "${PWD}" -name "*.xcassets" -print0 | xargs -0 actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${IPHONEOS_DEPLOYMENT_TARGET}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
|
// Express sub-router for the "about" page: GET / delegates to the about controller.
const express = require('express');
const aboutController = require('../controllers/aboutController');

const router = express.Router();
router.get('/', aboutController);

module.exports = router;
|
package com.beleske.borisavz;
import android.annotation.SuppressLint;
import android.content.Intent;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
/**
 * Main menu screen: four buttons, each opening one of the app's activities.
 */
public class Menu extends AppCompatActivity {

    @SuppressLint("StaticFieldLeak")
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_menu);

        // Wire each menu button to the activity it should launch.
        bindButton(R.id.newNote, new Intent(this, AddNote.class));
        bindButton(R.id.allNotes, new Intent(this, AllNotes.class));
        bindButton(R.id.singleNote, new Intent(this, SingleNote.class));
        bindButton(R.id.about, new Intent(this, About.class));
    }

    /** Attach a click listener that starts the given intent's activity. */
    private void bindButton(int buttonId, final Intent target) {
        Button button = findViewById(buttonId);
        button.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                startActivity(target);
            }
        });
    }
}
|
#!/bin/bash
# Launch training of an SGC graph model on the transaction-age dataset.
# Input graph, node features and targets:
edge_path='./input/txn_data/user_edge.csv'
field_path='./input/txn_data/user_field.npy'
target_path='./input/txn_data/user_age.csv'
code_path='./main.py'
# GPU id(s) exposed via CUDA_VISIBLE_DEVICES.
gpus=0
# Model configuration: plain SGC with 11 propagation hops; all optional
# modules (refinement, bi-interaction, NFM, aggregation) disabled.
gnn_units='none'
gnn_hops=11
graph_layer='sgc'
graph_refining='none'
grn_units='none'
bi_interaction='none'
nfm_units='none'
aggr_style='none'
# sh sh/txn_age/SGC.sh
# Optimizer hyper-parameters.
learning_rate=0.1
weight_decay=0.0
dropout=0.5
printf "\n#### learning_rate=$learning_rate, weight_decay=$weight_decay, dropout=$dropout ####\n"
# Kick off training; epochs=9999 presumably relies on early stopping in main.py
# (not visible here — confirm).
CUDA_VISIBLE_DEVICES=$gpus python $code_path --seed 42 --epochs 9999 --weight-balanced True \
 --learning-rate $learning_rate --weight-decay $weight_decay --dropout $dropout \
 --graph-refining $graph_refining --aggr-pooling mean --grn-units $grn_units \
 --bi-interaction $bi_interaction --nfm-units $nfm_units \
 --graph-layer $graph_layer --gnn-hops $gnn_hops --gnn-units $gnn_units \
 --aggr-style $aggr_style \
 --edge-path $edge_path --field-path $field_path --target-path $target_path
|
#!/bin/bash
# Disable NIC hardware offloading inside every LXC container listed in the
# file ./CONTAINERS (one container name per line). Requires root, because
# lxc-attach does.
# Add this at the beginning of the script to assure you run it with sudo
if [[ $UID != 0 ]]; then
    echo "Please run this script with sudo:"
    echo "sudo $0 $*"
    exit 1
fi
echo "Disabling offload in LXC containers of deployment"
# read -r: don't mangle backslashes in container names.
while read -r ctname; do
    echo "Analyzing container $ctname..."
    # Get list of all network interfaces (skip the two /proc/net/dev header lines).
    # Bug fix: quote "$ctname" so names with spaces/globs don't word-split.
    CT_NICS="$(lxc-attach -n "$ctname" -- tail -n+3 /proc/net/dev | cut --delimiter=: -f 1 | awk '{ print $1}')"
    for nic in $CT_NICS; do
        echo " > Disabling offload $nic@$ctname"
        lxc-attach -n "$ctname" -- /sbin/disableOffload "$nic" 2> /dev/null
    done
done <CONTAINERS
|
<filename>lib/commonjs/types/CompaniesAttributes.js<gh_stars>0
"use strict";
//# sourceMappingURL=CompaniesAttributes.js.map |
#!/bin/bash
# Register a host under both its internal and public names in FreeIPA and set
# up its puppet service. Expects NAME, DOMAIN, FREEIPA_OTP and IPA_HOSTGROUP
# in the environment.
HOSTNAME=$NAME.int.$DOMAIN
HOSTNAME_PUBLIC=$NAME.$DOMAIN
# Ensure hosts are not in FreeIPA yet
ipa host-del $HOSTNAME || true
ipa host-del $HOSTNAME_PUBLIC || true
# Internal host gets a one-time enrolment password; --force skips DNS checks.
ipa host-add --password "$FREEIPA_OTP" $HOSTNAME --force
ipa host-add $HOSTNAME_PUBLIC --force
# Let the internal host manage the public host entry.
ipa host-add-managedby $HOSTNAME_PUBLIC --hosts $HOSTNAME
ipa hostgroup-add-member $IPA_HOSTGROUP --hosts=$HOSTNAME
# Puppet service principal on the public name, manageable from the internal host.
ipa service-add puppet/$HOSTNAME_PUBLIC --force
ipa service-add-host puppet/$HOSTNAME_PUBLIC --hosts $HOSTNAME
|
<reponame>LiamHayes1/rest-cucumber
package rest;
import static rest.RestCucumberFeatureLoader.load;
import gherkin.formatter.Formatter;
import gherkin.formatter.Reporter;
import java.util.List;
import cucumber.runtime.RuntimeOptions;
import cucumber.runtime.io.ResourceLoader;
import cucumber.runtime.model.CucumberFeature;
/**
 * Thin wrapper around cucumber's {@link RuntimeOptions} that swaps in the
 * REST-aware feature loader when a {@link RestMultiLoader} is supplied;
 * everything else delegates straight through.
 */
public class RestRuntimeOptions {
    private RuntimeOptions runtimeOptions;

    public RestRuntimeOptions(RuntimeOptions runtimeOptions) {
        this.runtimeOptions = runtimeOptions;
    }

    /**
     * Load cucumber features: use the custom REST loader when available,
     * otherwise fall back to the standard CucumberFeature loader.
     */
    public List<CucumberFeature> cucumberFeatures(ResourceLoader resourceLoader) {
        if (resourceLoader instanceof RestMultiLoader) {
            return load(resourceLoader, runtimeOptions.getFeaturePaths(),
                    runtimeOptions.getFilters(), System.out);
        }
        return CucumberFeature.load(resourceLoader, runtimeOptions.getFeaturePaths(),
                runtimeOptions.getFilters());
    }

    /** Delegate: reporter configured on the wrapped options. */
    public Reporter reporter(ClassLoader classLoader) {
        return runtimeOptions.reporter(classLoader);
    }

    /** Delegate: formatter configured on the wrapped options. */
    public Formatter formatter(ClassLoader classLoader) {
        return runtimeOptions.formatter(classLoader);
    }

    /** Delegate: strict-mode flag of the wrapped options. */
    public boolean isStrict() {
        return runtimeOptions.isStrict();
    }

    /** Access to the underlying cucumber options. */
    public RuntimeOptions getRuntimeOptions() {
        return runtimeOptions;
    }
}
|
# Demo of Python MRO / cooperative __init__ when mixing a user class with list.
class Base1:
    def __init__(self, *args):
        # Base1.__init__ does NOT call super().__init__, so list.__init__
        # never runs for subclasses where Base1 precedes list in the MRO.
        print("Base1.__init__", args)

# MRO is (Clist1, Base1, list, object): Base1.__init__ wins.
class Clist1(Base1, list):
    pass

a = Clist1()
print(len(a))  # 0 — list storage exists (list.__new__) but was never filled
# Not compliant - list assignment should happen in list.__init__, which is not called
# because there's Base1.__init__, but we assign in list.__new__
#a = Clist1([1, 2, 3])
#print(len(a))
print("---")

# MRO is (Clist2, list, Base1, object): list.__init__ wins instead.
class Clist2(list, Base1):
    pass

# Not compliant - should call list.__init__, but we don't have it
#a = Clist2()
#print(len(a))
# Not compliant - should call list.__init__, but we don't have it
#a = Clist2([1, 2, 3])
#print(len(a))
|
import tensorflow as tf
import numpy as np

# Tiny 5-way classifier over one-hot encoded item categories.
model = tf.keras.models.Sequential([
    # Bug fix: the feature matrix below has 5 numeric columns, not 10.
    tf.keras.layers.Input(shape=[5]),
    tf.keras.layers.Dense(5, activation='softmax')
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Bug fix: the original mixed category names into the numeric array, which
# forces a string dtype that model.fit rejects. Keep names separate and make
# the feature matrix purely numeric.
item_names = ["shirt", "pants", "jacket", "shoes", "accessories"]
data = np.array([
    [0, 0, 0, 0, 1],
    [0, 0, 1, 0, 0],
    [0, 1, 0, 0, 0],
    [1, 0, 0, 0, 0],
    [0, 0, 0, 1, 0],
], dtype=np.float32)
# One label per row, matching item_names order.
labels = np.array([0, 1, 2, 3, 4])
model.fit(data, labels, epochs=10, verbose=2)
#!/bin/sh
# Build the bundle and push it to the "release" branch tracked in ./.release.
set -e
# TRACE=1 enables shell command tracing for debugging.
[ "$TRACE" ] && set -x
echo "Creating release for release branch"
# Tag the release commit message with the source revision being released.
revision=$(git rev-parse HEAD)
# build and upload assets
webpack
cd .release
cp ../build/index.js .
git add .
git commit -a -m "release $revision"
git push origin release
|
#!/usr/bin/env python
from __future__ import unicode_literals
import os
from nose.tools import assert_equal, assert_in, assert_almost_equal
import pyne.ace
def setup():
    """Download the C-12 ACE test fixture (once) and derive a copy with a
    version-2.0 header for the new-style-header read test."""
    # Python 2/3 compatibility: urlretrieve moved into urllib.request in py3.
    try:
        import urllib.request as urllib
    except ImportError:
        import urllib
    if not os.path.isfile('C012-n.ace'):
        urllib.urlretrieve('ftp://ftp.nrg.eu/pub/www/talys/tendl2013/neutron_file/C/012/lib/endf/C012-n.ace',
                           'C012-n.ace')
    with open('C012-n.ace') as f:
        lines = f.readlines()
    # Prepend (in reverse order) a 2.0.0-style three-line header so the file
    # exercises the new header parser; the old header follows unchanged.
    lines.insert(0,'The next lines contain the old-style header\n')
    lines.insert(0,'11.896900 2.5263E-08 12/06/13 3\n' )
    lines.insert(0,'2.0.0' +5*' '+' 6000.000nc' + 13*' ' + 'TENDL\n')
    with open('C012-n-2p0.ace','w') as f:
        f.writelines(lines)
def _check_c12_table(table):
    """Assertions shared by all three read tests: header sizes, reaction
    presence, and spot-checked cross-section values for the C-12 table.

    Factored out because the original repeated this block verbatim in each
    test, so a fixture change had to be made in three places.
    """
    assert_equal(table.nxs[1], 38937)
    assert_equal(table.nxs[2], 6000)
    assert_equal(table.nxs[3], 1513)
    assert_equal(table.jxs[1], 1)
    assert_in(2, table.reactions)
    assert_in(107, table.reactions)
    assert_in(204, table.reactions)
    assert_in(444, table.reactions)
    assert_almost_equal(table.energy[0], 1.0e-11)
    assert_equal(table.reactions[2].sigma[0], 78.04874)
    assert_equal(table.reactions[2].sigma[-1], 1.00772)

def test_convert_c12():
    """ASCII-to-binary conversion must run without error."""
    pyne.ace.ascii_to_binary('C012-n.ace', 'C12-binary.ace')

def test_read_c12_ascii():
    """Read the plain ASCII file (old-style header)."""
    c12 = pyne.ace.Library('C012-n.ace')
    c12.read()
    assert_in('6000.00c', c12.tables)
    _check_c12_table(c12.tables['6000.00c'])

def test_read_c12_2p0_ascii():
    """Read the 2.0-header ASCII file produced by setup()."""
    c12 = pyne.ace.Library('C012-n-2p0.ace')
    c12.read()
    assert_in('6000.000nc', c12.tables)
    _check_c12_table(c12.tables['6000.000nc'])

def test_read_c12_binary():
    """Read the binary file produced by test_convert_c12()."""
    c12 = pyne.ace.Library('C12-binary.ace')
    c12.read()
    assert_in('6000.00c', c12.tables)
    _check_c12_table(c12.tables['6000.00c'])
def teardown():
    """Delete the binary ACE file produced by the conversion test, if any."""
    binary_path = 'C12-binary.ace'
    if os.path.exists(binary_path):
        os.remove(binary_path)
|
docker-compose -p web-react-rest down
|
def match_strings(str1, str2):
    """Count the characters of str1 (with multiplicity) that also occur in str2."""
    return sum(1 for ch in str1 if ch in str2)
def multiplication_table(n):
    """Print an n-by-n multiplication table: one row per line, each product
    followed by a tab (matching the original trailing-tab format)."""
    for row in range(1, n + 1):
        line = "".join(f"{row * col}\t" for col in range(1, n + 1))
        print(line)
<gh_stars>0
import { Injectable } from '@angular/core';
import { collection, doc, docData, Firestore, runTransaction, Timestamp } from '@angular/fire/firestore';
import { Observable } from 'rxjs';
import { environment } from '../../environments/environment';
import { Leaderboard, LeaderboardStat, Score } from './score.model';

/**
 * Persists game scores and maintains a single aggregated `scores/leaderboard`
 * document so the leaderboard page reads one document instead of querying
 * the whole scores collection.
 */
@Injectable({
  providedIn: 'root'
})
export class ScoreService {
  // Reference to the single aggregated leaderboard document.
  // NOTE(review): this field initializer reads the constructor-injected
  // `firestore`; it works only with TS legacy class-field emit
  // (useDefineForClassFields = false) — confirm tsconfig before upgrading.
  #leaderboardRef = doc(this.firestore, 'scores/leaderboard');
  // Live stream of the leaderboard document for the UI.
  leaderboard$ = docData(this.#leaderboardRef) as Observable<Leaderboard>;
  // Points awarded per difficulty level.
  readonly scoreList = {
    easy: 1,
    medium: 2,
    hard: 3
  };
  constructor(
    private readonly firestore: Firestore,
  ) { }
  /**
   * Save the Score for the User.
   * Then aggregate the result in the special `leaderboard` document
   * for optimizing (both performance and cost) "Leaderboard" page rendering.
   */
  async saveScore(score: number, userId: string, userDisplayName: string) {
    const createdAt = Timestamp.now();
    const scoresCollection = collection(this.firestore, 'scores');
    const newScoreDoc: Score = {
      createdAt,
      score,
      userId,
    }
    // Transaction keeps the raw score write and the leaderboard update atomic.
    return runTransaction(this.firestore, async t => {
      const leaderboard = await t.get(this.#leaderboardRef);
      const candidateStatDoc: LeaderboardStat = {
        score,
        createdAt,
        userId,
        userDisplayName,
      };
      const stats: LeaderboardStat[] = leaderboard.exists() ? leaderboard.get('stats') : [];
      // Insert the candidate before the first strictly-lower score, keeping
      // `stats` sorted descending.
      let i = 0, stat: LeaderboardStat;
      // eslint-disable-next-line no-cond-assign
      for (; stat = stats[i]; i++) {
        if (stat.score < candidateStatDoc.score) {
          stats.splice(i, 0, candidateStatDoc);
          break;
        }
      }
      // Candidate is the lowest score so far: append it.
      if (i === stats.length) {
        stats.push(candidateStatDoc);
      }
      // Cap the leaderboard at the configured maximum size.
      if (stats.length > environment.feature.score.leaderboardStatMaxQuantity) {
        stats.pop();
      }
      t.set(doc(scoresCollection), newScoreDoc);
      return t.set(this.#leaderboardRef, { stats });
    })
  }
}
|
int[] squareArray(int[] input) {
int[] result = new int[input.length];
for (int i = 0; i < input.length; i++) {
result[i] = input[i] * input[i];
}
return result;
} |
<reponame>ministryofjustice/mtp-api
from mtp_common.spooling import spoolable
from notification.rules import ENABLED_RULE_CODES, RULES
@spoolable(body_params=['records'])
def create_notification_events(records):
    """Run every enabled notification rule against each record, creating
    events for the rules that both apply to and are triggered by it."""
    for record in records:
        enabled_rules = (RULES[code] for code in ENABLED_RULE_CODES)
        for rule in enabled_rules:
            if rule.applies_to(record) and rule.triggered(record):
                rule.create_events(record)
|
<reponame>edgggeTRON/cardano-explorer-app<gh_stars>1-10
import { useI18nFeature } from '../../i18n/context';
import styles from './UnmoderatedDataConsented.module.scss';

// Notice shown when the user has consented to viewing unmoderated
// stake-pool data; renders the translated "stakePools.unmoderated" message.
export default () => {
  const { translate } = useI18nFeature().store;
  const message = translate('stakePools.unmoderated');
  return (
    <div className={styles.unmoderatedDataConsentedContainer}>{message}</div>
  );
};
|
<reponame>saltedfishclub/PolarCore<filename>src/main/java/cc/sfclub/plugin/PluginManager.java
package cc.sfclub.plugin;
import cc.sfclub.core.I18N;
import cc.sfclub.plugin.exception.DependencyMissingException;
import cc.sfclub.plugin.exception.InvalidPluginException;
import cc.sfclub.plugin.java.JavaPluginLoader;
import lombok.SneakyThrows;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.*;
/**
 * Discovers, dependency-orders, loads and unloads plugins found under a root
 * directory. File types are mapped to {@link PluginLoader}s; currently only
 * the Java jar loader is registered.
 */
public class PluginManager {
    private final Logger logger = LoggerFactory.getLogger(this.getClass());
    // Registered loaders; the first whose file pattern matches wins.
    private final List<PluginLoader> pluginLoaders = new LinkedList<>();
    // Names of plugins that could not be loaded (loops, missing deps, errors).
    private final Set<String> failedToLoads = new HashSet<>();
    // Loaded plugins by name. NOTE: unloadPlugin stores null instead of
    // removing the entry; isPluginLoaded handles that.
    private final Map<String, Plugin> pluginMap = new HashMap<>();
    private final String rootPath;

    @SneakyThrows
    public PluginManager(String rootPath) {
        this.rootPath = rootPath;
        pluginLoaders.add(new JavaPluginLoader(new File(rootPath).toPath(), this));
    }

    /**
     * Scan the plugin directory: pre-load every description, drop plugins
     * whose dependencies are missing, then load the remainder respecting
     * their dependency order.
     */
    public void loadPlugins() {
        File pluginsDir = new File(rootPath);
        Map<String, PluginDescription> preloadingPlugins = new HashMap<>();
        //Pre-load descriptions
        for (File file : pluginsDir.listFiles()) {
            PluginLoader loader = fileToLoader(file);
            if (loader == null) {
                continue;
            }
            PluginDescription desc = loader.getDescriptionOf(file);
            if (desc == null) {
                continue;
            }
            preloadingPlugins.put(desc.getName(), desc);
            logger.info(I18N.get().server.PLUGIN_PRELOADING, desc.getName(), desc.getVersion());
        }
        //End
        //Check dependencies
        Set<String> errorPlugins = checkDependencies(preloadingPlugins);
        errorPlugins.forEach(preloadingPlugins::remove);
        preloadingPlugins.forEach(this::loadPluginWithDependency);
        if (failedToLoads.size() != 0) {
            logger.error(I18N.get().exceptions.PLUGIN_FAILED_TO_LOAD, Arrays.toString(failedToLoads.toArray()));
        }
    }

    /**
     * Return the names of plugins declaring a hard dependency that is not
     * present among the pre-loaded descriptions.
     */
    public Set<String> checkDependencies(Map<String, PluginDescription> preloadingPlugins) {
        Set<String> errorPlugins = new HashSet<>();
        Iterator<PluginDescription> iter = preloadingPlugins.values().iterator();
        while (iter.hasNext()) {
            PluginDescription desc = iter.next();
            if (desc.getDependencies() == null) {
                continue;
            }
            for (String dependency : desc.getDependencies()) {
                if (!preloadingPlugins.containsKey(dependency)) {
                    logger.warn(I18N.get().exceptions.PLUGIN_DEPENDENCY_MISSING, dependency, desc.getName());
                    //preloadingPlugins.remove(desc.getName());
                    errorPlugins.add(desc.getName());
                    break;
                }
            }
        }
        return errorPlugins;
    }

    /** Find the first registered loader whose file-name pattern matches. */
    private PluginLoader fileToLoader(File file) {
        for (PluginLoader pluginLoader : pluginLoaders) {
            if (file.getName().matches(pluginLoader.getFilePattern())) {
                return pluginLoader;
            }
        }
        return null;
    }

    /** Entry point for loading one plugin with a fresh dependency stack. */
    private boolean loadPluginWithDependency(String name, PluginDescription plugin) {
        return loadPluginWithDependency(name, plugin, new Stack<>());
    }

    /** Load a single plugin file via its matching loader (no dependency handling). */
    public Plugin loadPlugin(File file) throws InvalidPluginException, DependencyMissingException {
        PluginLoader loader = fileToLoader(file);
        if (loader == null) {
            return null;
        }
        return loader.loadPlugin(file);
    }

    public Collection<Plugin> getPlugins() {
        return pluginMap.values();
    }

    /**
     * Whether the named plugin is currently loaded.
     *
     * @param name plugin name
     * @return true when present in the map and flagged loaded
     */
    public boolean isPluginLoaded(String name) {
        return pluginMap.get(name) != null && pluginMap.get(name).isLoaded();
    }

    public Plugin getPlugin(String name) {
        return pluginMap.get(name);
    }

    /**
     * Recursively load {@code name} after its (soft) dependencies, using
     * {@code loadStack} to detect dependency cycles.
     */
    @SneakyThrows
    private boolean loadPluginWithDependency(String name, PluginDescription plugin, Stack<String> loadStack) {
        if (failedToLoads.contains(name)) {
            return false;
        }
        // Already on the stack -> dependency loop.
        if (loadStack.search(name) != -1) {
            failedToLoads.add(name);
            logger.error(I18N.get().exceptions.PLUGIN_DEPEND_LOOP, name, loadStack.pop());
            logger.error("Plugin Dependency Stack: {}", Arrays.toString(loadStack.toArray()));
            return false;
        }
        loadStack.push(name);
        if (isPluginLoaded(name)) {
            return false;
        }
        if (plugin.getDependencies() != null) {
            for (String dep : plugin.getDependencies()) {
                if (!isPluginLoaded(dep)) {
                    // NOTE(review): this recursion passes the PARENT's
                    // description for the dependency name, so the dependency's
                    // own file/dependency list is never consulted — looks like
                    // a bug; confirm against the plugin loading design.
                    if (!loadPluginWithDependency(dep, plugin, loadStack)) {
                        failedToLoads.add(dep);
                        return false;
                    }
                }
            }
        }
        if (plugin.getSoftDependencies() != null) {
            for (String dep : plugin.getSoftDependencies()) {
                if (!isPluginLoaded(dep)) {
                    if (!loadPluginWithDependency(dep, plugin, loadStack)) {
                        failedToLoads.add(dep);
                        // NOTE(review): returning true here skips loading THIS
                        // plugin when a soft dependency fails — presumably it
                        // should continue instead; confirm intended behavior.
                        return true;
                    }
                }
            }
        }
        try {
            Plugin p = loadPlugin(plugin.getPluginFile());
            if (p == null) {
                return false;
            }
            p.onEnable();
            pluginMap.put(plugin.getName(), p);
            return true;
        } catch (DependencyMissingException e) {
            logger.error("Unknown Error!", e);
            return false;
        }
    }

    /**
     * Disable and unload one plugin, saving its config if present. The map
     * entry is nulled (not removed) so iteration elsewhere stays safe.
     */
    public boolean unloadPlugin(String name) {
        if (!isPluginLoaded(name)) {
            return false;
        }
        Plugin plugin = getPlugin(name);
        plugin.setLoaded(false);
        plugin.onDisable();
        if (plugin.getConfig() != null) plugin.getConfig().saveConfig();
        pluginMap.put(name, null);
        return true;
    }

    /** Unload every plugin, then clear the registry. */
    public void unloadPlugins() {
        pluginMap.keySet().forEach(this::unloadPlugin);
        pluginMap.clear();
    }
}
|
import json  # NOTE(review): currently unused — presumably for the JSON responses below
import web

# web.py routing table: URL path -> handler class name.
urls = (
    '/accounts', 'Accounts'
)
app = web.application(urls, globals())

class Accounts:
    """REST handler for the /accounts collection (method bodies are stubs)."""
    def GET(self):
        # Retrieve list of all accounts
        # Return response as application/json
        ...
    def POST(self):
        # Create a new account
        # Return response as application/json
        ...
    def PUT(self):
        # Update an existing account
        # Return response as application/json
        ...
    def DELETE(self):
        # Delete an existing account
        # Return response as application/json
        ...

if __name__ == '__main__':
    app.run()
import { UIPersistenceDirective } from './ui-persistence.directive';
import { TestBed } from '@angular/core/testing';
import { UIPersistenceService } from '../services/ui-persistence.service';

describe('UIPersistenceDirective', () => {
  // Service instance resolved from the (empty) testing module before each test.
  var persistenceService = null;
  // NOTE(review): TestBed.get is deprecated in favour of TestBed.inject in
  // modern Angular — confirm the project's Angular version before migrating.
  beforeEach(() => { TestBed.configureTestingModule({}); persistenceService = TestBed.get(UIPersistenceService) });

  // Smoke test: the directive can be constructed with the injected service.
  it('should create an instance', () => {
    const directive = new UIPersistenceDirective(persistenceService);
    expect(directive).toBeTruthy();
  });
});
|
module.exports = (function(app) {
const request = require('request');
let q = require('q');
// local cache
let currency = [];
let lastUpdated = null;
api = {
getAll: getAll,
getById: getById
}
return api;
function getAll() {
const def = q.defer();
if (currency.length == 0 || (lastUpdated - new Date()) > 60000) {
request('https://api.coinmarketcap.com/v1/ticker/', {
json: true
}, (err, res, body) => {
if (err) {
def.reject(currency);
}
currency = body.map((i) => {
return {
id: i.id,
name: i.name,
symbol: i.symbol,
rank: i.rank,
value: i.price_usd
};
});
lastUpdated = new Date();
console.log("Currency updated");
def.resolve(currency);
});
} else {
def.resolve(currency);
}
return def.promise;
}
function getById(id) {
const def = q.defer();
getAll().then((currency) => {
let thisCurrency = currency.find((i) => {
return i.id == id;
});
def.resolve(thisCurrency);
}, (err) => {
def.reject(err);
})
return def.promise;
}
})(); |
#!/bin/bash
# Author: yeho <lj2007331 AT gmail.com>
# BLOG: https://linuxeye.com
#
# Notes: OneinStack for CentOS/RedHat 7+ Debian 8+ and Ubuntu 16+
#
# Project home page:
# https://oneinstack.com
# https://github.com/oneinstack/oneinstack
Install_PHP53() {
pushd ${oneinstack_dir}/src > /dev/null
if [ -e "${apache_install_dir}/bin/httpd" ];then
[ "$(${apache_install_dir}/bin/httpd -v | awk -F'.' /version/'{print $2}')" == '4' ] && Apache_main_ver=24
[ "$(${apache_install_dir}/bin/httpd -v | awk -F'.' /version/'{print $2}')" == '2' ] && Apache_main_ver=22
fi
if [ ! -e "/usr/local/lib/libiconv.la" ]; then
tar xzf libiconv-${libiconv_ver}.tar.gz
pushd libiconv-${libiconv_ver} > /dev/null
./configure
make -j ${THREAD} && make install
popd > /dev/null
rm -rf libiconv-${libiconv_ver}
fi
if [ ! -e "${curl_install_dir}/lib/libcurl.la" ]; then
tar xzf curl-${curl_ver}.tar.gz
pushd curl-${curl_ver} > /dev/null
[ "${Debian_ver}" == '8' ] && apt-get -y remove zlib1g-dev
./configure --prefix=${curl_install_dir} --with-ssl=${openssl_install_dir}
make -j ${THREAD} && make install
[ "${Debian_ver}" == '8' ] && apt-get -y install libc-client2007e-dev libglib2.0-dev libpng12-dev libssl-dev libzip-dev zlib1g-dev
popd > /dev/null
rm -rf curl-${curl_ver}
fi
if [ ! -e "${freetype_install_dir}/lib/libfreetype.la" ]; then
tar xzf freetype-${freetype_ver}.tar.gz
pushd freetype-${freetype_ver} > /dev/null
./configure --prefix=${freetype_install_dir} --enable-freetype-config
make -j ${THREAD} && make install
ln -sf ${freetype_install_dir}/include/freetype2/* /usr/include/
[ -d /usr/lib/pkgconfig ] && /bin/cp ${freetype_install_dir}/lib/pkgconfig/freetype2.pc /usr/lib/pkgconfig/
popd > /dev/null
rm -rf freetype-${freetype_ver}
fi
if [ ! -e "/usr/local/bin/libmcrypt-config" -a ! -e "/usr/bin/libmcrypt-config" ]; then
tar xzf libmcrypt-${libmcrypt_ver}.tar.gz
pushd libmcrypt-${libmcrypt_ver} > /dev/null
./configure
make -j ${THREAD} && make install
ldconfig
pushd libltdl > /dev/null
./configure --enable-ltdl-install
make -j ${THREAD} && make install
popd > /dev/null
popd > /dev/null
rm -rf libmcrypt-${libmcrypt_ver}
fi
if [ ! -e "/usr/local/include/mhash.h" -a ! -e "/usr/include/mhash.h" ]; then
tar xzf mhash-${mhash_ver}.tar.gz
pushd mhash-${mhash_ver} > /dev/null
./configure
make -j ${THREAD} && make install
popd > /dev/null
rm -rf mhash-${mhash_ver}
fi
[ -z "`grep /usr/local/lib /etc/ld.so.conf.d/*.conf`" ] && echo '/usr/local/lib' > /etc/ld.so.conf.d/local.conf
ldconfig
if [ "${PM}" == 'yum' ]; then
[ ! -e "/usr/bin/libmcrypt-config" ] && ln -s /usr/local/bin/libmcrypt-config /usr/bin/libmcrypt-config
[ ! -e "/lib64/libpcre.so.1" ] && ln -s /lib64/libpcre.so.0.0.1 /lib64/libpcre.so.1
[ ! -e "/usr/lib/libc-client.so" ] && ln -s /usr/lib64/libc-client.so /usr/lib/libc-client.so
fi
if [ ! -e "/usr/local/bin/mcrypt" -a ! -e "/usr/bin/mcrypt" ]; then
tar xzf mcrypt-${mcrypt_ver}.tar.gz
pushd mcrypt-${mcrypt_ver} > /dev/null
ldconfig
./configure
make -j ${THREAD} && make install
popd > /dev/null
rm -rf mcrypt-${mcrypt_ver}
fi
id -g ${run_group} >/dev/null 2>&1
[ $? -ne 0 ] && groupadd ${run_group}
id -u ${run_user} >/dev/null 2>&1
[ $? -ne 0 ] && useradd -g ${run_group} -M -s /sbin/nologin ${run_user}
tar xzf php-${php53_ver}.tar.gz
patch -d php-${php53_ver} -p0 < fpm-race-condition.patch
pushd php-${php53_ver} > /dev/null
patch -p1 < ../php5.3patch
patch -p1 < ../debian_patches_disable_SSLv2_for_openssl_1_0_0.patch
make clean
[ ! -d "${php_install_dir}" ] && mkdir -p ${php_install_dir}
{ [ ${Debian_ver} -ge 10 >/dev/null 2>&1 ] || [ ${Ubuntu_ver} -ge 19 >/dev/null 2>&1 ]; } || intl_modules_options='--enable-intl'
if [ "${Apache_main_ver}" == '22' ] || [ "${apache_mode_option}" == '2' ]; then
./configure --prefix=${php_install_dir} --with-config-file-path=${php_install_dir}/etc \
--with-config-file-scan-dir=${php_install_dir}/etc/php.d \
--with-apxs2=${apache_install_dir}/bin/apxs --disable-fileinfo \
--with-mysql=mysqlnd --with-mysqli=mysqlnd --with-pdo-mysql=mysqlnd \
--with-iconv-dir=/usr/local --with-freetype-dir=${freetype_install_dir} --with-jpeg-dir --with-png-dir --with-zlib \
--with-libxml-dir=/usr --enable-xml --disable-rpath --enable-bcmath --enable-shmop --enable-exif \
--enable-sysvsem --enable-inline-optimization --with-curl=${curl_install_dir} --enable-mbregex \
--enable-mbstring --with-mcrypt --with-gd --enable-gd-native-ttf --with-openssl=${openssl_install_dir} \
--with-mhash --enable-pcntl --enable-sockets --with-xmlrpc --enable-ftp --with-xsl ${intl_modules_options} \
--with-gettext --enable-zip --enable-soap --disable-debug ${php_modules_options}
else
./configure --prefix=${php_install_dir} --with-config-file-path=${php_install_dir}/etc \
--with-config-file-scan-dir=${php_install_dir}/etc/php.d \
--with-fpm-user=${run_user} --with-fpm-group=${run_group} --enable-fpm --disable-fileinfo \
--with-mysql=mysqlnd --with-mysqli=mysqlnd --with-pdo-mysql=mysqlnd \
--with-iconv-dir=/usr/local --with-freetype-dir=${freetype_install_dir} --with-jpeg-dir --with-png-dir --with-zlib \
--with-libxml-dir=/usr --enable-xml --disable-rpath --enable-bcmath --enable-shmop --enable-exif \
--enable-sysvsem --enable-inline-optimization --with-curl=${curl_install_dir} --enable-mbregex \
--enable-mbstring --with-mcrypt --with-gd --enable-gd-native-ttf --with-openssl=${openssl_install_dir} \
--with-mhash --enable-pcntl --enable-sockets --with-xmlrpc --enable-ftp --with-xsl ${intl_modules_options} \
--with-gettext --enable-zip --enable-soap --disable-debug ${php_modules_options}
fi
{ [ ${Debian_ver} -ge 10 >/dev/null 2>&1 ] || [ ${Ubuntu_ver} -ge 19 >/dev/null 2>&1 ]; } || sed -i '/^BUILD_/ s/\$(CC)/\$(CXX)/g' Makefile
make ZEND_EXTRA_LIBS='-liconv' -j ${THREAD}
make install
if [ -e "${php_install_dir}/bin/phpize" ]; then
[ ! -e "${php_install_dir}/etc/php.d" ] && mkdir -p ${php_install_dir}/etc/php.d
echo "${CSUCCESS}PHP installed successfully! ${CEND}"
else
rm -rf ${php_install_dir}
echo "${CFAILURE}PHP install failed, Please Contact the author! ${CEND}"
kill -9 $$; exit 1;
fi
[ -z "`grep ^'export PATH=' /etc/profile`" ] && echo "export PATH=${php_install_dir}/bin:\$PATH" >> /etc/profile
[ -n "`grep ^'export PATH=' /etc/profile`" -a -z "`grep ${php_install_dir} /etc/profile`" ] && sed -i "s@^export PATH=\(.*\)@export PATH=${php_install_dir}/bin:\1@" /etc/profile
. /etc/profile
# wget -c http://pear.php.net/go-pear.phar
# ${php_install_dir}/bin/php go-pear.phar
/bin/cp php.ini-production ${php_install_dir}/etc/php.ini
sed -i "s@^memory_limit.*@memory_limit = ${Memory_limit}M@" ${php_install_dir}/etc/php.ini
sed -i 's@^output_buffering =@output_buffering = On\noutput_buffering =@' ${php_install_dir}/etc/php.ini
#sed -i 's@^;cgi.fix_pathinfo.*@cgi.fix_pathinfo=0@' ${php_install_dir}/etc/php.ini
sed -i 's@^short_open_tag = Off@short_open_tag = On@' ${php_install_dir}/etc/php.ini
sed -i 's@^expose_php = On@expose_php = Off@' ${php_install_dir}/etc/php.ini
sed -i 's@^request_order.*@request_order = "CGP"@' ${php_install_dir}/etc/php.ini
sed -i "s@^;date.timezone.*@date.timezone = ${timezone}@" ${php_install_dir}/etc/php.ini
sed -i 's@^post_max_size.*@post_max_size = 100M@' ${php_install_dir}/etc/php.ini
sed -i 's@^upload_max_filesize.*@upload_max_filesize = 50M@' ${php_install_dir}/etc/php.ini
sed -i 's@^max_execution_time.*@max_execution_time = 5@' ${php_install_dir}/etc/php.ini
sed -i 's@^disable_functions.*@disable_functions = passthru,exec,system,chroot,chgrp,chown,shell_exec,proc_open,proc_get_status,ini_alter,ini_restore,dl,readlink,symlink,popepassthru,stream_socket_server,fsocket,popen@' ${php_install_dir}/etc/php.ini
[ -e /usr/sbin/sendmail ] && sed -i 's@^;sendmail_path.*@sendmail_path = /usr/sbin/sendmail -t -i@' ${php_install_dir}/etc/php.ini
if [ ! -e "${apache_install_dir}/bin/apxs" -o "${Apache_main_ver}" == '24' ] && [ "${apache_mode_option}" != '2' ]; then
# php-fpm Init Script
if [ -e /bin/systemctl ]; then
/bin/cp ${oneinstack_dir}/init.d/php-fpm.service /lib/systemd/system/
sed -i "s@/usr/local/php@${php_install_dir}@g" /lib/systemd/system/php-fpm.service
systemctl enable php-fpm
else
/bin/cp sapi/fpm/init.d.php-fpm /etc/init.d/php-fpm
chmod +x /etc/init.d/php-fpm
[ "${PM}" == 'yum' ] && { chkconfig --add php-fpm; chkconfig php-fpm on; }
[ "${PM}" == 'apt-get' ] && update-rc.d php-fpm defaults
fi
cat > ${php_install_dir}/etc/php-fpm.conf <<EOF
;;;;;;;;;;;;;;;;;;;;;
; FPM Configuration ;
;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;
; Global Options ;
;;;;;;;;;;;;;;;;;;
[global]
pid = run/php-fpm.pid
error_log = log/php-fpm.log
log_level = warning
emergency_restart_threshold = 30
emergency_restart_interval = 60s
process_control_timeout = 5s
daemonize = yes
;;;;;;;;;;;;;;;;;;;;
; Pool Definitions ;
;;;;;;;;;;;;;;;;;;;;
[${run_user}]
listen = /dev/shm/php-cgi.sock
listen.backlog = -1
listen.allowed_clients = 127.0.0.1
listen.owner = ${run_user}
listen.group = ${run_group}
listen.mode = 0666
user = ${run_user}
group = ${run_group}
pm = dynamic
pm.max_children = 12
pm.start_servers = 8
pm.min_spare_servers = 6
pm.max_spare_servers = 12
pm.max_requests = 2048
pm.process_idle_timeout = 10s
request_terminate_timeout = 120
request_slowlog_timeout = 0
pm.status_path = /php-fpm_status
slowlog = var/log/slow.log
rlimit_files = 51200
rlimit_core = 0
catch_workers_output = yes
;env[HOSTNAME] = $HOSTNAME
env[PATH] = /usr/local/bin:/usr/bin:/bin
env[TMP] = /tmp
env[TMPDIR] = /tmp
env[TEMP] = /tmp
EOF
if [ $Mem -le 3000 ]; then
sed -i "s@^pm.max_children.*@pm.max_children = $(($Mem/3/20))@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.start_servers.*@pm.start_servers = $(($Mem/3/30))@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.min_spare_servers.*@pm.min_spare_servers = $(($Mem/3/40))@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.max_spare_servers.*@pm.max_spare_servers = $(($Mem/3/20))@" ${php_install_dir}/etc/php-fpm.conf
elif [ $Mem -gt 3000 -a $Mem -le 4500 ]; then
sed -i "s@^pm.max_children.*@pm.max_children = 50@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.start_servers.*@pm.start_servers = 30@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.min_spare_servers.*@pm.min_spare_servers = 20@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.max_spare_servers.*@pm.max_spare_servers = 50@" ${php_install_dir}/etc/php-fpm.conf
elif [ $Mem -gt 4500 -a $Mem -le 6500 ]; then
sed -i "s@^pm.max_children.*@pm.max_children = 60@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.start_servers.*@pm.start_servers = 40@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.min_spare_servers.*@pm.min_spare_servers = 30@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.max_spare_servers.*@pm.max_spare_servers = 60@" ${php_install_dir}/etc/php-fpm.conf
elif [ $Mem -gt 6500 -a $Mem -le 8500 ]; then
sed -i "s@^pm.max_children.*@pm.max_children = 70@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.start_servers.*@pm.start_servers = 50@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.min_spare_servers.*@pm.min_spare_servers = 40@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.max_spare_servers.*@pm.max_spare_servers = 70@" ${php_install_dir}/etc/php-fpm.conf
elif [ $Mem -gt 8500 ]; then
sed -i "s@^pm.max_children.*@pm.max_children = 80@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.start_servers.*@pm.start_servers = 60@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.min_spare_servers.*@pm.min_spare_servers = 50@" ${php_install_dir}/etc/php-fpm.conf
sed -i "s@^pm.max_spare_servers.*@pm.max_spare_servers = 80@" ${php_install_dir}/etc/php-fpm.conf
fi
service php-fpm start
elif [ "${Apache_main_ver}" == '22' ] || [ "${apache_mode_option}" == '2' ]; then
service httpd restart
fi
popd > /dev/null
[ -e "${php_install_dir}/bin/phpize" ] && rm -rf php-${php53_ver}
popd > /dev/null
}
|
#!/bin/bash
# Run example.sql against the local Postgres instance on port 5000,
# authenticating as the default superuser via the PGPASSWORD env var.
PGPASSWORD=postgres psql -h 127.0.0.1 -p 5000 -d postgres -U postgres \
	-f example.sql
|
#!/usr/bin/env bash
set -ex
run_step() {
	# Run one db_bench phase named by $1.
	# Requires: ROCKSDB_CONF, CACHE_SIZE, DB_BENCH; <phase>_flags.txt must
	# already contain the base flags (copied from common_flags.txt earlier).
	if [ -z "$1" ]; then
		echo "run_step called with no parameter" >&2
		exit 1
	fi
	local phase=$1

	# Append the SPDK-specific flags to this phase's flag file.
	{
		echo "--spdk=$ROCKSDB_CONF"
		echo "--spdk_bdev=Nvme0n1"
		echo "--spdk_cache_size=$CACHE_SIZE"
	} >> "${phase}_flags.txt"

	echo -n "Start ${phase} test phase..."
	# Pin db_bench to the first 8 CPUs; capture timing and output per phase.
	/usr/bin/time taskset 0xFF "$DB_BENCH" --flagfile="${phase}_flags.txt" &> "${phase}_db_bench.txt"
	echo "done."
}
run_bsdump() {
	# Dump blobstore metadata for post-mortem debugging into bsdump.txt.
	# Quote the path variables so directories containing spaces do not break.
	"$rootdir/examples/blob/cli/blobcli" -c "$ROCKSDB_CONF" -b Nvme0n1 -D &> bsdump.txt
}
# Resolve the test directory and SPDK repository root from this script's path.
testdir=$(readlink -f "$(dirname "$0")")
rootdir=$(readlink -f "$testdir/../../..")
source "$rootdir/test/common/autotest_common.sh"

DB_BENCH_DIR=/usr/src/rocksdb
DB_BENCH=$DB_BENCH_DIR/db_bench
ROCKSDB_CONF=$testdir/rocksdb.conf

# The RocksDB sources are an optional prerequisite; absence is not a failure.
if [ ! -e "$DB_BENCH_DIR" ]; then
	echo "$DB_BENCH_DIR does not exist, skipping rocksdb tests"
	exit 0
fi

timing_enter rocksdb

# Build db_bench from a clean tree against the SPDK BlobFS environment.
timing_enter db_bench_build
pushd "$DB_BENCH_DIR"
git clean -x -f -d
$MAKE db_bench $MAKEFLAGS $MAKECONFIG DEBUG_LEVEL=0 SPDK_DIR="$rootdir"
popd
timing_exit db_bench_build

# Generate the NVMe bdev configuration shared by all phases.
"$rootdir/scripts/gen_nvme.sh" > "$ROCKSDB_CONF"

# On interrupt or failure, dump blobstore state and remove the generated config.
trap 'run_bsdump; rm -f $ROCKSDB_CONF; exit 1' SIGINT SIGTERM EXIT

timing_enter mkfs
"$rootdir/test/blobfs/mkfs/mkfs" "$ROCKSDB_CONF" Nvme0n1
timing_exit mkfs

# mkdir -p: do not fail if a previous run already created the results dir.
mkdir -p "$output_dir/rocksdb"
RESULTS_DIR=$output_dir/rocksdb
CACHE_SIZE=4096

# Nightly runs use a larger key count and a longer duration per phase.
if [ $RUN_NIGHTLY_FAILING -eq 1 ]; then
	DURATION=60
	NUM_KEYS=100000000
else
	DURATION=20
	NUM_KEYS=20000000
fi
cd $RESULTS_DIR

# Each phase's flag file starts from the shared base flags, then gets its
# phase-specific flags appended via a here-document.
cp $testdir/common_flags.txt insert_flags.txt
cat >> insert_flags.txt << EOF
--benchmarks=fillseq
--threads=1
--disable_wal=1
--use_existing_db=0
--num=$NUM_KEYS
EOF

cp $testdir/common_flags.txt randread_flags.txt
cat >> randread_flags.txt << EOF
--benchmarks=readrandom
--threads=16
--duration=$DURATION
--disable_wal=1
--use_existing_db=1
--num=$NUM_KEYS
EOF

cp $testdir/common_flags.txt overwrite_flags.txt
cat >> overwrite_flags.txt << EOF
--benchmarks=overwrite
--threads=1
--duration=$DURATION
--disable_wal=1
--use_existing_db=1
--num=$NUM_KEYS
EOF

cp $testdir/common_flags.txt readwrite_flags.txt
cat >> readwrite_flags.txt << EOF
--benchmarks=readwhilewriting
--threads=4
--duration=$DURATION
--disable_wal=1
--use_existing_db=1
--num=$NUM_KEYS
EOF

cp $testdir/common_flags.txt writesync_flags.txt
cat >> writesync_flags.txt << EOF
--benchmarks=overwrite
--threads=1
--duration=$DURATION
--disable_wal=0
--use_existing_db=1
--sync=1
--num=$NUM_KEYS
EOF
# Run every benchmark phase in order, timing each one individually.
for phase in insert overwrite readwrite writesync randread; do
	timing_enter rocksdb_${phase}
	run_step ${phase}
	timing_exit rocksdb_${phase}
done

# Normal completion: clear the failure trap, then dump and clean up anyway.
trap - SIGINT SIGTERM EXIT
run_bsdump
rm -f $ROCKSDB_CONF
report_test_completion "blobfs"
timing_exit rocksdb
|
<reponame>arifBurakDemiray/image-processing
//<NAME>
#include "image_pyr.hpp"
#include <cstdlib>
#include <iostream>
#include <cmath>
using std::cerr;
using std::endl;
using std::clog;
using std::sqrt;
using ceng391::Image;
using std::string;
using std::to_string;
namespace ceng391{
/* Builds a pyramid container over a private copy of base_img.
 * n_levels is the number of levels requested ABOVE the base; one extra slot
 * (level 0) is reserved internally for the base image itself, hence the +1. */
ImagePyr::ImagePyr(Image *base_img, int n_levels, float sigma)
	: m_n_levels(n_levels+1),m_base_img(base_img) ,m_sigma(sigma)
{
	// Only single-channel images are processed; convert in place if needed.
	// NOTE(review): this mutates the caller's image — confirm that is intended.
	if(base_img->n_ch()!=1){
		base_img->to_grayscale();
	}
	// Replace m_base_img with a pixel-for-pixel copy of the input image.
	const uchar *data = base_img->data();
	m_base_img = Image::new_gray(base_img->w(),base_img->h());
	uchar *cdata = m_base_img->data();
	for(int i=0;i < m_base_img->h()*m_base_img->w();i++){
		cdata[i] = data[i];
	}
	// Clamp to at least one level (the base image itself).
	if (m_n_levels < 1){
		m_n_levels = 1;}
	// Raw storage for the level array; levels >= 1 are constructed later with
	// placement new in downsample() and destroyed explicitly in ~ImagePyr().
	m_levels = (Image*)malloc(sizeof(Image)*m_n_levels);
	// NOTE(review): this assigns into uninitialized malloc'd storage via
	// operator=; if Image has a non-trivial assignment operator this is
	// undefined behavior — placement-new copy construction would be safer.
	m_levels[0] = *m_base_img;
}
/* Releases everything the constructor allocated: the private base-image copy
 * and every pyramid level constructed with placement new. */
ImagePyr::~ImagePyr()
{
	delete m_base_img;
	// Levels 1..n-1 were placement-new constructed into raw storage, so their
	// destructors must be invoked explicitly before freeing the buffer.
	// NOTE(review): level 0 is a shallow copy of *m_base_img and is never
	// destroyed here — confirm whether it owns its pixel buffer.
	for(int i=1;i<m_n_levels;i++){
		m_levels[i].~Image();
	}
	free(m_levels);
}
/* Returns a freshly allocated grayscale clone of src (pixel-for-pixel copy).
 * The caller owns the returned image and must delete it. */
static Image *copy(Image *src)
{
	Image *clone = Image::new_gray(src->w(), src->h());
	const uchar *from = src->data();
	uchar *to = clone->data();
	const int n_pixels = src->w() * src->h();
	for (int px = 0; px < n_pixels; ++px) {
		to[px] = from[px];
	}
	return clone;
}
/* Creates pyramid level `level` by Gaussian-smoothing level-1 and then taking
 * every second pixel in both dimensions (factor-2 decimation).
 * If the previous level cannot be halved any further, the pyramid is
 * truncated and m_n_levels is updated to the usable count. */
void ImagePyr::downsample(int level){
	Image *before = this->level(level-1); // source: the previous level
	float sigma = this->sig(); // smoothing strength used for anti-aliasing
	int b_width = before->w();
	int b_height = before->h();
	if(b_width < 1 || b_height < 1){
		cerr << "[ERROR][CENG391::ImagePyr] There is no image at the before level of this level." << endl;
		return;
	}
	// Halve the dimensions; stop once the result would fall below 1x1.
	int width = b_width/2;
	int height = b_height/2;
	if(height<1 || width<1){
		cerr << "[ERROR][CENG391::ImagePyr] Program can not downsample anymore. The size of image is below 1x1." << endl;
		clog << "New number of levels is " << level-1<< endl;
		// Truncate: index 0 holds the base image, so when level `level`
		// cannot be built the usable level count is exactly `level`.
		m_n_levels = level;
		return;
	}
	// Smooth a copy so the previous level's pixels are not altered in place.
	Image *temp_buffer = copy(before);
	temp_buffer->smooth(sigma);
	// Construct the new level directly in its pre-allocated slot.
	Image *temp =new (this->level(level)) Image(width,height,1);
	uchar *data_temp = temp_buffer->data(); // smoothed source pixels
	uchar *data_b = temp->data(); // destination pixels
	for(int y=0;y < height ; y++){
		int g_y_index = 2*y; // sample the even rows of the source
		if(g_y_index < b_height){ // stay inside the source image
			for(int x=0; x < width; x++){
				int g_x_index = 2*x; // sample the even columns
				if(g_x_index < b_width)
					data_b[y*width + x] = data_temp[g_y_index*b_width + g_x_index];
			}
		}
	}
	clog <<"Level "<< level << " is created with the size of (w x h) "<<width<<"x"<<height<<endl;
	delete temp_buffer; // release the temporary smoothed copy
}
// Builds every pyramid level above the base image (index 0).
void ImagePyr::create_pyramids(){
	// Scale sigma by sqrt(3) before the first downsample to double the
	// effective blur and reduce aliasing in the lower-resolution levels.
	this->m_sigma *= sqrt(3);
	for (int lvl = 1; lvl < m_n_levels; ++lvl) {
		downsample(lvl);
	}
}
// Saves every pyramid level to /tmp as pyr_level_<i>.png.
void ImagePyr::save_pyramids(){
	for (int i = 0; i < m_n_levels; ++i) {
		// Build the output name fresh for each level.
		string filename = "/tmp/pyr_level_" + to_string(i) + ".png";
		Image *img = level(i);
		img->save_as_png(filename);
		clog << "Saved level " << i << " to " << filename << endl;
	}
}
/* Renders all levels side by side, bottom-aligned, into a single image twice
 * the base width and saves it as /tmp/pyramid.png. */
void ImagePyr::pyramid(){
	Image *img = this->level(0); // start with the base image
	// The halving level widths sum to less than 2x the base width, so a
	// 2w x h canvas always fits every level.
	Image *pyramid = Image::new_gray(img->w()*2,img->h());
	clog << "Created a pyramid with the size of (w x h) "<<img->w()*2<<"x"<<img->h()<<endl;
	pyramid->set(0); // zero-fill so the unused area is well-defined on save
	int base_level = 0; // running x-offset where the current level is drawn
	int alt_level = img->h(); // shared bottom edge for all levels
	// The loop runs m_n_levels times; `img` is advanced at the END of each
	// pass, so pass i copies level i-1. On the final pass this->level(i)
	// evaluates one slot past the last level but the result is never used.
	// NOTE(review): confirm level() only does pointer arithmetic there and
	// does not dereference that one-past-the-end slot.
	for(int i=1;i<m_n_levels+1;i++){
		for(int y=0;y<img->h();y++){
			for(int x=0;x<img->w();x++){
				// Bottom-align: shift rows down by (alt_level - img->h()).
				pyramid->data()[ (y + alt_level - img->h())*pyramid->w() + x + base_level] = img->data()[y*img->w() + x];
			}
		}
		base_level += img->w(); // next level starts where this one ended
		img = this->level(i); // advance to the next level
	}
	clog << "Saved pyramid to /tmp/pyramid.png"<<endl;
	pyramid->save_as_png("/tmp/pyramid.png");
	delete pyramid; // the composite is only needed on disk
}
} |
<gh_stars>0
import React from 'react'
import styled from 'styled-components'
import { Layout } from '../layout'
import { media } from '../styles'
import { Header, About, Contact, Portfolio } from '../components'
// Full-width hero wrapper: overlays the pulsing call-to-action button on the
// banner image and shrinks the button on phone-sized screens.
const StyledStaticHeroBanner = styled.div`
  width: 100%;
  position: relative;
  .static-button {
    position: absolute;
    left: 0px;
    right: 0px;
    margin: auto;
    bottom: 40px;
    font-size: 30px;
    padding: 10px 45px;
    background: #000000de;
    color: white;
    font-weight: bold;
    border: none;
    animation: example 700ms ease infinite;
    @keyframes example {
      0% {
        transform: scale(0.99);
      }
      50% {
        transform: scale(1.05);
      }
      100% {
        transform: scale(0.99);
      }
    }
    ${media.phoneOrSmaller`
      bottom: 18px;
      font-size: 12px;
      padding: 7px 45px;
    `}
  }
`
// Banner image stretched to the container's full width.
const StyledStaticBanner = styled.img`
  width: 100%;
`
const StaticHeroBanner = props => {
return (
<StyledStaticHeroBanner>
<a href="/arts">
<button className="static-button">GO MY SKETCH</button>
</a>
<StyledStaticBanner src="//ntryogep.sirv.com/pencil-sketch/banner_01.jpg" />
</StyledStaticHeroBanner>
)
}
const HomePage = props => {
const { title = 'BK' } = props
return (
<Layout title={title}>
<Header />
<StaticHeroBanner />
<Portfolio />
<About />
<Contact />
</Layout>
)
}
export default HomePage
|
import re
from typing import Dict, List, Mapping
from gwv.dump import Dump
import gwv.filters as filters
from gwv.helper import GWGroupLazyLoader
from gwv.helper import load_package_data
from gwv.kagedata import KageData
from gwv.validators import Validator
from gwv.validators import ErrorCodes
# Machine-readable error codes emitted by NamingValidator, with English
# glosses of the original Japanese descriptions.
error_codes = ErrorCodes(
    NAMING_RULE_VIOLATION="0",  # naming-rule violation
    INVALID_IDS="1",  # invalid IDS (Ideographic Description Sequence)
    PROHIBITED_GLYPH_NAME="2",  # prohibited glyph name
    ENCODED_CDP_IN_IDS="3",  # CDP character already encoded in UCS
    DEPRECATED_NAMING_RULE="4",  # naming rule scheduled for removal
)
class NamingRules:
    """One group of naming rules: literal glyph names plus regex patterns.

    A name matches when it appears verbatim in the ``string`` set or when
    any of the compiled ``regex`` patterns finds a match within it.
    """

    def __init__(self, data: Mapping[str, List[str]]):
        # Compile the patterns once up front; literal names go into a set
        # for O(1) membership checks.
        self.regex = [re.compile(pattern) for pattern in data.get("regex", [])]
        self.string = set(data.get("string", []))

    def match(self, name: str):
        """Return True if `name` matches a literal entry or any pattern."""
        if name in self.string:
            return True
        return any(pattern.search(name) for pattern in self.regex)
def get_naming_rules():
    """Load data/naming.yaml and wrap each rule group in a NamingRules."""
    naming_data: Dict[str, Dict[str, List[str]]] = \
        load_package_data("data/naming.yaml")
    matchers = {}
    for group_name, group_data in naming_data.items():
        matchers[group_name] = NamingRules(group_data)
    return matchers
def get_cdp_dict():
    """Build the CDP -> UCS lookup from the wiki group listing the pairs.

    The group data is a flat alternating sequence [cdp0, ucs0, cdp1, ucs1,
    ...]; pair up consecutive elements into a dict.
    """
    entries = list(
        GWGroupLazyLoader("UCSで符号化されたCDP外字", isset=False).get_data())
    return dict(zip(entries[0::2], entries[1::2]))
cdp_dict = get_cdp_dict()
rules = get_naming_rules()
_re_var = re.compile(r"-(var|itaiji)-\d{3}$")
_re_henka = re.compile(r"-\d{2}$")
_re_gl_glyph = re.compile(
r"^(j78|j83|j90|jsp|jx1-200[04]|jx2|k0|g0|c[0-9a-f])-([\da-f]{4})$")
_re_valid_gl = re.compile(r"(2[1-9a-f]|[3-6][\da-f]|7[\da-e]){2}")
_re_cdp = re.compile(r"(?:^|-)(cdp([on]?)-([\da-f]{4}))(?=-|$)")
_re_valid_cdp = re.compile(
r"(8[1-9a-f]|9[\da-f]|a0|c[67])(a[1-9a-f]|[4-6b-e][\da-f]|[7f][\da-e])")
_re_ids_head = re.compile(r"u2ff[\dab]-")
_re_idc_2 = re.compile(r"(^|-)u2ff[014-9ab](?=-|$)")
_re_idc_3 = re.compile(r"(^|-)u2ff[23](?=-|$)")
_re_kanji = re.compile(
r"""-(?:
u[23]?[\da-f]{4}(?:-u(?:e01[\da-f]{2}|fe0[\da-f]))?|
cdp[on]?-[\da-f]{4}
)(?=-|$)""",
re.X)
_re_ids_kanji = re.compile(r"2-漢-漢|3-漢-漢-漢")
_re_ucs = re.compile(r"(^|-)(u[23]?[\da-f]{4})(?=-|$)")
class NamingValidator(Validator):
    """Validates glyph names against the project's naming rules."""

    name = "naming"

    @filters.check_only(-filters.is_of_category({"user-owned"}))
    def is_invalid(self, name: str, related: str, kage: KageData, gdata: str,
                   dump: Dump):
        """Return False when `name` is acceptable, else [error_code, *details]."""
        isHenka = False
        isVar = False
        # Strip a trailing "-var-NNN"/"-itaiji-NNN" suffix before matching.
        if _re_var.search(name):
            name = name.rsplit("-", 2)[0]
            isVar = True
        # Strip a trailing two-digit henka suffix.
        if _re_henka.search(name):
            name = name[:-3]
            isHenka = True
        if rules["dont-create"].match(name):
            return [error_codes.PROHIBITED_GLYPH_NAME]  # prohibited glyph name
        if _re_gl_glyph.match(name):
            if not _re_valid_gl.match(name[-4:]):
                # prohibited glyph name (code outside the valid GL area)
                return [error_codes.PROHIBITED_GLYPH_NAME]
        else:
            for m in _re_cdp.finditer(name):
                if not _re_valid_cdp.match(m.group(3)):
                    # prohibited glyph name (invalid CDP code)
                    return [error_codes.PROHIBITED_GLYPH_NAME]
        if _re_ids_head.match(name):
            # Reduce the IDS expression: rewrite each IDC to its arity digit,
            # collapse every operand to the placeholder 漢, then repeatedly
            # fold complete subtrees. A well-formed IDS reduces to "漢".
            idsReplacedName = name
            idsReplacedName = _re_idc_2.sub(r"\12", idsReplacedName)
            idsReplacedName = _re_idc_3.sub(r"\13", idsReplacedName)
            idsReplacedName = _re_kanji.sub("-漢", idsReplacedName)
            while _re_ids_kanji.search(idsReplacedName):
                idsReplacedName = _re_ids_kanji.sub("漢", idsReplacedName)
            if idsReplacedName != "漢":
                return [error_codes.INVALID_IDS, idsReplacedName]  # malformed IDS
            # Reject CDP components that already have a UCS encoding.
            for m in _re_cdp.finditer(name):
                cdp = m.group(1)
                cdpv = m.group(2)
                # For cdpo-/cdpn- variants, fall back to the plain cdp- key.
                if cdpv and cdp not in cdp_dict:
                    cdp = "cdp-" + cdp[-4:]
                if cdp in cdp_dict:
                    # CDP character already encoded in UCS
                    return [error_codes.ENCODED_CDP_IN_IDS, cdp, cdp_dict[cdp]]
            # Reject UCS components that carry no identifying information.
            for m in _re_ucs.finditer(name):
                ucs = m.group(2)
                if ucs == "u3013":
                    return [error_codes.INVALID_IDS, idsReplacedName]  # geta mark 〓
                if "ue000" <= ucs <= "uf8ff":
                    return [error_codes.INVALID_IDS, idsReplacedName]  # private use area
            return False
        if rules["rule"].match(name):
            return False
        # Some rule groups only apply when no var/henka suffix was present.
        if not isVar and rules["rule-novar"].match(name):
            return False
        if not isHenka and rules["rule-nohenka"].match(name):
            return False
        if not isVar and not isHenka and \
                rules["rule-novar-nohenka"].match(name):
            return False
        if rules["deprecated-rule"].match(name):
            return [error_codes.DEPRECATED_NAMING_RULE]  # deprecated naming rule
        return [error_codes.NAMING_RULE_VIOLATION]  # naming-rule violation
|
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Usage:
#
# ./prereqs-ubuntu.sh
#
# User must then logout and login upon completion of script
#
# Exit on any failure
set -e

# Array of supported versions
declare -a versions=('trusty' 'xenial' 'yakkety');

# check the version and extract codename of ubuntu if release codename not provided by user
if [ -z "$1" ]; then
    # Use a brace group, not a subshell: 'exit 1' inside (...) only exits the
    # subshell and the script's termination relied implicitly on 'set -e'.
    source /etc/lsb-release || \
        { echo "Error: Release information not found, run script passing Ubuntu version codename as a parameter"; exit 1; }
    CODENAME=${DISTRIB_CODENAME}
else
    CODENAME=${1}
fi

# check version is supported (quote expansions against word splitting)
if echo "${versions[@]}" | grep -q -w "${CODENAME}"; then
    echo "Installing Hyperledger Composer prereqs for Ubuntu ${CODENAME}"
else
    echo "Error: Ubuntu ${CODENAME} is not supported"
    exit 1
fi
# Update package lists (add the git-core PPA for a recent Git first)
echo "# Updating package lists"
sudo apt-add-repository -y ppa:git-core/ppa
sudo apt-get update

# Install Git
echo "# Installing Git"
sudo apt-get install -y git

# Install nvm dependencies
echo "# Installing nvm dependencies"
sudo apt-get -y install build-essential libssl-dev

# Execute nvm installation script
echo "# Executing nvm installation script"
curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.33.2/install.sh | bash

# Set up nvm environment without restarting the shell
export NVM_DIR="${HOME}/.nvm"
[ -s "${NVM_DIR}/nvm.sh" ] && . "${NVM_DIR}/nvm.sh"
[ -s "${NVM_DIR}/bash_completion" ] && . "${NVM_DIR}/bash_completion"

# Install node
echo "# Installing nodeJS"
nvm install --lts

# Use the current LTS release and make it the default for new shells
nvm use --lts
nvm alias default 'lts/*'

# Install the latest version of npm
echo "# Installing npm"
npm install npm@latest -g

# Ensure that CA certificates are installed
sudo apt-get -y install apt-transport-https ca-certificates

# Add Docker repository key to APT keychain
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -

# Update where APT will search for Docker Packages
echo "deb [arch=amd64] https://download.docker.com/linux/ubuntu ${CODENAME} stable" | \
    sudo tee /etc/apt/sources.list.d/docker.list

# Update package lists
sudo apt-get update

# Verifies APT is pulling from the correct Repository
sudo apt-cache policy docker-ce

# Install kernel packages which allows us to use aufs storage driver if V14 (trusty/utopic)
if [ "${CODENAME}" == "trusty" ]; then
    echo "# Installing required kernel packages"
    sudo apt-get -y install linux-image-extra-$(uname -r) linux-image-extra-virtual
fi

# Install Docker
echo "# Installing Docker"
sudo apt-get -y install docker-ce

# Add the invoking user to the docker group (takes effect at next login)
sudo usermod -aG docker $(whoami)
# Install kubectl (the comment previously claimed "docker compose")
echo "# Installing Kubectl"
# Fetch the latest stable kubectl for Linux amd64. The previous command
# downloaded the darwin/amd64 build (unrunnable on Ubuntu) and passed the
# conflicting -O and -o output options to curl in a single invocation.
sudo curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl"
sudo chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
# Install python v2 if required
# (disable exit-on-error: the version probe is allowed to fail)
set +e
COUNT="$(python -V 2>&1 | grep -c 2.)"
if [ ${COUNT} -ne 1 ]
then
    sudo apt-get install -y python-minimal
fi

# Install unzip, required to install hyperledger fabric.
sudo apt-get -y install unzip

# Print installation details for user
echo ''
echo 'Installation completed, versions installed are:'
echo ''
echo -n 'Node:           '
node --version
echo -n 'npm:            '
npm --version
echo -n 'Docker:         '
docker --version
echo -n 'Kubectl:         '
kubectl version
echo -n 'Python:         '
python -V

# Print reminder of need to logout in order for these changes to take effect!
echo ''
echo "Please logout then login before continuing."
|
package org.ednovo.gooru.core.api.model;
import java.io.Serializable;
import java.util.Date;
/**
 * Records a user's interaction (action and timestamp) with a single search
 * result, for indexing and activity tracking.
 */
public class SearchResultActivity implements IndexableEntry,Serializable {

	/** Serialization version for the {@link Serializable} contract. */
	private static final long serialVersionUID = -8205404722642577622L;

	/** Surrogate identifier of this activity record. */
	private long id;

	/** Identifier of the search result this activity refers to. */
	private String resultUId;

	/** The action the user performed on the result. */
	private String userAction;

	/** When the user performed the action. */
	private Date userActionTime;

	public long getId() {
		return id;
	}

	public void setId(long id) {
		this.id = id;
	}

	public String getResultUId() {
		return resultUId;
	}

	public void setResultUId(String resultUId) {
		this.resultUId = resultUId;
	}

	public String getUserAction() {
		return userAction;
	}

	public void setUserAction(String userAction) {
		this.userAction = userAction;
	}

	public Date getUserActionTime() {
		return userActionTime;
	}

	public void setUserActionTime(Date userActionTime) {
		this.userActionTime = userActionTime;
	}

	/**
	 * {@inheritDoc}
	 *
	 * <p>Not implemented for this entity; always returns {@code null}.
	 */
	@Override
	public String getEntryId() {
		return null;
	}
}
|
import { fromFederatedSDLToValidSDL } from "../src/federation.js";
import { buildSchema, graphql } from "graphql";
import { print } from "../src/errors.js";
import { makeFieldResolver } from "../src/execute.js";
import { loadString } from "../src/graphql.js";
import { findServices } from "../src/protos.js";
import { validate } from "../src/validate.js";
import { run } from "./__fixtures__/posts.js";
test("entities", async () => {
const schema = buildSchema(
fromFederatedSDLToValidSDL(
generateSdl(`#graphql
type Query {
posts: [Post] @grpc__fetch(service: POSTS, rpc: "ListPosts", dig: "posts")
}
type Post @key(fields: "id")
@grpc__fetch(
service: POSTS
rpc: "BatchGetPosts"
dig: "posts"
dataloader: {
key: "$args.id"
listArgument: "ids"
responseKey: "id"
}
) {
id: ID
title: String
}
type Author @key(fields: "id")
@grpc__fetch(
service: POSTS
rpc: "BatchGetAuthors"
dig: "authors"
dataloader: {
key: "$args.id"
listArgument: "ids"
responseKey: "id"
}
) {
id: ID
name: String
}`)
)
);
const services = findServices(schema, { cwd: process.cwd() });
const [requests, stopGrpc] = await run(50003);
const result = await graphql({
schema,
source: `{
_entities(representations: [
{ __typename: "Post" id: "2" },
{ __typename: "Author" id: "1" },
{ __typename: "Post" id: "1" },
]) {
... on Post {
id title
}
... on Author {
id name
}
}
}`,
fieldResolver: makeFieldResolver(services),
contextValue: {},
});
expect(result).toMatchInlineSnapshot(`
Object {
"data": Object {
"_entities": Array [
Object {
"id": "2",
"title": "Post 2",
},
Object {
"id": "1",
"name": "<NAME>",
},
Object {
"id": "1",
"title": "Post 1",
},
],
},
}
`);
expect(requests).toMatchInlineSnapshot(`
Array [
Array [
"BatchGetPosts",
Object {
"ids": Array [
"2",
"1",
],
},
],
Array [
"BatchGetAuthors",
Object {
"ids": Array [
"1",
],
},
],
]
`);
await stopGrpc();
});
// Schema-validation diagnostics: each test feeds a deliberately broken SDL
// through validate() and snapshots the printed error messages.
describe("validation", () => {
  // A GraphQL field that does not exist on the proto message, and one whose
  // GraphQL type disagrees with the proto field type, are both reported.
  test("missing field", async () => {
    const sdl = generateSdl(`#graphql
type Post @key(fields: "id")
@grpc__fetch(
service: POSTS
rpc: "BatchGetPosts"
dig: "posts"
dataloader: {
key: "$args.id"
listArgument: "ids"
responseKey: "id"
}
) {
id: ID
name: String # incorrect name
title: Int # incorrect type
}
`);
    expect(
      validate(loadString(sdl, { federated: true, cwd: process.cwd() })).map(
        print
      )
    ).toMatchInlineSnapshot(`
Array [
"[ERROR] Post.name not found
Query._entities:[_Entity]! calls Posts/BatchGetPosts
⌙ Post.name -> Post
",
"[ERROR] Post.title returns a Int, but Post.title returns a TYPE_STRING
Query._entities:[_Entity]! calls Posts/BatchGetPosts
⌙ Post.title -> Post
",
]
`);
  });
  // A `dig` path that does not exist on the rpc's response message fails,
  // and the dataloader responseKey is then also unresolvable.
  test("incorrect dig", async () => {
    const sdl = generateSdl(`#graphql
type Post @key(fields: "id")
@grpc__fetch(
service: POSTS
rpc: "BatchGetPosts"
dig: "XXX"
dataloader: {
key: "$args.id"
listArgument: "ids"
responseKey: "id"
}
) {
id: ID
title: String
}
`);
    expect(
      validate(loadString(sdl, { federated: true, cwd: process.cwd() })).map(
        print
      )
    ).toMatchInlineSnapshot(`
Array [
"[ERROR] Query._entities cannot dig \`XXX\` from rpc BatchGetPosts return type BatchGetPostsResponse
Query._entities:[_Entity]! calls Posts/BatchGetPosts
",
"[ERROR] Response key id not found on message BatchGetPostsResponse
Query._entities:[_Entity]! calls Posts/BatchGetPosts
",
]
`);
  });
  // Dataloader parameters must line up with the @key fields, the request
  // message, and the (dug) response message; each mismatch gets its own error.
  test("incorrect dataloader params", async () => {
    const sdl = generateSdl(`#graphql
type Post @key(fields: "id")
@grpc__fetch(
service: POSTS
rpc: "BatchGetPosts"
dig: "posts"
dataloader: {
key: "$args.uuid" # must match @key fields
listArgument: "uuids" # doesn't match request type
responseKey: "uuid" # doesn't match request type (after digging)
}
) {
id: ID
title: String
}
`);
    expect(
      validate(loadString(sdl, { federated: true, cwd: process.cwd() })).map(
        print
      )
    ).toMatchInlineSnapshot(`
Array [
"[ERROR] Dataloader cache key uuid does not match the @key directives (id)
Query._entities:[_Entity]! calls Posts/BatchGetPosts
",
"[ERROR] Field uuids not found on BatchGetPostsRequest for dataloader listArgument
Query._entities:[_Entity]! calls Posts/BatchGetPosts
",
"[ERROR] Response key uuid not found on message Post
Query._entities:[_Entity]! calls Posts/BatchGetPosts
",
]
`);
  });
});
/**
* @param {string} additional
*/
/**
 * Builds a complete federated SDL document for the tests above: the fixed
 * preamble of @grpc directive/input/enum declarations (pointing at the local
 * Posts fixture service on localhost:50003) followed by the caller-supplied
 * type definitions.
 *
 * @param {string} additional - type definitions appended after the directive preamble
 * @returns {string} the full SDL string, ready for loadString()/validate()
 */
function generateSdl(additional) {
  return `#graphql
directive @grpc(
protoFile: String!
serviceName: String!
address: String!
metadata: [grpc__Metadata!]
) on ENUM_VALUE
input grpc__Metadata {
name: String!
value: String
valueFrom: String
}
directive @grpc__renamed(
from: String!
) on FIELD_DEFINITION | ARGUMENT_DEFINITION | ENUM_VALUE | INPUT_FIELD_DEFINITION
directive @grpc__wrap(
gql: String!
proto: String!
) repeatable on FIELD_DEFINITION
directive @grpc__fetch(
service: grpc__Service!
rpc: String!
dig: String
mapArguments: [grpc__InputMap!]
dataloader: grpc__Dataloader
) on FIELD_DEFINITION | OBJECT
input grpc__InputMap {
sourceField: String!
arg: String!
}
input grpc__Dataloader {
key: String!
listArgument: String!
responseKey: String
}
enum grpc__Service {
POSTS
@grpc(
protoFile: "test/__fixtures__/posts.proto"
serviceName: "Posts"
address: "localhost:50003"
)
}
${additional}
`;
}
|
#!/usr/bin/env sh
# Run the test suite with environment variables taken from .env.
# NOTE(review): `env $(cat .env)` word-splits on whitespace, so values
# containing spaces or quoting will break — confirm .env only holds simple
# KEY=value pairs.
env $(cat .env) poetry run pytest -s -vv
|
import { html, PolymerElement } from '@polymer/polymer';
import './shared-styles';
class ContentLoader extends PolymerElement {
constructor() {
super(...arguments);
this.avatarSize = '0px';
this.avatarCircle = '0px';
this.itemsCount = 0;
}
static get template() {
return html `
<style include="shared-styles flex flex-alignment positioning">
:host {
--darkgrey: rgba(250, 250, 250, 0);
--darkgrey-lighter: rgba(250, 250, 250, 0.8);
--background-color: #fff;
--content-color: #e2e2e2;
--card-padding: 24px;
--card-height: 340px;
--card-width: auto;
--card-border-radius: 0;
--card-margin: 0;
--card-box-shadow: none;
--card-skeleton: linear-gradient(
var(--background-color) var(--card-height),
transparent 0
);
--horizontal-position: var(--card-padding);
--avatar-size: 32px;
--avatar-circle: 16px;
--avatar-position: var(--horizontal-position) var(--card-padding);
--avatar-skeleton: radial-gradient(
circle var(--avatar-circle) at center,
var(--content-color) 99%,
transparent 0
);
--title-height: 32px;
--title-width: 200px;
--title-top-position: 180px;
--title-position: var(--horizontal-position) var(--title-top-position);
--title-skeleton: linear-gradient(
var(--content-color) var(--title-height),
transparent 0
);
--desc-line-height: 16px;
--desc-line-skeleton: linear-gradient(
var(--content-color) var(--desc-line-height),
transparent 0
);
--desc-line-1-width: 230px;
--desc-line-1-position: var(--horizontal-position) calc(var(--title-top-position) + 62px);
--desc-line-2-width: 180px;
--desc-line-2-position: var(--horizontal-position) calc(var(--title-top-position) + 85px);
--footer-height: 0px;
--footer-position: 0 calc(var(--card-height) - var(--footer-height));
--footer-skeleton: linear-gradient(
var(--background-color) var(--footer-height),
transparent 0
);
--blur-width: 200px;
--blur-size: var(--blur-width) calc(var(--card-height) - var(--footer-height));
--load-from: -150%;
--load-to: 350%;
--animation-time: 1.5s;
}
.content {
width: var(--card-width);
height: var(--card-height);
margin: var(--card-margin);
}
.content::after {
content: '';
display: block;
width: 100%;
height: 100%;
box-shadow: var(--card-box-shadow);
border-radius: var(--card-border-radius);
background-image: linear-gradient(
90deg,
var(--darkgrey) 0,
var(--darkgrey-lighter) 50%,
var(--darkgrey) 100%
),
var(--title-skeleton), var(--desc-line-skeleton), var(--desc-line-skeleton),
var(--avatar-skeleton), var(--footer-skeleton), var(--card-skeleton);
background-size: var(--blur-size), var(--title-width) var(--title-height),
var(--desc-line-1-width) var(--desc-line-height),
var(--desc-line-2-width) var(--desc-line-height), var(--avatar-size) var(--avatar-size),
100% var(--footer-height), 100% 100%;
background-position: var(--load-from) 0, var(--title-position),
var(--desc-line-1-position), var(--desc-line-2-position), var(--avatar-position),
var(--footer-position), 0 0;
background-repeat: no-repeat;
animation: loading var(--animation-time) infinite;
}
@keyframes loading {
to {
background-position: var(--load-to) 0, var(--title-position),
var(--desc-line-1-position), var(--desc-line-2-position), var(--avatar-position),
var(--footer-position), 0 0;
}
}
</style>
<template is="dom-repeat" items="[[_getArray()]]">
<div class="content"></div>
</template>
`;
}
static get is() {
return 'content-loader';
}
static get properties() {
return {
cardPadding: String,
cardMargin: String,
cardHeight: String,
cardWidth: String,
borderRadius: String,
horizontalPosition: String,
avatarSize: {
type: String,
value: '0px',
},
avatarCircle: {
type: String,
value: '0px',
},
titleTopPosition: String,
titleHeight: String,
titleWidth: String,
animationTime: String,
boxShadow: String,
blurWidth: String,
loadFrom: String,
loadTo: String,
itemsCount: {
type: Number,
value: 0,
},
};
}
connectedCallback() {
super.connectedCallback();
this.updateStyles({
'--card-padding': this.cardPadding || '',
'--card-margin': this.cardMargin || '',
'--card-height': this.cardHeight || '',
'--card-width': this.cardWidth || '',
'--card-border-radius': this.borderRadius || '',
'--horizontal-position': this.horizontalPosition || '',
'--avatar-size': this.avatarSize || '',
'--avatar-circle': this.avatarCircle || '',
'--title-top-position': this.titleTopPosition || '',
'--title-height': this.titleHeight || '',
'--title-width': this.titleWidth || '',
'--animation-time': this.animationTime || '',
'--card-box-shadow': this.boxShadow || '',
'--blur-width': this.blurWidth || '',
'--load-from': this.loadFrom || '',
'--load-to': this.loadTo || '',
});
}
_getArray() {
return new Array(Number(this.itemsCount));
}
}
window.customElements.define(ContentLoader.is, ContentLoader);
//# sourceMappingURL=content-loader.js.map |
#!/usr/bin/env bash
# ———————————————————————————————————————————————————————
# BashMatic Utilities Library
# ———————————————————————————————————————————————————————
# © 2016-2020 Konstantin Gredeskoul, All rights reserved. MIT License.
#
# Distributed under the MIT LICENSE.
#
# Sources: https://github.com/kigster/bashmatic
# ———————————————————————————————————————————————————————
#
# This part of the library deals with Shell's set properties.
# As you may know you can call set -e/+e or set -x/+x and so on
# to control various behaviors of the shell.
#
# Sometimes when we run a subroutine, we want to not fail on
# error, and set +e is the solution. However, what if before we
# entered the subroutine, that option was set to -e instead?
# In this case the right thing to do is to restore it to whatever
# it was prior to changing it.
#
# This is what this library is about. It maintains a stack
# of set values for each call to shell-set.push-stack "character"
# and subsequently restore with shell-set.pop-stack "character".
# It works with both +{value} and -{value} — whatever the state is
# it is pushed on top of the stack.
#
# Example:
#
# #!/usr/bin/env bash
# source ${BASHMATIC_HOME}/lib/shell-set.sh
# # we set this because we want to fail on errors
# set -e
#
# # this function has a body that might trigger a failure
# # but we'd rather handle it in the function than abort.
# function myfunc() {
# shell-set.push-stack e # save the state of set -e/+e
#
# set +e
# # perform operation that may fail
#
# shell-set.pop-stack e # now set -e/+e is restored.
# }
#
#
# Returns 0 (true) if any of the characters in $1 is currently present in
# the shell's option flags ($-), 1 (false) otherwise.
#   e.g. `shell-set.is-set e` succeeds only while `set -e` is in effect.
shell-set.is-set() {
  local flag="$1"
  # Strip from $- every character NOT in ${flag}; anything left means set.
  [[ -n "${-//[^${flag}]/}" ]] && return 0
  return 1
}
# Prints the current contents of the SetOptsStack array.
# NOTE(review): relies on `info` and `${bldylw}` being provided by the rest
# of the bashmatic library — confirm they are sourced before use.
shell-set.show-stack() {
  info "Current Shell Set Stack: ${bldylw}[${SetOptsStack[*]}]"
}
# Resets SetOptsStack to a new, empty array.
#
# BUG FIX: the original used `declare -a SetOptsStack=()`. Inside a function,
# `declare` creates a *function-local* variable, so the freshly initialized
# empty array vanished when the function returned and the caller still saw
# the old (or unset) global. A plain assignment targets the global scope.
shell-set.init-stack() {
  unset SetOptsStack
  SetOptsStack=()
  export SetOptsStack
}
# Pushes the current state of shell option $1 (as "-x" or "+x") onto
# SetOptsStack so it can later be restored with shell-set.pop-stack.
#
# Fixes vs. original:
#  * called the undefined `shell-set-show`; the defined function is
#    `shell-set.show-stack`.
#  * removed the unused local `is_set`.
#  * the trailing `[[ -n ${DEBUG} ]] && ...` made the function return 1
#    whenever DEBUG was unset, which aborts callers running under `set -e`;
#    an explicit `return 0` preserves the intended "always succeeds" contract.
shell-set.push-stack() {
  local value="$1"
  if shell-set.is-set "${value}"; then
    SetOptsStack=("${SetOptsStack[@]}" "-${value}")
  else
    SetOptsStack=("${SetOptsStack[@]}" "+${value}")
  fi
  export SetOptsStack
  [[ -n ${DEBUG} ]] && shell-set.show-stack
  return 0
}
# Pops the most recent "-{v}"/"+{v}" entry for option $1 from SetOptsStack
# and re-applies it with `set`. Fails (return 1) if the top of the stack is
# not an entry for $1 — pushes and pops must be strictly nested.
#
# Fixes vs. original:
#  * called the undefined `shell-set-show`; the defined function is
#    `shell-set.show-stack`.
#  * the old pop used pattern substitution ("${SetOptsStack[@]/$pop/}"),
#    which left an empty string element in the array and would also clobber
#    earlier duplicate entries; slicing off the last index removes exactly one.
shell-set.pop-stack() {
  local value="$1"
  local len=${#SetOptsStack[@]}
  local last_index=$((len - 1))
  local last=${SetOptsStack[${last_index}]}
  if [[ ${last} != "-${value}" && ${last} != "+${value}" ]]; then
    error "Can not restore ${value}, not the last element in ${SetOptsStack[*]} stack."
    return 1
  fi
  # Drop the last element by truncation rather than pattern substitution.
  SetOptsStack=("${SetOptsStack[@]:0:${last_index}}")
  export SetOptsStack
  [[ -n ${DEBUG} ]] && shell-set.show-stack
  eval "set ${last}"
}
# Deprecated aliases kept for backwards compatibility with older callers;
# prefer shell-set.push-stack / shell-set.pop-stack directly.
save-set-x() { shell-set.push-stack x; }
save-restore-x() { shell-set.pop-stack x; }
# Initialize the stack once, on first source of this file.
[[ -z "${SetOptsStack[*]}" ]] && shell-set.init-stack
|
#!/usr/bin/env bash
##########################################################################
# script name: mutex
# script date: 28 Juli 2016
##########################################################################
# a simple mutex interface
#
# use and manage a simple mutex implementation
# via a local file system
# - lock 'mutex name' [returns 1(succeeded) or 0(failed)]
# - free 'mutex name'
##########################################################################
# --- include guard -------------------------------------------------
[ -n "${MUTEX_SH+x}" ] && return || readonly MUTEX_SH=1
# --- global parameters ---------------------------------------------
#set -e # kill script if a command fails
#set -o nounset # unset values give error
#set -o pipefail # prevents errors in a pipeline from being masked
# --- include files -------------------------------------------------
SCRIPT_PATH="$(dirname $( realpath ${BASH_SOURCE[0]} ) )"
LIBS_PATH="${SCRIPT_PATH}"
source "${LIBS_PATH}/b-log/b-log.sh" # logging
# -------------------------------------------------------------------
function mutex_lock() {
    # @description locks a mutex
    #              if no mutex exists, one will be made
    # @param $1    the mutex name
    # @param $2    (optional) file descriptor to bind the lock to (default 200)
    # @return      0 when the lock was acquired, 1 otherwise
    #
    # Fixes vs. original:
    #  * missing mutex name and mkdir failure returned 0 (shell "success"),
    #    so callers believed they held a lock they never acquired; both paths
    #    now return 1.
    #  * `local readonly x=...` declared a stray local named `readonly`
    #    instead of marking x read-only; replaced with `local -r`.
    #  * quoted the fd/path expansions.
    # NOTE(review): LOCKFILE_DIR is assumed to be set by the sourcing
    # environment — confirm (it is not defined in this file).
    local mutex_name=${1:-}
    local -r mutex_name_prefix="mutex_"
    local -r LOCK_FD=200
    if [ -z "$mutex_name" ]; then
        return 1 # missing mutex name: cannot lock
    fi
    local fd=${2:-$LOCK_FD}
    local mutex_file="${LOCKFILE_DIR}/${mutex_name_prefix}${mutex_name}"
    # create lock file (and its directory) if needed
    mkdir -p "$(dirname "${mutex_file}")" || return 1
    touch "${mutex_file}"
    eval "exec $fd>\"$mutex_file\""
    # acquire the lock (non-blocking)
    flock -n "$fd" \
        && return 0 \
        || return 1
}
function mutex_free() {
    # @description frees a mutex
    #              use this when you have the mutex
    #              of the to be freed mutex
    # @param $1    the mutex name
    #
    # Fixes vs. original:
    #  * `rm $mutex_file` was unquoted (word-splitting/glob expansion on
    #    paths containing spaces or metacharacters); now quoted.
    #  * `local readonly x=...` declared a stray local named `readonly`;
    #    replaced with `local -r`.
    local mutex_name=${1:-}
    local -r mutex_name_prefix="mutex_"
    if [ -z "$mutex_name" ]; then
        return 0 # missing mutex name: nothing to free
    fi
    local mutex_file="${LOCKFILE_DIR}/${mutex_name_prefix}${mutex_name}"
    if [ -e "$mutex_file" ]; then
        rm "$mutex_file"
    fi
}
|
import py42.sdk
import py42.settings
import py42.settings.debug as debug
import requests
from click import prompt
from click import secho
from py42.exceptions import Py42UnauthorizedError
from requests.exceptions import ConnectionError
from requests.exceptions import SSLError
from code42cli.click_ext.types import TOTP
from code42cli.errors import Code42CLIError
from code42cli.errors import LoggedCLIError
from code42cli.logger import get_main_cli_logger
py42.settings.items_per_page = 500
logger = get_main_cli_logger()
def create_sdk(profile, is_debug_mode, password=None, totp=None):
    """Build an authenticated py42 SDK client for the given CLI profile.

    Enables py42 debug logging when requested, and — if the profile opts out
    of SSL verification — prints a red warning and disables certificate
    checks globally for both requests and py42.

    NOTE(review): the original source contained redacted ``<PASSWORD>``
    placeholders both as the parameter default and as the fallback producer.
    Restored here as ``None`` + ``profile.get_password()`` — confirm the
    accessor name against the profile object actually passed in.

    :param profile: CLI profile providing authority_url, username, etc.
    :param is_debug_mode: when truthy, raise py42's log level to DEBUG.
    :param password: optional explicit password; falls back to the profile's.
    :param totp: optional time-based one-time password for MFA.
    :return: a connected py42 SDK instance.
    """
    if is_debug_mode:
        py42.settings.debug.level = debug.DEBUG
    if profile.ignore_ssl_errors == "True":
        secho(
            f"Warning: Profile '{profile.name}' has SSL verification disabled. "
            "Adding certificate verification is strongly advised.",
            fg="red",
            err=True,
        )
        requests.packages.urllib3.disable_warnings(
            requests.packages.urllib3.exceptions.InsecureRequestWarning
        )
        py42.settings.verify_ssl_certs = False
    password = password or profile.get_password()
    return _validate_connection(profile.authority_url, profile.username, password, totp)
def _validate_connection(authority_url, username, password, totp=None):
    """Create a py42 SDK from local-account credentials, translating SDK
    errors into CLI errors.

    On a 401 caused by a server requiring local two-factor auth, prompts the
    user for a TOTP once and retries (single level of recursion: the retry
    passes a non-None totp, so a second 401 falls into the invalid-credentials
    branch instead of prompting again).

    :raises LoggedCLIError: on SSL/connection failures or unknown errors.
    :raises Code42CLIError: on bad credentials or a bad TOTP token.
    """
    try:
        return py42.sdk.from_local_account(authority_url, username, password, totp=totp)
    except SSLError as err:
        logger.log_error(err)
        raise LoggedCLIError(
            f"Problem connecting to {authority_url}, SSL certificate verification failed.\nUpdate profile with --disable-ssl-errors to bypass certificate checks (not recommended!)."
        )
    except ConnectionError as err:
        logger.log_error(err)
        raise LoggedCLIError(f"Problem connecting to {authority_url}.")
    except Py42UnauthorizedError as err:
        logger.log_error(err)
        # Server demands local 2FA: prompt once, then retry with the token.
        if "LoginConfig: LOCAL_2FA" in str(err):
            if totp is None:
                totp = prompt(
                    "Multi-factor authentication required. Enter TOTP", type=TOTP()
                )
                return _validate_connection(authority_url, username, password, totp)
            else:
                raise Code42CLIError(
                    f"Invalid credentials or TOTP token for user {username}."
                )
        else:
            raise Code42CLIError(f"Invalid credentials for user {username}.")
    except Exception as err:
        logger.log_error(err)
        raise LoggedCLIError("Unknown problem validating connection.")
|
<reponame>jlenoble/promise-plumber
import { ResolutionState, ResolvableState } from "./resolution-state";
/**
 * A resolution state extended with run control: the promise machinery it
 * backs can be started, paused and resumed.
 */
export interface RunState<T> extends ResolutionState<T> {
  /** True once start() has been called (never reset). */
  started: boolean;
  /** True while the state is actively running and may settle. */
  running: boolean;
  /** Begin running; first call flips both flags on. */
  start: () => void;
  /** Temporarily stop running (settlement attempts are ignored). */
  pause: () => void;
  /** Resume running, if previously started and not yet done. */
  resume: () => void;
}
/**
 * A ResolvableState that only settles while it is actively running.
 *
 * Lifecycle: start() flips `started`/`running` on (idempotent); pause()
 * suspends; resume() re-enables running only if already started and not yet
 * done. resolve()/reject() are ignored unless `running` is true, and a
 * successful settlement turns `running` back off.
 */
export class RunningState<T> extends ResolvableState<T> implements RunState<T> {
  public started = false;
  public running = false;

  public start(): void {
    if (this.started) {
      return; // already started: no-op
    }
    this.started = true;
    this.running = true;
  }

  public pause(): void {
    this.running = false;
  }

  public resume(): void {
    const mayRun = this.started && !this.done;
    if (mayRun) {
      this.running = true;
    }
  }

  public resolve(value?: T | PromiseLike<T>): void {
    if (!this.running) {
      return; // not running: settlement attempts are ignored
    }
    super.resolve(value);
    this.running = false;
  }

  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  public reject(reason?: any): void {
    if (!this.running) {
      return; // not running: settlement attempts are ignored
    }
    super.reject(reason);
    this.running = false;
  }
}
|
// Package core provides functions and data structures for basic DXF operations.
package core
import "strconv"
// DataType is an interface for a DXF type.
type DataType interface {
DxfElement
// ToString returns a string representation of the value
ToString() string
// Value returns the encapsulated value as an interface{} the caller should cast it appropriately
// or use the 'AsString', 'AsInt' and 'AsFloat' functions
Value() interface{}
}
// String DataType implementation: wraps an immutable string value.
type String struct {
	value string
}

// NewString creates a new String object with the provided value.
// The error is always nil; the signature matches the other DataType constructors.
func NewString(value string) (DataType, error) {
	return &String{value: value}, nil
}

// NewStringValue creates a new String object with the provided string.
func NewStringValue(value string) *String {
	return &String{value: value}
}

// ToString returns a string representation of the value.
func (s String) ToString() string {
	return s.value
}

// Value returns the encapsulated value.
func (s String) Value() interface{} {
	return s.value
}

// Equals compares two Strings for equality.
// If other cannot be cast to a *String, it returns false.
func (s String) Equals(other DxfElement) bool {
	otherStr, ok := other.(*String)
	return ok && s.value == otherStr.value
}
// Integer DataType implementation: wraps an immutable int value.
type Integer struct {
	value int
}

// NewInteger creates a new Integer object, parsing the value from a string.
// On parse failure the zero value is stored and the strconv error returned.
func NewInteger(value string) (DataType, error) {
	parsed, err := strconv.Atoi(value)
	return &Integer{value: parsed}, err
}

// NewIntegerValue creates a new Integer object with the provided int.
func NewIntegerValue(value int) *Integer {
	return &Integer{value: value}
}

// ToString returns a string representation of the value.
func (i Integer) ToString() string {
	return strconv.Itoa(i.value)
}

// Value returns the encapsulated value.
func (i Integer) Value() interface{} {
	return i.value
}

// Equals compares two Integers for equality.
// If other cannot be cast to an *Integer, it returns false.
func (i Integer) Equals(other DxfElement) bool {
	otherInt, ok := other.(*Integer)
	return ok && i.value == otherInt.value
}
// Float DataType implementation: wraps an immutable float64 value.
type Float struct {
	value float64
}

// NewFloat creates a new Float object, parsing the value from a string.
// On parse failure the zero value is stored and the strconv error returned.
func NewFloat(value string) (DataType, error) {
	parsed, err := strconv.ParseFloat(value, 64)
	return &Float{value: parsed}, err
}

// NewFloatValue creates a new Float object with the provided float64.
func NewFloatValue(value float64) *Float {
	return &Float{value: value}
}

// ToString returns a string representation of the value
// (shortest decimal form, 'f' format).
func (f Float) ToString() string {
	return strconv.FormatFloat(f.value, 'f', -1, 64)
}

// Value returns the encapsulated value.
func (f Float) Value() interface{} {
	return f.value
}

// Equals compares two Floats for equality.
// If other cannot be cast to a *Float, it returns false.
func (f Float) Equals(other DxfElement) bool {
	otherFloat, ok := other.(*Float)
	return ok && f.value == otherFloat.value
}
// AsString is the accessor for a String DataType.
// If d is String, it will return (value, true), otherwise ("", false).
func AsString(d DataType) (string, bool) {
	value, ok := d.Value().(string)
	return value, ok
}

// AsInt is the accessor for an Integer DataType.
// If d is Integer, it will return (value, true), otherwise (0, false).
func AsInt(d DataType) (int, bool) {
	value, ok := d.Value().(int)
	return value, ok
}

// AsFloat is the accessor for a Float DataType.
// If d is Float, it will return (value, true), otherwise (0.0, false).
func AsFloat(d DataType) (float64, bool) {
	value, ok := d.Value().(float64)
	return value, ok
}
|
/** An immutable record describing a single space mission. */
class Mission {
  constructor(
    public name: string,
    public destination: string,
    public launchDate: Date
  ) {}
}

/** Keeps an ordered log of missions and reports on them. */
class MissionControl {
  private missions: Mission[] = [];

  constructor() {}

  /** Add a new mission to the list. */
  addMission(name: string, destination: string, launchDate: Date): void {
    const mission = new Mission(name, destination, launchDate);
    this.missions.push(mission);
  }

  /** Retrieve the total number of missions. */
  getTotalMissions(): number {
    return this.missions.length;
  }

  /** Print one line per mission (1-based index) to the console. */
  displayMissions(): void {
    for (const [index, mission] of this.missions.entries()) {
      console.log(`Mission ${index + 1}: ${mission.name} - Destination: ${mission.destination} - Launch Date: ${mission.launchDate.toDateString()}`);
    }
  }
}
// Example usage: record two historical missions, print them, then report the count.
const missionControl = new MissionControl();
missionControl.addMission("Apollo 11", "Moon", new Date("1969-07-20"));
missionControl.addMission("Mars Rover", "Mars", new Date("2020-07-30"));
missionControl.displayMissions();
console.log("Total missions: " + missionControl.getTotalMissions());
#!/bin/bash
# Run the binary named by $1 with the -nv flag, writing LLVM raw profile
# data to <binary>.profraw (for later llvm-profdata/llvm-cov processing).
LLVM_PROFILE_FILE=${1}.profraw ./${1} -nv
#!/bin/bash
# DevStats sync pipeline for the ORAS project: acquires the global sync lock,
# (re)creates DB structure, imports GitHub archive data since 2018-12-24,
# then runs the shared setup/import scripts. Each stage appends to run.log
# and errors.txt and aborts with a distinct exit code on failure.
set -o pipefail
function finish {
    sync_unlock.sh
}
# Take the sync lock only once, even if this script re-enters itself
# (TRAP is exported so child invocations skip locking).
if [ -z "$TRAP" ]
then
    # FIX: was `exit -1` — exit takes an unsigned 0-255 operand; -1 is
    # non-portable (and wraps to 255 where accepted). Use 1 explicitly.
    sync_lock.sh || exit 1
    trap finish EXIT
    export TRAP=1
fi
# Truncate the log files for this run.
> errors.txt
> run.log
GHA2DB_PROJECT=oras PG_DB=oras GHA2DB_LOCAL=1 structure 2>>errors.txt | tee -a run.log || exit 1
./devel/db.sh psql oras -c "create extension if not exists pgcrypto" || exit 1
GHA2DB_PROJECT=oras PG_DB=oras GHA2DB_LOCAL=1 gha2db 2018-12-24 0 today now 'oras-project,deislabs/oras,shizhMSFT/oras' 2>>errors.txt | tee -a run.log || exit 2
GHA2DB_PROJECT=oras PG_DB=oras GHA2DB_LOCAL=1 GHA2DB_MGETC=y GHA2DB_SKIPTABLE=1 GHA2DB_INDEX=1 structure 2>>errors.txt | tee -a run.log || exit 3
GHA2DB_PROJECT=oras PG_DB=oras ./shared/setup_repo_groups.sh 2>>errors.txt | tee -a run.log || exit 4
GHA2DB_PROJECT=oras PG_DB=oras ./shared/import_affs.sh 2>>errors.txt | tee -a run.log || exit 5
GHA2DB_PROJECT=oras PG_DB=oras ./shared/setup_scripts.sh 2>>errors.txt | tee -a run.log || exit 6
GHA2DB_PROJECT=oras PG_DB=oras ./shared/get_repos.sh 2>>errors.txt | tee -a run.log || exit 7
GHA2DB_PROJECT=oras PG_DB=oras GHA2DB_LOCAL=1 vars || exit 8
./devel/ro_user_grants.sh oras || exit 9
./devel/psql_user_grants.sh devstats_team oras || exit 10
|
<reponame>cbedanne/TestAutomation
package io.hackages.learning;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.cucumber.datatable.DataTable;
import io.cucumber.java.en.Given;
import io.cucumber.java.en.Then;
import io.cucumber.java.en.When;
import io.hackages.learning.domain.model.Flight;
import io.hackages.learning.repository.model.AircraftEntity;
import org.hamcrest.core.StringContains;
import org.springframework.boot.web.client.RestTemplateBuilder;
import org.springframework.http.HttpStatus;
import java.util.ArrayList;
import java.util.List;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;
/**
 * Cucumber step definitions exercising the flights REST API (served on
 * localhost:5000) against an aircraft table seeded per scenario.
 * HTTP plumbing ({@code executeGet}, {@code latestResponse},
 * {@code setupAircraftDatabase}) comes from {@link SpringIntegrationTest}.
 */
public class StepDefsIntegrationTest extends SpringIntegrationTest {
    private static final String server = "http://localhost:5000/";

    public StepDefsIntegrationTest(RestTemplateBuilder builder) {
        super(builder);
    }

    /** Seeds the aircraft table from the scenario's data table (id, plus two string columns). */
    @Given("We have the following aircrafts in the database")
    public void setup_aircraft_table_in_the_database(DataTable table) throws Throwable {
        List<AircraftEntity> aircraftEntities = new ArrayList<>();
        table.cells().stream()
            .map(fields -> new AircraftEntity(Long.parseLong(fields.get(0)), fields.get(1), fields.get(2)))
            .forEach(aircraftEntities::add);
        setupAircraftDatabase(aircraftEntities);
    }

    @When("the client calls /{word}")
    public void the_client_issues_GET_request_root_path(String path) throws Throwable {
        executeGet(server.concat(path));
    }

    // NOTE(review): this step constructs a new Flight with the changed
    // destination and immediately discards it — nothing is sent to the
    // server and no state is mutated, so the step is effectively a no-op.
    // Confirm whether a PUT/POST call was intended here.
    @When("the client change the destination {word}")
    public void the_client_change_the_destination(String destination) throws Throwable{
        ObjectMapper objectMapper = new ObjectMapper();
        final Flight flight = objectMapper.readValue(latestResponse.getBody(), new TypeReference<List<Flight>>(){}).get(0);
        new Flight(
            flight.getType(),
            flight.getOrigin(),
            destination,
            flight.getDepartureDate(),
            flight.getArrivalDate(),
            flight.getAircraft()
        );
    }

    /** Asserts the first flight in the last response has exactly the given destination. */
    @Then("the destination new destination is {word}")
    public void the_destination_is_the_new_destination(String destination) throws Throwable {
        ObjectMapper objectMapper = new ObjectMapper();
        final List<Flight> flights = objectMapper.readValue(latestResponse.getBody(), new TypeReference<List<Flight>>(){});
        final Flight flight = flights.get(0);
        assertThat(flight.getDestination(), is(destination));
    }

    @Then("the client receives status code of {int}")
    public void the_client_receives_status_code_of(int statusCode) throws Throwable {
        final HttpStatus currentStatusCode = latestResponse.getTheResponse().getStatusCode();
        assertThat("status code is incorrect : " + latestResponse.getBody(), currentStatusCode.value(), is(statusCode));
    }

    /** Asserts the first flight's destination contains the given word (substring match). */
    @Then("the client take the first flight and the destination is {word}")
    public void the_client_take_the_first_flight_and_find_destination(String destination) throws Throwable{
        ObjectMapper objectMapper = new ObjectMapper();
        final List<Flight> flights = objectMapper.readValue(latestResponse.getBody(), new TypeReference<List<Flight>>(){});
        Flight flight = flights.get(0);
        assertThat(flight.getDestination(), StringContains.containsString(destination));
    }

    /** Asserts the first flight's origin contains the given word (substring match). */
    @Then("the client take the first flight and the origin is {word}")
    public void the_client_take_the_first_flight_and_find_origin(String origin) throws Throwable{
        ObjectMapper objectMapper = new ObjectMapper();
        final List<Flight> flights = objectMapper.readValue(latestResponse.getBody(), new TypeReference<List<Flight>>(){});
        Flight flight = flights.get(0);
        assertThat(flight.getOrigin(), StringContains.containsString(origin));
    }

    // NOTE(review): the two steps below only print and assert nothing.
    @Then("the client take the first one")
    public void the_client_take_the_first_one() throws Throwable{
        System.out.println("We are here");
    }

    @Then("the description of {word} is modify")
    public void the_description_is_modify(String code){
        System.out.println("We are there");
    }

    /** Asserts every returned flight's origin contains the given word. */
    @Then("all the flights origin is {word}")
    public void verify_if_all_flights_origin_is_the_good_one(String origin) throws Throwable{
        ObjectMapper objectMapper = new ObjectMapper();
        final List<Flight> flights = objectMapper.readValue(latestResponse.getBody(), new TypeReference<List<Flight>>(){});
        flights.stream().forEach(flight -> assertThat(flight.getOrigin(), StringContains.containsString(origin)));
    }

    /** Asserts every returned flight's destination contains the given word. */
    @Then("all the flights destination is {word}")
    public void verify_if_all_flights_destination_is_the_good_one(String destination) throws Throwable{
        ObjectMapper objectMapper = new ObjectMapper();
        final List<Flight> flights = objectMapper.readValue(latestResponse.getBody(), new TypeReference<List<Flight>>(){});
        flights.stream().forEach(flight -> assertThat(flight.getDestination(), StringContains.containsString(destination)));
    }
}
|
/**
* <a href="http://www.openolat.org">
* OpenOLAT - Online Learning and Training</a><br>
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); <br>
* you may not use this file except in compliance with the License.<br>
* You may obtain a copy of the License at the
* <a href="http://www.apache.org/licenses/LICENSE-2.0">Apache homepage</a>
* <p>
* Unless required by applicable law or agreed to in writing,<br>
* software distributed under the License is distributed on an "AS IS" BASIS, <br>
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <br>
* See the License for the specific language governing permissions and <br>
* limitations under the License.
* <p>
* Initial code contributed and copyrighted by<br>
* frentix GmbH, http://www.frentix.com
* <p>
*/
package org.olat.core.gui.control.generic.breadcrumb;
import java.util.ArrayList;
import java.util.List;
import org.olat.core.gui.UserRequest;
import org.olat.core.gui.components.Component;
import org.olat.core.gui.components.link.Link;
import org.olat.core.gui.components.link.LinkFactory;
import org.olat.core.gui.components.velocity.VelocityContainer;
import org.olat.core.gui.control.Event;
import org.olat.core.gui.control.WindowControl;
import org.olat.core.gui.control.controller.BasicController;
/**
* <h3>Description:</h3>
* The BreadCrumbController implements a simple bred crumb navigation. This type
* of navigation is useful when users can launch loosely coupled work-flows that
* don't span up in a new tab and that are not modal.
* <p>
* Don't use it to implement wizard like work-flows. Use the StepsController for
* this, the StepsController offers a modal wizard infrastructure.
* <p>
* When a controller in the bread crumb path is re-activated by the user, the
* child crumb controller of the activated crumb is disposed.
* <p>
* Initial Date: 08.09.2008 <br>
*
* @author <NAME>, frentix GmbH, http://www.frentix.com
*/
public class BreadCrumbController extends BasicController {
    /** Fired to a crumb controller when its crumb is re-activated via the navigation. */
    public static final Event CRUMB_VIEW_ACTIVATED = new Event("CRUMB_VIEW_ACTIVATED");
    private VelocityContainer breadCrumbVC;
    // Ordered crumb links; each link's user object is its CrumbController.
    // The last (current) link is always disabled.
    private List<Link> breadCrumbLinks;

    /**
     * Constructor to create a bread crumb navigation controller. Use the
     * activateFirstCrumbController() method to add the first crumb controller to
     * the crumb stack
     *
     * @param ureq
     * @param control
     */
    public BreadCrumbController(UserRequest ureq, WindowControl control) {
        super(ureq, control);
        breadCrumbVC = createVelocityContainer("breadCrumb");
        breadCrumbLinks = new ArrayList<>();
        breadCrumbVC.contextPut("breadCrumbs", breadCrumbLinks);
        putInitialPanel(breadCrumbVC);
    }

    /**
     * Add the first crumb controller to the crumb stack. To add followup crumbs
     * to the stack you must use the
     * crumbController.activateAndListenToChildCrumbController() method
     *
     * @param firstCrumbController The crumb controller that serves as the home
     *          crumb
     */
    public void activateFirstCrumbController(CrumbController firstCrumbController) {
        firstCrumbController.setBreadCrumbController(this);
        putToBreadCrumbStack(firstCrumbController);
    }

    /**
     * Put a crumb controller with it's view to the bread crumb stack. Use the
     * crumbController.activateAndListenToChildCrumbController() to put new crumbs
     * to the stack in your code
     *
     * @param crumbController
     */
    void putToBreadCrumbStack(CrumbController crumbController) {
        // re-enable last link (only the current crumb stays disabled)
        if (breadCrumbLinks.size() > 0) breadCrumbLinks.get(breadCrumbLinks.size() - 1).setEnabled(true);
        // create new link for this crumb and add it to data model
        String cmd = "crumb-" + breadCrumbLinks.size();
        Link link = LinkFactory.createCustomLink(cmd, cmd, cmd, Link.NONTRANSLATED, breadCrumbVC, this);
        link.setCustomDisplayText(crumbController.getCrumbLinkText());
        link.setTitle(crumbController.getCrumbLinkHooverText());
        link.setUserObject(crumbController);
        link.setEnabled(false);
        breadCrumbLinks.add(link);
        breadCrumbVC.put("content", crumbController.getInitialComponent());
        // set bread crumb navigation controller
        crumbController.setBreadCrumbController(this);
    }

    /**
     * Reset all texts on the crumb path (e.g. after a locale change or when a
     * crumb's title depends on mutable state).
     */
    public void resetCrumbTexts() {
        for (Link link : breadCrumbLinks) {
            CrumbController crumbController = (CrumbController) link.getUserObject();
            link.setCustomDisplayText(crumbController.getCrumbLinkText());
            link.setTitle(crumbController.getCrumbLinkHooverText());
        }
    }

    /**
     * Remove a crumb controller and all it's child controllers that are created
     * by this controller or it's children from the bread crumb stack and calls
     * dispose on the crumb controller. Use
     * crumbController.removeFromBreadCrumbPathAndDispose() if you manually want
     * to remove a crumb controller from the bread crumb
     *
     * @param crumbController
     */
    void removeFromBreadCrumb(CrumbController crumbController) {
        int activateLinkPos = 0;
        for (Link link : breadCrumbLinks) {
            CrumbController linkController = (CrumbController) link.getUserObject();
            if (linkController.equals(crumbController)) {
                // dispose depth-first: children first, then the crumb itself
                linkController.deactivateAndDisposeChildCrumbController();
                linkController.dispose();
                break;
            }
            activateLinkPos++;
        }
        if (activateLinkPos > 0) {
            // truncate the link list at the removed crumb and republish it.
            // NOTE(review): List.subList returns a view backed by the original
            // list — later add() calls go through the view; confirm this is
            // intended (the original comment "reput to hibernate" is unclear).
            breadCrumbLinks = breadCrumbLinks.subList(0, activateLinkPos);
            breadCrumbVC.contextPut("breadCrumbs", breadCrumbLinks);
            // disable current link and update content view from current controller
            Link parentLink = breadCrumbLinks.get(activateLinkPos - 1);
            parentLink.setEnabled(false);
            CrumbController parentController = (CrumbController) parentLink.getUserObject();
            breadCrumbVC.put("content", parentController.getInitialComponent());
        }
    }

    @Override
    protected void doDispose() {
        // Disposing the root crumb cascades to every crumb on the stack.
        if (breadCrumbLinks.size() > 0) {
            removeFromBreadCrumb((CrumbController) breadCrumbLinks.get(0).getUserObject());
        }
        super.doDispose();
    }

    @Override
    protected void event(UserRequest ureq, Component source, Event event) {
        for (Link link : breadCrumbLinks) {
            if (source == link) {
                // set content to new controller view and disable the corresponding link
                link.setEnabled(false);
                CrumbController crumbController = (CrumbController) link.getUserObject();
                breadCrumbVC.put("content", crumbController.getInitialComponent());
                // remove all children from this new controller
                CrumbController childCrumb = crumbController.getChildCrumbController();
                if (childCrumb != null) {
                    removeFromBreadCrumb(childCrumb);
                }
                // manually fire an event to the crumb controller
                crumbController.dispatchEvent(ureq, this, CRUMB_VIEW_ACTIVATED);
                break;
            } else {
                // every crumb before the clicked one becomes clickable again
                link.setEnabled(true);
            }
        }
    }
}
|
<filename>node_modules/react-icons-kit/md/ic_sanitizer_outline.js
"use strict";

// Generated icon descriptor for Material Design "sanitizer" (outline variant),
// consumed by react-icons-kit: an SVG viewBox plus child node descriptors
// (an empty bounding rect and the icon's path data).
Object.defineProperty(exports, "__esModule", {
  value: true
});
exports.ic_sanitizer_outline = void 0;
var ic_sanitizer_outline = {
  "viewBox": "0 0 24 24",
  "children": [{
    "name": "rect",
    "attribs": {
      "fill": "none",
      "height": "24",
      "width": "24"
    },
    "children": []
  }, {
    "name": "path",
    "attribs": {
      "d": "M15.5,6.5C15.5,5.66,17,4,17,4s1.5,1.66,1.5,2.5C18.5,7.33,17.83,8,17,8S15.5,7.33,15.5,6.5z M19.5,15 c1.38,0,2.5-1.12,2.5-2.5c0-1.67-2.5-4.5-2.5-4.5S17,10.83,17,12.5C17,13.88,18.12,15,19.5,15z M13,14h-2v-2H9v2H7v2h2v2h2v-2h2V14z M16,12v8c0,1.1-0.9,2-2,2H6c-1.1,0-2-0.9-2-2v-8c0-2.97,2.16-5.43,5-5.91V4H7V2h6c1.13,0,2.15,0.39,2.99,1.01l-1.43,1.43 C14.1,4.17,13.57,4,13,4h-2v2.09C13.84,6.57,16,9.03,16,12z M14,12c0-2.21-1.79-4-4-4s-4,1.79-4,4v8h8V12z"
    },
    "children": []
  }]
};
exports.ic_sanitizer_outline = ic_sanitizer_outline;
let facade = require('gamecloud')
/**
* 配置管理器
* Updated by liub on 2017-05-05.
*/
/**
 * Task controller exposed to game clients: listing tasks, collecting a
 * task bonus and fetching a single task's details. Every handler wraps
 * its payload in the standard { code, data } response envelope.
 */
class task extends facade.Control
{
    /** List the user's tasks filtered by type and status. */
    async list(user, objData) {
        const payload = user.baseMgr.task.getList(objData.type, objData.status);
        return { code: facade.const.ReturnCode.Success, data: payload };
    }

    /** Collect the bonus attached to the task identified by objData.id. */
    async getBonus(user, objData) {
        const payload = user.baseMgr.task.getBonus(objData.id);
        return { code: facade.const.ReturnCode.Success, data: payload };
    }

    /** Fetch the full task object identified by objData.id. */
    async getInfo(user, objData) {
        const payload = user.baseMgr.task.getTaskObj(objData.id);
        return { code: facade.const.ReturnCode.Success, data: payload };
    }
}
exports = module.exports = task;
|
import nltk
import numpy as np
import random
import string
from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
# NOTE(review): nltk, numpy, random and string are imported but never used
# in this script — confirm before removing.
# Build a ChatterBot instance and train it on a tiny scripted dialogue
# (alternating user / bot utterances).
bot = ChatBot('Customer Service Chatbot')
trainer = ListTrainer(bot)
# train the bot
conversation = [
    "Hello",
    "Hi there! How can I help you?",
    "Can I book an appointment?",
    "I'm sorry, but I don't offer appointment booking services. How else can I help you?"
]
trainer.train(conversation)
# response helper
def response(userInput):
    # Delegate to the trained bot and return its reply object unchanged.
    botResponse = bot.get_response(userInput)
    return botResponse
# chat
# Console REPL: type 'exit' (any case) to quit.
while True:
    userInput = input("You: ")
    if userInput.lower() == 'exit':
        break
    botResponse = response(userInput)
    print("Chatbot:", botResponse)
package config
// schemaDataV1 holds the v1 schema document; the {{.schemaV1}} placeholder
// is substituted by a template/code-generation step before compilation.
var schemaDataV1 = `{{.schemaV1}}`
// servicesSchemaDataV2 holds the v2 services schema; filled in the same way.
var servicesSchemaDataV2 = `{{.schemaV2}}`
|
#!/bin/sh
# This file is used to generate the annotation of package info that
# records the user, url, revision and timestamp.
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Force a neutral locale so command output is predictable.
unset LANG
unset LC_CTYPE

# Positional parameters: the version string and the directory that will
# receive the generated package-info.java.
version=$1
outputDirectory=$2

# Build metadata recorded in the annotation.
user=$(whoami)
date=$(date)
cwd=$(pwd)

# Derive revision/url from whichever SCM the working copy uses.
if [ -d .svn ]; then
  revision=$(svn info | sed -n -e 's/Last Changed Rev: \(.*\)/\1/p')
  url=$(svn info | sed -n -e 's/^URL: \(.*\)/\1/p')
elif [ -d .git ]; then
  revision=$(git log -1 --pretty=format:"%H")
  hostname=$(hostname)
  url="git://${hostname}${cwd}"
else
  revision="Unknown"
  url="file://$cwd"
fi

# Emit the annotated package-info.java for org.apache.hadoop.hbase.
mkdir -p "$outputDirectory/org/apache/hadoop/hbase"
cat >"$outputDirectory/org/apache/hadoop/hbase/package-info.java" <<EOF
/*
 * Generated by src/saveVersion.sh
 */
@VersionAnnotation(version="$version", revision="$revision",
user="$user", date="$date", url="$url")
package org.apache.hadoop.hbase;
EOF
|
#include "iwkats_core.h"
IWKATS_NAMESPACE_BEGIN
// Map an IWKATS error code to a human-readable message string.
// NOTE(review): returning string literals through a non-const char*
// relies on a conversion that is ill-formed since C++11; callers must
// never write through the returned pointer — consider changing the
// return type to const char* (interface change, so flagged only).
char* GetErrorMsgByCode(int err_code) {
  switch(err_code) {
    case kLimitReqSeconds: return "每秒发送请求数量超限";
    case kQueueLimit: return "请求队列超限";
    case kNetwordError: return "网络原因ctp失败";
    case kSuccess: return "Success";
    case kUnpackMsgError: return "解包失败";
    case kUnsupportMsg: return "不支持的请求";
    case kNeedLoginFirst: return "必须先登录";
    default: return "未知错误";
  }
}
IWKATS_NAMESPACE_END
|
package utd.bigdata.twitter.mapreduce;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map.Entry;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
/**
 * Reducer that buckets tweets by a hash of the hour extracted from the
 * timestamp token of each value, then emits (key, "bucket, count") pairs.
 */
public class ReducerClass extends Reducer<IntWritable, Text, IntWritable, Text> {

    /**
     * Counts tweets per bucket for one key.
     *
     * @param key     group key produced by the mapper
     * @param values  tweet records; token 3 is a "HH:MM:SS" timestamp
     * @param context Hadoop output sink
     */
    @Override
    public void reduce(IntWritable key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        HashMap<Integer, Integer> tweetcounter = new HashMap<>();
        for (Text t : values) {
            int hour = Integer.parseInt(t.toString().split(" ")[3].split(":")[0]);
            // NOTE(review): (hour % 4) + 1 interleaves hours across 4 buckets
            // (0,4,8,... -> bucket 1); confirm this is intended rather than
            // contiguous 6-hour windows (hour / 6).
            int index = (hour % 4) + 1;
            // merge() replaces the asymmetric put/containsKey branches and the
            // deprecated new Integer(...) boxing of the original.
            tweetcounter.merge(index, 1, Integer::sum);
        }
        for (Entry<Integer, Integer> entry : tweetcounter.entrySet()) {
            context.write(new IntWritable(key.get()), new Text(entry.getKey() + ", " + entry.getValue()));
        }
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        super.cleanup(context);
    }
}
|
#!/usr/bin/env bash
set -e

# Load optional per-project overrides.
[ -f './project.bash' ] && source './project.bash'

PROJECT_NAME=${PROJECT_NAME:-'project'}
JAVA_MINOR_VERSION=${JAVA_MINOR_VERSION:-8}
JAVA_VERSION=${JAVA_VERSION:-"1.${JAVA_MINOR_VERSION}"}
NODE_VERSION=${NODE_VERSION:-'6.2.0'}
WILDFLY_RELEASE='Final'
WILDFLY_VERSION='10.0.0'
WILDFLY_FULL_VERSION="${WILDFLY_VERSION}.${WILDFLY_RELEASE}"
DATABASE_USER=${DATABASE_USER:-'app'}
DATABASE_PASS=${DATABASE_PASS:-'password'}

# On any command failure stop every container this script starts.
# Fixes: the message used '/n' instead of a real newline ("Stoping"),
# and stopped 'glassfish-web' although the web container below is
# started as 'wildfly-web'.
docker_err() {
    exit=$?
    printf '\nStopping containers\n'
    docker stop mysql-dbms java-dev node-assets wildfly-web
    exit $exit
}
trap docker_err ERR

docker run \
    --detach=true \
    --name='mysql-dbms' \
    --env="DATABASE_USER=${DATABASE_USER}" \
    --env="DATABASE_PASS=${DATABASE_PASS}" \
    "${PROJECT_NAME}/mysql-dbms:latest"

docker run \
    --detach=true \
    --name='java-dev' \
    --volume="$(dirname "$(pwd)")/src:/var/www/projects" \
    --publish='7070:8080' \
    "${PROJECT_NAME}/java-${JAVA_VERSION}:latest"

docker run \
    --detach=true \
    --name='node-assets' \
    --volume="$(dirname "$(pwd)")/src:/var/www/projects" \
    "${PROJECT_NAME}/node-${NODE_VERSION}:latest"

# 9990 (administration), 8080 (HTTP listener), 8181 (HTTPS listener), 9009 (JPDA debug port)
docker run \
    --detach=true \
    --name='wildfly-web' \
    --publish='8080:8080' \
    --publish='8443:8443' \
    --publish='9009:9009' \
    --publish='9990:9990' \
    "${PROJECT_NAME}/wildfly-${JAVA_VERSION}-${WILDFLY_VERSION}:latest"
|
# This file is part of the TeTePy software
#
# Copyright (c) 2017, 2018, University of Southampton
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
# Python 2 script (print statements below). Expects exactly one argument:
# the path to the submission log file.
if len( sys.argv ) != 2:
    print "Provide path to submission log file as command line argument, please"
    sys.exit(1)
logfile = sys.argv[1]
def scan_log_file(logfile):
    """Returns list of tuples with jobs that were injected to testing queue.

    Each tuple contains (jobnumber, datetime, user, assignmentname)
    """
    # NOTE(review): the file handle is never closed — acceptable for a
    # one-shot report script, but confirm.
    f = open(logfile)
    submissions = []
    # Two-line state machine: a 'Terminated well' line records the test
    # result, the following 'Sending' line identifies job/user/assignment.
    found_successful_test = False
    job = user = datetime = assignment = passed = failed = total = None
    for line in f:
        if found_successful_test:
            if 'Sending' in line:
                found_successful_test = False
                user = line.split('Sending email to ')[1].split(',')[0]
                assignment = line.split(' job')[0].split()[-1]
                job = int(line.split('job ')[1].split()[0])
                submissions.append( (job, datetime, user, assignment, passed, failed))
                # Reset state for the next record.
                job = user = datetime = assignment = passed = failed = total = None
        if 'Terminated well' in line:
            found_successful_test = True
            # Line tail looks like "Terminated well (P, F, T)" — parse counts.
            bits = line.strip().split("Terminated well (")[1].split(',')
            passed, failed, total = map(int, bits[0:2]) + [int(bits[2][:-1])]
            assert passed + failed == total, "Internal error, line=%s" % line
            datetime = line.split(",")[0]
    return submissions
def create_summary_ordered_by_assignments(s):
    """Group submissions by assignment, keeping one entry per user.

    For 'lab*' assignments the user's first submission is kept and only the
    attempt counter is incremented; for other assignments the latest
    submission replaces the earlier one.
    """
    r = {}
    for job, datetime, user, assignmentname, passed, failed in s:
        sub_so_far = r.get(assignmentname,[])
        try:
            # Index of this user's existing entry (ValueError if first time).
            tmp = [ x[2] for x in sub_so_far ].index(user)
            if assignmentname[0:3] == 'lab': #do not update
                pass
                #increase attempt counter but do not change the first set of data
                sub_so_far[tmp][-1] += 1
            else:
                attempts = sub_so_far[tmp][-1] + 1
                sub_so_far[tmp] = [job, datetime, user, passed, failed, attempts] #override older submission entry
        except ValueError:
            sub_so_far.append( [job, datetime, user, passed, failed, 1])
        r[assignmentname] = sub_so_far
    return r
def max_min_avg(l):
    """Return (maximum, minimum, arithmetic mean) of a non-empty sequence."""
    count = len(l)
    return max(l), min(l), sum(l) / float(count)
def compute_statistics(subs):
    """Called with object r, like

    In [15]: r['training3']
    Out[15]:
    [[20, '2011-10-04 11:37:32', '<EMAIL>', 3, 0, 2],
    [34, '2011-10-05 10:33:32', '<EMAIL>', 3, 0, 1],
    [43, '2011-10-05 18:43:32', '<EMAIL>', 3, 0, 3],
    [52, '2011-10-05 21:26:32', '<EMAIL>', 3, 0, 2],
    [67, '2011-10-06 11:34:32', '<EMAIL>', 3, 0, 1]]

    Returns (max_pass, min_pass, avg_pass, avg_pass_pct,
             max_attempt, min_attempt, avg_attempt).
    """
    # n is computed but unused — kept for parity with the original.
    n = len(subs)
    passed = [s[3] for s in subs]
    total = [s[3]+s[4] for s in subs]
    max_pass, min_pass, avg_pass = max_min_avg(passed)
    # NOTE(review): uses total[0] as the denominator, i.e. assumes every
    # submission of an assignment has the same number of tests — confirm.
    avg_pass_pct = avg_pass / float(total[0])
    attempts = [s[5] for s in subs]
    max_attempt, min_attempt, avg_attempt = max_min_avg(attempts)
    return max_pass, min_pass, avg_pass, avg_pass_pct, max_attempt, min_attempt, avg_attempt
s = scan_log_file(logfile)
r = create_summary_ordered_by_assignments(s)
import time
print("Last updated: %s" % time.asctime())
# Python 2 print statements below; the trailing comma keeps the three
# fragments of each report row on one line.
for key in sorted(r.keys()):
    print "%15s : %3d unique submissions" %(key,len(r[key])),
    retval = compute_statistics(r[key])
    max_pass, min_pass, avg_pass, avg_pass_pct, max_attempt, min_attempt, avg_attempt=retval
    print "pass : (max=%d, min=%d, avg=%4.2f=%4.1f%%)" % (max_pass, min_pass, avg_pass, avg_pass_pct*100),
    print "#( %d, %d, %4.2f )" % (max_attempt, min_attempt, avg_attempt)
|
<reponame>bongani-m/element
const handlers = require('./handlers');
/**
 * Applies one anchored operation to the current state via the matching
 * handler. Any failure (unknown operation, handler error, malformed
 * operation) is logged and leaves the state unchanged.
 */
const reducer = async (state = {}, anchoredOperation) => {
  try {
    const { operation } = anchoredOperation.decodedOperation.header;
    if (!handlers[operation]) {
      throw new Error('operation not supported');
    }
    // Keep handlers[...] as the call receiver so any `this` usage inside
    // a handler behaves exactly as before.
    return await handlers[operation](state, anchoredOperation);
  } catch (e) {
    console.warn('Operation rejected', e);
    console.warn('Operation: ', anchoredOperation);
    console.warn('State: ', state);
    return state;
  }
};
module.exports = reducer;
|
#!/bin/bash
# Generate an output file named after this script and mirror all script
# output (stdout and stderr) into it via tee.
# Fixes: quote $0/$outfile against spaces; drop the stray `2>&1` on rm,
# which redirected rm's stderr for no reason.
outfile=$(basename -s .sh "$0").out
rm -f "$outfile"
exec > >(tee -a "$outfile") 2>&1

# NOTE(review): credentials are hard-coded; use a wallet or environment
# variable outside a lab environment.
sqlplus -s sys/Oracle123@pdb1 as sysdba << EOF
set lines 110
set pages 9999
col audit_option format A20
col policy_name format A18
select POLICY_NAME, AUDIT_OPTION, CONDITION_EVAL_OPT from AUDIT_UNIFIED_POLICIES where POLICY_NAME in ('AUD_ROLE_POL','AUD_DBA_POL');
col policy_name format A38
col entity_name format A28
column entity_type format a14
select POLICY_NAME, ENABLED_OPTION, ENTITY_NAME, ENTITY_TYPE, SUCCESS, FAILURE from AUDIT_UNIFIED_ENABLED_POLICIES order by policy_name;
EOF
exit
|
#!/bin/bash
# Copyright 2016 OPNFV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
rm -f /tmp/notifications
# Append each stdin line to the notifications file.
# -r stops read from interpreting backslashes; quoting the expansion stops
# the shell re-splitting the remainder. The first word and the rest are
# still concatenated without a separator, preserving the original output.
while read -r x y
do
    echo "$x$y" >> /tmp/notifications
done
|
#!/bin/bash
# ---------------------------------------
# STOP
# ---------------------------------------
# check if parameter passed to define script namespaces for cron, supervisord, etc
if [ -n "$1" ]; then
    ns=$1
else
    ns="ik-script"
fi
# underscore version (kept although unused here — other scripts may source this)
ns_u=${ns//-/_}
# stop supervisord
service supervisord stop
# stop activemq if running (quote the namespace against word splitting)
/etc/init.d/activemq stop "$ns"
# ---------------------------------------
# RESTART
# ---------------------------------------
# restart cron
/etc/init.d/cron restart
# start activemq
/etc/init.d/activemq start "$ns"
# remove the stale supervisor socket only when it exists; the original
# unconditional unlink printed an error when supervisord was not running
[ -e /tmp/supervisor.sock ] && unlink /tmp/supervisor.sock
# start/restart supervisor
service supervisord start
def sum_2D_array(arr):
    """Return the sum of all the elements in a 2D array."""
    return sum(value for row in arr for value in row)
import pandas as pd
# Load the data
products_data = pd.read_csv('products.csv')
# Create a dictionary of products and their features
# Maps product_id -> list of product_feature values; the CSV is assumed to
# hold one (product_id, product_feature) pair per row — TODO confirm schema.
products_dict = {}
for _, row in products_data.iterrows():
    product = row['product_id']
    feature = row['product_feature']
    if product not in products_dict:
        products_dict[product] = [feature]
    else:
        products_dict[product].append(feature)
# Make product recommendations based on customer interests
def make_recommendations(interests):
    """Return ids of products whose feature list contains any given interest."""
    return [
        product_id
        for product_id, feature_list in products_dict.items()
        if any(topic in feature_list for topic in interests)
    ]
# Example
# Prints the ids of every product tagged with either 'games' or 'action'.
recommendations = make_recommendations(['games', 'action'])
print(recommendations)
/* InputUtils.cpp */
//----------------------------------------------------------------------------------------
//
// Project: CCore 3.50
//
// Tag: Applied
//
// License: Boost Software License - Version 1.0 - August 17th, 2003
//
// see http://www.boost.org/LICENSE_1_0.txt or the local copy
//
// Copyright (c) 2017 <NAME>. All rights reserved.
//
//----------------------------------------------------------------------------------------
#include <CCore/inc/InputUtils.h>
namespace CCore {
/* class ReadConBase */
#ifdef CCORE_UTF8
// Move the buffered, still-unread bytes down so the window starts at
// index 0, making room at the end of buf for the next read().
void ReadConBase::shift()
 {
  if( !off ) return;
  for(ulen i=0; i<len ;i++) buf[i]=buf[i+off];
  off=0;
 }
// Assemble a Utf8Code from the lead byte plus up to three continuation
// bytes; len is the symbol length from Utf8Len(). Lengths outside 1..4
// fall back to the single-byte form via the default label.
Utf8Code ReadConBase::GetCode(char ch,const char *ptr,unsigned len)
 {
  switch( len )
    {
     default: [[fallthrough]];
     case 1 : return Utf8Code(ch);
     case 2 : return Utf8Code(ch,ptr[1]);
     case 3 : return Utf8Code(ch,ptr[1],ptr[2]);
     case 4 : return Utf8Code(ch,ptr[1],ptr[2],ptr[3]);
    }
 }
// Start with an empty buffer window (no offset, no buffered bytes).
ReadConBase::ReadConBase()
 : off(0),
   len(0)
 {
 }
ReadConBase::~ReadConBase()
 {
 }
// get
// Try to decode one complete UTF-8 symbol from the buffered bytes without
// blocking. Bytes with an invalid lead (Utf8Len()==0) are skipped; a
// malformed continuation discards the partial prefix and restarts the
// scan. Returns false when the buffer is empty or holds only a prefix.
bool ReadConBase::try_get(Utf8Code &ret)
 {
  char ch;
  unsigned symlen;
  start:
  for(;;)
    {
     if( !len ) return false;
     ch=buf[off];
     symlen=Utf8Len(ch);
     if( symlen!=0 ) break;
     // invalid lead byte: drop it and keep scanning
     off++;
     len--;
    }
  if( symlen>len ) return false; // symbol not fully buffered yet
  for(unsigned i=1; i<symlen ;i++)
    if( !Utf8Ext(buf[off+i]) )
      {
       // malformed continuation: discard the prefix and restart
       off+=i;
       len-=i;
       goto start;
      }
  ret=GetCode(ch,buf+off,symlen);
  off+=symlen;
  len-=symlen;
  return true;
 }
// Blocking get: retries in DefaultTimeout slices until a symbol arrives.
Utf8Code ReadConBase::get()
 {
  Utf8Code ret;
  while( !get(DefaultTimeout,ret) );
  return ret;
 }
// Timeout-bounded get, expressed through a TimeScope.
bool ReadConBase::get(MSec timeout,Utf8Code &ret)
 {
  return get(TimeScope(timeout),ret);
 }
// Refill the buffer from read() until a complete symbol can be decoded or
// the time scope expires; shift() compacts the buffer between attempts.
bool ReadConBase::get(TimeScope time_scope,Utf8Code &ret)
 {
  if( try_get(ret) ) return true;
  shift();
  while( +time_scope.get() )
    {
     ulen delta=read(buf+len,Len-len,time_scope);
     if( delta==0 ) continue;
     len+=delta;
     if( try_get(ret) ) return true;
     shift();
    }
  return false;
 }
#else
// Non-UTF8 build: every byte is a complete symbol, so shifting only needs
// to reset the offset (buffered bytes are consumed before refills).
void ReadConBase::shift()
 {
  off=0;
 }
ReadConBase::ReadConBase()
 : off(0),
   len(0)
 {
 }
ReadConBase::~ReadConBase()
 {
 }
// get
// Pop one buffered byte if available.
bool ReadConBase::try_get(char &ret)
 {
  if( !len ) return false;
  ret=buf[off];
  off++;
  len--;
  return true;
 }
// Single-refill get; returns 0 when read() delivers nothing.
char ReadConBase::get()
 {
  char ret;
  if( try_get(ret) ) return ret;
  shift();
  ulen delta=read(buf,Len);
  if( delta==0 ) return 0;
  len=delta;
  try_get(ret);
  return ret;
 }
// Timeout-bounded variants: one refill attempt, then retry the buffer.
bool ReadConBase::get(MSec timeout,char &ret)
 {
  if( try_get(ret) ) return true;
  shift();
  ulen delta=read(buf,Len,timeout);
  if( delta==0 ) return false;
  len=delta;
  return try_get(ret);
 }
bool ReadConBase::get(TimeScope time_scope,char &ret)
 {
  if( try_get(ret) ) return true;
  shift();
  ulen delta=read(buf,Len,time_scope);
  if( delta==0 ) return false;
  len=delta;
  return try_get(ret);
 }
#endif
/* class SymbolParser */
#ifdef CCORE_UTF8
// Accumulate bytes from text until buf holds one complete UTF-8 symbol.
// Returns false when text runs out mid-symbol (parser state persists
// across calls); invalid lead bytes are skipped, and a broken sequence
// restarts the scan at the next lead byte.
bool SymbolParser::feed(StrLen &text)
 {
  if( len==0 )
    {
     start:
     char ch;
     do
       {
        if( !text ) return false;
        ch=*text;
        ++text;
        symlen=Utf8Len(ch);
       }
     while( symlen==0 );
     buf[len++]=ch;
    }
  while( len<symlen )
    {
     if( !text ) return false;
     char ch=*text;
     if( Utf8Ext(ch) )
       {
        buf[len++]=ch;
        ++text;
       }
     else
       {
        // broken sequence: drop the partial symbol and rescan
        len=0;
        goto start;
       }
    }
  return true;
 }
#else
// Non-UTF8 build: every byte is a complete symbol, so one byte suffices.
bool SymbolParser::feed(StrLen &text)
 {
  if( len==0 )
    {
     if( !text ) return false;
     buf=*text;
     len++;
     ++text;
    }
  return true;
 }
#endif
} // namespace CCore
|
<reponame>anticipasean/girakkafunc<filename>func-futurestream/src/main/java/cyclops/async/reactive/futurestream/pipeline/stream/InfiniteClosingSpliterator.java
package cyclops.async.reactive.futurestream.pipeline.stream;
import cyclops.async.queue.Queue;
import cyclops.async.exception.ClosedQueueException;
import cyclops.reactive.subscription.Continueable;
import java.util.Objects;
import java.util.Spliterator;
import java.util.function.Consumer;
import java.util.function.Supplier;
/**
 * Spliterator over an infinite Supplier-backed source that terminates
 * when its subscription (or backing queue) is closed.
 */
public class InfiniteClosingSpliterator<T> implements Spliterator<T> {

    /** Source of elements; invoked once per advance. */
    final Supplier<T> s;
    /** Subscription consulted to detect stream closure. */
    private final Continueable subscription;
    /** Backing queue registered with the subscription (may be null). */
    private final Queue queue;
    /** Remaining-size estimate; halved on every split. */
    private long estimate;

    protected InfiniteClosingSpliterator(final long estimate,
                                         final Supplier<T> s,
                                         final Continueable subscription,
                                         final Queue queue) {
        this.estimate = estimate;
        this.s = s;
        this.subscription = subscription;
        this.queue = queue;
        this.subscription.addQueue(queue);
    }

    public InfiniteClosingSpliterator(final long estimate,
                                      final Supplier<T> s,
                                      final Continueable subscription) {
        this.estimate = estimate;
        this.s = s;
        this.subscription = subscription;
        this.queue = null;
    }

    @Override
    public long estimateSize() {
        return estimate;
    }

    @Override
    public int characteristics() {
        return IMMUTABLE;
    }

    @Override
    public boolean tryAdvance(final Consumer<? super T> action) {
        Objects.requireNonNull(action);
        try {
            action.accept(s.get());
            // Keep advancing until the subscription reports closure.
            return !subscription.closed();
        } catch (final ClosedQueueException e) {
            // Queue closed mid-read: normal termination of the stream.
            return false;
        } catch (final Exception e) {
            // NOTE(review): every exception silently ends the stream here —
            // confirm failures should not propagate.
            return false;
        }
    }

    @Override
    public Spliterator<T> trySplit() {
        // Halve the remaining estimate and share it with the new spliterator.
        // (Was a raw-type construction with the mutation buried inside the
        // argument list; now parameterized with <T> and made explicit.)
        estimate >>>= 1;
        return new InfiniteClosingSpliterator<>(estimate,
                                                s,
                                                subscription,
                                                queue);
    }
}
|
package me.batizhao.uaa;
import me.batizhao.common.feign.annotation.EnablePecadoFeignClients;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cloud.client.discovery.EnableDiscoveryClient;
/**
* @author batizhao
* @since 2016/9/28
*/
// Registers the service with discovery and enables the Pecado Feign clients.
@EnableDiscoveryClient
@SpringBootApplication
@EnablePecadoFeignClients
public class PecadoUaaApplication {
    /** Boot entry point: launches the Spring application context. */
    public static void main(String[] args) {
        SpringApplication.run(PecadoUaaApplication.class, args);
    }
}
|
<reponame>yamanakahirofumi/RogueInJava<gh_stars>0
package org.hiro.things;
/**
 * Weapon types carried over from classic Rogue; each constant holds its
 * stable numeric id.
 */
public enum WeaponEnum {
    /*
     * Weapon types
     */
    MACE(0),
    SWORD(1),
    BOW(2),
    ARROW(3),
    DAGGER(4),
    TWOSWORD(5),
    DART(6),
    SHIRAKEN(7),
    SPEAR(8),
    FLAME(9); /* fake entry for dragon breath (ick) */
    // MAXWEAPONS(9), /* this should equal FLAME */

    /** Numeric id of the weapon type; immutable after construction. */
    private final int value;

    WeaponEnum(int value) {
        this.value = value;
    }

    /** @return the numeric id assigned to this weapon type. */
    public int getValue() {
        return value;
    }

    /**
     * @return the largest weapon id; equals FLAME's value as long as the
     *         ids stay dense and zero-based.
     */
    public static int getMaxValue() {
        return WeaponEnum.values().length - 1;
    }
}
|
# platform = multi_platform_rhel
# The two fingerprints below are retrieved from https://access.redhat.com/security/team/key
readonly REDHAT_RELEASE_2_FINGERPRINT="567E347AD0044ADE55BA8A5F199E2F91FD431D51"
readonly REDHAT_AUXILIARY_FINGERPRINT="43A6E49C4A38F4BE9ABF2A5345689C882FA658E0"
# Location of the key we would like to import (once it's integrity verified)
readonly REDHAT_RELEASE_KEY="/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release"
RPM_GPG_DIR_PERMS=$(stat -c %a "$(dirname "$REDHAT_RELEASE_KEY")")
# Verify /etc/pki/rpm-gpg directory permissions are safe
if [ "${RPM_GPG_DIR_PERMS}" -le "755" ]
then
    # If they are safe, try to obtain fingerprints from the key file
    # (to ensure there won't be e.g. CRC error).
    # Backup IFS value
    IFS_BKP=$IFS
    # Jinja-templated: rhel8's gpg uses --show-key, older releases the
    # deprecated --with-fingerprint form. Both emit "fpr" colon records.
    {{% if product == "rhel8" %}}
    IFS=$'\n' GPG_OUT=($(gpg --show-key --with-colons "$REDHAT_RELEASE_KEY" | grep "^fpr" | cut -d ":" -f 10))
    {{% else %}}
    IFS=$'\n' GPG_OUT=($(gpg --with-fingerprint --with-colons "$REDHAT_RELEASE_KEY" | grep "^fpr" | cut -d ":" -f 10))
    {{% endif %}}
    GPG_RESULT=$?
    # Reset IFS back to default
    IFS=$IFS_BKP
    # No CRC error, safe to proceed
    if [ "${GPG_RESULT}" -eq "0" ]
    then
        # Import only when every fingerprint in the file is a known one.
        echo "${GPG_OUT[*]}" | grep -vE "${REDHAT_RELEASE_2_FINGERPRINT}|${REDHAT_AUXILIARY_FINGERPRINT}" || {
            # If $REDHAT_RELEASE_KEY file doesn't contain any keys with unknown fingerprint, import it
            rpm --import "${REDHAT_RELEASE_KEY}"
        }
    fi
fi
|
def normalize(lst):
    """Scale the values of ``lst`` linearly into the range [0.0, 1.0].

    The smallest element maps to 0.0 and the largest to 1.0.

    Args:
        lst: Non-empty sequence of numbers.

    Returns:
        List of floats, same length and order as ``lst``.

    Raises:
        ValueError: If ``lst`` is empty (the original raised an opaque
            ``min() arg is an empty sequence`` instead).
    """
    if not lst:
        raise ValueError("normalize() requires a non-empty sequence")
    min_val = min(lst)
    max_val = max(lst)
    span = max_val - min_val
    if span == 0:
        # All elements equal: the original divided by zero here; map every
        # element to 0.0 instead.
        return [0.0 for _ in lst]
    return [(elem - min_val) / span for elem in lst]
import React from 'react'
import PropTypes from 'prop-types'
import { useFormikContext, getIn } from 'formik'
import cx from 'classnames'
import WithLabel from '../WithLabel'
// Formik-bound radio group: renders one <input type="radio"> per option,
// wired to the enclosing form's values/handlers via useFormikContext().
export const Radio = ({
  disabled,
  name,
  options,
  inline,
  className,
  style,
  ...rest
}) => {
  // Current form values plus Formik's change/blur handlers.
  const { values, handleChange, handleBlur } = useFormikContext()
  return (
    <div className={cx('radio-options', { inline })}>
      {
        options.map(option => (
          <div key={option.label} className="radio-option">
            {/* Props spread first so the controlled props below win. */}
            <input
              className={className}
              style={style}
              onChange={handleChange}
              {...rest}
              checked={getIn(values, name) === option.value}
              id={`${name}-id-${option.value}`}
              value={option.value}
              onBlur={handleBlur}
              name={name}
              disabled={disabled}
              type="radio"
            />
            <label
              htmlFor={`${name}-id-${option.value}`}
            >
              {option.label}
            </label>
          </div>
        ))
      }
    </div>
  )
}
// Runtime prop contracts and defaults for the Radio component.
Radio.propTypes = {
  /** Adds a custom class to each input element of the Radio component */
  className: PropTypes.string,
  /** Adds a custom inline styles to the Radio input element */
  style: PropTypes.instanceOf(Object),
  /** Disables the Radio Fields */
  disabled: PropTypes.bool,
  /** Sets the Name of the Radio Fields */
  name: PropTypes.string.isRequired,
  /** Sets the main Label for the Radio Fields */
  label: PropTypes.string,
  /** Array in the shape of [ { value: string or number, label: string } ] */
  options: PropTypes.arrayOf(PropTypes.shape({
    label: PropTypes.string.isRequired,
    value: PropTypes.oneOfType([
      PropTypes.string,
      PropTypes.number,
    ]).isRequired,
  })).isRequired,
  /** Displays the radio option inline from left to right */
  inline: PropTypes.bool,
  /** Sets a hint text after/below the Radio component */
  hint: PropTypes.string,
  /** Sets the field as requierd, if label is passed, an * is added to the end of the main label. Validation will only work if you pass the required() method in the yup validation schema */
  required: PropTypes.bool,
}
Radio.defaultProps = {
  className: null,
  style: null,
  label: null,
  disabled: false,
  inline: false,
  hint: null,
  required: false,
}
export default WithLabel('radio')(Radio)
|
<reponame>CasbaL/naive-ui-admin
import { renderIcon } from '@/utils/index';
import { DashboardOutlined } from '@vicons/antd';
// import { RouterTransition } from '@/components/transition'
// Front-end route -> component mapping table
export const constantRouterComponents = {
  Layout: () => import('@/layout/index.vue'), // layout shell
  DashboardConsole: () => import('@/views/dashboard/console/console.vue'), // main console
  DashboardMonitor: () => import('@/views/dashboard/monitor/monitor.vue'), // monitoring page
  DashboardWorkplace: () => import('@/views/dashboard/workplace/workplace.vue'), // workbench
};
// Front-end route -> icon mapping table
export const constantRouterIcon = {
  DashboardOutlined: renderIcon(DashboardOutlined),
};
|
//////////-//////////-//////////-//////////-//////////
// Color-As //
// //
// Ascll Project - Color Library //
// //
// Made by Gaccho. //
//////////-//////////-//////////-//////////-//////////
#ifndef ASCLL_PROJECT_COLOR_LIBRARY_COLOR_AS
#define ASCLL_PROJECT_COLOR_LIBRARY_COLOR_AS
// Umbrella header: pulls in the color-system, JIS color and PCCS/RGB headers.
#include <cstdint>
#include <string>
#include <array>
#include "ColorSystem.hpp"
#include "ColorJIS.hpp"
#include "PCCS_RGB.hpp"
#endif
package cn.zqgx.moniter.center.hj212.format.segment.core.deser;
import cn.zqgx.moniter.center.hj212.format.segment.core.SegmentParser;
import cn.zqgx.moniter.center.hj212.format.segment.exception.SegmentFormatException;
import java.io.IOException;
/**
* Created by xiaoyao9184 on 2018/1/4.
*/
/**
 * Deserializes one value of type {@code Target} from an HJ212 segment stream.
 *
 * @param <Target> type produced by this deserializer
 */
public interface SegmentDeserializer<Target> {

    /**
     * Parse a single {@code Target} from the given parser.
     *
     * @param parser source of segment tokens
     * @return the deserialized value
     * @throws IOException on underlying read failure
     * @throws SegmentFormatException when the input violates the segment format
     */
    Target deserialize(SegmentParser parser) throws IOException, SegmentFormatException;
}
|
/// <summary>
/// Maps the length of <paramref name="value"/> onto a short/medium/long
/// label and writes it to the bound text component.
/// Buckets: [0,5) short, [5,10] medium, (10,∞) long.
/// </summary>
protected override void UpdateValue(string value)
{
    if (value.Length < 5)
    {
        _component.text = "Short Text";
    }
    else if (value.Length <= 10)
    {
        // Reaching this branch already implies Length >= 5.
        _component.text = "Medium Text";
    }
    else
    {
        _component.text = "Long Text";
    }
}
<reponame>smagill/opensphere-desktop
package io.opensphere.core.util.collections.petrifyable;
import java.util.Collection;
import java.util.Random;
import gnu.trove.TCollections;
import gnu.trove.TLongCollection;
import gnu.trove.function.TLongFunction;
import gnu.trove.iterator.TLongIterator;
import gnu.trove.list.TLongList;
import gnu.trove.list.array.TLongArrayList;
import gnu.trove.procedure.TLongProcedure;
import io.opensphere.core.util.Constants;
import io.opensphere.core.util.MathUtil;
/**
* A list of primitive longs backed by a {@link TLongArrayList} that is also
* {@link Petrifyable}.
*/
@SuppressWarnings("PMD.GodClass")
public class PetrifyableTLongArrayList extends AbstractPetrifyable implements PetrifyableTLongList
{
    /** The wrapped list. */
    private TLongList myList;

    /**
     * Create a new empty collection with the default capacity.
     */
    public PetrifyableTLongArrayList()
    {
        myList = new TLongArrayList();
    }

    /**
     * Create a new empty collection with a certain capacity.
     *
     * @param capacity The number of longs that can be held by the list.
     */
    public PetrifyableTLongArrayList(int capacity)
    {
        myList = new TLongArrayList(capacity);
    }

    /**
     * Create a new instance using an array of longs for the initial values.
     * <p>
     * NOTE: This constructor copies the given array.
     *
     * @param values The initial values for the list.
     */
    public PetrifyableTLongArrayList(long[] values)
    {
        myList = new TLongArrayList(values);
    }

    /**
     * Create a new instance from another collection.
     * <p>
     * NOTE: This constructor iterates over the contents of the given
     * collection, so it may be slow for large arrays.
     *
     * @param values The initial values for the list.
     */
    public PetrifyableTLongArrayList(TLongCollection values)
    {
        myList = new TLongArrayList(values);
    }

    // The bulk of this class is straight delegation to the wrapped TLongList;
    // after petrify() the wrapper is unmodifiable and mutators will throw.

    @Override
    public boolean add(long val)
    {
        return myList.add(val);
    }

    @Override
    public void add(long[] vals)
    {
        myList.add(vals);
    }

    @Override
    public void add(long[] vals, int offset, int length)
    {
        myList.add(vals, offset, length);
    }

    @Override
    public boolean addAll(Collection<? extends Long> collection)
    {
        return myList.addAll(collection);
    }

    @Override
    public boolean addAll(long[] array)
    {
        return myList.addAll(array);
    }

    @Override
    public boolean addAll(TLongCollection collection)
    {
        return myList.addAll(collection);
    }

    @Override
    public int binarySearch(long value)
    {
        return myList.binarySearch(value);
    }

    @Override
    public int binarySearch(long value, int fromIndex, int toIndex)
    {
        return myList.binarySearch(value, fromIndex, toIndex);
    }

    @Override
    public void clear()
    {
        myList.clear();
    }

    @Override
    public boolean contains(long value)
    {
        return myList.contains(value);
    }

    @Override
    public boolean containsAll(Collection<?> collection)
    {
        return myList.containsAll(collection);
    }

    @Override
    public boolean containsAll(long[] array)
    {
        return myList.containsAll(array);
    }

    @Override
    public boolean containsAll(TLongCollection collection)
    {
        return myList.containsAll(collection);
    }

    @Override
    public void fill(int fromIndex, int toIndex, long val)
    {
        myList.fill(fromIndex, toIndex, val);
    }

    @Override
    public void fill(long val)
    {
        myList.fill(val);
    }

    @Override
    public boolean forEach(TLongProcedure procedure)
    {
        return myList.forEach(procedure);
    }

    @Override
    public boolean forEachDescending(TLongProcedure procedure)
    {
        return myList.forEachDescending(procedure);
    }

    @Override
    public long get(int offset)
    {
        return myList.get(offset);
    }

    @Override
    public long getNoEntryValue()
    {
        return myList.getNoEntryValue();
    }

    // Estimate of heap usage: wrapper object + TLongArrayList header + the
    // backing long[] array, each rounded up to the memory block size.
    @Override
    public long getSizeBytes()
    {
        return MathUtil.roundUpTo(Constants.OBJECT_SIZE_BYTES + Constants.REFERENCE_SIZE_BYTES + Constants.BOOLEAN_SIZE_BYTES,
                Constants.MEMORY_BLOCK_SIZE_BYTES)
                + MathUtil.roundUpTo(Constants.OBJECT_SIZE_BYTES + Constants.REFERENCE_SIZE_BYTES + Constants.INT_SIZE_BYTES
                        + Constants.LONG_SIZE_BYTES, Constants.MEMORY_BLOCK_SIZE_BYTES)
                + MathUtil.roundUpTo(Constants.ARRAY_SIZE_BYTES + size() * Constants.LONG_SIZE_BYTES,
                        Constants.MEMORY_BLOCK_SIZE_BYTES);
    }

    @Override
    public TLongList grep(TLongProcedure condition)
    {
        return myList.grep(condition);
    }

    @Override
    public int indexOf(int offset, long value)
    {
        return myList.indexOf(offset, value);
    }

    @Override
    public int indexOf(long value)
    {
        return myList.indexOf(value);
    }

    @Override
    public void insert(int offset, long value)
    {
        myList.insert(offset, value);
    }

    @Override
    public void insert(int offset, long[] values)
    {
        myList.insert(offset, values);
    }

    @Override
    public void insert(int offset, long[] values, int valOffset, int len)
    {
        myList.insert(offset, values, valOffset, len);
    }

    @Override
    public TLongList inverseGrep(TLongProcedure condition)
    {
        return myList.inverseGrep(condition);
    }

    @Override
    public boolean isEmpty()
    {
        return myList.isEmpty();
    }

    @Override
    public TLongIterator iterator()
    {
        return myList.iterator();
    }

    @Override
    public int lastIndexOf(int offset, long value)
    {
        return myList.lastIndexOf(offset, value);
    }

    @Override
    public int lastIndexOf(long value)
    {
        return myList.lastIndexOf(value);
    }

    @Override
    public long max()
    {
        return myList.max();
    }

    @Override
    public long min()
    {
        return myList.min();
    }

    // Make the list permanently immutable: trim the backing array to size
    // and swap in an unmodifiable wrapper. Idempotent; synchronized so two
    // threads cannot interleave the trim-and-wrap sequence.
    @Override
    public synchronized void petrify()
    {
        if (!isPetrified())
        {
            super.petrify();
            ((TLongArrayList)myList).trimToSize();
            myList = TCollections.unmodifiableList(myList);
        }
    }

    @Override
    public void remove(int offset, int length)
    {
        myList.remove(offset, length);
    }

    @Override
    public boolean remove(long value)
    {
        return myList.remove(value);
    }

    @Override
    public boolean removeAll(Collection<?> collection)
    {
        return myList.removeAll(collection);
    }

    @Override
    public boolean removeAll(long[] array)
    {
        return myList.removeAll(array);
    }

    @Override
    public boolean removeAll(TLongCollection collection)
    {
        return myList.removeAll(collection);
    }

    @Override
    public long removeAt(int offset)
    {
        return myList.removeAt(offset);
    }

    @Override
    public long replace(int offset, long val)
    {
        return myList.replace(offset, val);
    }

    @Override
    public boolean retainAll(Collection<?> collection)
    {
        return myList.retainAll(collection);
    }

    @Override
    public boolean retainAll(long[] array)
    {
        return myList.retainAll(array);
    }

    @Override
    public boolean retainAll(TLongCollection collection)
    {
        return myList.retainAll(collection);
    }

    @Override
    public void reverse()
    {
        myList.reverse();
    }

    @Override
    public void reverse(int from, int to)
    {
        myList.reverse(from, to);
    }

    @Override
    public long set(int offset, long val)
    {
        return myList.set(offset, val);
    }

    @Override
    public void set(int offset, long[] values)
    {
        myList.set(offset, values);
    }

    @Override
    public void set(int offset, long[] values, int valOffset, int length)
    {
        myList.set(offset, values, valOffset, length);
    }

    @Override
    public void shuffle(Random rand)
    {
        myList.shuffle(rand);
    }

    @Override
    public int size()
    {
        return myList.size();
    }

    @Override
    public void sort()
    {
        myList.sort();
    }

    @Override
    public void sort(int fromIndex, int toIndex)
    {
        myList.sort(fromIndex, toIndex);
    }

    // NOTE(review): the returned sublist is a fresh, NON-petrified copy even
    // when this list is petrified — confirm callers expect a mutable copy.
    @Override
    public PetrifyableTLongArrayList subList(int begin, int end)
    {
        return new PetrifyableTLongArrayList(myList.subList(begin, end));
    }

    @Override
    public long sum()
    {
        return myList.sum();
    }

    @Override
    public long[] toArray()
    {
        return myList.toArray();
    }

    @Override
    public long[] toArray(int offset, int len)
    {
        return myList.toArray(offset, len);
    }

    @Override
    public long[] toArray(long[] dest)
    {
        return myList.toArray(dest);
    }

    @Override
    public long[] toArray(long[] dest, int offset, int len)
    {
        return myList.toArray(dest, offset, len);
    }

    @Override
    public long[] toArray(long[] dest, int sourcePos, int destPos, int len)
    {
        return myList.toArray(dest, sourcePos, destPos, len);
    }

    @Override
    public void transformValues(TLongFunction function)
    {
        myList.transformValues(function);
    }
}
|
<filename>src/cpp-ethereum/ethash/include/ethash/hash_types.h
/* ethash: C/C++ implementation of Ethash, the Ethereum Proof of Work algorithm.
* Copyright 2018 <NAME>.
* Licensed under the Apache License, Version 2.0. See the LICENSE file.
*/
#pragma once
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/* 256-bit hash, viewable as 64/32-bit words or raw bytes. */
union ethash_hash256
{
uint64_t word64s[4];
uint32_t word32s[8];
uint8_t bytes[32];
};
/* 512-bit hash, viewable as 64/32-bit words or raw bytes. */
union ethash_hash512
{
uint64_t word64s[8];
uint32_t word32s[16];
uint8_t bytes[64];
};
/* 1024-bit hash: a pair of 512-bit halves, or flat word/byte views. */
union ethash_hash1024
{
union ethash_hash512 hash512s[2];
uint64_t word64s[16];
uint32_t word32s[32];
uint8_t bytes[128];
};
/* 2048-bit hash: four 512-bit parts, or flat word/byte views. */
union ethash_hash2048
{
union ethash_hash512 hash512s[4];
uint64_t word64s[32];
uint32_t word32s[64];
uint8_t bytes[256];
};
#ifdef __cplusplus
}
#endif
|
<gh_stars>1-10
""" Global city data. """
CITIES = {}
|
import functools
import logging
import threading
from node import constants
from sqlite3 import dbapi2
class Obdb(object):
    """
    API for DB storage. Serves as segregation of the persistence
    layer and the application logic.

    Every public query method is wrapped by ``_managedmethod``: each call
    opens a fresh connection under a process-wide lock, authenticates if
    the database is encrypted, commits, and closes the connection.
    """

    def __init__(self, db_path, disable_sqlite_crypt=False):
        """
        @param db_path: Filesystem path of the sqlite database file.
        @param disable_sqlite_crypt: If True, skip the PRAGMA key login.
        """
        self.db_path = db_path
        self.con = None
        self.disable_sqlite_crypt = disable_sqlite_crypt
        self._log = logging.getLogger('DB')
        self._lock = threading.Lock()
        # Store Python bools as INTEGERs and convert them back on read.
        dbapi2.register_adapter(bool, int)
        dbapi2.register_converter("bool", lambda v: bool(int(v)))

    def _login(self, passphrase=constants.DB_PASSPHRASE):
        """Enable access to an encrypted database."""
        # NOTE(review): sqlite cannot bind parameters in PRAGMA statements,
        # so the passphrase is interpolated directly; it must never come
        # from untrusted input.
        cursor = self.con.cursor()
        cursor.execute("PRAGMA key = '%s';" % passphrase)

    def _make_db_connection(self):
        """Create and return a DB connection."""
        return dbapi2.connect(
            self.db_path,
            detect_types=dbapi2.PARSE_DECLTYPES,
            timeout=10
        )

    # pylint: disable=no-self-argument
    # pylint: disable=not-callable
    def _managedmethod(func):
        """
        Decorator for abstracting the setting up and tearing down of a
        DB operation. It handles:
        * Synchronizing multiple DB accesses.
        * Opening and closing DB connections.
        * Authenticating the user if the database is encrypted.
        A function wrapped by this decorator may use the database
        connection (via self.con) in order to operate on the DB
        but shouldn't close the connection or manage it in any other way.
        """
        @functools.wraps(func)
        def managed_func(self, *args, **kwargs):
            with self._lock, self._make_db_connection() as self.con:
                # Rows come back as dicts (see _dict_factory).
                self.con.row_factory = self._dict_factory
                if not self.disable_sqlite_crypt:
                    self._login()
                ret_val = func(self, *args, **kwargs)
                self.con.commit()
                return ret_val
        return managed_func

    @staticmethod
    def _dict_factory(cursor, row):
        """
        A factory that allows sqlite to return a dictionary instead of a tuple.
        NULL column values are mapped to the empty string.
        """
        dictionary = {}
        for idx, col in enumerate(cursor.description):
            if row[idx] is None:
                dictionary[col[0]] = ""
            else:
                dictionary[col[0]] = row[idx]
        return dictionary

    @staticmethod
    def _before_storing(value):
        """Coerce a key or value to unicode before binding it into SQL."""
        return unicode(value)

    def get_or_create(self, table, where_dict, data_dict=False):
        """
        This method attempts to grab the record first. If it fails to
        find it, it will create it.
        @param table: The table to search
        @param where_dict: A dictionary with the WHERE clauses
        @param data_dict: A dictionary with the SET clauses used on
            creation; defaults to where_dict.
        @return: The (possibly freshly created) matching row as a dict.
        """
        if not data_dict:
            data_dict = where_dict
        entries = self.select_entries(table, where_dict)
        if entries:
            # Found: no need to re-query.
            return entries[0]
        self.insert_entry(table, data_dict)
        return self.select_entries(table, where_dict)[0]

    @_managedmethod
    def update_entries(self, table, set_dict, where_dict=None, operator="AND"):
        """
        A wrapper for the SQL UPDATE operation.
        @param table: The table to update
        @param set_dict: A dictionary with the SET clauses
        @param where_dict: A dictionary with the WHERE clauses; matches
            every row when omitted.
        @param operator: Boolean operator joining multiple WHERE clauses.
        """
        if where_dict is None:
            where_dict = {'"1"': '1'}
        cur = self.con.cursor()
        sets = []
        wheres = []
        where_part = []
        set_part = []
        for key, value in set_dict.iteritems():
            if type(value) == bool:
                value = bool(value)
            key = self._before_storing(key)
            value = self._before_storing(value)
            sets.append(value)
            set_part.append("%s = ?" % key)
        set_part = ",".join(set_part)
        for key, value in where_dict.iteritems():
            # A dict value of form {'sign': op, 'value': v} selects a
            # custom comparison operator instead of '='.
            sign = "="
            if isinstance(value, dict):
                sign = value["sign"]
                value = value["value"]
            key = self._before_storing(key)
            value = self._before_storing(value)
            wheres.append(value)
            where_part.append("%s %s ?" % (key, sign))
        operator = " " + operator + " "
        where_part = operator.join(where_part)
        # Identifiers cannot be bound as parameters; only values use '?'.
        query = "UPDATE %s SET %s WHERE %s" % (
            table, set_part, where_part
        )
        self._log.debug('query: %s', query)
        cur.execute(query, tuple(sets + wheres))

    @_managedmethod
    def insert_entry(self, table, update_dict):
        """
        A wrapper for the SQL INSERT operation.
        @param table: The table to insert into
        @param update_dict: A dictionary with the values to set
        @return: The rowid of the inserted row, when available.
        """
        cur = self.con.cursor()
        sets = []
        updatefield_part = []
        setfield_part = []
        for key, value in update_dict.iteritems():
            if type(value) == bool:
                value = bool(value)
            key = self._before_storing(key)
            value = self._before_storing(value)
            sets.append(value)
            updatefield_part.append(key)
            setfield_part.append("?")
        updatefield_part = ",".join(updatefield_part)
        setfield_part = ",".join(setfield_part)
        query = "INSERT INTO %s(%s) VALUES(%s)" % (
            table, updatefield_part, setfield_part
        )
        self._log.debug("query: %s", query)
        cur.execute(query, tuple(sets))
        lastrowid = cur.lastrowid
        if lastrowid:
            return lastrowid

    @_managedmethod
    def select_entries(self, table, where_dict=None, operator="AND", order_field="id",
                       order="ASC", limit=None, limit_offset=None, select_fields="*"):
        """
        A wrapper for the SQL SELECT operation.
        @param table: The table to search
        @param where_dict: A dictionary with the WHERE clauses. If omitted,
            it will return all the rows of the table.
        @param select_fields: Comma-separated column list to fetch;
            defaults to all columns.
        @return: List of matching rows as dicts.
        """
        if where_dict is None:
            where_dict = {'"1"': '1'}
        cur = self.con.cursor()
        wheres = []
        where_part = []
        for key, value in where_dict.iteritems():
            sign = "="
            if isinstance(value, dict):
                sign = value["sign"]
                value = value["value"]
            key = self._before_storing(key)
            value = self._before_storing(value)
            wheres.append(value)
            where_part.append("%s %s ?" % (key, sign))
        if limit is not None and limit_offset is None:
            limit_clause = "LIMIT %s" % limit
        elif limit is not None and limit_offset is not None:
            limit_clause = "LIMIT %s, %s" % (limit_offset, limit)
        else:
            limit_clause = ""
        operator = " " + operator + " "
        where_part = operator.join(where_part)
        # BUGFIX: honour the select_fields parameter; previously the query
        # hard-coded 'SELECT *' and silently ignored it.
        query = "SELECT %s FROM %s WHERE %s ORDER BY %s %s %s" % (
            select_fields, table, where_part, order_field, order, limit_clause
        )
        self._log.debug("query: %s", query)
        cur.execute(query, tuple(wheres))
        rows = cur.fetchall()
        return rows

    @_managedmethod
    def delete_entries(self, table, where_dict=None, operator="AND"):
        """
        A wrapper for the SQL DELETE operation.
        @param table: The table to delete from
        @param where_dict: A dictionary with the WHERE clauses. If omitted,
            it will delete all the rows of the table.
        """
        if where_dict is None:
            where_dict = {'"1"': '1'}
        cur = self.con.cursor()
        dels = []
        where_part = []
        for key, value in where_dict.iteritems():
            sign = "="
            if isinstance(value, dict):
                sign = value["sign"]
                value = value["value"]
            key = self._before_storing(key)
            value = self._before_storing(value)
            dels.append(value)
            where_part.append("%s %s ?" % (key, sign))
        operator = " " + operator + " "
        where_part = operator.join(where_part)
        query = "DELETE FROM %s WHERE %s" % (
            table, where_part
        )
        self._log.debug('Query: %s', query)
        # Bind as a tuple for consistency with the other wrappers.
        cur.execute(query, tuple(dels))
|
#!/bin/sh
# Install and enable a per-user ssh-agent systemd service.
# Usage: $0 <os>   where <os> is "u" (Ubuntu, unsupported) or "a" (Arch).
os=$1
echo ''
if [ X"$os" = X"u" ]; then
echo "does not support ubuntu"
fi
if [ X"$os" = X"a" ]; then
echo 'Installing ssh-agent for Arch systems'
echo ''
# Quote $HOME so paths containing spaces do not word-split.
mkdir -p "$HOME/.config/systemd/user"
cat ./ssh-agent/ssh-agent.service > "$HOME/.config/systemd/user/ssh-agent.service"
mkdir -p "$HOME/.ssh"
touch "$HOME/.ssh/config"
echo "" >> "$HOME/.ssh/config"
echo "AddKeysToAgent yes" >> "$HOME/.ssh/config"
systemctl --user enable ssh-agent
systemctl --user start ssh-agent
fi
echo ''
|
/* global Analytics */
exports.onRouteUpdate = function({ location }, options) {
console.log('location', location)
}
|
# Resolve ~/.nvm through any symlinks so NVM_DIR is a canonical path.
export NVM_DIR=$(realpath "$HOME/.nvm")
|
# SABR pipeline on the SUN dataset: first train the conditional WGAN stage,
# then the separate/marginal stage with --gzsl for generalized zero-shot eval.
# The commented figures below each command record previously observed results.
python cls_condwgan.py --manualSeed 3483 --cls_weight 0.1 --preprocessing --image_embedding "./SABR/features/SUN/features_resnet_ae_nodec" --class_embedding "./SABR/features/SUN/att" --netG_name MLP_G --netD_name MLP_CRITIC --nepoch 66 --ngh 2048 --ndh 4096 --lr 0.0002 --classifier_lr 0.001 --lambda1 10 --critic_iter 5 --dataset SUN --batch_size 128 --nz 102 --attSize 102 --resSize 1024 --syn_num 2400 --modeldir "./SABR/models/SUN/SABR-T/models_sun/" --model_path "./SABR/models/SUN/logs_classifier_now/models_134.ckpt" --nclass_all 717
#unseen class accuracy= 62.361111111111114
#unseen=49.3750, seen=32.0155, h=38.8440
python sep_clswgan.py --manualSeed 3483 --cls_weight 0.1 --preprocessing --image_embedding "./SABR/features/SUN/features_resnet_ae_nodec" --class_embedding "./SABR/features/SUN/att" --netG_name MLP_G --netD_name MLP_CRITIC --nepoch 110 --ngh 2048 --ndh 4096 --lr 0.0001 --classifier_lr 0.001 --lambda1 10 --critic_iter 5 --dataset SUN --batch_size 128 --nz 102 --attSize 102 --resSize 1024 --syn_num 300 --modeldir "./SABR/models/SUN/SABR-T/marg_sun/" --generator_checkpoint 65 --conditional_modeldir "./SABR/models/SUN/SABR-T/models_sun/" --model_path "./SABR/models/SUN/logs_classifier_now/models_134.ckpt" --regulariser 0.002 --nclass_all 717 --gzsl
#unseen=58.8194, seen=41.4729, h=48.6460
|
# Sync every localization file against the English reference strings
# (-t template, -i input, -r: reorder/prune to match the template),
# one localisort invocation per supported language, in the same order
# as before.
base="./layout/Library/Application Support/CCSupport"
for lang in ar de es fr it ja ms nl zh-Hans zh-Hant; do
localisort -t "$base/en.lproj/Localizable.strings" -i "$base/$lang.lproj/Localizable.strings" -r
done
|
def count_alphabets(string):
    """Return a dict mapping each character of *string* to its occurrence count.

    Despite the name, every character is counted — including spaces and
    punctuation, not just letters. An empty string yields an empty dict.

    >>> count_alphabets('hello world')
    {'h': 1, 'e': 1, 'l': 3, 'o': 2, ' ': 1, 'w': 1, 'r': 1, 'd': 1}
    """
    alphabets = {}
    for char in string:
        # dict.get with a default replaces the explicit membership test.
        alphabets[char] = alphabets.get(char, 0) + 1
    return alphabets
#!/bin/sh -u
# Architecture commands for GDB, the GNU debugger.
#
# Copyright (C) 1998-2019 Free Software Foundation, Inc.
#
# This file is part of GDB.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Make certain that the script is not running in an internationalized
# environment.
# Force the C locale so sort order and generated output are reproducible.
LANG=C ; export LANG
LC_ALL=C ; export LC_ALL
# Compare a freshly generated new-${file} against the checked-in ${file}
# and print what the user should do; never overwrites anything itself.
compare_new ()
{
file=$1
if test ! -r ${file}
then
echo "${file} missing? cp new-${file} ${file}" 1>&2
elif diff -u ${file} new-${file}
then
echo "${file} unchanged" 1>&2
else
echo "${file} has changed? cp new-${file} ${file}" 1>&2
fi
}
# Format of the input table
read="class returntype function formal actual staticdefault predefault postdefault invalid_p print garbage_at_eol"
# Read the next non-comment entry from stdin, splitting it on ';' into the
# per-field shell variables named in ${read}. Leading '#' lines accumulate
# into ${comment}. Returns success while an entry was read, failure at EOF.
do_read ()
{
comment=""
class=""
# On some SH's, 'read' trims leading and trailing whitespace by
# default (e.g., bash), while on others (e.g., dash), it doesn't.
# Set IFS to empty to disable the trimming everywhere.
while IFS='' read line
do
if test "${line}" = ""
then
continue
elif test "${line}" = "#" -a "${comment}" = ""
then
continue
elif expr "${line}" : "#" > /dev/null
then
comment="${comment}
${line}"
else
# The semantics of IFS varies between different SH's. Some
# treat ``;;' as three fields while some treat it as just two.
# Work around this by eliminating ``;;'' ....
line="`echo "${line}" | sed -e 's/;;/; ;/g' -e 's/;;/; ;/g'`"
OFS="${IFS}" ; IFS="[;]"
eval read ${read} <<EOF
${line}
EOF
IFS="${OFS}"
if test -n "${garbage_at_eol}"
then
echo "Garbage at end-of-line in ${line}" 1>&2
kill $$
exit 1
fi
# .... and then going back through each field and strip out those
# that ended up with just that space character.
for r in ${read}
do
if eval test \"\${${r}}\" = \"\ \"
then
eval ${r}=""
fi
done
case "${class}" in
m ) staticdefault="${predefault}" ;;
M ) staticdefault="0" ;;
* ) test "${staticdefault}" || staticdefault=0 ;;
esac
case "${class}" in
F | V | M )
case "${invalid_p}" in
"" )
if test -n "${predefault}"
then
#invalid_p="gdbarch->${function} == ${predefault}"
predicate="gdbarch->${function} != ${predefault}"
elif class_is_variable_p
then
predicate="gdbarch->${function} != 0"
elif class_is_function_p
then
predicate="gdbarch->${function} != NULL"
fi
;;
* )
echo "Predicate function ${function} with invalid_p." 1>&2
kill $$
exit 1
;;
esac
esac
# PREDEFAULT is a valid fallback definition of MEMBER when
# multi-arch is not enabled. This ensures that the
# default value, when multi-arch is the same as the
# default value when not multi-arch. POSTDEFAULT is
# always a valid definition of MEMBER as this again
# ensures consistency.
if [ -n "${postdefault}" ]
then
fallbackdefault="${postdefault}"
elif [ -n "${predefault}" ]
then
fallbackdefault="${predefault}"
else
fallbackdefault="0"
fi
#NOT YET: See gdbarch.log for basic verification of
# database
break
fi
done
# Success iff an entry (non-empty ${class}) was actually read.
if [ -n "${class}" ]
then
true
else
false
fi
}
# True when the current entry has a usable fallback default: either a
# POSTDEFAULT with the invalid_p check enabled, or a PREDEFAULT with the
# invalid_p check explicitly disabled ("0").
fallback_default_p ()
{
[ -n "${postdefault}" -a "x${invalid_p}" != "x0" ] \
|| [ -n "${predefault}" -a "x${invalid_p}" = "x0" ]
}
# The ${class} field is a letter code describing the kind of gdbarch member
# (see the field documentation below); these helpers test that code.
class_is_variable_p ()
{
case "${class}" in
*v* | *V* ) true ;;
* ) false ;;
esac
}
class_is_function_p ()
{
case "${class}" in
*f* | *F* | *m* | *M* ) true ;;
* ) false ;;
esac
}
class_is_multiarch_p ()
{
case "${class}" in
*m* | *M* ) true ;;
* ) false ;;
esac
}
# Capital letters denote members guarded by a validity predicate.
class_is_predicate_p ()
{
case "${class}" in
*F* | *V* | *M* ) true ;;
* ) false ;;
esac
}
class_is_info_p ()
{
case "${class}" in
*i* ) true ;;
* ) false ;;
esac
}
# dump out/verify the doco
for field in ${read}
do
# Every field named in ${read} must have a documented no-op arm here;
# anything else is a typo and aborts the script.
case ${field} in
class ) : ;;
# # -> line disable
# f -> function
# hiding a function
# F -> function + predicate
# hiding a function + predicate to test function validity
# v -> variable
# hiding a variable
# V -> variable + predicate
# hiding a variable + predicate to test variables validity
# i -> set from info
# hiding something from the ``struct info'' object
# m -> multi-arch function
# hiding a multi-arch function (parameterised with the architecture)
# M -> multi-arch function + predicate
# hiding a multi-arch function + predicate to test function validity
returntype ) : ;;
# For functions, the return type; for variables, the data type
function ) : ;;
# For functions, the member function name; for variables, the
# variable name. Member function names are always prefixed with
# ``gdbarch_'' for name-space purity.
formal ) : ;;
# The formal argument list. It is assumed that the formal
# argument list includes the actual name of each list element.
# A function with no arguments shall have ``void'' as the
# formal argument list.
actual ) : ;;
# The list of actual arguments. The arguments specified shall
# match the FORMAL list given above. Functions with out
# arguments leave this blank.
staticdefault ) : ;;
# To help with the GDB startup a static gdbarch object is
# created. STATICDEFAULT is the value to insert into that
# static gdbarch object. Since this a static object only
# simple expressions can be used.
# If STATICDEFAULT is empty, zero is used.
predefault ) : ;;
# An initial value to assign to MEMBER of the freshly
# malloc()ed gdbarch object. After initialization, the
# freshly malloc()ed object is passed to the target
# architecture code for further updates.
# If PREDEFAULT is empty, zero is used.
# A non-empty PREDEFAULT, an empty POSTDEFAULT and a zero
# INVALID_P are specified, PREDEFAULT will be used as the
# default for the non- multi-arch target.
# A zero PREDEFAULT function will force the fallback to call
# internal_error().
# Variable declarations can refer to ``gdbarch'' which will
# contain the current architecture. Care should be taken.
postdefault ) : ;;
# A value to assign to MEMBER of the new gdbarch object should
# the target architecture code fail to change the PREDEFAULT
# value.
# If POSTDEFAULT is empty, no post update is performed.
# If both INVALID_P and POSTDEFAULT are non-empty then
# INVALID_P will be used to determine if MEMBER should be
# changed to POSTDEFAULT.
# If a non-empty POSTDEFAULT and a zero INVALID_P are
# specified, POSTDEFAULT will be used as the default for the
# non- multi-arch target (regardless of the value of
# PREDEFAULT).
# You cannot specify both a zero INVALID_P and a POSTDEFAULT.
# Variable declarations can refer to ``gdbarch'' which
# will contain the current architecture. Care should be
# taken.
invalid_p ) : ;;
# A predicate equation that validates MEMBER. Non-zero is
# returned if the code creating the new architecture failed to
# initialize MEMBER or the initialized the member is invalid.
# If POSTDEFAULT is non-empty then MEMBER will be updated to
# that value. If POSTDEFAULT is empty then internal_error()
# is called.
# If INVALID_P is empty, a check that MEMBER is no longer
# equal to PREDEFAULT is used.
# The expression ``0'' disables the INVALID_P check making
# PREDEFAULT a legitimate value.
# See also PREDEFAULT and POSTDEFAULT.
print ) : ;;
# An optional expression that convers MEMBER to a value
# suitable for formatting using %s.
# If PRINT is empty, core_addr_to_string_nz (for CORE_ADDR)
# or plongest (anything else) is used.
garbage_at_eol ) : ;;
# Catches stray fields.
*)
echo "Bad field ${field}"
exit 1;;
esac
done
function_list ()
{
# See below (DOCO) for description of each field
cat <<EOF
i;const struct bfd_arch_info *;bfd_arch_info;;;&bfd_default_arch_struct;;;;gdbarch_bfd_arch_info (gdbarch)->printable_name
#
i;enum bfd_endian;byte_order;;;BFD_ENDIAN_BIG
i;enum bfd_endian;byte_order_for_code;;;BFD_ENDIAN_BIG
#
i;enum gdb_osabi;osabi;;;GDB_OSABI_UNKNOWN
#
i;const struct target_desc *;target_desc;;;;;;;host_address_to_string (gdbarch->target_desc)
# The bit byte-order has to do just with numbering of bits in debugging symbols
# and such. Conceptually, it's quite separate from byte/word byte order.
v;int;bits_big_endian;;;1;(gdbarch->byte_order == BFD_ENDIAN_BIG);;0
# Number of bits in a short or unsigned short for the target machine.
v;int;short_bit;;;8 * sizeof (short);2*TARGET_CHAR_BIT;;0
# Number of bits in an int or unsigned int for the target machine.
v;int;int_bit;;;8 * sizeof (int);4*TARGET_CHAR_BIT;;0
# Number of bits in a long or unsigned long for the target machine.
v;int;long_bit;;;8 * sizeof (long);4*TARGET_CHAR_BIT;;0
# Number of bits in a long long or unsigned long long for the target
# machine.
v;int;long_long_bit;;;8 * sizeof (LONGEST);2*gdbarch->long_bit;;0
# The ABI default bit-size and format for "half", "float", "double", and
# "long double". These bit/format pairs should eventually be combined
# into a single object. For the moment, just initialize them as a pair.
# Each format describes both the big and little endian layouts (if
# useful).
v;int;half_bit;;;16;2*TARGET_CHAR_BIT;;0
v;const struct floatformat **;half_format;;;;;floatformats_ieee_half;;pformat (gdbarch->half_format)
v;int;float_bit;;;8 * sizeof (float);4*TARGET_CHAR_BIT;;0
v;const struct floatformat **;float_format;;;;;floatformats_ieee_single;;pformat (gdbarch->float_format)
v;int;double_bit;;;8 * sizeof (double);8*TARGET_CHAR_BIT;;0
v;const struct floatformat **;double_format;;;;;floatformats_ieee_double;;pformat (gdbarch->double_format)
v;int;long_double_bit;;;8 * sizeof (long double);8*TARGET_CHAR_BIT;;0
v;const struct floatformat **;long_double_format;;;;;floatformats_ieee_double;;pformat (gdbarch->long_double_format)
# The ABI default bit-size for "wchar_t". wchar_t is a built-in type
# starting with C++11.
v;int;wchar_bit;;;8 * sizeof (wchar_t);4*TARGET_CHAR_BIT;;0
# One if \`wchar_t' is signed, zero if unsigned.
v;int;wchar_signed;;;1;-1;1
# Returns the floating-point format to be used for values of length LENGTH.
# NAME, if non-NULL, is the type name, which may be used to distinguish
# different target formats of the same length.
m;const struct floatformat **;floatformat_for_type;const char *name, int length;name, length;0;default_floatformat_for_type;;0
# For most targets, a pointer on the target and its representation as an
# address in GDB have the same size and "look the same". For such a
# target, you need only set gdbarch_ptr_bit and gdbarch_addr_bit
# / addr_bit will be set from it.
#
# If gdbarch_ptr_bit and gdbarch_addr_bit are different, you'll probably
# also need to set gdbarch_dwarf2_addr_size, gdbarch_pointer_to_address and
# gdbarch_address_to_pointer as well.
#
# ptr_bit is the size of a pointer on the target
v;int;ptr_bit;;;8 * sizeof (void*);gdbarch->int_bit;;0
# addr_bit is the size of a target address as represented in gdb
v;int;addr_bit;;;8 * sizeof (void*);0;gdbarch_ptr_bit (gdbarch);
#
# dwarf2_addr_size is the target address size as used in the Dwarf debug
# info. For .debug_frame FDEs, this is supposed to be the target address
# size from the associated CU header, and which is equivalent to the
# DWARF2_ADDR_SIZE as defined by the target specific GCC back-end.
# Unfortunately there is no good way to determine this value. Therefore
# dwarf2_addr_size simply defaults to the target pointer size.
#
# dwarf2_addr_size is not used for .eh_frame FDEs, which are generally
# defined using the target's pointer size so far.
#
# Note that dwarf2_addr_size only needs to be redefined by a target if the
# GCC back-end defines a DWARF2_ADDR_SIZE other than the target pointer size,
# and if Dwarf versions < 4 need to be supported.
v;int;dwarf2_addr_size;;;sizeof (void*);0;gdbarch_ptr_bit (gdbarch) / TARGET_CHAR_BIT;
#
# One if \`char' acts like \`signed char', zero if \`unsigned char'.
v;int;char_signed;;;1;-1;1
#
F;CORE_ADDR;read_pc;readable_regcache *regcache;regcache
F;void;write_pc;struct regcache *regcache, CORE_ADDR val;regcache, val
# Function for getting target's idea of a frame pointer. FIXME: GDB's
# whole scheme for dealing with "frames" and "frame pointers" needs a
# serious shakedown.
m;void;virtual_frame_pointer;CORE_ADDR pc, int *frame_regnum, LONGEST *frame_offset;pc, frame_regnum, frame_offset;0;legacy_virtual_frame_pointer;;0
#
M;enum register_status;pseudo_register_read;readable_regcache *regcache, int cookednum, gdb_byte *buf;regcache, cookednum, buf
# Read a register into a new struct value. If the register is wholly
# or partly unavailable, this should call mark_value_bytes_unavailable
# as appropriate. If this is defined, then pseudo_register_read will
# never be called.
M;struct value *;pseudo_register_read_value;readable_regcache *regcache, int cookednum;regcache, cookednum
M;void;pseudo_register_write;struct regcache *regcache, int cookednum, const gdb_byte *buf;regcache, cookednum, buf
#
v;int;num_regs;;;0;-1
# This macro gives the number of pseudo-registers that live in the
# register namespace but do not get fetched or stored on the target.
# These pseudo-registers may be aliases for other registers,
# combinations of other registers, or they may be computed by GDB.
v;int;num_pseudo_regs;;;0;0;;0
# Assemble agent expression bytecode to collect pseudo-register REG.
# Return -1 if something goes wrong, 0 otherwise.
M;int;ax_pseudo_register_collect;struct agent_expr *ax, int reg;ax, reg
# Assemble agent expression bytecode to push the value of pseudo-register
# REG on the interpreter stack.
# Return -1 if something goes wrong, 0 otherwise.
M;int;ax_pseudo_register_push_stack;struct agent_expr *ax, int reg;ax, reg
# Some targets/architectures can do extra processing/display of
# segmentation faults. E.g., Intel MPX boundary faults.
# Call the architecture dependent function to handle the fault.
# UIOUT is the output stream where the handler will place information.
M;void;handle_segmentation_fault;struct ui_out *uiout;uiout
# GDB's standard (or well known) register numbers. These can map onto
# a real register or a pseudo (computed) register or not be defined at
# all (-1).
# gdbarch_sp_regnum will hopefully be replaced by UNWIND_SP.
v;int;sp_regnum;;;-1;-1;;0
v;int;pc_regnum;;;-1;-1;;0
v;int;ps_regnum;;;-1;-1;;0
v;int;fp0_regnum;;;0;-1;;0
# Convert stab register number (from \`r\' declaration) to a gdb REGNUM.
m;int;stab_reg_to_regnum;int stab_regnr;stab_regnr;;no_op_reg_to_regnum;;0
# Provide a default mapping from a ecoff register number to a gdb REGNUM.
m;int;ecoff_reg_to_regnum;int ecoff_regnr;ecoff_regnr;;no_op_reg_to_regnum;;0
# Convert from an sdb register number to an internal gdb register number.
m;int;sdb_reg_to_regnum;int sdb_regnr;sdb_regnr;;no_op_reg_to_regnum;;0
# Provide a default mapping from a DWARF2 register number to a gdb REGNUM.
# Return -1 for bad REGNUM. Note: Several targets get this wrong.
m;int;dwarf2_reg_to_regnum;int dwarf2_regnr;dwarf2_regnr;;no_op_reg_to_regnum;;0
m;const char *;register_name;int regnr;regnr;;0
# Return the type of a register specified by the architecture. Only
# the register cache should call this function directly; others should
# use "register_type".
M;struct type *;register_type;int reg_nr;reg_nr
# Generate a dummy frame_id for THIS_FRAME assuming that the frame is
# a dummy frame. A dummy frame is created before an inferior call,
# the frame_id returned here must match the frame_id that was built
# for the inferior call. Usually this means the returned frame_id's
# stack address should match the address returned by
# gdbarch_push_dummy_call, and the returned frame_id's code address
# should match the address at which the breakpoint was set in the dummy
# frame.
m;struct frame_id;dummy_id;struct frame_info *this_frame;this_frame;;default_dummy_id;;0
# Implement DUMMY_ID and PUSH_DUMMY_CALL, then delete
# deprecated_fp_regnum.
v;int;deprecated_fp_regnum;;;-1;-1;;0
M;CORE_ADDR;push_dummy_call;struct value *function, struct regcache *regcache, CORE_ADDR bp_addr, int nargs, struct value **args, CORE_ADDR sp, function_call_return_method return_method, CORE_ADDR struct_addr;function, regcache, bp_addr, nargs, args, sp, return_method, struct_addr
v;int;call_dummy_location;;;;AT_ENTRY_POINT;;0
M;CORE_ADDR;push_dummy_code;CORE_ADDR sp, CORE_ADDR funaddr, struct value **args, int nargs, struct type *value_type, CORE_ADDR *real_pc, CORE_ADDR *bp_addr, struct regcache *regcache;sp, funaddr, args, nargs, value_type, real_pc, bp_addr, regcache
# Return true if the code of FRAME is writable.
m;int;code_of_frame_writable;struct frame_info *frame;frame;;default_code_of_frame_writable;;0
m;void;print_registers_info;struct ui_file *file, struct frame_info *frame, int regnum, int all;file, frame, regnum, all;;default_print_registers_info;;0
m;void;print_float_info;struct ui_file *file, struct frame_info *frame, const char *args;file, frame, args;;default_print_float_info;;0
M;void;print_vector_info;struct ui_file *file, struct frame_info *frame, const char *args;file, frame, args
# MAP a GDB RAW register number onto a simulator register number. See
# also include/...-sim.h.
m;int;register_sim_regno;int reg_nr;reg_nr;;legacy_register_sim_regno;;0
m;int;cannot_fetch_register;int regnum;regnum;;cannot_register_not;;0
m;int;cannot_store_register;int regnum;regnum;;cannot_register_not;;0
# Determine the address where a longjmp will land and save this address
# in PC. Return nonzero on success.
#
# FRAME corresponds to the longjmp frame.
F;int;get_longjmp_target;struct frame_info *frame, CORE_ADDR *pc;frame, pc
#
v;int;believe_pcc_promotion;;;;;;;
#
m;int;convert_register_p;int regnum, struct type *type;regnum, type;0;generic_convert_register_p;;0
f;int;register_to_value;struct frame_info *frame, int regnum, struct type *type, gdb_byte *buf, int *optimizedp, int *unavailablep;frame, regnum, type, buf, optimizedp, unavailablep;0
f;void;value_to_register;struct frame_info *frame, int regnum, struct type *type, const gdb_byte *buf;frame, regnum, type, buf;0
# Construct a value representing the contents of register REGNUM in
# frame FRAME_ID, interpreted as type TYPE. The routine needs to
# allocate and return a struct value with all value attributes
# (but not the value contents) filled in.
m;struct value *;value_from_register;struct type *type, int regnum, struct frame_id frame_id;type, regnum, frame_id;;default_value_from_register;;0
#
m;CORE_ADDR;pointer_to_address;struct type *type, const gdb_byte *buf;type, buf;;unsigned_pointer_to_address;;0
m;void;address_to_pointer;struct type *type, gdb_byte *buf, CORE_ADDR addr;type, buf, addr;;unsigned_address_to_pointer;;0
M;CORE_ADDR;integer_to_address;struct type *type, const gdb_byte *buf;type, buf
# Return the return-value convention that will be used by FUNCTION
# to return a value of type VALTYPE. FUNCTION may be NULL in which
# case the return convention is computed based only on VALTYPE.
#
# If READBUF is not NULL, extract the return value and save it in this buffer.
#
# If WRITEBUF is not NULL, it contains a return value which will be
# stored into the appropriate register. This can be used when we want
# to force the value returned by a function (see the "return" command
# for instance).
M;enum return_value_convention;return_value;struct value *function, struct type *valtype, struct regcache *regcache, gdb_byte *readbuf, const gdb_byte *writebuf;function, valtype, regcache, readbuf, writebuf
# Return true if the return value of function is stored in the first hidden
# parameter. In theory, this feature should be language-dependent, specified
# by language and its ABI, such as C++. Unfortunately, compiler may
# implement it to a target-dependent feature. So that we need such hook here
# to be aware of this in GDB.
m;int;return_in_first_hidden_param_p;struct type *type;type;;default_return_in_first_hidden_param_p;;0
m;CORE_ADDR;skip_prologue;CORE_ADDR ip;ip;0;0
M;CORE_ADDR;skip_main_prologue;CORE_ADDR ip;ip
# On some platforms, a single function may provide multiple entry points,
# e.g. one that is used for function-pointer calls and a different one
# that is used for direct function calls.
# In order to ensure that breakpoints set on the function will trigger
# no matter via which entry point the function is entered, a platform
# may provide the skip_entrypoint callback. It is called with IP set
# to the main entry point of a function (as determined by the symbol table),
# and should return the address of the innermost entry point, where the
# actual breakpoint needs to be set. Note that skip_entrypoint is used
# by GDB common code even when debugging optimized code, where skip_prologue
# is not used.
M;CORE_ADDR;skip_entrypoint;CORE_ADDR ip;ip
f;int;inner_than;CORE_ADDR lhs, CORE_ADDR rhs;lhs, rhs;0;0
m;const gdb_byte *;breakpoint_from_pc;CORE_ADDR *pcptr, int *lenptr;pcptr, lenptr;0;default_breakpoint_from_pc;;0
# Return the breakpoint kind for this target based on *PCPTR.
m;int;breakpoint_kind_from_pc;CORE_ADDR *pcptr;pcptr;;0;
# Return the software breakpoint from KIND. KIND can have target
# specific meaning like the Z0 kind parameter.
# SIZE is set to the software breakpoint's length in memory.
m;const gdb_byte *;sw_breakpoint_from_kind;int kind, int *size;kind, size;;NULL;;0
# Return the breakpoint kind for this target based on the current
# processor state (e.g. the current instruction mode on ARM) and the
# *PCPTR.  By default, it is gdbarch->breakpoint_kind_from_pc.
m;int;breakpoint_kind_from_current_state;struct regcache *regcache, CORE_ADDR *pcptr;regcache, pcptr;0;default_breakpoint_kind_from_current_state;;0
M;CORE_ADDR;adjust_breakpoint_address;CORE_ADDR bpaddr;bpaddr
m;int;memory_insert_breakpoint;struct bp_target_info *bp_tgt;bp_tgt;0;default_memory_insert_breakpoint;;0
m;int;memory_remove_breakpoint;struct bp_target_info *bp_tgt;bp_tgt;0;default_memory_remove_breakpoint;;0
v;CORE_ADDR;decr_pc_after_break;;;0;;;0
# A function can be addressed by either it's "pointer" (possibly a
# descriptor address) or "entry point" (first executable instruction).
# The method "convert_from_func_ptr_addr" converts the former to the
# latter. gdbarch_deprecated_function_start_offset is being used to implement
# a simplified subset of that functionality - the function's address
# corresponds to the "function pointer" and the function's start
# corresponds to the "function entry point" - and hence is redundant.
v;CORE_ADDR;deprecated_function_start_offset;;;0;;;0
# Return the remote protocol register number associated with this
# register. Normally the identity mapping.
m;int;remote_register_number;int regno;regno;;default_remote_register_number;;0
# Fetch the target specific address used to represent a load module.
F;CORE_ADDR;fetch_tls_load_module_address;struct objfile *objfile;objfile
#
v;CORE_ADDR;frame_args_skip;;;0;;;0
m;CORE_ADDR;unwind_pc;struct frame_info *next_frame;next_frame;;default_unwind_pc;;0
m;CORE_ADDR;unwind_sp;struct frame_info *next_frame;next_frame;;default_unwind_sp;;0
# DEPRECATED_FRAME_LOCALS_ADDRESS has been replaced by the per-frame
# frame-base. Enable frame-base before frame-unwind.
F;int;frame_num_args;struct frame_info *frame;frame
#
M;CORE_ADDR;frame_align;CORE_ADDR address;address
m;int;stabs_argument_has_addr;struct type *type;type;;default_stabs_argument_has_addr;;0
v;int;frame_red_zone_size
#
m;CORE_ADDR;convert_from_func_ptr_addr;CORE_ADDR addr, struct target_ops *targ;addr, targ;;convert_from_func_ptr_addr_identity;;0
# On some machines there are bits in addresses which are not really
# part of the address, but are used by the kernel, the hardware, etc.
# for special purposes. gdbarch_addr_bits_remove takes out any such bits so
# we get a "real" address such as one would find in a symbol table.
# This is used only for addresses of instructions, and even then I'm
# not sure it's used in all contexts. It exists to deal with there
# being a few stray bits in the PC which would mislead us, not as some
# sort of generic thing to handle alignment or segmentation (it's
# possible it should be in TARGET_READ_PC instead).
m;CORE_ADDR;addr_bits_remove;CORE_ADDR addr;addr;;core_addr_identity;;0
# On some machines, not all bits of an address word are significant.
# For example, on AArch64, the top bits of an address known as the "tag"
# are ignored by the kernel, the hardware, etc. and can be regarded as
# additional data associated with the address.
v;int;significant_addr_bit;;;;;;0
# FIXME/cagney/2001-01-18: This should be split in two. A target method that
# indicates if the target needs software single step. An ISA method to
# implement it.
#
# FIXME/cagney/2001-01-18: The logic is backwards. It should be asking if the
# target can single step. If not, then implement single step using breakpoints.
#
# Return a vector of addresses on which the software single step
# breakpoints should be inserted. NULL means software single step is
# not used.
# Multiple breakpoints may be inserted for some instructions such as
# conditional branch. However, each implementation must always evaluate
# the condition and only put the breakpoint at the branch destination if
# the condition is true, so that we ensure forward progress when stepping
# past a conditional branch to self.
F;std::vector<CORE_ADDR>;software_single_step;struct regcache *regcache;regcache
# Return non-zero if the processor is executing a delay slot and a
# further single-step is needed before the instruction finishes.
M;int;single_step_through_delay;struct frame_info *frame;frame
# FIXME: cagney/2003-08-28: Need to find a better way of selecting the
# disassembler. Perhaps objdump can handle it?
f;int;print_insn;bfd_vma vma, struct disassemble_info *info;vma, info;;default_print_insn;;0
f;CORE_ADDR;skip_trampoline_code;struct frame_info *frame, CORE_ADDR pc;frame, pc;;generic_skip_trampoline_code;;0
# If in_solib_dynsym_resolve_code() returns true, and SKIP_SOLIB_RESOLVER
# evaluates non-zero, this is the address where the debugger will place
# a step-resume breakpoint to get us past the dynamic linker.
m;CORE_ADDR;skip_solib_resolver;CORE_ADDR pc;pc;;generic_skip_solib_resolver;;0
# Some systems also have trampoline code for returning from shared libs.
m;int;in_solib_return_trampoline;CORE_ADDR pc, const char *name;pc, name;;generic_in_solib_return_trampoline;;0
# Return true if PC lies inside an indirect branch thunk.
m;bool;in_indirect_branch_thunk;CORE_ADDR pc;pc;;default_in_indirect_branch_thunk;;0
# A target might have problems with watchpoints as soon as the stack
# frame of the current function has been destroyed. This mostly happens
# as the first action in a function's epilogue. stack_frame_destroyed_p()
# is defined to return a non-zero value if either the given addr is one
# instruction after the stack destroying instruction up to the trailing
# return instruction or if we can figure out that the stack frame has
# already been invalidated regardless of the value of addr. Targets
# which don't suffer from that problem could just let this functionality
# untouched.
m;int;stack_frame_destroyed_p;CORE_ADDR addr;addr;0;generic_stack_frame_destroyed_p;;0
# Process an ELF symbol in the minimal symbol table in a backend-specific
# way. Normally this hook is supposed to do nothing, however if required,
# then this hook can be used to apply transformations to symbols that are
# considered special in some way. For example the MIPS backend uses it
# to interpret \`st_other' information to mark compressed code symbols so
# that they can be treated in the appropriate manner in the processing of
# the main symbol table and DWARF-2 records.
F;void;elf_make_msymbol_special;asymbol *sym, struct minimal_symbol *msym;sym, msym
f;void;coff_make_msymbol_special;int val, struct minimal_symbol *msym;val, msym;;default_coff_make_msymbol_special;;0
# Process a symbol in the main symbol table in a backend-specific way.
# Normally this hook is supposed to do nothing, however if required,
# then this hook can be used to apply transformations to symbols that
# are considered special in some way. This is currently used by the
# MIPS backend to make sure compressed code symbols have the ISA bit
# set. This in turn is needed for symbol values seen in GDB to match
# the values used at the runtime by the program itself, for function
# and label references.
f;void;make_symbol_special;struct symbol *sym, struct objfile *objfile;sym, objfile;;default_make_symbol_special;;0
# Adjust the address retrieved from a DWARF-2 record other than a line
# entry in a backend-specific way. Normally this hook is supposed to
# return the address passed unchanged, however if that is incorrect for
# any reason, then this hook can be used to fix the address up in the
# required manner. This is currently used by the MIPS backend to make
# sure addresses in FDE, range records, etc. referring to compressed
# code have the ISA bit set, matching line information and the symbol
# table.
f;CORE_ADDR;adjust_dwarf2_addr;CORE_ADDR pc;pc;;default_adjust_dwarf2_addr;;0
# Adjust the address updated by a line entry in a backend-specific way.
# Normally this hook is supposed to return the address passed unchanged,
# however in the case of inconsistencies in these records, this hook can
# be used to fix them up in the required manner. This is currently used
# by the MIPS backend to make sure all line addresses in compressed code
# are presented with the ISA bit set, which is not always the case. This
# in turn ensures breakpoint addresses are correctly matched against the
# stop PC.
f;CORE_ADDR;adjust_dwarf2_line;CORE_ADDR addr, int rel;addr, rel;;default_adjust_dwarf2_line;;0
v;int;cannot_step_breakpoint;;;0;0;;0
# See comment in target.h about continuable, steppable and
# non-steppable watchpoints.
v;int;have_nonsteppable_watchpoint;;;0;0;;0
F;int;address_class_type_flags;int byte_size, int dwarf2_addr_class;byte_size, dwarf2_addr_class
M;const char *;address_class_type_flags_to_name;int type_flags;type_flags
# Execute vendor-specific DWARF Call Frame Instruction. OP is the instruction.
# FS are passed from the generic execute_cfa_program function.
m;bool;execute_dwarf_cfa_vendor_op;gdb_byte op, struct dwarf2_frame_state *fs;op, fs;;default_execute_dwarf_cfa_vendor_op;;0
# Return the appropriate type_flags for the supplied address class.
# This function should return 1 if the address class was recognized and
# type_flags was set, zero otherwise.
M;int;address_class_name_to_type_flags;const char *name, int *type_flags_ptr;name, type_flags_ptr
# Is a register in a group
m;int;register_reggroup_p;int regnum, struct reggroup *reggroup;regnum, reggroup;;default_register_reggroup_p;;0
# Fetch the pointer to the ith function argument.
F;CORE_ADDR;fetch_pointer_argument;struct frame_info *frame, int argi, struct type *type;frame, argi, type
# Iterate over all supported register notes in a core file. For each
# supported register note section, the iterator must call CB and pass
# CB_DATA unchanged. If REGCACHE is not NULL, the iterator can limit
# the supported register note sections based on the current register
# values. Otherwise it should enumerate all supported register note
# sections.
M;void;iterate_over_regset_sections;iterate_over_regset_sections_cb *cb, void *cb_data, const struct regcache *regcache;cb, cb_data, regcache
# Create core file notes
M;char *;make_corefile_notes;bfd *obfd, int *note_size;obfd, note_size
# Find core file memory regions
M;int;find_memory_regions;find_memory_region_ftype func, void *data;func, data
# Read offset OFFSET of TARGET_OBJECT_LIBRARIES formatted shared libraries list from
# core file into buffer READBUF with length LEN. Return the number of bytes read
# (zero indicates failure).
# failed, otherwise, return the read length of READBUF.
M;ULONGEST;core_xfer_shared_libraries;gdb_byte *readbuf, ULONGEST offset, ULONGEST len;readbuf, offset, len
# Read offset OFFSET of TARGET_OBJECT_LIBRARIES_AIX formatted shared
# libraries list from core file into buffer READBUF with length LEN.
# Return the number of bytes read (zero indicates failure).
M;ULONGEST;core_xfer_shared_libraries_aix;gdb_byte *readbuf, ULONGEST offset, ULONGEST len;readbuf, offset, len
# How the core target converts a PTID from a core file to a string.
M;const char *;core_pid_to_str;ptid_t ptid;ptid
# How the core target extracts the name of a thread from a core file.
M;const char *;core_thread_name;struct thread_info *thr;thr
# Read offset OFFSET of TARGET_OBJECT_SIGNAL_INFO signal information
# from core file into buffer READBUF with length LEN. Return the number
# of bytes read (zero indicates EOF, a negative value indicates failure).
M;LONGEST;core_xfer_siginfo;gdb_byte *readbuf, ULONGEST offset, ULONGEST len; readbuf, offset, len
# BFD target to use when generating a core file.
V;const char *;gcore_bfd_target;;;0;0;;;pstring (gdbarch->gcore_bfd_target)
# If the elements of C++ vtables are in-place function descriptors rather
# than normal function pointers (which may point to code or a descriptor),
# set this to one.
v;int;vtable_function_descriptors;;;0;0;;0
# Set if the least significant bit of the delta is used instead of the least
# significant bit of the pfn for pointers to virtual member functions.
v;int;vbit_in_delta;;;0;0;;0
# Advance PC to next instruction in order to skip a permanent breakpoint.
f;void;skip_permanent_breakpoint;struct regcache *regcache;regcache;default_skip_permanent_breakpoint;default_skip_permanent_breakpoint;;0
# The maximum length of an instruction on this architecture in bytes.
V;ULONGEST;max_insn_length;;;0;0
# Copy the instruction at FROM to TO, and make any adjustments
# necessary to single-step it at that address.
#
# REGS holds the state the thread's registers will have before
# executing the copied instruction; the PC in REGS will refer to FROM,
# not the copy at TO. The caller should update it to point at TO later.
#
# Return a pointer to data of the architecture's choice to be passed
# to gdbarch_displaced_step_fixup. Or, return NULL to indicate that
# the instruction's effects have been completely simulated, with the
# resulting state written back to REGS.
#
# For a general explanation of displaced stepping and how GDB uses it,
# see the comments in infrun.c.
#
# The TO area is only guaranteed to have space for
# gdbarch_max_insn_length (arch) bytes, so this function must not
# write more bytes than that to that area.
#
# If you do not provide this function, GDB assumes that the
# architecture does not support displaced stepping.
#
# If the instruction cannot execute out of line, return NULL. The
# core falls back to stepping past the instruction in-line instead in
# that case.
M;struct displaced_step_closure *;displaced_step_copy_insn;CORE_ADDR from, CORE_ADDR to, struct regcache *regs;from, to, regs
# Return true if GDB should use hardware single-stepping to execute
# the displaced instruction identified by CLOSURE. If false,
# GDB will simply restart execution at the displaced instruction
# location, and it is up to the target to ensure GDB will receive
# control again (e.g. by placing a software breakpoint instruction
# into the displaced instruction buffer).
#
# The default implementation returns false on all targets that
# provide a gdbarch_software_single_step routine, and true otherwise.
m;int;displaced_step_hw_singlestep;struct displaced_step_closure *closure;closure;;default_displaced_step_hw_singlestep;;0
# Fix up the state resulting from successfully single-stepping a
# displaced instruction, to give the result we would have gotten from
# stepping the instruction in its original location.
#
# REGS is the register state resulting from single-stepping the
# displaced instruction.
#
# CLOSURE is the result from the matching call to
# gdbarch_displaced_step_copy_insn.
#
# If you provide gdbarch_displaced_step_copy_insn, but not this
# function, then GDB assumes that no fixup is needed after
# single-stepping the instruction.
#
# For a general explanation of displaced stepping and how GDB uses it,
# see the comments in infrun.c.
M;void;displaced_step_fixup;struct displaced_step_closure *closure, CORE_ADDR from, CORE_ADDR to, struct regcache *regs;closure, from, to, regs;;NULL
# Return the address of an appropriate place to put displaced
# instructions while we step over them. There need only be one such
# place, since we're only stepping one thread over a breakpoint at a
# time.
#
# For a general explanation of displaced stepping and how GDB uses it,
# see the comments in infrun.c.
m;CORE_ADDR;displaced_step_location;void;;;NULL;;(! gdbarch->displaced_step_location) != (! gdbarch->displaced_step_copy_insn)
# Relocate an instruction to execute at a different address. OLDLOC
# is the address in the inferior memory where the instruction to
# relocate is currently at. On input, TO points to the destination
# where we want the instruction to be copied (and possibly adjusted)
# to. On output, it points to one past the end of the resulting
# instruction(s). The effect of executing the instruction at TO shall
# be the same as if executing it at FROM. For example, call
# instructions that implicitly push the return address on the stack
# should be adjusted to return to the instruction after OLDLOC;
# relative branches, and other PC-relative instructions need the
# offset adjusted; etc.
M;void;relocate_instruction;CORE_ADDR *to, CORE_ADDR from;to, from;;NULL
# Refresh overlay mapped state for section OSECT.
F;void;overlay_update;struct obj_section *osect;osect
M;const struct target_desc *;core_read_description;struct target_ops *target, bfd *abfd;target, abfd
# Handle special encoding of static variables in stabs debug info.
F;const char *;static_transform_name;const char *name;name
# Set if the address in N_SO or N_FUN stabs may be zero.
v;int;sofun_address_maybe_missing;;;0;0;;0
# Parse the instruction at ADDR storing in the record execution log
# the registers REGCACHE and memory ranges that will be affected when
# the instruction executes, along with their current values.
# Return -1 if something goes wrong, 0 otherwise.
M;int;process_record;struct regcache *regcache, CORE_ADDR addr;regcache, addr
# Save process state after a signal.
# Return -1 if something goes wrong, 0 otherwise.
M;int;process_record_signal;struct regcache *regcache, enum gdb_signal signal;regcache, signal
# Signal translation: translate inferior's signal (target's) number
# into GDB's representation. The implementation of this method must
# be host independent. IOW, don't rely on symbols of the NAT_FILE
# header (the nm-*.h files), the host <signal.h> header, or similar
# headers. This is mainly used when cross-debugging core files ---
# "Live" targets hide the translation behind the target interface
# (target_wait, target_resume, etc.).
M;enum gdb_signal;gdb_signal_from_target;int signo;signo
# Signal translation: translate the GDB's internal signal number into
# the inferior's signal (target's) representation. The implementation
# of this method must be host independent. IOW, don't rely on symbols
# of the NAT_FILE header (the nm-*.h files), the host <signal.h>
# header, or similar headers.
# Return the target signal number if found, or -1 if the GDB internal
# signal number is invalid.
M;int;gdb_signal_to_target;enum gdb_signal signal;signal
# Extra signal info inspection.
#
# Return a type suitable to inspect extra signal information.
M;struct type *;get_siginfo_type;void;
# Record architecture-specific information from the symbol table.
M;void;record_special_symbol;struct objfile *objfile, asymbol *sym;objfile, sym
# Function for the 'catch syscall' feature.
# Get architecture-specific system calls information from registers.
M;LONGEST;get_syscall_number;thread_info *thread;thread
# The filename of the XML syscall for this architecture.
v;const char *;xml_syscall_file;;;0;0;;0;pstring (gdbarch->xml_syscall_file)
# Information about system calls from this architecture
v;struct syscalls_info *;syscalls_info;;;0;0;;0;host_address_to_string (gdbarch->syscalls_info)
# SystemTap related fields and functions.
# A NULL-terminated array of prefixes used to mark an integer constant
# on the architecture's assembly.
# For example, on x86 integer constants are written as:
#
# \$10 ;; integer constant 10
#
# in this case, this prefix would be the character \`\$\'.
v;const char *const *;stap_integer_prefixes;;;0;0;;0;pstring_list (gdbarch->stap_integer_prefixes)
# A NULL-terminated array of suffixes used to mark an integer constant
# on the architecture's assembly.
v;const char *const *;stap_integer_suffixes;;;0;0;;0;pstring_list (gdbarch->stap_integer_suffixes)
# A NULL-terminated array of prefixes used to mark a register name on
# the architecture's assembly.
# For example, on x86 the register name is written as:
#
# \%eax ;; register eax
#
# in this case, this prefix would be the character \`\%\'.
v;const char *const *;stap_register_prefixes;;;0;0;;0;pstring_list (gdbarch->stap_register_prefixes)
# A NULL-terminated array of suffixes used to mark a register name on
# the architecture's assembly.
v;const char *const *;stap_register_suffixes;;;0;0;;0;pstring_list (gdbarch->stap_register_suffixes)
# A NULL-terminated array of prefixes used to mark a register
# indirection on the architecture's assembly.
# For example, on x86 the register indirection is written as:
#
# \(\%eax\) ;; indirecting eax
#
# in this case, this prefix would be the character \`\(\'.
#
# Please note that we use the indirection prefix also for register
# displacement, e.g., \`4\(\%eax\)\' on x86.
v;const char *const *;stap_register_indirection_prefixes;;;0;0;;0;pstring_list (gdbarch->stap_register_indirection_prefixes)
# A NULL-terminated array of suffixes used to mark a register
# indirection on the architecture's assembly.
# For example, on x86 the register indirection is written as:
#
# \(\%eax\) ;; indirecting eax
#
# in this case, this suffix would be the character \`\)\'.
#
# Please note that we use the indirection suffix also for register
# displacement, e.g., \`4\(\%eax\)\' on x86.
v;const char *const *;stap_register_indirection_suffixes;;;0;0;;0;pstring_list (gdbarch->stap_register_indirection_suffixes)
# Prefix(es) used to name a register using GDB's nomenclature.
#
# For example, on PPC a register is represented by a number in the assembly
# language (e.g., \`10\' is the 10th general-purpose register). However,
# inside GDB this same register has an \`r\' appended to its name, so the 10th
# register would be represented as \`r10\' internally.
v;const char *;stap_gdb_register_prefix;;;0;0;;0;pstring (gdbarch->stap_gdb_register_prefix)
# Suffix used to name a register using GDB's nomenclature.
v;const char *;stap_gdb_register_suffix;;;0;0;;0;pstring (gdbarch->stap_gdb_register_suffix)
# Check if S is a single operand.
#
# Single operands can be:
# \- Literal integers, e.g. \`\$10\' on x86
# \- Register access, e.g. \`\%eax\' on x86
# \- Register indirection, e.g. \`\(\%eax\)\' on x86
# \- Register displacement, e.g. \`4\(\%eax\)\' on x86
#
# This function should check for these patterns on the string
# and return 1 if some were found, or zero otherwise. Please try to match
# as much info as you can from the string, i.e., if you have to match
# something like \`\(\%\', do not match just the \`\(\'.
M;int;stap_is_single_operand;const char *s;s
# Function used to handle a "special case" in the parser.
#
# A "special case" is considered to be an unknown token, i.e., a token
# that the parser does not know how to parse. A good example of special
# case would be ARM's register displacement syntax:
#
# [R0, #4] ;; displacing R0 by 4
#
# Since the parser assumes that a register displacement is of the form:
#
# <number> <indirection_prefix> <register_name> <indirection_suffix>
#
# it means that it will not be able to recognize and parse this odd syntax.
# Therefore, we should add a special case function that will handle this token.
#
# This function should generate the proper expression form of the expression
# using GDB\'s internal expression mechanism (e.g., \`write_exp_elt_opcode\'
# and so on). It should also return 1 if the parsing was successful, or zero
# if the token was not recognized as a special token (in this case, returning
# zero means that the special parser is deferring the parsing to the generic
# parser), and should advance the buffer pointer (p->arg).
M;int;stap_parse_special_token;struct stap_parse_info *p;p
# DTrace related functions.
# The expression to compute the NARGth+1 argument to a DTrace USDT probe.
# NARG must be >= 0.
M;void;dtrace_parse_probe_argument;struct parser_state *pstate, int narg;pstate, narg
# True if the given ADDR does not contain the instruction sequence
# corresponding to a disabled DTrace is-enabled probe.
M;int;dtrace_probe_is_enabled;CORE_ADDR addr;addr
# Enable a DTrace is-enabled probe at ADDR.
M;void;dtrace_enable_probe;CORE_ADDR addr;addr
# Disable a DTrace is-enabled probe at ADDR.
M;void;dtrace_disable_probe;CORE_ADDR addr;addr
# True if the list of shared libraries is one and only for all
# processes, as opposed to a list of shared libraries per inferior.
# This usually means that all processes, although may or may not share
# an address space, will see the same set of symbols at the same
# addresses.
v;int;has_global_solist;;;0;0;;0
# On some targets, even though each inferior has its own private
# address space, the debug interface takes care of making breakpoints
# visible to all address spaces automatically. For such cases,
# this property should be set to true.
v;int;has_global_breakpoints;;;0;0;;0
# True if inferiors share an address space (e.g., uClinux).
m;int;has_shared_address_space;void;;;default_has_shared_address_space;;0
# True if a fast tracepoint can be set at an address.
m;int;fast_tracepoint_valid_at;CORE_ADDR addr, std::string *msg;addr, msg;;default_fast_tracepoint_valid_at;;0
# Guess register state based on tracepoint location. Used for tracepoints
# where no registers have been collected, but there's only one location,
# allowing us to guess the PC value, and perhaps some other registers.
# On entry, regcache has all registers marked as unavailable.
m;void;guess_tracepoint_registers;struct regcache *regcache, CORE_ADDR addr;regcache, addr;;default_guess_tracepoint_registers;;0
# Return the "auto" target charset.
f;const char *;auto_charset;void;;default_auto_charset;default_auto_charset;;0
# Return the "auto" target wide charset.
f;const char *;auto_wide_charset;void;;default_auto_wide_charset;default_auto_wide_charset;;0
# If non-empty, this is a file extension that will be opened in place
# of the file extension reported by the shared library list.
#
# This is most useful for toolchains that use a post-linker tool,
# where the names of the files run on the target differ in extension
# compared to the names of the files GDB should load for debug info.
v;const char *;solib_symbols_extension;;;;;;;pstring (gdbarch->solib_symbols_extension)
# If true, the target OS has DOS-based file system semantics. That
# is, absolute paths include a drive name, and the backslash is
# considered a directory separator.
v;int;has_dos_based_file_system;;;0;0;;0
# Generate bytecodes to collect the return address in a frame.
# Since the bytecodes run on the target, possibly with GDB not even
# connected, the full unwinding machinery is not available, and
# typically this function will issue bytecodes for one or more likely
# places that the return address may be found.
m;void;gen_return_address;struct agent_expr *ax, struct axs_value *value, CORE_ADDR scope;ax, value, scope;;default_gen_return_address;;0
# Implement the "info proc" command.
M;void;info_proc;const char *args, enum info_proc_what what;args, what
# Implement the "info proc" command for core files.  Note that there
# are two "info_proc"-like methods on gdbarch -- one for core files,
# one for live targets.
M;void;core_info_proc;const char *args, enum info_proc_what what;args, what
# Iterate over all objfiles in the order that makes the most sense
# for the architecture to make global symbol searches.
#
# CB is a callback function where OBJFILE is the objfile to be searched,
# and CB_DATA a pointer to user-defined data (the same data that is passed
# when calling this gdbarch method). The iteration stops if this function
# returns nonzero.
#
# CB_DATA is a pointer to some user-defined data to be passed to
# the callback.
#
# If not NULL, CURRENT_OBJFILE corresponds to the objfile being
# inspected when the symbol search was requested.
m;void;iterate_over_objfiles_in_search_order;iterate_over_objfiles_in_search_order_cb_ftype *cb, void *cb_data, struct objfile *current_objfile;cb, cb_data, current_objfile;0;default_iterate_over_objfiles_in_search_order;;0
# Ravenscar arch-dependent ops.
v;struct ravenscar_arch_ops *;ravenscar_ops;;;NULL;NULL;;0;host_address_to_string (gdbarch->ravenscar_ops)
# Return non-zero if the instruction at ADDR is a call; zero otherwise.
m;int;insn_is_call;CORE_ADDR addr;addr;;default_insn_is_call;;0
# Return non-zero if the instruction at ADDR is a return; zero otherwise.
m;int;insn_is_ret;CORE_ADDR addr;addr;;default_insn_is_ret;;0
# Return non-zero if the instruction at ADDR is a jump; zero otherwise.
m;int;insn_is_jump;CORE_ADDR addr;addr;;default_insn_is_jump;;0
# Read one auxv entry from *READPTR, not reading locations >= ENDPTR.
# Return 0 if *READPTR is already at the end of the buffer.
# Return -1 if there is insufficient buffer for a whole entry.
# Return 1 if an entry was read into *TYPEP and *VALP.
M;int;auxv_parse;gdb_byte **readptr, gdb_byte *endptr, CORE_ADDR *typep, CORE_ADDR *valp;readptr, endptr, typep, valp
# Print the description of a single auxv entry described by TYPE and VAL
# to FILE.
m;void;print_auxv_entry;struct ui_file *file, CORE_ADDR type, CORE_ADDR val;file, type, val;;default_print_auxv_entry;;0
# Find the address range of the current inferior's vsyscall/vDSO, and
# write it to *RANGE. If the vsyscall's length can't be determined, a
# range with zero length is returned. Returns true if the vsyscall is
# found, false otherwise.
m;int;vsyscall_range;struct mem_range *range;range;;default_vsyscall_range;;0
# Allocate SIZE bytes of PROT protected page aligned memory in inferior.
# PROT has GDB_MMAP_PROT_* bitmask format.
# Throw an error if it is not possible. Returned address is always valid.
f;CORE_ADDR;infcall_mmap;CORE_ADDR size, unsigned prot;size, prot;;default_infcall_mmap;;0
# Deallocate SIZE bytes of memory at ADDR in inferior from gdbarch_infcall_mmap.
# Print a warning if it is not possible.
f;void;infcall_munmap;CORE_ADDR addr, CORE_ADDR size;addr, size;;default_infcall_munmap;;0
# Return string (caller has to use xfree for it) with options for GCC
# to produce code for this target, typically "-m64", "-m32" or "-m31".
# These options are put before CU's DW_AT_producer compilation options so that
# they can override it. Method may also return NULL.
m;char *;gcc_target_options;void;;;default_gcc_target_options;;0
# Return a regular expression that matches names used by this
# architecture in GNU configury triplets. The result is statically
# allocated and must not be freed. The default implementation simply
# returns the BFD architecture name, which is correct in nearly every
# case.
m;const char *;gnu_triplet_regexp;void;;;default_gnu_triplet_regexp;;0
# Return the size in 8-bit bytes of an addressable memory unit on this
# architecture. This corresponds to the number of 8-bit bytes associated to
# each address in memory.
m;int;addressable_memory_unit_size;void;;;default_addressable_memory_unit_size;;0
# Functions for allowing a target to modify its disassembler options.
v;const char *;disassembler_options_implicit;;;0;0;;0;pstring (gdbarch->disassembler_options_implicit)
v;char **;disassembler_options;;;0;0;;0;pstring_ptr (gdbarch->disassembler_options)
v;const disasm_options_and_args_t *;valid_disassembler_options;;;0;0;;0;host_address_to_string (gdbarch->valid_disassembler_options)
# Type alignment.
m;ULONGEST;type_align;struct type *type;type;;default_type_align;;0
EOF
}
#
# The .log file
#
exec > new-gdbarch.log
function_list | while do_read
do
cat <<EOF
${class} ${returntype} ${function} ($formal)
EOF
for r in ${read}
do
eval echo \"\ \ \ \ ${r}=\${${r}}\"
done
if class_is_predicate_p && fallback_default_p
then
echo "Error: predicate function ${function} can not have a non- multi-arch default" 1>&2
kill $$
exit 1
fi
if [ "x${invalid_p}" = "x0" -a -n "${postdefault}" ]
then
echo "Error: postdefault is useless when invalid_p=0" 1>&2
kill $$
exit 1
fi
if class_is_multiarch_p
then
if class_is_predicate_p ; then :
elif test "x${predefault}" = "x"
then
echo "Error: pure multi-arch function ${function} must have a predefault" 1>&2
kill $$
exit 1
fi
fi
echo ""
done
exec 1>&2
compare_new gdbarch.log
copyright ()
{
cat <<EOF
/* *INDENT-OFF* */ /* THIS FILE IS GENERATED -*- buffer-read-only: t -*- */
/* vi:set ro: */
/* Dynamic architecture support for GDB, the GNU debugger.
Copyright (C) 1998-2019 Free Software Foundation, Inc.
This file is part of GDB.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* This file was created with the aid of \`\`gdbarch.sh''.
The Bourne shell script \`\`gdbarch.sh'' creates the files
\`\`new-gdbarch.c'' and \`\`new-gdbarch.h and then compares them
against the existing \`\`gdbarch.[hc]''. Any differences found
being reported.
If editing this file, please also run gdbarch.sh and merge any
changes into that script. Conversely, when making sweeping changes
to this file, modifying gdbarch.sh and using its output may prove
easier. */
EOF
}
#
# The .h file
#
# From here until the matching 'exec 1>&2', stdout IS new-gdbarch.h:
# everything printed below lands in the generated header.
exec > new-gdbarch.h
copyright
# Fixed prologue: include guard, #includes, forward declarations and
# types that do not depend on function_list entries. Emitted verbatim
# (unquoted here-doc; \`\` pairs become plain backquotes on output).
cat <<EOF
#ifndef GDBARCH_H
#define GDBARCH_H
#include <vector>
#include "frame.h"
#include "dis-asm.h"
#include "gdb_obstack.h"
struct floatformat;
struct ui_file;
struct value;
struct objfile;
struct obj_section;
struct minimal_symbol;
struct regcache;
struct reggroup;
struct regset;
struct disassemble_info;
struct target_ops;
struct obstack;
struct bp_target_info;
struct target_desc;
struct symbol;
struct displaced_step_closure;
struct syscall;
struct agent_expr;
struct axs_value;
struct stap_parse_info;
struct parser_state;
struct ravenscar_arch_ops;
struct mem_range;
struct syscalls_info;
struct thread_info;
struct ui_out;
#include "regcache.h"
/* The architecture associated with the inferior through the
connection to the target.
The architecture vector provides some information that is really a
property of the inferior, accessed through a particular target:
ptrace operations; the layout of certain RSP packets; the solib_ops
vector; etc. To differentiate architecture accesses to
per-inferior/target properties from
per-thread/per-frame/per-objfile properties, accesses to
per-inferior/target properties should be made through this
gdbarch. */
/* This is a convenience wrapper for 'current_inferior ()->gdbarch'. */
extern struct gdbarch *target_gdbarch (void);
/* Callback type for the 'iterate_over_objfiles_in_search_order'
gdbarch method. */
typedef int (iterate_over_objfiles_in_search_order_cb_ftype)
(struct objfile *objfile, void *cb_data);
/* Callback type for regset section iterators. The callback usually
invokes the REGSET's supply or collect method, to which it must
pass a buffer - for collects this buffer will need to be created using
COLLECT_SIZE, for supply the existing buffer being read from should
be at least SUPPLY_SIZE. SECT_NAME is a BFD section name, and HUMAN_NAME
is used for diagnostic messages. CB_DATA should have been passed
unchanged through the iterator. */
typedef void (iterate_over_regset_sections_cb)
(const char *sect_name, int supply_size, int collect_size,
const struct regset *regset, const char *human_name, void *cb_data);
/* For a function call, does the function return a value using a
normal value return or a structure return - passing a hidden
argument pointing to storage. For the latter, there are two
cases: language-mandated structure return and target ABI
structure return. */
enum function_call_return_method
{
/* Standard value return. */
return_method_normal = 0,
/* Language ABI structure return. This is handled
by passing the return location as the first parameter to
the function, even preceding "this". */
return_method_hidden_param,
/* Target ABI struct return. This is target-specific; for instance,
on ia64 the first argument is passed in out0 but the hidden
structure return pointer would normally be passed in r8. */
return_method_struct,
};
EOF
# function typedef's
# Declarations for the "info" class entries: values pre-initialized
# from gdbarch_info, so only the getter is declared (no setter).
# do_read (defined earlier in the script) presumably sets ${function}
# and ${returntype} for each function_list record — each printf below
# is expanded per-record into the generated header.
printf "\n"
printf "\n"
printf "/* The following are pre-initialized by GDBARCH. */\n"
function_list | while do_read
do
if class_is_info_p
then
printf "\n"
printf "extern ${returntype} gdbarch_${function} (struct gdbarch *gdbarch);\n"
printf "/* set_gdbarch_${function}() - not applicable - pre-initialized. */\n"
fi
done
# function typedef's
# Declarations for everything the target-dependent code must supply:
# per record, emit the record's comment (rewritten from '#' shell
# comment form into a C block comment by the sed below), an optional
# _p predicate, and the getter/setter (plus an _ftype typedef for
# function-class entries).
printf "\n"
printf "\n"
printf "/* The following are initialized by the target dependent code. */\n"
function_list | while do_read
do
if [ -n "${comment}" ]
then
# sed transform: line 2 '#'->'/*', later lines '#'->' ', append ' */'
# to the last line — i.e. convert the '#'-prefixed comment block into
# a C comment. (Line 1 of ${comment} is passed through untouched.)
echo "${comment}" | sed \
-e '2 s,#,/*,' \
-e '3,$ s,#, ,' \
-e '$ s,$, */,'
fi
if class_is_predicate_p
then
printf "\n"
printf "extern int gdbarch_${function}_p (struct gdbarch *gdbarch);\n"
fi
if class_is_variable_p
then
printf "\n"
printf "extern ${returntype} gdbarch_${function} (struct gdbarch *gdbarch);\n"
printf "extern void set_gdbarch_${function} (struct gdbarch *gdbarch, ${returntype} ${function});\n"
fi
if class_is_function_p
then
printf "\n"
# void-formal multiarch functions take only the gdbarch parameter;
# other multiarch functions prepend gdbarch to ${formal}.
if [ "x${formal}" = "xvoid" ] && class_is_multiarch_p
then
printf "typedef ${returntype} (gdbarch_${function}_ftype) (struct gdbarch *gdbarch);\n"
elif class_is_multiarch_p
then
printf "typedef ${returntype} (gdbarch_${function}_ftype) (struct gdbarch *gdbarch, ${formal});\n"
else
printf "typedef ${returntype} (gdbarch_${function}_ftype) (${formal});\n"
fi
if [ "x${formal}" = "xvoid" ]
then
printf "extern ${returntype} gdbarch_${function} (struct gdbarch *gdbarch);\n"
else
printf "extern ${returntype} gdbarch_${function} (struct gdbarch *gdbarch, ${formal});\n"
fi
printf "extern void set_gdbarch_${function} (struct gdbarch *gdbarch, gdbarch_${function}_ftype *${function});\n"
fi
done
# close it off
# Fixed epilogue of new-gdbarch.h: registration API, gdbarch_info,
# per-architecture data-pointer registry, and the closing #endif.
# Verbatim here-doc — nothing between 'cat <<EOF' and 'EOF' may change
# without altering the generated header byte-for-byte.
cat <<EOF
extern struct gdbarch_tdep *gdbarch_tdep (struct gdbarch *gdbarch);
/* Mechanism for co-ordinating the selection of a specific
architecture.
GDB targets (*-tdep.c) can register an interest in a specific
architecture. Other GDB components can register a need to maintain
per-architecture data.
The mechanisms below ensures that there is only a loose connection
between the set-architecture command and the various GDB
components. Each component can independently register their need
to maintain architecture specific data with gdbarch.
Pragmatics:
Previously, a single TARGET_ARCHITECTURE_HOOK was provided. It
didn't scale.
The more traditional mega-struct containing architecture specific
data for all the various GDB components was also considered. Since
GDB is built from a variable number of (fairly independent)
components it was determined that the global aproach was not
applicable. */
/* Register a new architectural family with GDB.
Register support for the specified ARCHITECTURE with GDB. When
gdbarch determines that the specified architecture has been
selected, the corresponding INIT function is called.
--
The INIT function takes two parameters: INFO which contains the
information available to gdbarch about the (possibly new)
architecture; ARCHES which is a list of the previously created
\`\`struct gdbarch'' for this architecture.
The INFO parameter is, as far as possible, be pre-initialized with
information obtained from INFO.ABFD or the global defaults.
The ARCHES parameter is a linked list (sorted most recently used)
of all the previously created architures for this architecture
family. The (possibly NULL) ARCHES->gdbarch can used to access
values from the previously selected architecture for this
architecture family.
The INIT function shall return any of: NULL - indicating that it
doesn't recognize the selected architecture; an existing \`\`struct
gdbarch'' from the ARCHES list - indicating that the new
architecture is just a synonym for an earlier architecture (see
gdbarch_list_lookup_by_info()); a newly created \`\`struct gdbarch''
- that describes the selected architecture (see gdbarch_alloc()).
The DUMP_TDEP function shall print out all target specific values.
Care should be taken to ensure that the function works in both the
multi-arch and non- multi-arch cases. */
struct gdbarch_list
{
struct gdbarch *gdbarch;
struct gdbarch_list *next;
};
struct gdbarch_info
{
/* Use default: NULL (ZERO). */
const struct bfd_arch_info *bfd_arch_info;
/* Use default: BFD_ENDIAN_UNKNOWN (NB: is not ZERO). */
enum bfd_endian byte_order;
enum bfd_endian byte_order_for_code;
/* Use default: NULL (ZERO). */
bfd *abfd;
/* Use default: NULL (ZERO). */
union
{
/* Architecture-specific information. The generic form for targets
that have extra requirements. */
struct gdbarch_tdep_info *tdep_info;
/* Architecture-specific target description data. Numerous targets
need only this, so give them an easy way to hold it. */
struct tdesc_arch_data *tdesc_data;
/* SPU file system ID. This is a single integer, so using the
generic form would only complicate code. Other targets may
reuse this member if suitable. */
int *id;
};
/* Use default: GDB_OSABI_UNINITIALIZED (-1). */
enum gdb_osabi osabi;
/* Use default: NULL (ZERO). */
const struct target_desc *target_desc;
};
typedef struct gdbarch *(gdbarch_init_ftype) (struct gdbarch_info info, struct gdbarch_list *arches);
typedef void (gdbarch_dump_tdep_ftype) (struct gdbarch *gdbarch, struct ui_file *file);
/* DEPRECATED - use gdbarch_register() */
extern void register_gdbarch_init (enum bfd_architecture architecture, gdbarch_init_ftype *);
extern void gdbarch_register (enum bfd_architecture architecture,
gdbarch_init_ftype *,
gdbarch_dump_tdep_ftype *);
/* Return a freshly allocated, NULL terminated, array of the valid
architecture names. Since architectures are registered during the
_initialize phase this function only returns useful information
once initialization has been completed. */
extern const char **gdbarch_printable_names (void);
/* Helper function. Search the list of ARCHES for a GDBARCH that
matches the information provided by INFO. */
extern struct gdbarch_list *gdbarch_list_lookup_by_info (struct gdbarch_list *arches, const struct gdbarch_info *info);
/* Helper function. Create a preliminary \`\`struct gdbarch''. Perform
basic initialization using values obtained from the INFO and TDEP
parameters. set_gdbarch_*() functions are called to complete the
initialization of the object. */
extern struct gdbarch *gdbarch_alloc (const struct gdbarch_info *info, struct gdbarch_tdep *tdep);
/* Helper function. Free a partially-constructed \`\`struct gdbarch''.
It is assumed that the caller freeds the \`\`struct
gdbarch_tdep''. */
extern void gdbarch_free (struct gdbarch *);
/* Get the obstack owned by ARCH. */
extern obstack *gdbarch_obstack (gdbarch *arch);
/* Helper function. Allocate memory from the \`\`struct gdbarch''
obstack. The memory is freed when the corresponding architecture
is also freed. */
#define GDBARCH_OBSTACK_CALLOC(GDBARCH, NR, TYPE) \
obstack_calloc<TYPE> (gdbarch_obstack ((GDBARCH)), (NR))
#define GDBARCH_OBSTACK_ZALLOC(GDBARCH, TYPE) \
obstack_zalloc<TYPE> (gdbarch_obstack ((GDBARCH)))
/* Duplicate STRING, returning an equivalent string that's allocated on the
obstack associated with GDBARCH. The string is freed when the corresponding
architecture is also freed. */
extern char *gdbarch_obstack_strdup (struct gdbarch *arch, const char *string);
/* Helper function. Force an update of the current architecture.
The actual architecture selected is determined by INFO, \`\`(gdb) set
architecture'' et.al., the existing architecture and BFD's default
architecture. INFO should be initialized to zero and then selected
fields should be updated.
Returns non-zero if the update succeeds. */
extern int gdbarch_update_p (struct gdbarch_info info);
/* Helper function. Find an architecture matching info.
INFO should be initialized using gdbarch_info_init, relevant fields
set, and then finished using gdbarch_info_fill.
Returns the corresponding architecture, or NULL if no matching
architecture was found. */
extern struct gdbarch *gdbarch_find_by_info (struct gdbarch_info info);
/* Helper function. Set the target gdbarch to "gdbarch". */
extern void set_target_gdbarch (struct gdbarch *gdbarch);
/* Register per-architecture data-pointer.
Reserve space for a per-architecture data-pointer. An identifier
for the reserved data-pointer is returned. That identifer should
be saved in a local static variable.
Memory for the per-architecture data shall be allocated using
gdbarch_obstack_zalloc. That memory will be deleted when the
corresponding architecture object is deleted.
When a previously created architecture is re-selected, the
per-architecture data-pointer for that previous architecture is
restored. INIT() is not re-called.
Multiple registrarants for any architecture are allowed (and
strongly encouraged). */
struct gdbarch_data;
typedef void *(gdbarch_data_pre_init_ftype) (struct obstack *obstack);
extern struct gdbarch_data *gdbarch_data_register_pre_init (gdbarch_data_pre_init_ftype *init);
typedef void *(gdbarch_data_post_init_ftype) (struct gdbarch *gdbarch);
extern struct gdbarch_data *gdbarch_data_register_post_init (gdbarch_data_post_init_ftype *init);
extern void deprecated_set_gdbarch_data (struct gdbarch *gdbarch,
struct gdbarch_data *data,
void *pointer);
extern void *gdbarch_data (struct gdbarch *gdbarch, struct gdbarch_data *);
/* Set the dynamic target-system-dependent parameters (architecture,
byte-order, ...) using information found in the BFD. */
extern void set_gdbarch_from_file (bfd *);
/* Initialize the current architecture to the "first" one we find on
our list. */
extern void initialize_current_architecture (void);
/* gdbarch trace variable */
extern unsigned int gdbarch_debug;
extern void gdbarch_dump (struct gdbarch *gdbarch, struct ui_file *file);
/* Return the number of cooked registers (raw + pseudo) for ARCH. */
static inline int
gdbarch_num_cooked_regs (gdbarch *arch)
{
return gdbarch_num_regs (arch) + gdbarch_num_pseudo_regs (arch);
}
#endif
EOF
# Header complete: restore stdout to stderr and diff the generated
# new-gdbarch.h against the checked-in gdbarch.h.
exec 1>&2
#../move-if-change new-gdbarch.h gdbarch.h
compare_new gdbarch.h
#
# C file
#
# From here until the next 'exec 1>&2', stdout IS new-gdbarch.c.
exec > new-gdbarch.c
copyright
# Fixed prologue: #includes plus small static helpers used by the
# generated gdbarch_dump. Inside this unquoted here-doc, \\n is
# emitted as \n in the C source (one backslash consumed by the shell).
# NOTE(review): in pstring_list below, xsnprintf's return value for
# the "%s, " format already includes the two separator characters,
# yet offset advances by 2 + s — looks like it overshoots by 2 per
# element; verify against upstream gdbarch.c before changing, since
# this file must match the checked-in copy byte-for-byte.
cat <<EOF
#include "defs.h"
#include "arch-utils.h"
#include "gdbcmd.h"
#include "inferior.h"
#include "symcat.h"
#include "floatformat.h"
#include "reggroups.h"
#include "osabi.h"
#include "gdb_obstack.h"
#include "observable.h"
#include "regcache.h"
#include "objfiles.h"
#include "auxv.h"
#include "frame-unwind.h"
#include "dummy-frame.h"
/* Static function declarations */
static void alloc_gdbarch_data (struct gdbarch *);
/* Non-zero if we want to trace architecture code. */
#ifndef GDBARCH_DEBUG
#define GDBARCH_DEBUG 0
#endif
unsigned int gdbarch_debug = GDBARCH_DEBUG;
static void
show_gdbarch_debug (struct ui_file *file, int from_tty,
struct cmd_list_element *c, const char *value)
{
fprintf_filtered (file, _("Architecture debugging is %s.\\n"), value);
}
static const char *
pformat (const struct floatformat **format)
{
if (format == NULL)
return "(null)";
else
/* Just print out one of them - this is only for diagnostics. */
return format[0]->name;
}
static const char *
pstring (const char *string)
{
if (string == NULL)
return "(null)";
return string;
}
static const char *
pstring_ptr (char **string)
{
if (string == NULL || *string == NULL)
return "(null)";
return *string;
}
/* Helper function to print a list of strings, represented as "const
char *const *". The list is printed comma-separated. */
static const char *
pstring_list (const char *const *list)
{
static char ret[100];
const char *const *p;
size_t offset = 0;
if (list == NULL)
return "(null)";
ret[0] = '\0';
for (p = list; *p != NULL && offset < sizeof (ret); ++p)
{
size_t s = xsnprintf (ret + offset, sizeof (ret) - offset, "%s, ", *p);
offset += 2 + s;
}
if (offset > 0)
{
gdb_assert (offset - 2 < sizeof (ret));
ret[offset - 2] = '\0';
}
return ret;
}
EOF
# gdbarch open the gdbarch object
# Emit the opening of 'struct gdbarch': bookkeeping fields, then one
# member per "info" class entry, then the tdep/dump_tdep pointers and
# the per-architecture data-pointer array.
printf "\n"
printf "/* Maintain the struct gdbarch object. */\n"
printf "\n"
printf "struct gdbarch\n"
printf "{\n"
printf " /* Has this architecture been fully initialized? */\n"
printf " int initialized_p;\n"
printf "\n"
printf " /* An obstack bound to the lifetime of the architecture. */\n"
printf " struct obstack *obstack;\n"
printf "\n"
printf " /* basic architectural information. */\n"
function_list | while do_read
do
if class_is_info_p
then
printf " ${returntype} ${function};\n"
fi
done
printf "\n"
printf " /* target specific vector. */\n"
printf " struct gdbarch_tdep *tdep;\n"
printf " gdbarch_dump_tdep_ftype *dump_tdep;\n"
printf "\n"
printf " /* per-architecture data-pointers. */\n"
printf " unsigned nr_data;\n"
printf " void **data;\n"
printf "\n"
# Emit the maintainer checklist comment that sits above the generated
# multi-arch members (verbatim here-doc payload).
cat <<EOF
/* Multi-arch values.
When extending this structure you must:
Add the field below.
Declare set/get functions and define the corresponding
macro in gdbarch.h.
gdbarch_alloc(): If zero/NULL is not a suitable default,
initialize the new field.
verify_gdbarch(): Confirm that the target updated the field
correctly.
gdbarch_dump(): Add a fprintf_unfiltered call so that the new
field is dumped out
get_gdbarch(): Implement the set/get functions (probably using
the macro's as shortcuts).
*/
EOF
# One struct member per variable-class entry (plain value) or
# function-class entry (pointer to its _ftype), then close the struct.
function_list | while do_read
do
if class_is_variable_p
then
printf " ${returntype} ${function};\n"
elif class_is_function_p
then
printf " gdbarch_${function}_ftype *${function};\n"
fi
done
printf "};\n"
# Create a new gdbarch struct
# Emit gdbarch_alloc(): fixed body, then per-record initialization —
# info-class members are copied from the caller's gdbarch_info, and
# members with a non-zero ${predefault} get that value pre-assigned.
cat <<EOF
/* Create a new \`\`struct gdbarch'' based on information provided by
\`\`struct gdbarch_info''. */
EOF
printf "\n"
cat <<EOF
struct gdbarch *
gdbarch_alloc (const struct gdbarch_info *info,
struct gdbarch_tdep *tdep)
{
struct gdbarch *gdbarch;
/* Create an obstack for allocating all the per-architecture memory,
then use that to allocate the architecture vector. */
struct obstack *obstack = XNEW (struct obstack);
obstack_init (obstack);
gdbarch = XOBNEW (obstack, struct gdbarch);
memset (gdbarch, 0, sizeof (*gdbarch));
gdbarch->obstack = obstack;
alloc_gdbarch_data (gdbarch);
gdbarch->tdep = tdep;
EOF
printf "\n"
function_list | while do_read
do
if class_is_info_p
then
printf " gdbarch->${function} = info->${function};\n"
fi
done
printf "\n"
printf " /* Force the explicit initialization of these. */\n"
function_list | while do_read
do
if class_is_function_p || class_is_variable_p
then
# Only emit an assignment when a predefault exists and is not "0"
# (zero is already guaranteed by the memset above).
if [ -n "${predefault}" -a "x${predefault}" != "x0" ]
then
printf " gdbarch->${function} = ${predefault};\n"
fi
fi
done
cat <<EOF
/* gdbarch_alloc() */
return gdbarch;
}
EOF
# Free a gdbarch struct.
# Fixed-text helpers: obstack accessor, obstack strdup, and
# gdbarch_free (which releases the whole obstack, including the
# gdbarch object itself that was allocated on it).
printf "\n"
printf "\n"
cat <<EOF
obstack *gdbarch_obstack (gdbarch *arch)
{
return arch->obstack;
}
/* See gdbarch.h. */
char *
gdbarch_obstack_strdup (struct gdbarch *arch, const char *string)
{
return obstack_strdup (arch->obstack, string);
}
/* Free a gdbarch struct. This should never happen in normal
operation --- once you've created a gdbarch, you keep it around.
However, if an architecture's init function encounters an error
building the structure, it may need to clean up a partially
constructed gdbarch. */
void
gdbarch_free (struct gdbarch *arch)
{
struct obstack *obstack;
gdb_assert (arch != NULL);
gdb_assert (!arch->initialized_p);
obstack = arch->obstack;
obstack_free (obstack, 0); /* Includes the ARCH. */
xfree (obstack);
}
EOF
# verify a new architecture
# Emit verify_gdbarch(): per record, choose one validation strategy
# based on which of ${invalid_p}/${predefault}/${postdefault} are set.
# Records with a postdefault are silently repaired; records without
# one append their name to the error log.
# The quadruple backslashes (\\\\n) survive printf as \\n, which the
# C compiler then reads as the two-character escape \n in the string.
cat <<EOF
/* Ensure that all values in a GDBARCH are reasonable. */
static void
verify_gdbarch (struct gdbarch *gdbarch)
{
string_file log;
/* fundamental */
if (gdbarch->byte_order == BFD_ENDIAN_UNKNOWN)
log.puts ("\n\tbyte-order");
if (gdbarch->bfd_arch_info == NULL)
log.puts ("\n\tbfd_arch_info");
/* Check those that need to be defined for the given multi-arch level. */
EOF
function_list | while do_read
do
if class_is_function_p || class_is_variable_p
then
if [ "x${invalid_p}" = "x0" ]
then
printf " /* Skip verify of ${function}, invalid_p == 0 */\n"
elif class_is_predicate_p
then
printf " /* Skip verify of ${function}, has predicate. */\n"
# FIXME: See do_read for potential simplification
elif [ -n "${invalid_p}" -a -n "${postdefault}" ]
then
printf " if (${invalid_p})\n"
printf " gdbarch->${function} = ${postdefault};\n"
elif [ -n "${predefault}" -a -n "${postdefault}" ]
then
printf " if (gdbarch->${function} == ${predefault})\n"
printf " gdbarch->${function} = ${postdefault};\n"
elif [ -n "${postdefault}" ]
then
printf " if (gdbarch->${function} == 0)\n"
printf " gdbarch->${function} = ${postdefault};\n"
elif [ -n "${invalid_p}" ]
then
printf " if (${invalid_p})\n"
printf " log.puts (\"\\\\n\\\\t${function}\");\n"
elif [ -n "${predefault}" ]
then
printf " if (gdbarch->${function} == ${predefault})\n"
printf " log.puts (\"\\\\n\\\\t${function}\");\n"
fi
fi
done
cat <<EOF
if (!log.empty ())
internal_error (__FILE__, __LINE__,
_("verify_gdbarch: the following are invalid ...%s"),
log.c_str ());
}
EOF
# dump the structure
# Emit gdbarch_dump(). Records are sorted on field 3 of the
# semicolon-separated function_list format so the dump output is
# alphabetical regardless of declaration order.
printf "\n"
printf "\n"
cat <<EOF
/* Print out the details of the current architecture. */
void
gdbarch_dump (struct gdbarch *gdbarch, struct ui_file *file)
{
const char *gdb_nm_file = "<not-defined>";
#if defined (GDB_NM_FILE)
gdb_nm_file = GDB_NM_FILE;
#endif
fprintf_unfiltered (file,
"gdbarch_dump: GDB_NM_FILE = %s\\n",
gdb_nm_file);
EOF
function_list | sort '-t;' -k 3 | while do_read
do
# First the predicate
if class_is_predicate_p
then
printf " fprintf_unfiltered (file,\n"
printf " \"gdbarch_dump: gdbarch_${function}_p() = %%d\\\\n\",\n"
printf " gdbarch_${function}_p (gdbarch));\n"
fi
# Print the corresponding value.
if class_is_function_p
then
printf " fprintf_unfiltered (file,\n"
printf " \"gdbarch_dump: ${function} = <%%s>\\\\n\",\n"
printf " host_address_to_string (gdbarch->${function}));\n"
else
# It is a variable
# Pick a printer by "${print}:${returntype}": with no explicit
# ${print}, CORE_ADDR values go through core_addr_to_string_nz and
# everything else through plongest; an explicit ${print} is used
# verbatim.
case "${print}:${returntype}" in
:CORE_ADDR )
fmt="%s"
print="core_addr_to_string_nz (gdbarch->${function})"
;;
:* )
fmt="%s"
print="plongest (gdbarch->${function})"
;;
* )
fmt="%s"
;;
esac
printf " fprintf_unfiltered (file,\n"
printf " \"gdbarch_dump: ${function} = %s\\\\n\",\n" "${fmt}"
printf " ${print});\n"
fi
done
cat <<EOF
if (gdbarch->dump_tdep != NULL)
gdbarch->dump_tdep (gdbarch, file);
}
EOF
# GET/SET
# Fixed accessor for the target-specific vector.
printf "\n"
cat <<EOF
struct gdbarch_tdep *
gdbarch_tdep (struct gdbarch *gdbarch)
{
if (gdbarch_debug >= 2)
fprintf_unfiltered (gdb_stdlog, "gdbarch_tdep called\\n");
return gdbarch->tdep;
}
EOF
printf "\n"
# Emit the getter/setter (and _p predicate) definitions for every
# function_list record. Function-class entries dispatch through the
# stored pointer; variable- and info-class entries return the member.
function_list | while do_read
do
# Predicate: gdbarch_FUNC_p() evaluates the record's ${predicate}.
if class_is_predicate_p
then
printf "\n"
printf "int\n"
printf "gdbarch_${function}_p (struct gdbarch *gdbarch)\n"
printf "{\n"
printf " gdb_assert (gdbarch != NULL);\n"
printf " return ${predicate};\n"
printf "}\n"
fi
if class_is_function_p
then
printf "\n"
printf "${returntype}\n"
if [ "x${formal}" = "xvoid" ]
then
printf "gdbarch_${function} (struct gdbarch *gdbarch)\n"
else
printf "gdbarch_${function} (struct gdbarch *gdbarch, ${formal})\n"
fi
printf "{\n"
printf " gdb_assert (gdbarch != NULL);\n"
printf " gdb_assert (gdbarch->${function} != NULL);\n"
if class_is_predicate_p && test -n "${predefault}"
then
# Allow a call to a function with a predicate.
printf " /* Do not check predicate: ${predicate}, allow call. */\n"
fi
printf " if (gdbarch_debug >= 2)\n"
printf " fprintf_unfiltered (gdb_stdlog, \"gdbarch_${function} called\\\\n\");\n"
# Build the argument list for the dispatch: multiarch entries get
# gdbarch prepended; "-" or empty ${actual} means no user arguments.
if [ "x${actual}" = "x-" -o "x${actual}" = "x" ]
then
if class_is_multiarch_p
then
params="gdbarch"
else
params=""
fi
else
if class_is_multiarch_p
then
params="gdbarch, ${actual}"
else
params="${actual}"
fi
fi
if [ "x${returntype}" = "xvoid" ]
then
printf " gdbarch->${function} (${params});\n"
else
printf " return gdbarch->${function} (${params});\n"
fi
printf "}\n"
printf "\n"
printf "void\n"
printf "set_gdbarch_${function} (struct gdbarch *gdbarch,\n"
# The backquoted sed turns the function name into an equal-length run
# of spaces, aligning the second parameter under the first in the
# generated C.
printf " `echo ${function} | sed -e 's/./ /g'` gdbarch_${function}_ftype ${function})\n"
printf "{\n"
printf " gdbarch->${function} = ${function};\n"
printf "}\n"
elif class_is_variable_p
then
printf "\n"
printf "${returntype}\n"
printf "gdbarch_${function} (struct gdbarch *gdbarch)\n"
printf "{\n"
printf " gdb_assert (gdbarch != NULL);\n"
# Getter-side validation mirrors verify_gdbarch's strategy choice.
if [ "x${invalid_p}" = "x0" ]
then
printf " /* Skip verify of ${function}, invalid_p == 0 */\n"
elif [ -n "${invalid_p}" ]
then
printf " /* Check variable is valid. */\n"
printf " gdb_assert (!(${invalid_p}));\n"
elif [ -n "${predefault}" ]
then
printf " /* Check variable changed from pre-default. */\n"
printf " gdb_assert (gdbarch->${function} != ${predefault});\n"
fi
printf " if (gdbarch_debug >= 2)\n"
printf " fprintf_unfiltered (gdb_stdlog, \"gdbarch_${function} called\\\\n\");\n"
printf " return gdbarch->${function};\n"
printf "}\n"
printf "\n"
printf "void\n"
printf "set_gdbarch_${function} (struct gdbarch *gdbarch,\n"
printf " `echo ${function} | sed -e 's/./ /g'` ${returntype} ${function})\n"
printf "{\n"
printf " gdbarch->${function} = ${function};\n"
printf "}\n"
elif class_is_info_p
then
# Info-class entries are read-only: getter only, no setter.
printf "\n"
printf "${returntype}\n"
printf "gdbarch_${function} (struct gdbarch *gdbarch)\n"
printf "{\n"
printf " gdb_assert (gdbarch != NULL);\n"
printf " if (gdbarch_debug >= 2)\n"
printf " fprintf_unfiltered (gdb_stdlog, \"gdbarch_${function} called\\\\n\");\n"
printf " return gdbarch->${function};\n"
printf "}\n"
fi
done
# All the trailing guff
cat <<EOF
/* Keep a registry of per-architecture data-pointers required by GDB
modules. */
struct gdbarch_data
{
unsigned index;
int init_p;
gdbarch_data_pre_init_ftype *pre_init;
gdbarch_data_post_init_ftype *post_init;
};
struct gdbarch_data_registration
{
struct gdbarch_data *data;
struct gdbarch_data_registration *next;
};
struct gdbarch_data_registry
{
unsigned nr;
struct gdbarch_data_registration *registrations;
};
struct gdbarch_data_registry gdbarch_data_registry =
{
0, NULL,
};
static struct gdbarch_data *
gdbarch_data_register (gdbarch_data_pre_init_ftype *pre_init,
gdbarch_data_post_init_ftype *post_init)
{
struct gdbarch_data_registration **curr;
/* Append the new registration. */
for (curr = &gdbarch_data_registry.registrations;
(*curr) != NULL;
curr = &(*curr)->next);
(*curr) = XNEW (struct gdbarch_data_registration);
(*curr)->next = NULL;
(*curr)->data = XNEW (struct gdbarch_data);
(*curr)->data->index = gdbarch_data_registry.nr++;
(*curr)->data->pre_init = pre_init;
(*curr)->data->post_init = post_init;
(*curr)->data->init_p = 1;
return (*curr)->data;
}
struct gdbarch_data *
gdbarch_data_register_pre_init (gdbarch_data_pre_init_ftype *pre_init)
{
return gdbarch_data_register (pre_init, NULL);
}
struct gdbarch_data *
gdbarch_data_register_post_init (gdbarch_data_post_init_ftype *post_init)
{
return gdbarch_data_register (NULL, post_init);
}
/* Create/delete the gdbarch data vector. */
static void
alloc_gdbarch_data (struct gdbarch *gdbarch)
{
gdb_assert (gdbarch->data == NULL);
gdbarch->nr_data = gdbarch_data_registry.nr;
gdbarch->data = GDBARCH_OBSTACK_CALLOC (gdbarch, gdbarch->nr_data, void *);
}
/* Initialize the current value of the specified per-architecture
data-pointer. */
void
deprecated_set_gdbarch_data (struct gdbarch *gdbarch,
struct gdbarch_data *data,
void *pointer)
{
gdb_assert (data->index < gdbarch->nr_data);
gdb_assert (gdbarch->data[data->index] == NULL);
gdb_assert (data->pre_init == NULL);
gdbarch->data[data->index] = pointer;
}
/* Return the current value of the specified per-architecture
data-pointer. */
void *
gdbarch_data (struct gdbarch *gdbarch, struct gdbarch_data *data)
{
gdb_assert (data->index < gdbarch->nr_data);
if (gdbarch->data[data->index] == NULL)
{
/* The data-pointer isn't initialized, call init() to get a
value. */
if (data->pre_init != NULL)
/* Mid architecture creation: pass just the obstack, and not
the entire architecture, as that way it isn't possible for
pre-init code to refer to undefined architecture
fields. */
gdbarch->data[data->index] = data->pre_init (gdbarch->obstack);
else if (gdbarch->initialized_p
&& data->post_init != NULL)
/* Post architecture creation: pass the entire architecture
(as all fields are valid), but be careful to also detect
recursive references. */
{
gdb_assert (data->init_p);
data->init_p = 0;
gdbarch->data[data->index] = data->post_init (gdbarch);
data->init_p = 1;
}
else
/* The architecture initialization hasn't completed - punt -
hope that the caller knows what they are doing. Once
deprecated_set_gdbarch_data has been initialized, this can be
changed to an internal error. */
return NULL;
gdb_assert (gdbarch->data[data->index] != NULL);
}
return gdbarch->data[data->index];
}
/* Keep a registry of the architectures known by GDB. */
struct gdbarch_registration
{
enum bfd_architecture bfd_architecture;
gdbarch_init_ftype *init;
gdbarch_dump_tdep_ftype *dump_tdep;
struct gdbarch_list *arches;
struct gdbarch_registration *next;
};
static struct gdbarch_registration *gdbarch_registry = NULL;
static void
append_name (const char ***buf, int *nr, const char *name)
{
*buf = XRESIZEVEC (const char *, *buf, *nr + 1);
(*buf)[*nr] = name;
*nr += 1;
}
const char **
gdbarch_printable_names (void)
{
/* Accumulate a list of names based on the registed list of
architectures. */
int nr_arches = 0;
const char **arches = NULL;
struct gdbarch_registration *rego;
for (rego = gdbarch_registry;
rego != NULL;
rego = rego->next)
{
const struct bfd_arch_info *ap;
ap = bfd_lookup_arch (rego->bfd_architecture, 0);
if (ap == NULL)
internal_error (__FILE__, __LINE__,
_("gdbarch_architecture_names: multi-arch unknown"));
do
{
append_name (&arches, &nr_arches, ap->printable_name);
ap = ap->next;
}
while (ap != NULL);
}
append_name (&arches, &nr_arches, NULL);
return arches;
}
void
gdbarch_register (enum bfd_architecture bfd_architecture,
gdbarch_init_ftype *init,
gdbarch_dump_tdep_ftype *dump_tdep)
{
struct gdbarch_registration **curr;
const struct bfd_arch_info *bfd_arch_info;
/* Check that BFD recognizes this architecture */
bfd_arch_info = bfd_lookup_arch (bfd_architecture, 0);
if (bfd_arch_info == NULL)
{
internal_error (__FILE__, __LINE__,
_("gdbarch: Attempt to register "
"unknown architecture (%d)"),
bfd_architecture);
}
/* Check that we haven't seen this architecture before. */
for (curr = &gdbarch_registry;
(*curr) != NULL;
curr = &(*curr)->next)
{
if (bfd_architecture == (*curr)->bfd_architecture)
internal_error (__FILE__, __LINE__,
_("gdbarch: Duplicate registration "
"of architecture (%s)"),
bfd_arch_info->printable_name);
}
/* log it */
if (gdbarch_debug)
fprintf_unfiltered (gdb_stdlog, "register_gdbarch_init (%s, %s)\n",
bfd_arch_info->printable_name,
host_address_to_string (init));
/* Append it */
(*curr) = XNEW (struct gdbarch_registration);
(*curr)->bfd_architecture = bfd_architecture;
(*curr)->init = init;
(*curr)->dump_tdep = dump_tdep;
(*curr)->arches = NULL;
(*curr)->next = NULL;
}
void
register_gdbarch_init (enum bfd_architecture bfd_architecture,
gdbarch_init_ftype *init)
{
gdbarch_register (bfd_architecture, init, NULL);
}
/* Look for an architecture using gdbarch_info. */
struct gdbarch_list *
gdbarch_list_lookup_by_info (struct gdbarch_list *arches,
const struct gdbarch_info *info)
{
for (; arches != NULL; arches = arches->next)
{
if (info->bfd_arch_info != arches->gdbarch->bfd_arch_info)
continue;
if (info->byte_order != arches->gdbarch->byte_order)
continue;
if (info->osabi != arches->gdbarch->osabi)
continue;
if (info->target_desc != arches->gdbarch->target_desc)
continue;
return arches;
}
return NULL;
}
/* Find an architecture that matches the specified INFO. Create a new
architecture if needed. Return that new architecture. */
/* Find an architecture that matches the specified INFO, initializing
   it on first use.  Keeps the registry's per-tdep list of architectures
   sorted Most Recently Used.  Returns NULL when no tdep code matches
   or the tdep code rejects INFO.  */
struct gdbarch *
gdbarch_find_by_info (struct gdbarch_info info)
{
  struct gdbarch *new_gdbarch;
  struct gdbarch_registration *rego;
  /* Fill in missing parts of the INFO struct using a number of
     sources: "set ..."; INFOabfd supplied; and the global
     defaults.  */
  gdbarch_info_fill (&info);
  /* Must have found some sort of architecture.  */
  gdb_assert (info.bfd_arch_info != NULL);
  if (gdbarch_debug)
    {
      fprintf_unfiltered (gdb_stdlog,
			  "gdbarch_find_by_info: info.bfd_arch_info %s\n",
			  (info.bfd_arch_info != NULL
			   ? info.bfd_arch_info->printable_name
			   : "(null)"));
      fprintf_unfiltered (gdb_stdlog,
			  "gdbarch_find_by_info: info.byte_order %d (%s)\n",
			  info.byte_order,
			  (info.byte_order == BFD_ENDIAN_BIG ? "big"
			   : info.byte_order == BFD_ENDIAN_LITTLE ? "little"
			   : "default"));
      fprintf_unfiltered (gdb_stdlog,
			  "gdbarch_find_by_info: info.osabi %d (%s)\n",
			  info.osabi, gdbarch_osabi_name (info.osabi));
      fprintf_unfiltered (gdb_stdlog,
			  "gdbarch_find_by_info: info.abfd %s\n",
			  host_address_to_string (info.abfd));
      fprintf_unfiltered (gdb_stdlog,
			  "gdbarch_find_by_info: info.tdep_info %s\n",
			  host_address_to_string (info.tdep_info));
    }
  /* Find the tdep code that knows about this architecture.  */
  for (rego = gdbarch_registry;
       rego != NULL;
       rego = rego->next)
    if (rego->bfd_architecture == info.bfd_arch_info->arch)
      break;
  if (rego == NULL)
    {
      if (gdbarch_debug)
	fprintf_unfiltered (gdb_stdlog, "gdbarch_find_by_info: "
			    "No matching architecture\n");
      /* Fixed: was "return 0"; use NULL for consistency with the
	 other pointer returns in this function.  */
      return NULL;
    }
  /* Ask the tdep code for an architecture that matches "info".  */
  new_gdbarch = rego->init (info, rego->arches);
  /* Did the tdep code like it?  No.  Reject the change and revert to
     the old architecture.  */
  if (new_gdbarch == NULL)
    {
      if (gdbarch_debug)
	fprintf_unfiltered (gdb_stdlog, "gdbarch_find_by_info: "
			    "Target rejected architecture\n");
      return NULL;
    }
  /* Is this a pre-existing architecture (as determined by already
     being initialized)?  Move it to the front of the architecture
     list (keeping the list sorted Most Recently Used).  */
  if (new_gdbarch->initialized_p)
    {
      struct gdbarch_list **list;
      struct gdbarch_list *self;
      if (gdbarch_debug)
	fprintf_unfiltered (gdb_stdlog, "gdbarch_find_by_info: "
			    "Previous architecture %s (%s) selected\n",
			    host_address_to_string (new_gdbarch),
			    new_gdbarch->bfd_arch_info->printable_name);
      /* Find the existing arch in the list.  (Fixed: the source text
	 contained the mojibake "®o" where "&rego" was intended.)  */
      for (list = &rego->arches;
	   (*list) != NULL && (*list)->gdbarch != new_gdbarch;
	   list = &(*list)->next);
      /* It had better be in the list of architectures.  */
      gdb_assert ((*list) != NULL && (*list)->gdbarch == new_gdbarch);
      /* Unlink SELF.  */
      self = (*list);
      (*list) = self->next;
      /* Insert SELF at the front.  */
      self->next = rego->arches;
      rego->arches = self;
      /* Return it.  */
      return new_gdbarch;
    }
  /* It's a new architecture.  */
  if (gdbarch_debug)
    fprintf_unfiltered (gdb_stdlog, "gdbarch_find_by_info: "
			"New architecture %s (%s) selected\n",
			host_address_to_string (new_gdbarch),
			new_gdbarch->bfd_arch_info->printable_name);
  /* Insert the new architecture into the front of the architecture
     list (keep the list sorted Most Recently Used).  */
  {
    struct gdbarch_list *self = XNEW (struct gdbarch_list);
    self->next = rego->arches;
    self->gdbarch = new_gdbarch;
    rego->arches = self;
  }
  /* Check that the newly installed architecture is valid.  Plug in
     any post init values.  */
  new_gdbarch->dump_tdep = rego->dump_tdep;
  verify_gdbarch (new_gdbarch);
  new_gdbarch->initialized_p = 1;
  if (gdbarch_debug)
    gdbarch_dump (new_gdbarch, gdb_stdlog);
  return new_gdbarch;
}
/* Make the specified architecture current.  */
void
set_target_gdbarch (struct gdbarch *new_gdbarch)
{
  /* Only fully initialized architectures may be installed.  */
  gdb_assert (new_gdbarch != NULL);
  gdb_assert (new_gdbarch->initialized_p);
  /* Install on the current inferior, then notify observers and
     invalidate the register cache (order matters: observers see the
     new arch before registers are flushed).  */
  current_inferior ()->gdbarch = new_gdbarch;
  gdb::observers::architecture_changed.notify (new_gdbarch);
  registers_changed ();
}
/* Return the current inferior's arch.  Convenience accessor; never
   NULL once an architecture has been selected.  */
struct gdbarch *
target_gdbarch (void)
{
  return current_inferior ()->gdbarch;
}
/* Module initializer: register the "maint set/show arch" debug knob
   that drives the gdbarch_debug tracing used throughout this file.  */
void
_initialize_gdbarch (void)
{
  add_setshow_zuinteger_cmd ("arch", class_maintenance, &gdbarch_debug, _("\\
Set architecture debugging."), _("\\
Show architecture debugging."), _("\\
When non-zero, architecture debugging is enabled."),
                            NULL,
                            show_gdbarch_debug,
                            &setdebuglist, &showdebuglist);
}
EOF
# close things off
exec 1>&2
#../move-if-change new-gdbarch.c gdbarch.c
compare_new gdbarch.c
|
#!/bin/bash
# CI bootstrap: installs the compiler shims, Python (via pyenv), Rust,
# Node.js and Java 8 needed before the test suite runs.
# Exit immediately if a command returns a non-zero status.
set -e
# RVM overrides the cd, popd, and pushd shell commands, causing the
# "shell_session_update: command not found" error on macOS when executing those
# commands.
unset -f cd popd pushd
################
# Compiler setup
################
# We can't use sudo, so we have to approximate the behaviour of the following:
# $ sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.9 90
mkdir -p ${HOME}/bin
# NOTE(review): these `ln -s` calls fail under `set -e` if the links already
# exist; fine on a fresh CI image, but a rerun on a reused machine would abort.
if [ "${YCM_COMPILER}" == "clang" ]; then
  ln -s /usr/bin/clang++ ${HOME}/bin/c++
  ln -s /usr/bin/clang ${HOME}/bin/cc
  # Tell CMake to compile with libc++ when using Clang.
  export EXTRA_CMAKE_ARGS="${EXTRA_CMAKE_ARGS} -DHAS_LIBCXX11=ON"
else
  ln -s /usr/bin/g++-4.9 ${HOME}/bin/c++
  ln -s /usr/bin/gcc-4.9 ${HOME}/bin/cc
fi
ln -s /usr/bin/gcov-4.9 ${HOME}/bin/gcov
if [ -n "${YCM_CLANG_TIDY}" ]; then
  ln -s /usr/bin/clang-tidy-3.9 ${HOME}/bin/clang-tidy
fi
# Put the shimmed toolchain ahead of the system one.
export PATH=${HOME}/bin:${PATH}
##############
# Python setup
##############
PYENV_ROOT="${HOME}/.pyenv"
# Clone pyenv only when the (possibly cached) checkout is missing or corrupt.
if [ ! -d "${PYENV_ROOT}/.git" ]; then
  rm -rf ${PYENV_ROOT}
  git clone https://github.com/yyuu/pyenv.git ${PYENV_ROOT}
fi
pushd ${PYENV_ROOT}
git fetch --tags
git checkout v1.2.1
popd
export PATH="${PYENV_ROOT}/bin:${PATH}"
eval "$(pyenv init -)"
if [ "${YCMD_PYTHON_VERSION}" == "2.7" ]; then
  # Tests are failing on Python 2.7.0 with the exception "TypeError: argument
  # can't be <type 'unicode'>" and importing the coverage module fails on Python
  # 2.7.1.
  PYENV_VERSION="2.7.2"
else
  PYENV_VERSION="3.5.1"
fi
# In order to work with ycmd, python *must* be built as a shared library. This
# is set via the PYTHON_CONFIGURE_OPTS option.
export PYTHON_CONFIGURE_OPTS="--enable-shared"
pyenv install --skip-existing ${PYENV_VERSION}
pyenv rehash
pyenv global ${PYENV_VERSION}
# It is quite easy to get the above series of steps wrong. Verify that the
# version of python actually in the path and used is the version that was
# requested, and fail the build if we broke the travis setup
# (only major.minor are compared, e.g. an installed "2.7.2" reports "2.7").
python_version=$(python -c 'import sys; print( "{0}.{1}".format( sys.version_info[0], sys.version_info[1] ) )')
echo "Checking python version (actual ${python_version} vs expected ${YCMD_PYTHON_VERSION})"
test ${python_version} == ${YCMD_PYTHON_VERSION}
pip install -r test_requirements.txt
# Enable coverage for Python subprocesses. See:
# http://coverage.readthedocs.io/en/latest/subprocess.html
echo -e "import coverage\ncoverage.process_startup()" > \
${PYENV_ROOT}/versions/${PYENV_VERSION}/lib/python${YCMD_PYTHON_VERSION}/site-packages/sitecustomize.py
############
# Rust setup
############
curl https://sh.rustup.rs -sSf | sh -s -- -y
export PATH="${HOME}/.cargo/bin:${PATH}"
rustup update
rustc -Vv
cargo -V
#################################
# JavaScript and TypeScript setup
#################################
# Pre-installed Node.js is too old. Install latest Node.js v4 LTS.
nvm install 4
###############
# Java 8 setup
###############
# Make sure we have the appropriate java for jdt.ls
# jdk_switcher may exit non-zero harmlessly, so suspend errexit around it.
set +e
jdk_switcher use oraclejdk8
set -e
java -version
# Extract the quoted version string, e.g. `1.8.0_151`.
JAVA_VERSION=$(java -version 2>&1 | awk -F '"' '/version/ {print $2}')
# NOTE(review): `<` inside [[ ]] is a lexicographic string comparison; it
# happens to work for "1.x"-style version strings.
if [[ "$JAVA_VERSION" < "1.8" ]]; then
  echo "Java version $JAVA_VERSION is too old" 1>&2
  exit 1
fi
# Done. Undo settings which break travis scripts.
set +e
|
#! /usr/bin/env bash
# Build static HTML slideshows from every *.reveal.md file found under the
# repository root. These files are also ignored by the jupyterbook script.
# Install reveal-md on demand when it is not already on the PATH.
if ! command -v reveal-md > /dev/null 2>&1; then
  npm install -g reveal-md
fi
# Render all decks into static_slides/slideshows with the ANU theme.
mkdir -p static_slides
reveal-md --static static_slides/slideshows \
  --theme css/anu.css \
  --glob '**/*.reveal.md' \
  --separator '<--o-->' \
  --vertical-separator '<--v-->' \
  --static-dirs movies,images
|
/**
 * Decide whether $number is a prime using trial division.
 *
 * Fixes over the original:
 *  - numbers below 2 (0, 1 and negatives) incorrectly returned true;
 *  - sqrt($number) was recomputed on every loop iteration.
 *
 * @param int $number candidate value
 * @return bool true when $number is prime
 */
function isPrime($number)
{
    // Primality is only defined for integers greater than 1.
    if ($number < 2) {
        return false;
    }
    // Any composite has a divisor no larger than its square root.
    $limit = (int) sqrt($number);
    for ($i = 2; $i <= $limit; $i++) {
        if ($number % $i == 0) {
            return false;
        }
    }
    return true;
}
<reponame>opentaps/opentaps-1<filename>opentaps/opentaps-common/src/common/org/opentaps/gwt/common/client/listviews/CurrencyColumnConfig.java
/*
* Copyright (c) Open Source Strategies, Inc.
*
* Opentaps is free software: you can redistribute it and/or modify it
* under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Opentaps is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Opentaps. If not, see <http://www.gnu.org/licenses/>.
*/
package org.opentaps.gwt.common.client.listviews;
import java.math.BigDecimal;
import com.google.gwt.i18n.client.NumberFormat;
import com.gwtext.client.data.Record;
import com.gwtext.client.data.Store;
import com.gwtext.client.widgets.grid.CellMetadata;
import com.gwtext.client.widgets.grid.ColumnConfig;
import com.gwtext.client.widgets.grid.Renderer;
import org.opentaps.gwt.common.client.UtilUi;
/**
 * A ColumnConfig that displays a currency amount.
 * Can be set by giving both the amount column and the currency code column, or by setting
 * a static currency code.
 */
public class CurrencyColumnConfig extends ColumnConfig implements Renderer {

    private static final String MODULE = CurrencyColumnConfig.class.getName();

    /** Statically configured currency code; used when no currency column is set. */
    private String currencyCode;
    /** Name of the record field holding the per-row currency code, or null. */
    private String currencyIndex;

    /**
     * Create a new CurrencyColumnConfig.
     * This will use the default currency code according to the user locale (normally that is NOT wanted)
     * unless the currency is set with <code>setCurrencyCode()</code>.
     * @param header the column header
     * @param dataIndex the data index (the field name of the Store associated with the Grid containing the amount)
     */
    public CurrencyColumnConfig(String header, String dataIndex) {
        this(header, null, dataIndex);
    }

    /**
     * Create a new CurrencyColumnConfig.
     *
     * @param header the column header
     * @param currencyIndex the currency index (the field name of the Store associated with the Grid containing the currency code)
     * @param dataIndex the data index (the field name of the Store associated with the Grid containing the amount)
     */
    public CurrencyColumnConfig(String header, String currencyIndex, String dataIndex) {
        super(header, dataIndex);
        this.currencyIndex = currencyIndex;
        this.setRenderer(this);
    }

    /**
     * The implementation of the <code>Renderer</code> interface that produce the content of the cell.
     * @param value the value of the current record for this cell
     * @param cellMetadata a <code>CellMetadata</code> value
     * @param record the current <code>Record</code> value
     * @param rowIndex the row index of the current record
     * @param colNum the column index of this cell
     * @param store a <code>Store</code> value
     * @return the cell content as an HTML string
     */
    public String render(Object value, CellMetadata cellMetadata, Record record, int rowIndex, int colNum, Store store) {
        if (value == null) {
            return "";
        }
        String amount = (String) value;
        // Resolve the currency for this row: prefer the per-record currency
        // column when configured, else fall back to the static setting.
        // Bug fix: the original assigned the record value to the currencyCode
        // FIELD (clobbering the configured code for every later row) and then
        // never read the local variable it had declared for this purpose.
        String currency = currencyCode;
        if (currencyIndex != null) {
            currency = record.getAsString(currencyIndex);
        }
        NumberFormat fmt;
        if (currency == null) {
            // No currency known: fall back to the user-locale default format.
            fmt = NumberFormat.getCurrencyFormat();
        } else {
            try {
                fmt = NumberFormat.getCurrencyFormat(currency);
            } catch (Exception e) {
                // Note: there is a bug in getCurrencyFormat and it does not work by looking at the properties file
                // but is limited to 4 hard coded currencies
                UtilUi.logWarning("Cannot set currency format with currency code [" + currency + "] " + e.getMessage(), MODULE, "render");
                // manually build the format and use the currency code as the currency symbol
                // this pattern is the currency pattern but with the currency symbol removed
                fmt = NumberFormat.getFormat("#,##0.00;(#,##0.00)");
                return currency + " " + fmt.format(new BigDecimal(amount).doubleValue());
            }
        }
        return fmt.format(new BigDecimal(amount).doubleValue());
    }

    /**
     * Sets the currency code to use.
     * @param currencyCode a 3 chars <code>String</code> value
     */
    public void setCurrencyCode(String currencyCode) {
        this.currencyCode = currencyCode;
    }
}
|
<filename>finalProject/src/main/java/co/finalproject/farm/app/community/service/CommunityReplyVO.java
package co.finalproject.farm.app.community.service;
import java.util.Date;
import com.fasterxml.jackson.annotation.JsonFormat;
import lombok.Data;
// Lombok @Data generates getters/setters/equals/hashCode/toString.
@Data
public class CommunityReplyVO {
    private int comm_rep_no; // reply number (identifier of this comment)
    private String user_id; // id of the user who wrote the reply
    private int comm_no; // number of the community post this reply belongs to
    private String comm_rep_content; // reply body text
    // The date is exchanged as JSON, so serialize it in yyyy-MM-dd format.
    @JsonFormat(pattern = "yyyy-MM-dd")
    private Date comm_rep_date; // date the reply was written
}
|
// NOTE: This file was generated by the ServiceGenerator.
// ----------------------------------------------------------------------------
// API:
// Admin Reports API (admin/reports_v1)
// Description:
// Fetches reports for the administrators of G Suite customers about the
// usage, collaboration, security, and risk for their users.
// Documentation:
// https://developers.google.com/admin-sdk/reports/
#if GTLR_BUILT_AS_FRAMEWORK
#import "GTLR/GTLRObject.h"
#else
#import "GTLRObject.h"
#endif
#if GTLR_RUNTIME_VERSION != 3000
#error This file was generated by a different version of ServiceGenerator which is incompatible with this GTLR library source.
#endif
@class GTLRReports_Activity;
@class GTLRReports_Activity_Actor;
@class GTLRReports_Activity_Events_Item;
@class GTLRReports_Activity_Events_Item_Parameters_Item;
@class GTLRReports_Activity_Events_Item_Parameters_Item_MessageValue;
@class GTLRReports_Activity_Events_Item_Parameters_Item_MultiMessageValue_Item;
@class GTLRReports_Activity_Id;
@class GTLRReports_Channel_Params;
@class GTLRReports_NestedParameter;
@class GTLRReports_UsageReport;
@class GTLRReports_UsageReport_Entity;
@class GTLRReports_UsageReport_Parameters_Item;
@class GTLRReports_UsageReport_Parameters_Item_MsgValue_Item;
@class GTLRReports_UsageReports_Warnings_Item;
@class GTLRReports_UsageReports_Warnings_Item_Data_Item;
// Generated comments include content from the discovery document; avoid them
// causing warnings since clang's checks are some what arbitrary.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdocumentation"
NS_ASSUME_NONNULL_BEGIN
/**
* JSON template for a collection of activites.
*
* @note This class supports NSFastEnumeration and indexed subscripting over
* its "items" property. If returned as the result of a query, it should
* support automatic pagination (when @c shouldFetchNextPages is
* enabled).
*/
@interface GTLRReports_Activities : GTLRCollectionObject
/** ETag of the resource. */
@property(nonatomic, copy, nullable) NSString *ETag;
/**
* Each record in read response.
*
* @note This property is used to support NSFastEnumeration and indexed
* subscripting on this class.
*/
@property(nonatomic, strong, nullable) NSArray<GTLRReports_Activity *> *items;
/** Kind of list response this is. */
@property(nonatomic, copy, nullable) NSString *kind;
/** Token for retrieving the next page */
@property(nonatomic, copy, nullable) NSString *nextPageToken;
@end
/**
* JSON template for the activity resource.
*/
@interface GTLRReports_Activity : GTLRObject
/** User doing the action. */
@property(nonatomic, strong, nullable) GTLRReports_Activity_Actor *actor;
/** ETag of the entry. */
@property(nonatomic, copy, nullable) NSString *ETag;
/** Activity events. */
@property(nonatomic, strong, nullable) NSArray<GTLRReports_Activity_Events_Item *> *events;
/**
* Unique identifier for each activity record.
*
* identifier property maps to 'id' in JSON (to avoid Objective C's 'id').
*/
@property(nonatomic, strong, nullable) GTLRReports_Activity_Id *identifier;
/** IP Address of the user doing the action. */
@property(nonatomic, copy, nullable) NSString *ipAddress;
/** Kind of resource this is. */
@property(nonatomic, copy, nullable) NSString *kind;
/** Domain of source customer. */
@property(nonatomic, copy, nullable) NSString *ownerDomain;
@end
/**
* User doing the action.
*/
@interface GTLRReports_Activity_Actor : GTLRObject
/** User or OAuth 2LO request. */
@property(nonatomic, copy, nullable) NSString *callerType;
/** Email address of the user. */
@property(nonatomic, copy, nullable) NSString *email;
/** For OAuth 2LO API requests, consumer_key of the requestor. */
@property(nonatomic, copy, nullable) NSString *key;
/** Obfuscated user id of the user. */
@property(nonatomic, copy, nullable) NSString *profileId;
@end
/**
* GTLRReports_Activity_Events_Item
*/
@interface GTLRReports_Activity_Events_Item : GTLRObject
/** Name of event. */
@property(nonatomic, copy, nullable) NSString *name;
/** Parameter value pairs for various applications. */
@property(nonatomic, strong, nullable) NSArray<GTLRReports_Activity_Events_Item_Parameters_Item *> *parameters;
/** Type of event. */
@property(nonatomic, copy, nullable) NSString *type;
@end
/**
* Unique identifier for each activity record.
*/
@interface GTLRReports_Activity_Id : GTLRObject
/** Application name to which the event belongs. */
@property(nonatomic, copy, nullable) NSString *applicationName;
/** Obfuscated customer ID of the source customer. */
@property(nonatomic, copy, nullable) NSString *customerId;
/** Time of occurrence of the activity. */
@property(nonatomic, strong, nullable) GTLRDateTime *time;
/**
* Unique qualifier if multiple events have the same time.
*
* Uses NSNumber of longLongValue.
*/
@property(nonatomic, strong, nullable) NSNumber *uniqueQualifier;
@end
/**
* GTLRReports_Activity_Events_Item_Parameters_Item
*/
@interface GTLRReports_Activity_Events_Item_Parameters_Item : GTLRObject
/**
* Boolean value of the parameter.
*
* Uses NSNumber of boolValue.
*/
@property(nonatomic, strong, nullable) NSNumber *boolValue;
/**
* Integral value of the parameter.
*
* Uses NSNumber of longLongValue.
*/
@property(nonatomic, strong, nullable) NSNumber *intValue;
/** Nested value of the parameter. */
@property(nonatomic, strong, nullable) GTLRReports_Activity_Events_Item_Parameters_Item_MessageValue *messageValue;
/**
* Multi-int value of the parameter.
*
* Uses NSNumber of longLongValue.
*/
@property(nonatomic, strong, nullable) NSArray<NSNumber *> *multiIntValue;
/** Nested values of the parameter. */
@property(nonatomic, strong, nullable) NSArray<GTLRReports_Activity_Events_Item_Parameters_Item_MultiMessageValue_Item *> *multiMessageValue;
/** Multi-string value of the parameter. */
@property(nonatomic, strong, nullable) NSArray<NSString *> *multiValue;
/** The name of the parameter. */
@property(nonatomic, copy, nullable) NSString *name;
/** String value of the parameter. */
@property(nonatomic, copy, nullable) NSString *value;
@end
/**
* Nested value of the parameter.
*/
@interface GTLRReports_Activity_Events_Item_Parameters_Item_MessageValue : GTLRObject
/** Looping to get parameter values. */
@property(nonatomic, strong, nullable) NSArray<GTLRReports_NestedParameter *> *parameter;
@end
/**
* GTLRReports_Activity_Events_Item_Parameters_Item_MultiMessageValue_Item
*/
@interface GTLRReports_Activity_Events_Item_Parameters_Item_MultiMessageValue_Item : GTLRObject
/** Parameter value. */
@property(nonatomic, strong, nullable) NSArray<GTLRReports_NestedParameter *> *parameter;
@end
/**
* An notification channel used to watch for resource changes.
*/
@interface GTLRReports_Channel : GTLRObject
/** The address where notifications are delivered for this channel. */
@property(nonatomic, copy, nullable) NSString *address;
/**
* Date and time of notification channel expiration, expressed as a Unix
* timestamp, in milliseconds. Optional.
*
* Uses NSNumber of longLongValue.
*/
@property(nonatomic, strong, nullable) NSNumber *expiration;
/**
* A UUID or similar unique string that identifies this channel.
*
* identifier property maps to 'id' in JSON (to avoid Objective C's 'id').
*/
@property(nonatomic, copy, nullable) NSString *identifier;
/**
* Identifies this as a notification channel used to watch for changes to a
* resource. Value: the fixed string "api#channel".
*/
@property(nonatomic, copy, nullable) NSString *kind;
/** Additional parameters controlling delivery channel behavior. Optional. */
@property(nonatomic, strong, nullable) GTLRReports_Channel_Params *params;
/**
* A Boolean value to indicate whether payload is wanted. Optional.
*
* Uses NSNumber of boolValue.
*/
@property(nonatomic, strong, nullable) NSNumber *payload;
/**
* An opaque ID that identifies the resource being watched on this channel.
* Stable across different API versions.
*/
@property(nonatomic, copy, nullable) NSString *resourceId;
/** A version-specific identifier for the watched resource. */
@property(nonatomic, copy, nullable) NSString *resourceUri;
/**
* An arbitrary string delivered to the target address with each notification
* delivered over this channel. Optional.
*/
@property(nonatomic, copy, nullable) NSString *token;
/** The type of delivery mechanism used for this channel. */
@property(nonatomic, copy, nullable) NSString *type;
@end
/**
* Additional parameters controlling delivery channel behavior. Optional.
*
* @note This class is documented as having more properties of NSString. Use @c
* -additionalJSONKeys and @c -additionalPropertyForName: to get the list
* of properties and then fetch them; or @c -additionalProperties to
* fetch them all at once.
*/
@interface GTLRReports_Channel_Params : GTLRObject
@end
/**
* JSON template for a parameter used in various reports.
*/
@interface GTLRReports_NestedParameter : GTLRObject
/**
* Boolean value of the parameter.
*
* Uses NSNumber of boolValue.
*/
@property(nonatomic, strong, nullable) NSNumber *boolValue;
/**
* Integral value of the parameter.
*
* Uses NSNumber of longLongValue.
*/
@property(nonatomic, strong, nullable) NSNumber *intValue;
/**
* Multiple boolean values of the parameter.
*
* Uses NSNumber of boolValue.
*/
@property(nonatomic, strong, nullable) NSArray<NSNumber *> *multiBoolValue;
/**
* Multiple integral values of the parameter.
*
* Uses NSNumber of longLongValue.
*/
@property(nonatomic, strong, nullable) NSArray<NSNumber *> *multiIntValue;
/** Multiple string values of the parameter. */
@property(nonatomic, strong, nullable) NSArray<NSString *> *multiValue;
/** The name of the parameter. */
@property(nonatomic, copy, nullable) NSString *name;
/** String value of the parameter. */
@property(nonatomic, copy, nullable) NSString *value;
@end
/**
* JSON template for a usage report.
*/
@interface GTLRReports_UsageReport : GTLRObject
/** The date to which the record belongs. */
@property(nonatomic, copy, nullable) NSString *date;
/** Information about the type of the item. */
@property(nonatomic, strong, nullable) GTLRReports_UsageReport_Entity *entity;
/** ETag of the resource. */
@property(nonatomic, copy, nullable) NSString *ETag;
/** The kind of object. */
@property(nonatomic, copy, nullable) NSString *kind;
/** Parameter value pairs for various applications. */
@property(nonatomic, strong, nullable) NSArray<GTLRReports_UsageReport_Parameters_Item *> *parameters;
@end
/**
* Information about the type of the item.
*/
@interface GTLRReports_UsageReport_Entity : GTLRObject
/** Obfuscated customer id for the record. */
@property(nonatomic, copy, nullable) NSString *customerId;
/**
* Object key. Only relevant if entity.type = "OBJECT" Note: external-facing
* name of report is "Entities" rather than "Objects".
*/
@property(nonatomic, copy, nullable) NSString *entityId;
/** Obfuscated user id for the record. */
@property(nonatomic, copy, nullable) NSString *profileId;
/** The type of item, can be customer, user, or entity (aka. object). */
@property(nonatomic, copy, nullable) NSString *type;
/** user's email. Only relevant if entity.type = "USER" */
@property(nonatomic, copy, nullable) NSString *userEmail;
@end
/**
* GTLRReports_UsageReport_Parameters_Item
*/
@interface GTLRReports_UsageReport_Parameters_Item : GTLRObject
/**
* Boolean value of the parameter.
*
* Uses NSNumber of boolValue.
*/
@property(nonatomic, strong, nullable) NSNumber *boolValue;
/** RFC 3339 formatted value of the parameter. */
@property(nonatomic, strong, nullable) GTLRDateTime *datetimeValue;
/**
* Integral value of the parameter.
*
* Uses NSNumber of longLongValue.
*/
@property(nonatomic, strong, nullable) NSNumber *intValue;
/** Nested message value of the parameter. */
@property(nonatomic, strong, nullable) NSArray<GTLRReports_UsageReport_Parameters_Item_MsgValue_Item *> *msgValue;
/** The name of the parameter. */
@property(nonatomic, copy, nullable) NSString *name;
/** String value of the parameter. */
@property(nonatomic, copy, nullable) NSString *stringValue;
@end
/**
* GTLRReports_UsageReport_Parameters_Item_MsgValue_Item
*
* @note This class is documented as having more properties of any valid JSON
* type. Use @c -additionalJSONKeys and @c -additionalPropertyForName: to
* get the list of properties and then fetch them; or @c
* -additionalProperties to fetch them all at once.
*/
@interface GTLRReports_UsageReport_Parameters_Item_MsgValue_Item : GTLRObject
@end
/**
* JSON template for a collection of usage reports.
*/
@interface GTLRReports_UsageReports : GTLRObject
/** ETag of the resource. */
@property(nonatomic, copy, nullable) NSString *ETag;
/** The kind of object. */
@property(nonatomic, copy, nullable) NSString *kind;
/** Token for retrieving the next page */
@property(nonatomic, copy, nullable) NSString *nextPageToken;
/** Various application parameter records. */
@property(nonatomic, strong, nullable) NSArray<GTLRReports_UsageReport *> *usageReports;
/** Warnings if any. */
@property(nonatomic, strong, nullable) NSArray<GTLRReports_UsageReports_Warnings_Item *> *warnings;
@end
/**
* GTLRReports_UsageReports_Warnings_Item
*/
@interface GTLRReports_UsageReports_Warnings_Item : GTLRObject
/** Machine readable code / warning type. */
@property(nonatomic, copy, nullable) NSString *code;
/** Key-Value pairs to give detailed information on the warning. */
@property(nonatomic, strong, nullable) NSArray<GTLRReports_UsageReports_Warnings_Item_Data_Item *> *data;
/** Human readable message for the warning. */
@property(nonatomic, copy, nullable) NSString *message;
@end
/**
* GTLRReports_UsageReports_Warnings_Item_Data_Item
*/
@interface GTLRReports_UsageReports_Warnings_Item_Data_Item : GTLRObject
/**
* Key associated with a key-value pair to give detailed information on the
* warning.
*/
@property(nonatomic, copy, nullable) NSString *key;
/**
* Value associated with a key-value pair to give detailed information on the
* warning.
*/
@property(nonatomic, copy, nullable) NSString *value;
@end
NS_ASSUME_NONNULL_END
#pragma clang diagnostic pop
|
#!/bin/bash
# Provision the 'reddit' monolith app: fetch sources, install gems, start puma.
# Fix: abort on the first failing step -- previously a failed cd/clone still
# ran `bundle install` and started puma against the wrong directory.
set -e
cd /home/appuser
git clone -b monolith https://github.com/express42/reddit.git
cd reddit
bundle install
# Start the application server daemonized.
puma -d
|
def findCorrelatedColumn(df, targetCol):
    """Return the name of the column most correlated with ``targetCol``.

    Bug fix: the original took the max over ``df.corr()[targetCol]``
    *including* the target column itself, whose self-correlation is 1.0,
    so the function always returned ``targetCol``. The target is now
    excluded before taking the maximum. (The unused ``columns`` local
    was also removed.)

    :param df: pandas DataFrame of numeric columns
    :param targetCol: column whose strongest correlate is wanted
    :return: name of the other column with the highest correlation
    """
    correlations = df.corr()[targetCol].drop(targetCol)
    # idxmax returns the label of the first maximal value, matching the
    # original's "first index at the max" tie-breaking behaviour.
    return correlations.idxmax()
export { BitbucketCI } from './BitbucketCI';
|
#!/bin/bash
# After a local (non-CI) test run, stop the 'localstack' docker container
# if it is still up. Under CI no local container is managed here.
source ./testFuncs.sh
if [ "$CI" == "true" ]; then
  echo "Tested under CI"
else
  echo "NOT tested under CI"
  # Defined in testFuncs.sh (not visible here); sets LOCALSTACK_STATUS and
  # presumably LOCALSTACK_NAME / LOCALSTACK_ID for the named container.
  readDockerContainerState 'localstack'
  case "$LOCALSTACK_STATUS" in
    "up")
      # NOTE(review): the captured $id is never used -- the message below
      # prints $LOCALSTACK_ID instead. Confirm whether $id was intended.
      id=$(docker stop $LOCALSTACK_NAME)
      echo "Stopped '${LOCALSTACK_ID:0:12}' running as '$LOCALSTACK_NAME'"
      ;;
  esac
fi
|
<gh_stars>0
import React, { useEffect } from 'react';
import styled from 'styled-components';
import Aos from 'aos';
import 'aos/dist/aos.css';
// Full-bleed hero <section>: a dark-to-red gradient is layered over the
// background image supplied via the `Image` prop.
const MissionStyled = styled.section`
  background-image: linear-gradient(
      180deg,
      rgba(0, 0, 0, 0.3) 2.23%,
      rgba(230, 57, 74, 0.5) 82.96%
    ),
    url(${(prop) => prop.Image});
  background-size: cover;
  background-position: center;
  padding: 80px 0px;
  display: flex;
  align-items: center;
  justify-content: center;
  object-fit: cover;
  color: #fff;
`;
// Centered mission-statement paragraph.
// Fix: `line-height` was declared twice (30px then 39px); only the later
// 39px declaration ever applied, so the dead 30px rule is removed.
const P = styled.p`
  padding: 6em 0 1em 0;
  text-align: center;
  font-size: 18px;
  font-weight: 500;
  letter-spacing: 1px;
  line-height: 39px;
  @media (max-width: 360px) {
    padding: 1rem 0;
  }
`;
function Mission({ Image, text }) {
useEffect(() => {
Aos.init({
duration: 2000,
delay: 500,
once: true,
mirror: true,
});
}, []);
return (
<>
<MissionStyled Image={Image} data-aos='fade-down'>
<div className='container'>
<P data-aos='fade-up'>{text}</P>
</div>
</MissionStyled>
</>
);
}
export default Mission;
|
import datetime
import pytest
from rest_framework.test import APIClient
from rest_framework import status
from django.urls import reverse
from ..models import DemoModel
pytestmark = pytest.mark.django_db
@pytest.fixture
def api_client():
    """Provide a fresh DRF ``APIClient`` for each test."""
    return APIClient()
def test_crud(api_client):
    """End-to-end exercise of the DemoModel REST API.

    Covers create (POST), read-back of all fields including model defaults,
    four validation failures on PATCH, one successful partial update, and a
    final check that the search fields were populated in the database.
    """
    today = datetime.date.today()
    today_string = today.strftime("%Y-%m-%d")
    # --- Create ---
    data = {
        "name": "Foo",
        "email": "<EMAIL>",
        "date": "1999-10-25",
        "text": "some text",
        "number": 2,
    }
    response = api_client.post(reverse("api-demomodel-list"), data=data)
    assert response.status_code == status.HTTP_201_CREATED
    # --- Read back: submitted fields and server-side defaults ---
    response = api_client.get(reverse("api-demomodel-list"))
    assert response.status_code == status.HTTP_200_OK
    response_data = response.json()
    pk = response_data[0]["id"]
    assert response_data[0]["name"] == "Foo"
    assert response_data[0]["email"] == "<EMAIL>"
    assert response_data[0]["date"] == "1999-10-25"
    assert response_data[0]["text"] == "some text"
    assert response_data[0]["number"] == 2
    assert response_data[0]["info"] == ""
    assert response_data[0]["default_date"] == today_string
    assert response_data[0]["default_number"] == 1
    assert response_data[0]["default_char"] == "foo default"
    # --- Validation failures: each bad PATCH must be rejected with 400 ---
    new_bad_email = {"email": "foo"}
    response = api_client.patch(
        reverse("api-demomodel-detail", kwargs={"pk": pk}), data=new_bad_email
    )
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    new_bad_number = {"number": "foo"}
    response = api_client.patch(
        reverse("api-demomodel-detail", kwargs={"pk": pk}), data=new_bad_number
    )
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    new_bad_date = {"date": "foo"}
    response = api_client.patch(
        reverse("api-demomodel-detail", kwargs={"pk": pk}), data=new_bad_date
    )
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    new_bad_name = {"name": "toolongforthevalidator"}
    response = api_client.patch(
        reverse("api-demomodel-detail", kwargs={"pk": pk}), data=new_bad_name
    )
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    # --- Successful partial update: only email changes ---
    new_email = {"email": "<EMAIL>"}
    response = api_client.patch(
        reverse("api-demomodel-detail", kwargs={"pk": pk}), data=new_email
    )
    assert response.status_code == status.HTTP_200_OK
    response_data = response.json()
    assert response_data["email"] == "<EMAIL>"
    # other data is still unchanged.
    assert response_data["name"] == "Foo"
    assert response_data["date"] == "1999-10-25"
    assert response_data["text"] == "some text"
    assert response_data["number"] == 2
    assert response_data["info"] == ""
    assert response_data["default_date"] == today_string
    assert response_data["default_number"] == 1
    assert response_data["default_char"] == "foo default"
    # Test SearchFields properly populated
    assert DemoModel.objects.get(email="<EMAIL>")
    assert DemoModel.objects.get(name="Foo")
    assert DemoModel.objects.get(number=2)
    assert DemoModel.objects.get(date=datetime.date(1999, 10, 25))
|
/*
* Copyright (c) 2018, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.wso2.testgrid.web.operation;
import org.influxdb.annotation.Column;
import org.influxdb.annotation.Measurement;
import java.time.Instant;
/**
 * Holds a single time/value data point returned from InfluxDB
 * (mapped from the "mem" measurement).
 */
@Measurement(name = "mem")
public class TimeLimits {

    /** Timestamp of the data point. */
    @Column(name = "time")
    private Instant time;

    /** Measured value at {@link #time}. */
    @Column(name = "value")
    private Double value;

    /**
     * @return the timestamp of this data point
     */
    public Instant getTime() {
        return time;
    }

    /**
     * Accessor added: the {@code value} field was private with no getter,
     * so the measured value could never be read by callers.
     *
     * @return the measured value of this data point
     */
    public Double getValue() {
        return value;
    }
}
|
from .generic_plot import PlotLosses |
package com.corsair.sparrow.pirate.oauth.controller;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import com.corsair.sparrow.pirate.core.base.BaseController;
import com.corsair.sparrow.pirate.oauth.service.ISysRoleService;
import com.corsair.sparrow.pirate.oauth.domain.bean.SysRole;
import com.corsair.sparrow.pirate.core.base.PagingRequest;
import com.corsair.sparrow.pirate.core.global.RespEntity;
import com.github.pagehelper.PageInfo;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.validation.BindingResult;
import org.springframework.web.bind.annotation.*;
import javax.validation.Valid;
import java.util.List;
/**
 * <p>
 * Front-end REST controller for the role table (SysRole).
 * </p>
 *
 * @author jack
 * @since 2019-03-22
 */
@Slf4j
@Api(value = "/sys-role",description = "SysRole表 接口,负责人: jack")
@RestController
@RequestMapping(value = "/sys-role")
public class SysRoleController extends BaseController {

    @Autowired
    private ISysRoleService sysRoleService;

    /** Fetch a single SysRole by its id. */
    @GetMapping(value = "/{id}")
    @ApiOperation(value = "根据id获取SysRole",notes = "根据id获取SysRole")
    public RespEntity<SysRole> getById(@PathVariable Long id){
        return RespEntity.ok().setRespData(sysRoleService.getById(id));
    }

    /** List all SysRole rows. */
    @GetMapping
    @ApiOperation(value = "获取SysRole列表",notes = "获取SysRole列表")
    public RespEntity<List<SysRole>> getList(){
        return RespEntity.ok().setRespData(sysRoleService.list());
    }

    /** Paged query driven by the pageNum/pageSize request parameters. */
    @ApiImplicitParams({
        @ApiImplicitParam(name = "pageNum", value = "起始页", required = true, paramType = "query"),
        @ApiImplicitParam(name = "pageSize", value = "分页Size", required = true, paramType = "query")
    })
    @ApiOperation(value = "条件分页查询",notes = "条件分页查询")
    @GetMapping(value = "/getPageInfo")
    public RespEntity<PageInfo<SysRole>> getPageInfo(PagingRequest pagingRequest){
        return RespEntity.ok().setRespData(sysRoleService.getPageInfo(pagingRequest,null));
    }

    /** Create a SysRole; returns a params error when bean validation fails. */
    @ApiOperation(value = "保存SysRole",notes = "保存SysRole")
    @PostMapping
    public RespEntity save(@Valid @ModelAttribute SysRole sysRole, BindingResult bindingResult){
        if(bindingResult.hasErrors()){
            return RespEntity.paramsError(bindingResult.getFieldError().getDefaultMessage());
        }
        return sysRoleService.save(sysRole)?RespEntity.ok():RespEntity.error();
    }

    /** Update (insert-or-update) a SysRole from a JSON request body. */
    @ApiOperation(value = "修改SysRole",notes = "修改SysRole")
    @PutMapping
    public RespEntity update(@Valid @RequestBody SysRole sysRole, BindingResult bindingResult){
        if(bindingResult.hasErrors()){
            return RespEntity.paramsError(bindingResult.getFieldError().getDefaultMessage());
        }
        return sysRoleService.saveOrUpdate(sysRole)?RespEntity.ok():RespEntity.error();
    }

    /** Delete the SysRole row with the given id. */
    @ApiOperation(value = "根据ID删除记录行",notes = "根据ID删除记录行")
    @DeleteMapping(value = "/{id}")
    public RespEntity delete(@PathVariable Long id){
        return sysRoleService.removeById(id)?RespEntity.ok():RespEntity.error();
    }
}
|
#!/bin/sh

echo "building docker images for ${GOOS}/${GOARCH} ..."

REPO="github.com/drone/drone"

# Compile the server with cgo enabled; link statically so the binary runs
# inside a minimal container image.
go build -ldflags "-extldflags \"-static\"" -o release/linux/${GOARCH}/drone-server ${REPO}/cmd/drone-server

# Compile the runners with cgo disabled (pure-Go, fully static binaries).
export CGO_ENABLED=0
# NOTE: the original line had "// TODO: ..." appended; "//" is not a shell
# comment, so those tokens were passed to "go build" as arguments and broke
# the build. Removed.
go build -o release/linux/${GOARCH}/drone-agent ${REPO}/cmd/drone-agent
go build -o release/linux/${GOARCH}/drone-controller ${REPO}/cmd/drone-controller
|
<gh_stars>1-10
package mad.location.manager.lib.Filters;
/**
 * Geohash encoder. Interleaves longitude and latitude bits (longitude
 * first) and emits one base-32 character per five bits.
 *
 * Created by lezh1k on 2/13/18.
 */
public class GeoHash {
  // Geohash base-32 alphabet (digits plus lowercase letters, omitting a/i/l/o).
  static final char base32Table[] = {
      '0', '1', '2', '3', '4', '5', '6', '7',
      '8', '9', 'b', 'c', 'd', 'e', 'f', 'g',
      'h', 'j', 'k', 'm', 'n', 'p', 'q', 'r',
      's', 't', 'u', 'v', 'w', 'x', 'y', 'z'};

  public static final int GEOHASH_MAX_PRECISION = 12;

  /**
   * Encodes the given coordinate into {@code buff}, writing exactly
   * {@code precision} base-32 characters. The caller must supply a buffer
   * of at least {@code precision} characters.
   */
  public static void encode(double srcLat, double srcLon, char buff[], int precision) {
    double latLo = -90.0, latHi = 90.0;
    double lonLo = -180.0, lonHi = 180.0;

    boolean lonTurn = true; // bit 0 (and every even bit) comes from longitude
    int accum = 0;          // bits gathered toward the next base-32 character
    int nBits = 0;          // how many bits accum currently holds (0..4)
    int out = 0;            // next write position in buff
    int remaining = precision;

    while (remaining > 0) {
      accum <<= 1;
      if (lonTurn) {
        double mid = (lonLo + lonHi) / 2.0;
        if (srcLon >= mid) {
          lonLo = mid;
          accum |= 1;
        } else {
          lonHi = mid;
        }
      } else {
        double mid = (latLo + latHi) / 2.0;
        if (srcLat >= mid) {
          latLo = mid;
          accum |= 1;
        } else {
          latHi = mid;
        }
      }
      lonTurn = !lonTurn;

      // Every five bits form one output character.
      if (++nBits == 5) {
        buff[out++] = base32Table[accum];
        accum = 0;
        nBits = 0;
        --remaining;
      }
    }
  }
}
|
"""Propose a unique solution for a given sorting problem."""
# Use the Counting Sort algorithm to sort the array of integers in increasing order.
# The time complexity for Counting Sort is O(n + k) where n is the number of elements and k is the range of possible values.
# This algorithm requires extra auxiliary memory of size k.
def counting_sort(array):
    """Sort a list of integers in ascending order, in place, and return it.

    Uses counting sort: O(n + k) time and O(k) auxiliary memory, where n is
    the number of elements and k is (max - min + 1). Works for negative
    values because counts are offset by the minimum element.

    Args:
        array: mutable list of integers; sorted in place.

    Returns:
        The same list object, sorted ascending.
    """
    # Guard: min()/max() raise ValueError on an empty sequence, so the
    # original implementation crashed on []. An empty list is already sorted.
    if not array:
        return array
    min_val = min(array)
    max_val = max(array)
    counts = [0] * (max_val - min_val + 1)
    # Tally how many times each value occurs, offset so min_val maps to 0.
    for elem in array:
        counts[elem - min_val] += 1
    # Write each value back, in order, as many times as it occurred.
    idx = 0
    for offset, count in enumerate(counts):
        for _ in range(count):
            array[idx] = min_val + offset
            idx += 1
    return array
if __name__ == '__main__':
    # Quick demonstration run; expected output: [1, 3, 4, 5, 7, 8, 9]
    sample = [4, 1, 7, 5, 8, 3, 9]
    print(counting_sort(sample))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.