text stringlengths 1 1.05M |
|---|
/*
* Copyright 2016-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.github.blindpirate.gogradle.task.go;
import com.github.blindpirate.gogradle.util.StringUtils;
import org.gradle.api.Action;
import org.gradle.api.internal.tasks.testing.junit.result.TestClassResult;
import org.gradle.api.internal.tasks.testing.junit.result.TestResultsProvider;
import org.gradle.api.tasks.testing.TestOutputEvent;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static com.github.blindpirate.gogradle.task.go.test.PlainGoTestResultExtractor.GoTestMethodResult;
/**
 * Adapts a fixed, in-memory list of Go test class results to Gradle's
 * {@link TestResultsProvider} so they can be fed to Gradle's test report
 * generators.
 *
 * <p>Every method result is cast to {@link GoTestMethodResult}; its captured
 * message is treated as the test's stdout.
 */
public class GoTestResultsProvider implements TestResultsProvider {
    // Both collections are populated once in the constructor and never
    // mutated afterwards, so the fields can be final.
    private final List<TestClassResult> testClassResults = new ArrayList<>();
    private final Map<Long, TestClassResult> idToClassResultMap = new HashMap<>();

    public GoTestResultsProvider(List<TestClassResult> results) {
        testClassResults.addAll(results);
        testClassResults.forEach(result -> idToClassResultMap.put(result.getId(), result));
    }

    /**
     * Writes the newline-joined, non-empty messages of all method results of
     * the class identified by {@code id}.
     *
     * <p>NOTE(review): {@code destination} is ignored here even though
     * {@link #hasOutput} reports no StdErr output — callers are assumed to
     * request output only where {@code hasOutput} returned true.
     */
    @Override
    public void writeAllOutput(long id, TestOutputEvent.Destination destination, Writer writer) {
        TestClassResult result = idToClassResultMap.get(id);
        String stdout = result.getResults().stream()
                .map(methodResult -> (GoTestMethodResult) methodResult)
                .map(GoTestMethodResult::getMessage)
                .filter(StringUtils::isNotEmpty)
                .collect(Collectors.joining("\n"));
        try {
            writer.write(stdout);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    @Override
    public void writeNonTestOutput(long id, TestOutputEvent.Destination destination, Writer writer) {
        throw new UnsupportedOperationException();
    }

    @Override
    public void writeTestOutput(long classId, long testId, TestOutputEvent.Destination destination, Writer writer) {
        throw new UnsupportedOperationException();
    }

    @Override
    public void visitClasses(Action<? super TestClassResult> visitor) {
        testClassResults.forEach(visitor::execute);
    }

    /**
     * Returns whether any method result of the class has a non-empty message.
     * StdErr always reports {@code false}: this provider only carries
     * stdout-style messages (see {@link #writeAllOutput}).
     */
    @Override
    public boolean hasOutput(long id, TestOutputEvent.Destination destination) {
        if (destination == TestOutputEvent.Destination.StdErr) {
            return false;
        }
        TestClassResult result = idToClassResultMap.get(id);
        return result.getResults().stream()
                .map(methodResult -> (GoTestMethodResult) methodResult)
                .map(GoTestMethodResult::getMessage)
                .anyMatch(StringUtils::isNotEmpty);
    }

    @Override
    public boolean isHasResults() {
        return !testClassResults.isEmpty();
    }

    /** Nothing to release; all results live in memory. */
    @Override
    public void close() throws IOException {
    }
}
|
// One renderable cell/row item in the calendar grid.
interface CalendarEvent {
// Discriminator controlling how this item is rendered.
type: string;
// Row index of the item within the calendar grid.
row: number;
// Display text: a numeric day-of-month or a string label.
text: number | string;
// Whether the cell falls inside the currently visible/selected range.
inRange?: boolean;
// Selection state; may also carry the selected Dayjs date itself
// (NOTE(review): boolean | Dayjs union — confirm intent against callers).
selected?: boolean | Dayjs;
// Marks the starting cell of a range selection.
start?: boolean;
} |
import React, { useEffect, useRef } from 'react';
import { View, FlatList, Text } from 'react-native';
import styles from './style';
import { MessageType } from '../../types';
import MessageBubble from '../MessageBubble';
export type ConversationInterfaceProps = {
conversation: MessageType[] | null,
}
/**
 * Scrollable message feed. Shows a placeholder when the conversation is
 * empty and auto-scrolls to the newest message when new messages arrive.
 */
const ConversationInterface = ({ conversation }: ConversationInterfaceProps) => {
  // Imperative handle to the FlatList so we can scroll to the newest message.
  const flatListRef = useRef(null);
  // Index of the last message; 0 when the conversation is null/empty.
  const lastIndex = conversation?.length ? conversation.length - 1 : 0;

  useEffect(() => {
    // Fix: only scroll when there is at least one message — calling
    // scrollToIndex on an empty list throws an out-of-range error.
    if (flatListRef.current && conversation?.length) {
      flatListRef.current.scrollToIndex({ animated: true, index: conversation.length - 1 });
    }
  }, [conversation]);

  return (
    <View style={styles.feedContainer}>
      {
        conversation && conversation.length === 0 ? <Text style={styles.noChatText}>No chat yet</Text> :
          <FlatList
            ref={flatListRef}
            data={conversation}
            renderItem={({ item }) => <MessageBubble message={item} />}
            keyExtractor={(item) => item.id}
            showsVerticalScrollIndicator={false}
            initialScrollIndex={lastIndex}
            onScrollToIndexFailed={(info) => {
              // Items may not be measured yet; retry once after a short delay.
              const wait = new Promise(resolve => setTimeout(resolve, 500));
              wait.then(() => {
                flatListRef.current?.scrollToIndex({ index: info.index, animated: true });
              });
            }
            }
          />
      }
    </View>
  )
}
export default ConversationInterface; |
module.exports = {
async up(db) {
await db.collection('ptah-users').updateMany(
{},
{'$set': {'isAdmin': false}}
);
},
async down(db) {
}
};
|
#!/bin/bash
# App Service on Linux container entrypoint: writes the MOTD banner,
# exposes environment variables to SSH sessions, starts sshd, then builds
# and runs the app startup command via Oryx.
cat >/etc/motd <<EOL
 _____
/ _ \ __________ _________ ____
/ /_\ \\___ / | \_ __ \_/ __ \
/ | \/ /| | /| | \/\ ___/
\____|__ /_____ \____/ |__| \___ >
\/ \/ \/
A P P S E R V I C E O N L I N U X
Documentation: http://aka.ms/webapp-linux
PHP quickstart: https://aka.ms/php-qs
PHP version : `php -v | head -n 1 | cut -d ' ' -f 2`
Note: Any data outside '/home' is not persisted
EOL
cat /etc/motd
# Get environment variables to show up in SSH session
# NOTE(review): the command substitution's output is redirected into
# /etc/profile, so $(...) expands to nothing and `eval` is effectively a
# no-op — the real effect is appending `export NAME="VALUE"` lines to
# /etc/profile for later SSH logins.
eval $(printenv | sed -n "s/^\([^=]\+\)=\(.*\)$/export \1=\2/p" | sed 's/"/\\\"/g' | sed '/=/s//="/' | sed 's/$/"/' >> /etc/profile)
# starting sshd process (substitute the configured port first)
sed -i "s/SSH_PORT/$SSH_PORT/g" /etc/ssh/sshd_config
/usr/sbin/sshd
# Paths used by the Oryx-generated startup script.
appPath="/home/site/wwwroot"
runFromPath="/tmp/webapp"
startupCommandPath="/opt/startup/startup.sh"
# Any arguments passed to this script become the user startup command;
# Apache is always started in the foreground afterwards.
userStartupCommand="$@"
if [ -z "$userStartupCommand" ]
then
userStartupCommand="apache2-foreground";
else
userStartupCommand="$userStartupCommand; apache2-foreground;"
fi
oryxArgs="-appPath $appPath -output $startupCommandPath \
-bindPort $PORT -startupCommand '$userStartupCommand'"
echo "Running oryx $oryxArgs"
# Oryx writes the generated startup script to $startupCommandPath; run it.
eval oryx $oryxArgs
$startupCommandPath
|
-- Capture diagnostic details about why results processing failed.
ALTER TABLE results_processing_failure ADD COLUMN failure_message TEXT;
ALTER TABLE results_processing_failure ADD COLUMN failure_type TEXT;
ALTER TABLE results_processing_failure ADD COLUMN body_type TEXT;
-- Support filtering failures by payload type.
CREATE INDEX results_processing_failure_body_type_idx ON results_processing_failure(body_type);
CREATE INDEX results_processing_failure_created_timestamp_idx ON results_processing_failure(created_timestamp); |
<filename>src/api/upload.js
import request from '@/utils/request'
const url = process.env.FILE_API
/**
 * Upload an image payload to the file API.
 * @param data request body (e.g. FormData), forwarded unchanged.
 * @returns the promise produced by the shared request helper.
 */
export function uploadImage(data) {
  const options = {
    url,
    method: 'post',
    data,
  }
  return request(options)
}
|
#!/usr/bin/env bash
cd flaskapp
export FLASK_APP='flaskapp:create_app'
export SQLALCHEMY_DATABASE_URI='sqlite:////data/mint.db'
flask db upgrade
waitress-serve --port 8000 --call 'flaskapp:create_app'
|
# frozen_string_literal: true
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
module Google
module Cloud
module Bigquery
module Storage
module V1beta1
# Table reference that includes just the 3 strings needed to identify a table.
# @!attribute [rw] project_id
# @return [::String]
# The assigned project ID of the project.
# @!attribute [rw] dataset_id
# @return [::String]
# The ID of the dataset in the above project.
# @!attribute [rw] table_id
# @return [::String]
# The ID of the table in the above dataset.
class TableReference
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
# All fields in this message optional.
# @!attribute [rw] snapshot_time
# @return [::Google::Protobuf::Timestamp]
# The snapshot time of the table. If not set, interpreted as now.
class TableModifiers
include ::Google::Protobuf::MessageExts
extend ::Google::Protobuf::MessageExts::ClassMethods
end
end
end
end
end
end
|
import numpy as np
class ElectronMicroscopeControl:
    """Minimal control interface for an electron microscope."""

    # Lens-normalization modes accepted by normalize(). Kept as a list so
    # the error message below prints exactly like the original inline list.
    KNOWN_MODES = ["SPOTSIZE", "INTENSITY", "CONDENSER", "MINI_CONDENSER", "OBJECTIVE", "PROJECTOR"]

    def __init__(self):
        # Current beam tilt as a 1-D float array (single element until set).
        self._beam_tilt = np.zeros(1)

    def set_beam_tilt(self, tilt):
        """Set the beam tilt.

        Accepts a scalar or any 1-D sequence. Fix/generalization: the
        original wrote through `self._beam_tilt[...] = tilt`, which raised
        for any tilt whose length differed from the stored array's size;
        we now store a fresh array of whatever length is supplied.
        """
        self._beam_tilt = np.atleast_1d(np.asarray(tilt, dtype=float)).copy()

    def normalize(self, mode="ALL"):
        """Normalize lenses for `mode`; print an error for unknown modes.

        Note the default mode "ALL" is not in KNOWN_MODES and therefore
        reports an invalid mode, matching the original behavior.
        """
        if mode in self.KNOWN_MODES:
            # Perform normalization based on the specified mode
            # (mode-specific implementation goes here).
            print(f"Normalizing in {mode} mode")
        else:
            print(f"Invalid mode: {mode}. Please choose from {self.KNOWN_MODES}")
# Example usage
em_control = ElectronMicroscopeControl()
em_control.set_beam_tilt(30) # Set the tilt of the electron beam to 30 degrees
em_control.normalize("SPOTSIZE") # Normalize in SPOTSIZE mode
em_control.normalize("FOCUS") # Invalid mode: FOCUS. Please choose from ['SPOTSIZE', 'INTENSITY', 'CONDENSER', 'MINI_CONDENSER', 'OBJECTIVE', 'PROJECTOR'] |
#!/bin/bash
# create the directory to store the raster files in ($rundir)
# $LCRFS_LOCALDISC is the location of the local disc
# we make it unique using the $JOB_ID and $USER variables
rundir="${LCRFS_LOCALDISC}/${JOB_ID}_${USER}"
mkdir $rundir
# recall some files from son
lcrfs_recall /backup/clustermuster/buntingp/las/$1 $rundir
# run the script in the same directory as job submission
outname=$rundir/${2}
echo $outname
spdtranslate -i LAS -o UPD -x FIRST_RETURN $rundir/$1 $outname
# save output file back to son
#lcrfs_mkdir /backup/clustermuster/buntingp/spd
lcrfs_save $outname /backup/clustermuster/buntingp/spd
# clean up $rundir when finished (very important!)
rm -rf $rundir
|
# Regenerate protocol message bindings, then run their Python unit tests.
do_test()
{
# Run from the directory containing this script.
cd "$(dirname "$0")"
./generate.sh
# Make the generated package importable, then execute the test module.
PYTHONPATH=$PYTHONPATH:$(pwd) python orwell/messages/test/test_messages.py
}
do_test
|
sudo openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout localhost.key -out localhost.crt -config localhost.conf
|
class GeometricShape:
    """A shape described by one reference side length.

    `side_length` is treated as the hypotenuse of a right triangle by
    :meth:`lado`.
    """

    def __init__(self, side_length):
        self.side_length = side_length

    def lado(self, dx):
        """Return the remaining side of a right triangle.

        Computes sqrt(side_length**2 - dx**2), where `dx` is the base.

        Raises:
            ValueError: if |dx| > |side_length|. Fix: the original
            silently returned a complex number in that case, which is
            meaningless for a geometric length.
        """
        if dx ** 2 > self.side_length ** 2:
            raise ValueError(
                f"dx ({dx}) cannot exceed side_length ({self.side_length})"
            )
        return (self.side_length ** 2 - dx ** 2) ** 0.5
import { validate } from '../src/pages/zamestnanie'
import { testValidation } from './utils/testValidation'
describe('zamestnanie', () => {
describe('#validate', () => {
testValidation(validate, [
{
input: { employed: undefined },
expected: ['employed'],
},
{ input: { employed: false }, expected: [] },
{
input: { employed: true },
expected: ['r038', 'r039_socialne', 'r039_zdravotne', 'r120', 'r108'],
},
{
input: {
employed: true,
r029_poberal_dochodok: true,
r038: 'a',
r039_socialne: '-1',
r039_zdravotne: '-1',
r108: 'a',
r120: '-1',
},
expected: ['r038', 'r039_socialne', 'r039_zdravotne', 'r120', 'r108'],
},
{
input: {
employed: true,
r029_poberal_dochodok: true,
r038: '10',
r039_socialne: '20',
r039_zdravotne: '20',
r108: '30',
r120: '40',
},
expected: [],
},
])
})
})
|
#!/bin/bash
# Albert Lombarte
# alombarte@gmail.com
# This script updates the SIFO code to the latest and the passed instance as well.
# It is meant to update your code in production.
SCRIPTPATH=$(cd ${0%/*} && echo $PWD/${0##*/})
CORE=`dirname "$SCRIPTPATH"`
CORE=`cd "${CORE}/../.." && pwd -P`
if [ $# != 1 ]
then
echo "This script updates the server with the latest code in the repo"
echo "--USAGE: $0 <instancename>"
exit 0
fi
INSTANCE=$1
CHANGELOG="$CORE/changelog.txt"
USER=`whoami`
INSTANCEPATH="$CORE/instances/$INSTANCE"
TODAY=`date "+%Y-%b-%d %k:%M"`
#if [ "$USER" == "root" ]
#then
# clear
# echo -e "Do not use root for updating code"
# exit 0
#fi
echo "Updating servers..."
echo "************************************************" >> $CHANGELOG
echo "$INSTANCE update: $TODAY ($USER)" >> $CHANGELOG
echo "************************************************" >> $CHANGELOG
echo "From revisions:" >> $CHANGELOG
svn info $CORE | grep Revision >> $CHANGELOG
svn info $INSTANCEPATH | grep Revision >> $CHANGELOG
echo "To:" >> $CHANGELOG
echo "Updating $CORE"
svn update $CORE >> $CHANGELOG
echo "Updating $INSTANCEPATH"
svn update $INSTANCEPATH >> $CHANGELOG
clear
tail -n 100 $CHANGELOG
|
package com.foxconn.iot.repository;
import java.util.List;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.Query;
import org.springframework.data.repository.query.Param;
import com.foxconn.iot.entity.DeviceTypeEntity;
import com.foxconn.iot.entity.DeviceVersionEntity;
import com.foxconn.iot.entity.DeviceVersionVo;
public interface DeviceVersionRepository extends JpaRepository<DeviceVersionEntity, Long> {
DeviceVersionEntity findById(long id);
@Query(value = "select new com.foxconn.iot.entity.DeviceVersionVo(a.id, a.version, a.hardVersion, a.imageUrl, a.details, a.createOn, a.deviceType.id) from DeviceVersionEntity a where a.deviceType.id=:type order by a.createOn desc")
List<DeviceVersionVo> queryByDeviceType(@Param("type") long type);
@Query(value = "select a.deviceType from DeviceVersionEntity a where a.id=:id")
DeviceTypeEntity findDeviceTypeById(@Param("id") long id);
}
|
<reponame>relativeabsolute/AOTYator
#!/usr/bin/env node
console.log('hello!');
|
<gh_stars>1-10
/*
* Copyright 2015-2021 <NAME> <<EMAIL>>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.nexttypes.datatypes;
import java.util.Locale;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper;
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty;
import com.nexttypes.enums.IndexMode;
import com.nexttypes.system.KeyWords;
@JsonPropertyOrder({ KeyWords.MODE, KeyWords.FIELDS })
public class TypeIndex {
protected IndexMode mode;
protected String[] fields;
protected String oldName;
public TypeIndex(IndexMode mode, String[] fields, String oldName) {
this(mode, fields);
this.oldName = oldName;
}
@JsonCreator
public TypeIndex(@JsonProperty(KeyWords.MODE) String mode, @JsonProperty(KeyWords.FIELDS) String fields[]) {
this(IndexMode.valueOf(mode.toUpperCase()), fields);
}
public TypeIndex(IndexMode mode, String fields[]) {
this.mode = mode;
this.fields = fields;
}
@JsonProperty(KeyWords.MODE)
public IndexMode getMode() {
return mode;
}
@JacksonXmlElementWrapper(localName = KeyWords.FIELDS)
@JacksonXmlProperty(localName = KeyWords.FIELD)
@JsonProperty(KeyWords.FIELDS)
public String[] getFields() {
return fields;
}
public String getOldName() {
return oldName;
}
} |
package com.bigsomething.Decryption.MultipleKeys.OneKey;
import com.google.common.collect.ImmutableList;
/**
 * Helpers for producing Caesar-shifted copies of the upper-case alphabet.
 */
public class ShiftedAlphabetHelper
{
    /** The 26 upper-case letters, in order. */
    private static final ImmutableList<String> alphabet = new ImmutableList.Builder<String>()
            .add("A","B","C","D","E","F","G","H","I","J","K","L","M","N",
                 "O","P","Q","R","S","T","U","V","W","X","Y","Z")
            .build();

    /** Utility class; not instantiable. */
    private ShiftedAlphabetHelper()
    {
    }

    /** Returns the unshifted alphabet. */
    public static ImmutableList<String> getAlphabet()
    {
        return alphabet;
    }

    /**
     * Returns the alphabet rotated left by {@code key} positions.
     *
     * <p>Generalization: the key is normalized with {@link Math#floorMod},
     * so negative keys and keys &gt;= 26 now work; the original threw
     * {@code IndexOutOfBoundsException} for keys outside 0..26. Behavior
     * for keys already in that range is unchanged.
     */
    public static ImmutableList<String> shiftAlphabet(int key)
    {
        int shift = Math.floorMod(key, alphabet.size());
        return new ImmutableList.Builder<String>()
                .addAll(alphabet.subList(shift, alphabet.size()))
                .addAll(alphabet.subList(0, shift))
                .build();
    }
}
|
"""
Dictionary / HashMap / Map
f : integer --> anything
index: key --> value: anything
dictionary
key: anything --> value: anything
(unique) not unique
{}
"""
words = {
'i': 100,
'am': 20,
'batman': 87
}
print(type(words))
print(words['batman'])
words['batman'] = 100
print(words['batman'])
print(words)
words['hello'] = 1
print(words)
# removing value from dictionary
del words['hello']
print(words)
keys = words.keys()
print(keys)
print(type(keys))
values = words.values()
print(values)
print(type(values))
items = words.items()
print(items)
print(type(items))
# iterating over dict
print('\n\n\n')
# for key in words.keys(): # same as --> in words
# print(key)
# for value in words.values():
# print(value)
# for item in words.items():
# key = item[0]
# value = item[1]
# print(key + ' - ' + str(value))
for key, value in words.items():
print(key + ' - ' + str(value))
|
#!/bin/bash
echo "Adding instance to ECS cluster {{ECS_CLUSTER_NAME}}"
echo ECS_CLUSTER={{ECS_CLUSTER_NAME}} >> /etc/ecs/ecs.config
{{#each DEPENDENCY_SCRIPTS}}
{{{this}}}
{{/each}}
echo "Restarting Docker daemon and ECS service for services such as EFS that require a restart"
service docker restart
start ecs |
import pandas as pd
from datetime import datetime
def get_stock_data():
    """Fetch batch quote data for a fixed symbol list from IEX Cloud.

    Returns a DataFrame whose 'quote' column holds per-symbol quote data.
    NOTE(review): requires a real API token in place of <token>.
    """
    # Get the stock data from iex.cloud
    df = pd.read_json('https://cloud.iexapis.com/stable/stock/market/batch?symbols=AAPL,GOOGL,MSFT,TSLA&types=quote&range=1m&last=10&token=<token>')
    # Convert the timestamp from Unix to DateTime
    # NOTE(review): this chained assignment (df["quote"]['latestTime'] = ...)
    # may not write back into the DataFrame as intended, and it assumes
    # latestTime is epoch milliseconds — TODO confirm against the API schema.
    df["quote"]['latestTime'] = df["quote"].apply(lambda row: datetime.fromtimestamp(row['latestTime'] / 1000).strftime('%Y-%m-%d %H:%M:%S'))
    # Return the clean data
    return df
# Print the latest quotes every 10 seconds.
# Fix: `sleep` was called without ever being imported (NameError on the
# first iteration's pause); use the time module explicitly.
import time

while True:
    df = get_stock_data()
    print(df["quote"])
    time.sleep(10)
from typing import List, Dict
def parse_dependencies(dependencies: List[str]) -> Dict[str, str]:
    """Parse requirement strings like 'pkg>=1.2,<2' into {name: min_version}.

    The version kept is the first specifier after '>=' (anything after a
    comma is dropped), and any '<...' upper bound fused onto the package
    name is stripped. Whitespace around names/versions is tolerated.

    Raises:
        ValueError: if an entry contains no '>=' specifier (the original
        crashed with an unpacking ValueError in that case).
    """
    parsed_dict: Dict[str, str] = {}
    for dependency in dependencies:
        if '>=' not in dependency:
            raise ValueError(f"dependency {dependency!r} lacks a '>=' specifier")
        # maxsplit=1 so a second '>=' in the string cannot break unpacking.
        package, version = dependency.split('>=', 1)
        package_name = package.split('<')[0].strip()
        version = version.split(',')[0].strip()
        parsed_dict[package_name] = version
    return parsed_dict
require 'rails_helper'
RSpec.describe Thought, type: :model do
describe 'associations' do
it { should belong_to(:user) }
end
describe 'validations' do
subject { Thought.create(id: 1, user_id: 1, thought: 'We are testing') }
it { should validate_presence_of(:thought) }
end
end
|
public class MaxSubArraySum {

    /**
     * Returns the maximum sum of any contiguous non-empty subarray of
     * {@code arr}, using Kadane's algorithm.
     *
     * <p>Fix: the original initialized both accumulators to 0, so an
     * all-negative array wrongly returned 0; the accumulators are now
     * seeded with the first element so the true maximum (a negative
     * value) is reported. An empty or null array returns 0, as before.
     */
    static int maxSubArraySum(int[] arr)
    {
        if (arr == null || arr.length == 0) {
            return 0;
        }
        int maxSoFar = arr[0];
        int maxEndingHere = arr[0];
        for (int i = 1; i < arr.length; i++) {
            // Either extend the running subarray or start fresh at arr[i].
            maxEndingHere = Math.max(arr[i], maxEndingHere + arr[i]);
            maxSoFar = Math.max(maxSoFar, maxEndingHere);
        }
        return maxSoFar;
    }

    /** Demo entry point. */
    public static void main (String[] args)
    {
        int[] arr = {-2, -3, 4, -1, -2, 1, 5, -3};
        int maxSum = maxSubArraySum(arr);
        System.out.println("Maximum sum is : " + maxSum);
    }
}
def find_mean(my_list):
    """Return the arithmetic mean of a non-empty list of numbers.

    Raises:
        ValueError: if `my_list` is empty. (Fix: the original crashed
        with a bare ZeroDivisionError in that case.)
    """
    if not my_list:
        raise ValueError("cannot compute the mean of an empty list")
    # sum() replaces the manual accumulation loop.
    return sum(my_list) / len(my_list)
# Test
print(find_mean([2, 4, 6, 8]))
# Output
5.0 |
// Problem 4706 (Baekjoon) — original title partially lost in extraction
// 2019.09.05
// Category: math
#include<iostream>
#include<cmath>
using namespace std;
// Reads pairs (ta, tb) until the "0 0" sentinel; for each pair prints
// sqrt(ta^2 - tb^2) / ta to three decimal places.
int main()
{
while (1)
{
double ta, tb;
cin >> ta >> tb;
// "0 0" terminates input.
if (ta == 0 && tb == 0)
{
break;
}
// Apply the formula given in the problem statement.
printf("%.3lf\n", sqrt(ta*ta - tb * tb) / ta);
}
return 0;
}
|
<reponame>dadviegas/frontend-startup
import './server'
import './register'
|
def hasOccurrence(inputString, char):
    """Return True if `char` equals any single character of `inputString`.

    The check is character-by-character (via any()), preserving the
    original loop's semantics: a multi-character `char` argument is
    always False (this is NOT substring search).
    """
    return any(c == char for c in inputString)
#include <iostream>
using namespace std;
// Prints the even elements of arr, then the odd elements, preserving
// their original relative order.
// Fix: the original buffered elements in variable-length arrays
// (`int even[n]`), a non-standard C++ extension; two passes over the
// input produce byte-identical output with no extra storage.
void PrintEvenOdd(int arr[], int n)
{
    cout << "Even elements: ";
    for (int i = 0; i < n; i++) {
        // A number divisible by 2 is even.
        if (arr[i] % 2 == 0)
            cout << arr[i] << " ";
    }
    cout << "\nOdd elements: ";
    for (int i = 0; i < n; i++) {
        if (arr[i] % 2 != 0)
            cout << arr[i] << " ";
    }
}
// Driver code
int main()
{
int arr[] = { 2, 3, 9, 4, 8, 5 };
int n = sizeof(arr) / sizeof(arr[0]);
PrintEvenOdd(arr, n);
return 0;
} |
list_of_lines = one_large_string.splitlines()
|
#! /bin/bash
#SBATCH -o fftw_plan_020.txt
#SBATCH -J fftw_plan_020
#SBATCH --get-user-env
#SBATCH --clusters=mpp2
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=1
#SBATCH --exclusive
#SBATCH --export=NONE
#SBATCH --time=03:00:00
#declare -x NUMA_BLOCK_ALLOC_VERBOSITY=1
declare -x KMP_AFFINITY="granularity=thread,compact,1,0"
declare -x OMP_NUM_THREADS=1
. /etc/profile.d/modules.sh
module unload gcc
module unload fftw
module unload python
module load python/2.7_anaconda_nompi
module unload intel
module load intel/16.0
module unload mpi.intel
module load mpi.intel/5.1
module load gcc/5
cd /home/hpc/pr63so/di69fol/workspace/SWEET_2015_12_26/benchmarks_performance/rexi_tests_lrz_freq_waves/2015_12_30_scalability_rexi_spec
cd ../../../
. local_software/env_vars.sh
mpiexec.hydra -genv OMP_NUM_THREADS 20 -envall -ppn 1 ./fftw_gen_wisdoms_all.sh 20 FFTW_WISDOM_nofreq_T20
|
#!/bin/bash
#
# Copyright (c) 2019-2020 P3TERX <https://p3terx.com>
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
# https://github.com/P3TERX/Actions-OpenWrt
# File name: diy-part1.sh
# Description: OpenWrt DIY script part 1 (Before Update feeds)
#
# Uncomment a feed source
sed -i 's/^#\(.*helloworld\)/\1/' feeds.conf.default
# Add a feed source
#sed -i '$a src-git lienol https://github.com/Lienol/openwrt-package' feeds.conf.default
sed -i '$a src-git kenzo https://github.com/kenzok8/openwrt-packages' feeds.conf.default
sed -i '$a src-git small https://github.com/kenzok8/small' feeds.conf.default
git clone https://github.com/lisaac/luci-lib-docker.git package/DIYss/luci-lib-docker
git clone https://github.com/lisaac/luci-app-dockerman.git package/DIYss/luci-app-dockerman
|
<reponame>joelmoss/concerned_validations
# frozen_string_literal: true
require 'active_model'
# module ActiveModel::Validations::HelperMethods
# def validate_attribute(attribute)
# puts "validate_attribute :#{attribute}"
# end
# end
module ConcernedValidations
class Error < StandardError; end
autoload :Base, 'concerned_validations/base'
autoload :Model, 'concerned_validations/model'
end
|
import { Randomizer, newRandomizer, resolveSeed } from 'dumbfound';
export type Runner = (random: Randomizer) => any;
export type AsyncRunner = (random: Randomizer, cb: jest.DoneCallback) => any;
export interface RandomizedTest {
(name: string, runner: Runner | AsyncRunner, timeout?: number): void;
only: RandomizedTest;
skip: RandomizedTest;
}
function isAsyncRunner(obj: any): obj is AsyncRunner {
return typeof obj === 'function' && obj.length > 1;
}
function isRunner(obj: any): obj is Runner {
return typeof obj === 'function';
}
/**
* Create a function using the given Jest-definer, such as `test` and
* `test.only`.
*/
function generate(testDefiner: jest.It): RandomizedTest {
const result = function(name: string, runner: Runner | AsyncRunner, timeout?: number): void {
const seed = resolveSeed();
// Resolve a callback to support asynchronous calls
let callback: jest.ProvidesCallback;
if(runner.length > 1) {
callback = (done: jest.DoneCallback) => {
const randomizer = newRandomizer(seed);
return runner(randomizer, done);
};
} else {
callback = () => {
const randomizer = newRandomizer(seed);
return (runner as Runner)(randomizer);
};
}
// Define the test
testDefiner(name + ' [seed=' + seed + ']', callback, timeout);
};
Object.defineProperty(result, 'only', {
get() {
return generate(test.only);
}
});
Object.defineProperty(result, 'skip', {
get() {
return generate(test.skip);
}
});
return result as RandomizedTest;
}
export const randomizedTest = generate(test);
|
import React from "react"
import Link from "gatsby-link"
import styled from "styled-components"
import Fade from "react-reveal/Fade"
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"
const JumboContainer = styled.div`
display: flex;
flex-direction: row;
justify-content: space-between;
${"" /* align-content: center; */}
@media only screen and (max-width: 600px) {
flex-direction: column;
}
`
// Full-height hero section advertising the Wordpress service offering.
const Wordpress = () => (
  <section
    className="hero has-background-dark is-fullheight is-bold"
    id="wordpress"
  >
    <div className="container has-text-left">
      {/* Fix: JSX requires className, not class — with `class` the
          "columns" styles were never applied and React warns. */}
      <JumboContainer className="columns">
        <div className="column hero-text">
          <h1 className="title has-text-white has-text-weight-bold">
            <span className="half-highlight-link">Wordpress Solutions</span>
          </h1>
          <Fade bottom duration={800}>
            <h1 className="title has-text-white">Planning.</h1>
            <h2 className="has-text-weight-light subtitle has-text-white">
              Wordpress powers over 30% of all websites. Wordpress powers over
              30% of all websites. Wordpress powers over 30% of all websites.
            </h2>
            <Link to="/wordpress">
              <div className="is-white is-outlined jumbotron-button button is-medium">
                More About Wordpress
                <FontAwesomeIcon
                  className="arrow-right"
                  icon={["far", "angle-right"]}
                />
              </div>
            </Link>
          </Fade>
        </div>
        <div className="column hero-text has-text-white has-text-right">
          <Fade bottom duration={1600}>
            <FontAwesomeIcon
              icon={["fab", "wordpress"]}
              style={{ fontSize: "30rem" }}
            />
          </Fade>
        </div>
      </JumboContainer>
    </div>
  </section>
)
export default Wordpress
|
#!/bin/sh
set -e
set -u
set -o pipefail
function on_error {
echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR
if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
# If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
# frameworks to, so exit 0 (signalling the script phase was successful).
exit 0
fi
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
elif [ -L "${binary}" ]; then
echo "Destination binary is symlinked..."
dirname="$(dirname "${binary}")"
binary="${dirname}/$(readlink "${binary}")"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u)
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies and strips a vendored dSYM
install_dsym() {
local source="$1"
if [ -r "$source" ]; then
# Copy the dSYM into a the targets temp dir.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"
local basename
basename="$(basename -s .framework.dSYM "$source")"
binary="${DERIVED_FILES_DIR}/${basename}.framework.dSYM/Contents/Resources/DWARF/${basename}"
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then
strip_invalid_archs "$binary"
fi
if [[ $STRIP_BINARY_RETVAL == 1 ]]; then
# Move the stripped file into its final destination.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.framework.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
else
# The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.framework.dSYM"
fi
fi
}
# Copies the bcsymbolmap files of a vendored framework
install_bcsymbolmap() {
local bcsymbolmap_path="$1"
local destination="${BUILT_PRODUCTS_DIR}"
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current target binary
binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
# Intersect them with the architectures we are building for
intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
# If there are no archs supported by this binary then warn the user
if [[ -z "$intersected_archs" ]]; then
echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
STRIP_BINARY_RETVAL=0
return
fi
stripped=""
for arch in $binary_archs; do
if ! [[ "${ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary"
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
STRIP_BINARY_RETVAL=1
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/LittleDefend/LittleDefend.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/LittleDefend/LittleDefend.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
|
package com.ice.restring;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.robolectric.RobolectricTestRunner;
import org.robolectric.RuntimeEnvironment;
import java.util.LinkedHashMap;
import java.util.Map;
import static org.junit.Assert.assertEquals;
@RunWith(RobolectricTestRunner.class)
public class SharedPrefStringRepositoryTest {
@Before
public void setUp() {
}
@Test
public void shouldSetAndGetStringPairs() {
final String LANGUAGE = "en";
Map<String, String> strings = generateStrings(10);
StringRepository stringRepository = new SharedPrefStringRepository(RuntimeEnvironment.application);
stringRepository.setStrings(LANGUAGE, strings);
StringRepository newRepository = new SharedPrefStringRepository(RuntimeEnvironment.application);
assertEquals(strings, newRepository.getStrings(LANGUAGE));
}
@Test
public void shouldGetSingleString() {
final String LANGUAGE = "en";
final int STR_COUNT = 10;
Map<String, String> strings = generateStrings(STR_COUNT);
StringRepository stringRepository = new SharedPrefStringRepository(RuntimeEnvironment.application);
stringRepository.setStrings(LANGUAGE, strings);
StringRepository newRepository = new SharedPrefStringRepository(RuntimeEnvironment.application);
for (int i = 0; i < STR_COUNT; i++) {
assertEquals(newRepository.getString(LANGUAGE, "key" + i), "value" + i);
}
}
@Test
public void shouldSetSingleString() {
final String LANGUAGE = "en";
final int STR_COUNT = 10;
Map<String, String> strings = generateStrings(STR_COUNT);
StringRepository stringRepository = new SharedPrefStringRepository(RuntimeEnvironment.application);
stringRepository.setStrings(LANGUAGE, strings);
stringRepository.setString(LANGUAGE, "key5", "aNewValue");
StringRepository newRepository = new SharedPrefStringRepository(RuntimeEnvironment.application);
assertEquals(newRepository.getString(LANGUAGE, "key5"), "aNewValue");
}
private Map<String, String> generateStrings(int count) {
Map<String, String> strings = new LinkedHashMap<>();
for (int i = 0; i < count; i++) {
strings.put("key" + i, "value" + i);
}
return strings;
}
} |
<filename>src/stack/Boj23253.java<gh_stars>1-10
package stack;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.*;
/**
*
* @author exponential-e
* 백준 23253번: 자료구조는 정말 최고야
*
* @see https://www.acmicpc.net/problem/23253
*
*/
/**
 * BOJ 23253: decide whether books stacked in M rooms can be handed out in
 * order 1..N when each take must come from the top of some room's stack.
 *
 * @author exponential-e
 * @see <a href="https://www.acmicpc.net/problem/23253">problem statement</a>
 */
public class Boj23253 {
    /** room.get(i) is the stack of 0-based book numbers currently in room i. */
    private static List<Deque<Integer>> room;
    /** index[book] = index of the room that holds the book. */
    private static int[] index;

    public static void main(String[] args) throws Exception {
        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
        StringTokenizer st = new StringTokenizer(br.readLine());

        int N = Integer.parseInt(st.nextToken());
        int M = Integer.parseInt(st.nextToken());

        room = new ArrayList<>();
        index = new int[N];
        for (int i = 0; i < M; i++) {
            room.add(new ArrayDeque<>());
            int k = Integer.parseInt(br.readLine());
            st = new StringTokenizer(br.readLine());
            while (k-- > 0) {
                // Input is 1-based; stored 0-based. Pushing in read order
                // leaves the FIRST listed book on top of the deque.
                int book = Integer.parseInt(st.nextToken()) - 1;
                room.get(i).push(book);
                index[book] = i;
            }
        }
        System.out.println(topologyOrderCheck(N));
    }

    /**
     * Greedily pops books 0..n-1 in order; each book must be on top of its
     * room's stack when its turn comes, otherwise the order is impossible.
     * (Rewritten from the original's assignment-inside-if plus a dead
     * trailing {@code continue}; behavior is unchanged.)
     *
     * @param n total number of books
     * @return "Yes" when the full order is achievable, otherwise "No"
     */
    private static String topologyOrderCheck(int n) {
        for (int book = 0; book < n; book++) {
            Deque<Integer> stack = room.get(index[book]);
            if (stack.isEmpty() || stack.peek() != book) {
                return "No";
            }
            stack.pop();
        }
        return "Yes";
    }
}
|
<reponame>ayrzjy/00.ayr-studio
/**
*
*/
package org.ayr.main;
/**
* @author ayrzjy
*/
/**
 * Tiny demo entry point: prints one "Hello remote" line, then "Hello Git"
 * 101 times in total (100 from the loop plus one final line).
 */
public class Bootstrap {

    /**
     * @param args unused command-line arguments
     */
    public static void main(String[] args) {
        System.out.println("Hello remote");

        int remaining = 100;
        while (remaining-- > 0) {
            System.out.println("Hello Git");
        }
        System.out.println("Hello Git");
    }
}
|
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
# NOTE(review): IMAGE_TYPE and OUTPUT_SUFFIX are expanded below but never set
# here — presumably exported by the MPLAB X build that invokes this generated
# script; confirm before running standalone.
TOP=`pwd`
CND_CONF=default
CND_DISTDIR=dist
TMPDIR=build/${CND_CONF}/${IMAGE_TYPE}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=dist/${CND_CONF}/${IMAGE_TYPE}/logic_gate.X.${IMAGE_TYPE}.${OUTPUT_SUFFIX}
OUTPUT_BASENAME=logic_gate.X.${IMAGE_TYPE}.${OUTPUT_SUFFIX}
PACKAGE_TOP_DIR=logicgate.x/
# Functions
# Abort the whole script if the previous command failed, propagating
# its exit status. Must be called immediately after the command.
function checkReturnCode
{
    rc=$?
    # Numeric comparison (-ne) with quoting instead of the original's
    # string compare (!=) on an unquoted variable.
    if [ "$rc" -ne 0 ]
    then
        exit "$rc"
    fi
}
# Create a directory (with parents), optionally chmod it; aborts on failure.
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
    mkdir -p "$1"
    checkReturnCode
    if [ "$2" != "" ]
    then
        chmod $2 "$1"
        checkReturnCode
    fi
}

# Copy a file, optionally chmod the destination; aborts on failure.
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
    cp "$1" "$2"
    checkReturnCode
    if [ "$3" != "" ]
    then
        chmod $3 "$2"
        checkReturnCode
    fi
}
# Setup — recreate a clean staging area for the package contents.
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/package
rm -rf ${TMPDIR}
mkdir -p ${TMPDIR}

# Copy files and create directories and links
cd "${TOP}"
makeDirectory ${TMPDIR}/logicgate.x/bin
copyFileToTmpDir "${OUTPUT_PATH}" "${TMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755

# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/package/logicgate.x.tar
cd ${TMPDIR}
# ../../../../ climbs back out of build/<conf>/<image-type>/tmp-packaging.
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/package/logicgate.x.tar *
checkReturnCode

# Cleanup — remove the staging area; the tarball remains under dist/.
cd "${TOP}"
rm -rf ${TMPDIR}
|
# Generated by Django 2.2.4 on 2019-09-09 19:37
from django.db import migrations
class Migration(migrations.Migration):
    # Must run after the app's initial schema migration.
    dependencies = [
        ('samvrombinator', '0001_initial'),
    ]

    # Renames the field Mot.voc to Mot.voc_name; row data is preserved by
    # Django's RenameField operation.
    operations = [
        migrations.RenameField(
            model_name='mot',
            old_name='voc',
            new_name='voc_name',
        ),
    ]
|
<filename>tests/basics/int_big_add.py
# tests transition from small to large int representation by addition
# NOTE: this is an output-comparison test — the printed values are the
# contract, so the statements below must not change.

# 31-bit overflow: doubling 0x3fffffff (2147483646) no longer fits a 31-bit
# tagged small int, forcing promotion to the big-int representation.
i = 0x3fffffff
print(i + i)
print(-i + -i)

# 63-bit overflow: same promotion for the 63-bit small-int range on
# 64-bit builds.
i = 0x3fffffffffffffff
print(i + i)
print(-i + -i)
|
public void retainStringsContainingSubstring(Collection<String> strings, String substring) {
strings.removeIf(s -> !s.contains(substring));
} |
#!/bin/bash
# Migrates legacy underscore-style Yaf class names (Yaf_*) to the namespaced
# spellings (Yaf\*) across the project's PHP sources.
# NOTE: `sed -i ""` is the BSD/macOS in-place form (empty backup suffix);
# on GNU sed drop the "" argument.
for folder in "application"; do
# The first pass sweeps every .php file under the current directory; the
# following passes restrict themselves to $folder.
find ./ -name '*.php' | xargs sed -i "" 's/Yaf_Application/Yaf\\Application/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_Application/Yaf\\Application/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_Bootstrap/Yaf\\Bootstrap/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_Loader/Yaf\\Loader/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_Dispatcher/Yaf\\Dispatcher/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_Plugin_Abstract/Yaf\\Plugin_Abstract/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_Registry/Yaf\\Registry/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_Session/Yaf\\Session/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_Config_Abstract/Yaf\\Config_Abstract/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_Config_Ini/Yaf\\Config\\Ini/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_Config_Simple/Yaf\\Config\\Simple/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_Controller_Abstract/Yaf\\Controller_Abstract/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_Action_Abstract/Yaf\\Action_Abstract/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_View_Interface/Yaf\\View_Interface/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_View_Simple/Yaf\\View\\Simple/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_Request_Abstract/Yaf\\Request_Abstract/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_Request_Http/Yaf\\Request\\Http/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_Request_Simple/Yaf\\Request\\Simple/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_Response_Abstract/Yaf\\Response_Abstract/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_Response_Http/Yaf\\Response\\Http/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_Response_Cli/Yaf\\Response\\Cli/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_Router/Yaf\\Router/g'
find $folder -name '*.php' | xargs sed -i "" 's/Yaf_Exception/Yaf\\Exception/g'
done
# Fix: the original `find . 'index.php'` had no -name test, so it listed
# EVERY path and ran sed over every file; restrict it to index.php.
find . -name 'index.php' | xargs sed -i "" 's/Yaf_Application/Yaf\\Application/g'
|
// requestAnimationFrame shim: native rAF / webkit-prefixed rAF when
// available, otherwise a ~60fps setTimeout fallback.
// Fix 1: the fallback must RETURN the timer id, otherwise cancel() has
// nothing to clear.
var request = window.requestAnimationFrame ||
    window.webkitRequestAnimationFrame ||
    function (cb) { return window.setTimeout(cb, 1000 / 60) }

var cancel = window.cancelAnimationFrame ||
    window.webkitCancelAnimationFrame ||
    function (index) { clearTimeout(index); }

export default {
    request: function (fn) {
        // Fix 2: invoke with `window` as the receiver — calling a detached
        // native requestAnimationFrame throws "Illegal invocation".
        return request.call(window, fn)
    },
    cancel: function (index) {
        return cancel.call(window, index)
    }
}
|
<filename>tapestry-core/src/test/java/org/apache/tapestry5/integration/app1/ClientDataWrapper.java<gh_stars>10-100
package org.apache.tapestry5.integration.app1;
import java.io.Serializable;
/**
 * Simple serializable holder for a single string value, round-tripped to the
 * client in the integration tests.
 */
public class ClientDataWrapper implements Serializable
{
    // Explicit version id keeps the serialized form stable across recompiles.
    private static final long serialVersionUID = 1L;

    // Immutable: assigned once in the constructor.
    private final String value;

    public ClientDataWrapper(String value)
    {
        this.value = value;
    }

    public String getValue()
    {
        return value;
    }

    @Override
    public String toString()
    {
        return String.format("ClientDataWrapper[%s]", value);
    }
}
|
#!/bin/bash
#
# Install linux kernel for TCP BBR and BBR Plus
#
# Copyright (C) 2021-2023 JinWYP
#
# 4.4 LTS 4.9 LTS 4.14 LTS 4.19 LTS
# 5.4 LTS 5.10 LTS
# 4.x版本内核最新的longterm版本是4.19.113,安装的话只能找个4.19的rpm包来安装了
# 从 Linux 4.9 版本开始,TCP BBR 就已经成为了 Linux 系统内核的一部分。因此,开启 BBR 的首要前提就是当前系统内核版本大于等于 4.9
# Linux 内核 5.6 正式发布了,内置了 wireguard module
# Linux 5.6 引入 FQ-PIE 数据包调度程序以帮助应对 Bufferbloat
# 5.5内核支持cake队列
# 自来光大佬: xamod内核5.8默认队列算法已经改为 fq_pie 之前是cake
# centos8 安装完成默认内核 kernel-core-4.18.0-240.15.1.el8_3.x86_64, kernel-modules-4.18.0-240.15.1.el8_3.x86_64
# ubuntu16 安装完成默认内核 linux-generic 4.4.0.210, linux-headers-4.4.0-210
# ubuntu18 安装完成默认内核 linux-generic 4.15.0.140, linux-headers-4.15.0-140
# ubuntu20 安装完成默认内核 linux-image-5.4.0-70-generic , linux-headers-5.4.0-70
# debian10 安装完成默认内核 4.19.0-16-amd64
# UJX6N 编译的bbr plus 内核 5.10.27-bbrplus 5.9.16 5.4.86
# UJX6N 编译的bbr plus 内核 4.19.164 4.14.213 4.9.264-1.bbrplus
# https://github.com/cx9208/bbrplus/issues/27
# BBR 速度评测
# https://www.shopee6.com/web/web-tutorial/bbr-vs-plus-vs-bbr2.html
# https://hostloc.com/thread-644985-1-1.html
# https://dropbox.tech/infrastructure/evaluating-bbrv2-on-the-dropbox-edge-network
export LC_ALL=C
export LANG=en_US.UTF-8
export LANGUAGE=en_US.UTF-8

# Prefix privileged commands with sudo when not running as root.
sudoCmd=""
if [[ $(/usr/bin/id -u) -ne 0 ]]; then
    sudoCmd="sudo"
fi

# fonts color — ANSI-colored echo helpers used for status output.
red(){
    echo -e "\033[31m\033[01m$1\033[0m"
}
green(){
    echo -e "\033[32m\033[01m$1\033[0m"
}
yellow(){
    echo -e "\033[33m\033[01m$1\033[0m"
}
blue(){
    echo -e "\033[34m\033[01m$1\033[0m"
}
bold(){
    echo -e "\033[1m\033[01m$1\033[0m"
}

Green_font_prefix="\033[32m"
Red_font_prefix="\033[31m"
Green_background_prefix="\033[42;37m"
Red_background_prefix="\033[41;37m"
Font_color_suffix="\033[0m"

# Globals filled in by the detection functions below.
osCPU=""                          # "intel" or "amd"
osArchitecture="arm"              # 386 / amd64 / arm / arm64
osInfo=""                         # distro display name
osRelease=""                      # centos / debian / ubuntu
osReleaseVersion=""
osReleaseVersionNo=""
osReleaseVersionNoShort=""
osReleaseVersionCodeName="CodeName"
osSystemPackage=""                # yum or apt-get
osSystemMdPath=""                 # systemd unit directory
osSystemShell="bash"
# Map `uname -m` onto a Go-style arch label in $osArchitecture.
function checkArchitecture(){
	# https://stackoverflow.com/questions/48678152/how-to-detect-386-amd64-arm-or-arm64-os-architecture-via-shell-bash
	case $(uname -m) in
		i386) osArchitecture="386" ;;
		i686) osArchitecture="386" ;;
		x86_64) osArchitecture="amd64" ;;
		arm) dpkg --print-architecture | grep -q "arm64" && osArchitecture="arm64" || osArchitecture="arm" ;;
		aarch64) dpkg --print-architecture | grep -q "arm64" && osArchitecture="arm64" || osArchitecture="arm" ;;
		* ) osArchitecture="arm" ;;
	esac
}

# Set $osCPU from /proc/cpuinfo's vendor_id; left empty when unknown.
function checkCPU(){
	osCPUText=$(cat /proc/cpuinfo | grep vendor_id | uniq)
	if [[ $osCPUText =~ "GenuineIntel" ]]; then
		osCPU="intel"
	elif [[ $osCPUText =~ "AMD" ]]; then
		osCPU="amd"
	else
		echo
	fi
	# green " 状态显示--当前CPU是: $osCPU"
}
# Detect the OS version number into osReleaseVersion / osReleaseVersionNo /
# osReleaseVersionNoShort / osInfo / osReleaseVersionCodeName.
getLinuxOSVersion(){
    if [[ -s /etc/redhat-release ]]; then
        osReleaseVersion=$(grep -oE '[0-9.]+' /etc/redhat-release)
    else
        osReleaseVersion=$(grep -oE '[0-9.]+' /etc/issue)
    fi

    # https://unix.stackexchange.com/questions/6345/how-can-i-get-distribution-name-and-version-number-in-a-simple-shell-script
    if [ -f /etc/os-release ]; then
        # freedesktop.org and systemd
        source /etc/os-release
        osInfo=$NAME
        osReleaseVersionNo=$VERSION_ID

        # Fix: the variable must be quoted — the original `[ -n $VERSION_CODENAME ]`
        # collapses to the one-argument test `[ -n ]` (always true) when the
        # codename is empty/unset, clobbering the default.
        if [ -n "$VERSION_CODENAME" ]; then
            osReleaseVersionCodeName=$VERSION_CODENAME
        fi
    elif type lsb_release >/dev/null 2>&1; then
        # linuxbase.org
        osInfo=$(lsb_release -si)
        osReleaseVersionNo=$(lsb_release -sr)
    elif [ -f /etc/lsb-release ]; then
        # For some versions of Debian/Ubuntu without lsb_release command
        . /etc/lsb-release
        osInfo=$DISTRIB_ID
        osReleaseVersionNo=$DISTRIB_RELEASE
    elif [ -f /etc/debian_version ]; then
        # Older Debian/Ubuntu/etc.
        osInfo=Debian
        osReleaseVersion=$(cat /etc/debian_version)
        osReleaseVersionNo=$(sed 's/\..*//' /etc/debian_version)
    elif [ -f /etc/redhat-release ]; then
        osReleaseVersion=$(grep -oE '[0-9.]+' /etc/redhat-release)
    else
        # Fall back to uname, e.g. "Linux <version>", also works for BSD, etc.
        osInfo=$(uname -s)
        osReleaseVersionNo=$(uname -r)
    fi

    # Major version only, e.g. "20.04" -> "20".
    osReleaseVersionNoShort=$(echo $osReleaseVersionNo | sed 's/\..*//')
}
# Detect the distribution family (centos/debian/ubuntu), its package manager,
# systemd unit path and a default codename, then run the other probes.
function getLinuxOSRelease(){
    if [[ -f /etc/redhat-release ]]; then
        osRelease="centos"
        osSystemPackage="yum"
        osSystemMdPath="/usr/lib/systemd/system/"
        osReleaseVersionCodeName=""
    elif cat /etc/issue | grep -Eqi "debian|raspbian"; then
        osRelease="debian"
        osSystemPackage="apt-get"
        osSystemMdPath="/lib/systemd/system/"
        osReleaseVersionCodeName="buster"
    elif cat /etc/issue | grep -Eqi "ubuntu"; then
        osRelease="ubuntu"
        osSystemPackage="apt-get"
        osSystemMdPath="/lib/systemd/system/"
        osReleaseVersionCodeName="bionic"
    elif cat /etc/issue | grep -Eqi "centos|red hat|redhat"; then
        osRelease="centos"
        osSystemPackage="yum"
        osSystemMdPath="/usr/lib/systemd/system/"
        osReleaseVersionCodeName=""
    elif cat /proc/version | grep -Eqi "debian|raspbian"; then
        osRelease="debian"
        osSystemPackage="apt-get"
        osSystemMdPath="/lib/systemd/system/"
        osReleaseVersionCodeName="buster"
    elif cat /proc/version | grep -Eqi "ubuntu"; then
        osRelease="ubuntu"
        osSystemPackage="apt-get"
        osSystemMdPath="/lib/systemd/system/"
        osReleaseVersionCodeName="bionic"
    elif cat /proc/version | grep -Eqi "centos|red hat|redhat"; then
        osRelease="centos"
        osSystemPackage="yum"
        osSystemMdPath="/usr/lib/systemd/system/"
        osReleaseVersionCodeName=""
    fi

    # Populate the remaining globals and print a one-line summary.
    getLinuxOSVersion
    checkArchitecture
    checkCPU
    virt_check

    [[ -z $(echo $SHELL|grep zsh) ]] && osSystemShell="bash" || osSystemShell="zsh"

    echo "OS info: ${osInfo}, ${osRelease}, ${osReleaseVersion}, ${osReleaseVersionNo}, ${osReleaseVersionCodeName}, ${osSystemShell}, ${osSystemPackage}, ${osSystemMdPath}"
}
# Best-effort virtualization detection; the result is stored in the global
# $virtual (Docker / Lxc / OpenVZ / KVM / VMware / Parallels / VirtualBox /
# Xen / Hyper-V / "Dedicated母鸡").
virt_check(){
    # if hash ifconfig 2>/dev/null; then
    # eth=$(ifconfig)
    # fi

    # Fix: the stderr redirect must live INSIDE the command substitution.
    # The original `virtualx=$(dmesg) 2>/dev/null` redirected the (empty)
    # assignment, letting dmesg/dmidecode errors leak to the terminal.
    virtualx=$(dmesg 2>/dev/null)

    if [ $(which dmidecode) ]; then
        sys_manu=$(dmidecode -s system-manufacturer 2>/dev/null)
        sys_product=$(dmidecode -s system-product-name 2>/dev/null)
        sys_ver=$(dmidecode -s system-version 2>/dev/null)
    else
        sys_manu=""
        sys_product=""
        sys_ver=""
    fi

    if grep docker /proc/1/cgroup -qa; then
        virtual="Docker"
    elif grep lxc /proc/1/cgroup -qa; then
        virtual="Lxc"
    elif grep -qa container=lxc /proc/1/environ; then
        virtual="Lxc"
    elif [[ -f /proc/user_beancounters ]]; then
        virtual="OpenVZ"
    elif [[ "$virtualx" == *kvm-clock* ]]; then
        virtual="KVM"
    # NOTE(review): $cname is never assigned anywhere in this script — these
    # two branches look copied from a script that fills it via virt-what or
    # similar; confirm against the full file before relying on them.
    elif [[ "$cname" == *KVM* ]]; then
        virtual="KVM"
    elif [[ "$cname" == *QEMU* ]]; then
        virtual="KVM"
    elif [[ "$virtualx" == *"VMware Virtual Platform"* ]]; then
        virtual="VMware"
    elif [[ "$virtualx" == *"Parallels Software International"* ]]; then
        virtual="Parallels"
    elif [[ "$virtualx" == *VirtualBox* ]]; then
        virtual="VirtualBox"
    elif [[ -e /proc/xen ]]; then
        virtual="Xen"
    elif [[ "$sys_manu" == *"Microsoft Corporation"* ]]; then
        if [[ "$sys_product" == *"Virtual Machine"* ]]; then
            if [[ "$sys_ver" == *"7.0"* || "$sys_ver" == *"Hyper-V" ]]; then
                virtual="Hyper-V"
            else
                virtual="Microsoft Virtual Machine"
            fi
        fi
    else
        virtual="Dedicated母鸡"
    fi
}
# Install the helper tools this script relies on (wget/curl/git/bc/
# ca-certificates/dmidecode) when missing, per distro family.
function installSoftDownload(){
	if [[ "${osRelease}" == "debian" || "${osRelease}" == "ubuntu" ]]; then
		# Debian additionally needs buster-backports for newer kernels.
		if [[ "${osRelease}" == "debian" ]]; then
			echo "deb http://deb.debian.org/debian buster-backports main contrib non-free" > /etc/apt/sources.list.d/buster-backports.list
			echo "deb-src http://deb.debian.org/debian buster-backports main contrib non-free" >> /etc/apt/sources.list.d/buster-backports.list
			${sudoCmd} apt update
		fi

		if ! dpkg -l | grep -qw wget; then
			${osSystemPackage} -y install wget curl git
		fi

		if ! dpkg -l | grep -qw bc; then
			${osSystemPackage} -y install bc
			# https://stackoverflow.com/questions/11116704/check-if-vt-x-is-activated-without-having-to-reboot-in-linux
			${osSystemPackage} -y install cpu-checker
		fi

		if ! dpkg -l | grep -qw ca-certificates; then
			${osSystemPackage} -y install ca-certificates dmidecode
			update-ca-certificates
		fi

	elif [[ "${osRelease}" == "centos" ]]; then
		if ! rpm -qa | grep -qw wget; then
			${osSystemPackage} -y install wget curl git bc
		fi

		if ! rpm -qa | grep -qw bc; then
			${osSystemPackage} -y install bc
		fi

		# Ensure CA certificates are installed and trusted.
		if ! rpm -qa | grep -qw ca-certificates; then
			${osSystemPackage} -y install ca-certificates dmidecode
			update-ca-trust force-enable
		fi
	fi
}
# Prompt for an immediate reboot (default Y). $1 — when set (any value),
# the pre-reboot kernel-uninstall warnings are skipped.
function rebootSystem(){
	if [ -z $1 ]; then
		red "请检查上面的信息 是否有新内核版本, 老内核版本 ${osKernelVersionBackup} 是否已经卸载!"
		echo
		red "请注意检查 是否把新内核也误删卸载了, 无新内核 ${linuxKernelToInstallVersionFull} 不要重启, 可重新安装内核后再重启! "
	fi

	echo
	read -p "是否立即重启? 请输入[Y/n]?" isRebootInput
	isRebootInput=${isRebootInput:-Y}

	if [[ $isRebootInput == [Yy] ]]; then
		${sudoCmd} reboot
	else
		exit
	fi
}

# Ask whether to continue (default Y); any other answer exits with status 1.
# NOTE(review): the "Opeartion" typo in the name is kept — callers elsewhere
# in the file use this exact spelling.
function promptContinueOpeartion(){
	read -p "是否继续操作? 直接回车默认继续操作, 请输入[Y/n]:" isContinueInput
	isContinueInput=${isContinueInput:-Y}

	if [[ $isContinueInput == [Yy] ]]; then
		echo ""
	else
		exit 1
	fi
}
# https://stackoverflow.com/questions/4023830/how-to-compare-two-strings-in-dot-separated-version-format-in-bash
# Compare two dot-separated version strings.
# Returns: 0 when $1 == $2, 1 when $1 > $2, 2 when $1 < $2.
versionCompare () {
	if [[ $1 == $2 ]]; then
		return 0
	fi
	# Split both versions into arrays on '.'.
	local IFS=.
	local i ver1=($1) ver2=($2)
	# fill empty fields in ver1 with zeros
	for ((i=${#ver1[@]}; i<${#ver2[@]}; i++))
	do
		ver1[i]=0
	done
	for ((i=0; i<${#ver1[@]}; i++))
	do
		if [[ -z ${ver2[i]} ]]
		then
			# fill empty fields in ver2 with zeros
			ver2[i]=0
		fi
		# 10# forces base-10 so components with leading zeros are not octal.
		if ((10#${ver1[i]} > 10#${ver2[i]}))
		then
			return 1
		fi
		if ((10#${ver1[i]} < 10#${ver2[i]}))
		then
			return 2
		fi
	done
	return 0
}

# versionCompareWithOp A B OP — succeed (return 0) when "A OP B" holds,
# with OP one of '=', '>', '<'.
versionCompareWithOp () {
	versionCompare $1 $2
	case $? in
		0) op='=';;
		1) op='>';;
		2) op='<';;
	esac

	if [[ $op != $3 ]]; then
		# echo "Version Number Compare Fail: Expected '$3', Actual '$op', Arg1 '$1', Arg2 '$2'"
		return 1
	else
		# echo "Version Number Compare Pass: '$1 $op $2'"
		return 0
	fi
}
# Kernel version strings of the currently running system.
osKernelVersionFull=$(uname -r)
# Version without the local suffix, e.g. "5.10.27-bbrplus" -> "5.10.27".
osKernelVersionBackup=$(uname -r | awk -F "-" '{print $1}')
# major.minor only, e.g. "5.10".
osKernelVersionShort=$(uname -r | cut -d- -f1 | awk -F "." '{print $1"."$2}')
osKernelBBRStatus=""
systemBBRRunningStatus="no"
systemBBRRunningStatusText=""
# Print the kernels installable from the configured repositories.
# $1 — optional grep filter for the Debian/Ubuntu package search.
function listAvailableLinuxKernel(){
	echo
	green " =================================================="
	green " 状态显示--当前可以被安装的 Linux 内核: "
	if [[ "${osRelease}" == "centos" ]]; then
		${sudoCmd} yum --disablerepo="*" --enablerepo="elrepo-kernel" list available | grep kernel
	else
		if [ -z $1 ]; then
			${sudoCmd} apt-cache search linux-image
		else
			${sudoCmd} apt-cache search linux-image | grep $1
		fi
	fi

	green " =================================================="
	echo
}

# Print the currently installed kernel packages plus manual removal hints.
function listInstalledLinuxKernel(){
	echo
	green " =================================================="
	green " 状态显示--当前已安装的 Linux 内核: "
	echo

	if [[ "${osRelease}" == "debian" || "${osRelease}" == "ubuntu" ]]; then
		dpkg --get-selections | grep linux-
		# dpkg -l | grep linux-
		# dpkg-query -l | grep linux-
		# apt list --installed | grep linux-
		echo
		red " 如安装内核遇到kernel linux-image linux-headers 版本不一致问题, 请手动卸载已安装的kernel"
		red " 卸载内核命令1 apt remove -y --purge linux-xxx名称"
		red " 卸载内核命令2 apt autoremove -y --purge linux-xxx名称"
	elif [[ "${osRelease}" == "centos" ]]; then
		${sudoCmd} rpm -qa | grep kernel
		echo
		red " 如安装内核遇到kernel kernel-headers kernel-devel版本不一致问题, 请手动卸载已安装的kernel"
		red " 卸载内核命令 rpm --nodeps -e kernel-xxx名称"
	fi
	green " =================================================="
	echo
}
# Probe kernel/BBR state WITHOUT printing a report. Fills the globals
# osKernelBBRStatus, net_congestion_control/net_qdisc/net_ecn,
# systemECNStatusText, systemBBRRunningStatus and systemBBRRunningStatusText.
function showLinuxKernelInfoNoDisplay(){

	isKernelSupportBBRVersion="4.9"

	# BBR requires kernel >= 4.9.
	if versionCompareWithOp "${isKernelSupportBBRVersion}" "${osKernelVersionShort}" ">"; then
		echo
	else
		osKernelBBRStatus="BBR"
	fi

	if [[ ${osKernelVersionFull} == *"bbrplus"* ]]; then
		osKernelBBRStatus="BBR Plus"
	elif [[ ${osKernelVersionFull} == *"xanmod"* ]]; then
		osKernelBBRStatus="BBR 和 BBR2"
	fi

	net_congestion_control=`cat /proc/sys/net/ipv4/tcp_congestion_control | awk '{print $1}'`
	net_qdisc=`cat /proc/sys/net/core/default_qdisc | awk '{print $1}'`
	net_ecn=`cat /proc/sys/net/ipv4/tcp_ecn | awk '{print $1}'`

	# Special-cased 4.14.129: read module state via lsmod instead of sysctl
	# — presumably a workaround for that build; TODO confirm.
	if [[ ${osKernelVersionBackup} == *"4.14.129"* ]]; then
		# isBBREnabled=$(grep "net.ipv4.tcp_congestion_control" /etc/sysctl.conf | awk -F "=" '{print $2}')
		# isBBREnabled=$(sysctl net.ipv4.tcp_available_congestion_control | awk -F "=" '{print $2}')
		isBBRTcpEnabled=$(lsmod | grep "bbr" | awk '{print $1}')
		isBBRPlusTcpEnabled=$(lsmod | grep "bbrplus" | awk '{print $1}')
		isBBR2TcpEnabled=$(lsmod | grep "bbr2" | awk '{print $1}')
	else
		isBBRTcpEnabled=$(sysctl net.ipv4.tcp_congestion_control | grep "bbr" | awk -F "=" '{print $2}' | awk '{$1=$1;print}')
		isBBRPlusTcpEnabled=$(sysctl net.ipv4.tcp_congestion_control | grep "bbrplus" | awk -F "=" '{print $2}' | awk '{$1=$1;print}')
		isBBR2TcpEnabled=$(sysctl net.ipv4.tcp_congestion_control | grep "bbr2" | awk -F "=" '{print $2}' | awk '{$1=$1;print}')
	fi

	# Translate the numeric tcp_ecn value into display text.
	if [[ ${net_ecn} == "1" ]]; then
		systemECNStatusText="已开启"
	elif [[ ${net_ecn} == "0" ]]; then
		systemECNStatusText="已关闭"
	elif [[ ${net_ecn} == "2" ]]; then
		systemECNStatusText="只对入站请求开启(默认值)"
	else
		systemECNStatusText=""
	fi

	# Cross-check the configured algorithm against what is actually active.
	if [[ ${net_congestion_control} == "bbr" ]]; then
		if [[ ${isBBRTcpEnabled} == *"bbr"* ]]; then
			systemBBRRunningStatus="bbr"
			systemBBRRunningStatusText="BBR 已启动成功"
		else
			systemBBRRunningStatusText="BBR 启动失败"
		fi
	elif [[ ${net_congestion_control} == "bbrplus" ]]; then
		if [[ ${isBBRPlusTcpEnabled} == *"bbrplus"* ]]; then
			systemBBRRunningStatus="bbrplus"
			systemBBRRunningStatusText="BBR Plus 已启动成功"
		else
			systemBBRRunningStatusText="BBR Plus 启动失败"
		fi
	elif [[ ${net_congestion_control} == "bbr2" ]]; then
		if [[ ${isBBR2TcpEnabled} == *"bbr2"* ]]; then
			systemBBRRunningStatus="bbr2"
			systemBBRRunningStatusText="BBR2 已启动成功"
		else
			systemBBRRunningStatusText="BBR2 启动失败"
		fi
	else
		systemBBRRunningStatusText="未启动加速模块"
	fi
}
# Print a human-readable summary of the kernel version and its
# BBR / BBR2 / BBR Plus capability (expects showLinuxKernelInfoNoDisplay
# to have filled systemBBRRunningStatusText).
function showLinuxKernelInfo(){
	# https://stackoverflow.com/questions/8654051/how-to-compare-two-floating-point-numbers-in-bash
	# https://stackoverflow.com/questions/229551/how-to-check-if-a-string-contains-a-substring-in-bash
	isKernelSupportBBRVersion="4.9"

	green " =================================================="
	green " 状态显示--当前Linux 内核版本: ${osKernelVersionShort} , $(uname -r) "

	if versionCompareWithOp "${isKernelSupportBBRVersion}" "${osKernelVersionShort}" ">"; then
		green " 当前系统内核低于4.9, 不支持开启 BBR "
	else
		green " 当前系统内核高于4.9, 支持开启 BBR, ${systemBBRRunningStatusText}"
	fi

	if [[ ${osKernelVersionFull} == *"xanmod"* ]]; then
		green " 当前系统内核已支持开启 BBR2, ${systemBBRRunningStatusText}"
	else
		green " 当前系统内核不支持开启 BBR2"
	fi

	if [[ ${osKernelVersionFull} == *"bbrplus"* ]]; then
		green " 当前系统内核已支持开启 BBR Plus, ${systemBBRRunningStatusText}"
	else
		green " 当前系统内核不支持开启 BBR Plus"
	fi

	# sysctl net.ipv4.tcp_available_congestion_control 返回值 net.ipv4.tcp_available_congestion_control = bbr cubic reno 或 reno cubic bbr
	# sysctl net.ipv4.tcp_congestion_control 返回值 net.ipv4.tcp_congestion_control = bbr
	# sysctl net.core.default_qdisc 返回值 net.core.default_qdisc = fq
	# lsmod | grep bbr 返回值 tcp_bbr 20480 3 或 tcp_bbr 20480 1 注意:并不是所有的 VPS 都会有此返回值,若没有也属正常。

	# isFlagBbr=$(sysctl net.ipv4.tcp_congestion_control | awk '{print $3}')

	# if [[ (${isFlagBbr} == *"bbr"*) && (${isFlagBbr} != *"bbrplus"*) && (${isFlagBbr} != *"bbr2"*) ]]; then
	# green " 状态显示--是否开启BBR: 已开启 "
	# else
	# green " 状态显示--是否开启BBR: 未开启 "
	# fi

	# if [[ ${isFlagBbr} == *"bbrplus"* ]]; then
	# green " 状态显示--是否开启BBR Plus: 已开启 "
	# else
	# green " 状态显示--是否开启BBR Plus: 未开启 "
	# fi

	# if [[ ${isFlagBbr} == *"bbr2"* ]]; then
	# green " 状态显示--是否开启BBR2: 已开启 "
	# else
	# green " 状态显示--是否开启BBR2: 未开启 "
	# fi

	green " =================================================="
	echo
}
# Interactively enable BBR / BBR2 / BBR Plus plus a queueing discipline by
# rewriting /etc/sysctl.conf. $1 — pass "bbrplus" to skip the BBR/BBR2 prompt.
# Notes (translated from the original): cake (5.5+) suits low-latency links,
# BBR Plus suits high-latency routes; the bbrplus patches were merged into
# mainline around kernel 5.1.
# https://hostloc.com/thread-644985-1-1.html
# https://github.com/xanmod/linux/issues/26
# https://sysctl-explorer.net/net/ipv4/tcp_ecn/
function enableBBRSysctlConfig(){
	removeBbrSysctlConfig

	currentBBRText="bbr"
	currentQueueText="fq"
	currentECNValue="2"
	currentECNText=""

	# Fix: the original `[ $1 = "bbrplus" ]` expands to `[ = bbrplus ]` when
	# the function is called without arguments — a test(1) syntax error that
	# printed noise before falling through to the else branch.
	if [ "${1:-}" = "bbrplus" ]; then
		currentBBRText="bbrplus"
	else
		echo
		echo " 请选择开启 (1) BBR 还是 (2) BBR2 网络加速 "
		red " 选择 1 BBR 需要内核在 4.9 以上"
		red " 选择 2 BBR2 需要内核为 XanMod "
		read -p "请选择? 直接回车默认选1 BBR, 请输入[1/2]:" BBRTcpInput
		BBRTcpInput=${BBRTcpInput:-1}
		if [[ $BBRTcpInput == [2] ]]; then
			# BBR2 is only available on XanMod kernels.
			if [[ ${osKernelVersionFull} == *"xanmod"* ]]; then
				currentBBRText="bbr2"

				echo
				echo " 请选择是否开启 ECN, (1) 关闭 (2) 开启 (3) 仅对入站请求开启 "
				red " 注意: 开启 ECN 可能会造成网络设备无法访问"
				read -p "请选择? 直接回车默认选1 关闭ECN, 请输入[1/2]:" ECNTcpInput
				ECNTcpInput=${ECNTcpInput:-1}
				if [[ $ECNTcpInput == [2] ]]; then
					currentECNValue="1"
					currentECNText="+ ECN"
				elif [[ $ECNTcpInput == [3] ]]; then
					currentECNValue="2"
				else
					currentECNValue="0"
				fi
			else
				echo
				red " 当前系统内核没有安装 XanMod 内核, 无法开启BBR2, 改为开启BBR"
				echo
				currentBBRText="bbr"
			fi
		else
			currentBBRText="bbr"
		fi
	fi

	echo
	echo " 请选择队列算法 (1) FQ, (2) FQ-Codel, (3) FQ-PIE, (4) CAKE "
	red " 选择 2 FQ-Codel 队列算法 需要内核在 4.13 以上"
	red " 选择 3 FQ-PIE 队列算法 需要内核在 5.6 以上"
	red " 选择 4 CAKE 队列算法 需要内核在 5.5 以上"
	read -p "请选择队列算法? 直接回车默认选1 FQ, 请输入[1/2/3/4]:" BBRQueueInput
	BBRQueueInput=${BBRQueueInput:-1}
	if [[ $BBRQueueInput == [2] ]]; then
		currentQueueText="fq_codel"
	elif [[ $BBRQueueInput == [3] ]]; then
		currentQueueText="fq_pie"
	elif [[ $BBRQueueInput == [4] ]]; then
		currentQueueText="cake"
	else
		currentQueueText="fq"
	fi

	echo "net.core.default_qdisc=${currentQueueText}" >> /etc/sysctl.conf
	echo "net.ipv4.tcp_congestion_control=${currentBBRText}" >> /etc/sysctl.conf
	echo "net.ipv4.tcp_ecn=${currentECNValue}" >> /etc/sysctl.conf

	# Kernels lacking the chosen qdisc make `sysctl -p` report "No such file".
	isSysctlText=$(sysctl -p 2>&1 | grep "No such file")

	echo
	if [[ -z "$isSysctlText" ]]; then
		green " 已成功开启 ${currentBBRText} + ${currentQueueText} ${currentECNText} "
	else
		green " 已成功开启 ${currentBBRText} ${currentECNText}"
		red " 但当前内核版本过低, 开启队列算法 ${currentQueueText} 失败! "
		red "请重新运行脚本, 选择'2 开启 BBR 加速'后, 务必再选择 (1)FQ 队列算法 !"
	fi

	echo
	read -p "是否优化系统网络配置? 直接回车默认优化, 请输入[Y/n]:" isOptimizingSystemInput
	isOptimizingSystemInput=${isOptimizingSystemInput:-Y}
	if [[ $isOptimizingSystemInput == [Yy] ]]; then
		addOptimizingSystemConfig "cancel"
	else
		echo
		echo "sysctl -p"
		echo
		sysctl -p
		echo
	fi
}
# Remove the BBR / LotServer ("锐速") sysctl entries added by this script.
function removeBbrSysctlConfig(){
	sed -i '/net.core.default_qdisc/d' /etc/sysctl.conf
	sed -i '/net.ipv4.tcp_congestion_control/d' /etc/sysctl.conf
	sed -i '/net.ipv4.tcp_ecn/d' /etc/sysctl.conf

	# LotServer leaves its own uninstaller behind; run it when present.
	if [[ -e /appex/bin/lotServer.sh ]]; then
		bash <(wget --no-check-certificate -qO- https://git.io/lotServerInstall.sh) uninstall
	fi
}

# Strip every line the optimizer pass added to sysctl.conf, limits.conf
# and /etc/profile (the literal "1000000" is used as the marker).
function removeOptimizingSystemConfig(){
	removeBbrSysctlConfig

	sed -i '/fs.file-max/d' /etc/sysctl.conf
	sed -i '/fs.inotify.max_user_instances/d' /etc/sysctl.conf
	sed -i '/net.ipv4.tcp_syncookies/d' /etc/sysctl.conf
	sed -i '/net.ipv4.tcp_fin_timeout/d' /etc/sysctl.conf
	sed -i '/net.ipv4.tcp_tw_reuse/d' /etc/sysctl.conf
	sed -i '/net.ipv4.ip_local_port_range/d' /etc/sysctl.conf
	sed -i '/net.ipv4.tcp_max_syn_backlog/d' /etc/sysctl.conf
	sed -i '/net.ipv4.tcp_max_tw_buckets/d' /etc/sysctl.conf
	sed -i '/net.ipv4.route.gc_timeout/d' /etc/sysctl.conf
	sed -i '/net.ipv4.tcp_syn_retries/d' /etc/sysctl.conf
	sed -i '/net.ipv4.tcp_synack_retries/d' /etc/sysctl.conf
	sed -i '/net.core.somaxconn/d' /etc/sysctl.conf
	sed -i '/net.core.netdev_max_backlog/d' /etc/sysctl.conf
	sed -i '/net.ipv4.tcp_timestamps/d' /etc/sysctl.conf
	sed -i '/net.ipv4.tcp_max_orphans/d' /etc/sysctl.conf

	# sed -i '/net.ipv4.ip_forward/d' /etc/sysctl.conf

	sed -i '/1000000/d' /etc/security/limits.conf
	sed -i '/1000000/d' /etc/profile

	echo
	green " 已删除当前系统的网络优化配置 "
	echo
}
# Append network/fd-limit tuning to sysctl.conf, limits.conf and /etc/profile,
# then offer a reboot. The literal "1000000" in /etc/profile marks a previous
# run. $1 — when set, the removal pass is skipped.
function addOptimizingSystemConfig(){
	# https://ustack.io/2019-11-21-Linux%E5%87%A0%E4%B8%AA%E9%87%8D%E8%A6%81%E7%9A%84%E5%86%85%E6%A0%B8%E9%85%8D%E7%BD%AE.html
	# https://www.cnblogs.com/xkus/p/7463135.html

	# Already optimized once — just reload and bail out.
	if grep -q "1000000" "/etc/profile"; then
		echo
		green " 系统网络配置 已经优化过, 不需要再次优化 "
		echo
		sysctl -p
		echo
		exit
	fi

	if [ -z $1 ]; then
		removeOptimizingSystemConfig
	fi

	echo
	green " 开始准备 优化系统网络配置 "

	cat >> /etc/sysctl.conf <<-EOF
fs.file-max = 1000000
fs.inotify.max_user_instances = 8192
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_fin_timeout = 30
net.ipv4.tcp_tw_reuse = 1
net.ipv4.ip_local_port_range = 1024 65000
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_max_tw_buckets = 6000
net.ipv4.route.gc_timeout = 100
net.ipv4.tcp_syn_retries = 1
net.ipv4.tcp_synack_retries = 1
net.core.somaxconn = 32768
net.core.netdev_max_backlog = 32768
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_max_orphans = 32768
# forward ipv4
#net.ipv4.ip_forward = 1
EOF

	cat >> /etc/security/limits.conf <<-EOF
* soft nofile 1000000
* hard nofile 1000000
EOF

	echo "ulimit -SHn 1000000" >> /etc/profile
	source /etc/profile

	echo
	sysctl -p
	echo
	green " 已完成 系统网络配置的优化 "
	echo

	rebootSystem "noinfo"
}
# Append forwarding-related sysctl settings.
# NOTE(review): despite the name, this also re-enables IPv6 and IPv6
# forwarding — confirm that is intended before renaming or trimming.
function startIpv4(){
	cat >> /etc/sysctl.conf <<-EOF
net.ipv4.tcp_retries2 = 8
net.ipv4.tcp_slow_start_after_idle = 0
# forward ipv4
net.ipv6.conf.all.disable_ipv6 = 0
net.ipv6.conf.default.disable_ipv6 = 0
net.ipv4.ip_forward = 1
net.ipv6.conf.all.forwarding = 1
EOF
}

# If any sysctl source disables IPv6, drop those lines and re-enable it.
function Enable_IPv6_Support() {
	if [[ $(sysctl -a | grep 'disable_ipv6.*=.*1') || $(cat /etc/sysctl.{conf,d/*} | grep 'disable_ipv6.*=.*1') ]]; then
		sed -i '/disable_ipv6/d' /etc/sysctl.{conf,d/*}
		echo 'net.ipv6.conf.all.disable_ipv6 = 0' >/etc/sysctl.d/ipv6.conf
		sysctl -w net.ipv6.conf.all.disable_ipv6=0
	fi
}

# Defaults controlling which kernel flavor/version gets installed below.
isInstallFromRepo="no"
userHomePath="${HOME}/download_linux_kernel"
linuxKernelByUser="elrepo"
linuxKernelToBBRType=""
linuxKernelToInstallVersion="5.10"
linuxKernelToInstallVersionFull=""

elrepo_kernel_name="kernel-ml"
elrepo_kernel_version="5.4.110"

altarch_kernel_name="kernel"
altarch_kernel_version="5.4.105"
# Download $1 into ${userHomePath}/${linuxKernelToInstallVersionFull}/,
# skipping the fetch when the file is already present.
function downloadFile(){
	tempUrl=$1
	tempFilename=$(echo "${tempUrl##*/}")
	echo "${userHomePath}/${linuxKernelToInstallVersionFull}/${tempFilename}"

	if [ -f "${userHomePath}/${linuxKernelToInstallVersionFull}/${tempFilename}" ]; then
		green "文件已存在, 不需要下载, 文件原下载地址: $1 "
	else
		green "文件下载中... 下载地址: $1 "
		wget -N --no-check-certificate -P ${userHomePath}/${linuxKernelToInstallVersionFull} $1
	fi
	echo
}

# Dispatch to the distro-specific kernel installer according to the
# linuxKernelToBBRType / isInstallFromRepo globals.
function installKernel(){
	if [ "${linuxKernelToBBRType}" = "bbrplus" ]; then
		getVersionBBRPlus
	fi

	if [[ "${osRelease}" == "debian" || "${osRelease}" == "ubuntu" ]]; then
		installDebianUbuntuKernel
	elif [[ "${osRelease}" == "centos" ]]; then
		rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
		if [ "${linuxKernelToBBRType}" = "xanmod" ]; then
			red " xanmod 内核不支持 Centos 系统安装"
			exit 255
		fi

		if [ "${isInstallFromRepo}" = "yes" ]; then
			getLatestCentosKernelVersion
			installCentosKernelFromRepo
		else
			if [ "${linuxKernelToBBRType}" = "bbrplus" ]; then
				echo
			else
				getLatestCentosKernelVersion "manual"
			fi
			installCentosKernelManual
		fi
	fi
}
# Resolve the newest bbrplus kernel tag published by UJX6N for the requested
# major.minor series (stored in $linuxKernelToInstallVersion) into the
# global $bbrplusKernelVersion.
function getVersionBBRPlus(){
	local repoName=""
	case "${linuxKernelToInstallVersion}" in
		5.14) repoName="UJX6N/bbrplus-5.14" ;;
		5.10) repoName="UJX6N/bbrplus-5.10" ;;
		5.4)  repoName="UJX6N/bbrplus-5.4" ;;
		4.19) repoName="UJX6N/bbrplus-4.19" ;;
		4.14) repoName="UJX6N/bbrplus" ;;   # the 4.14 series lives in the unsuffixed repo
		4.9)  repoName="UJX6N/bbrplus-4.9" ;;
	esac

	if [ -n "${repoName}" ]; then
		bbrplusKernelVersion=$(getGithubLatestReleaseVersionBBRPlus "${repoName}")
	fi

	echo
	green "UJX6N 编译的 最新的Linux bbrplus 内核版本号为 ${bbrplusKernelVersion}"
	echo
}
# Print the newest tag's version prefix for GitHub repo $1 (owner/name),
# i.e. the text before the first '-', e.g. "5.14.27" from "5.14.27-bbrplus".
function getGithubLatestReleaseVersionBBRPlus(){
	wget --no-check-certificate -qO- https://api.github.com/repos/$1/tags | grep 'name' | cut -d\" -f4 | head -1 | cut -d- -f1
	# wget --no-check-certificate -qO- https://api.github.com/repos/UJX6N/bbrplus-5.14/tags | grep 'name' | cut -d\" -f4 | head -1 | cut -d- -f1
}
# Query elrepo (and, in "manual" mode, Teddysun's mirror) for the newest
# available CentOS kernel package versions.
# $1: empty  -> read kernel-ml from elrepo directly
#     "manual" (any non-empty) -> read kernel-ml from Teddysun's mirror
# Sets: elrepo_kernel_version_lt, elrepo_kernel_version_ml,
#       elrepo_kernel_version_ml_Teddysun510,
#       elrepo_kernel_version_ml_Teddysun_latest,
#       elrepo_kernel_version_ml_Teddysun_latest_version
function getLatestCentosKernelVersion(){
    # https://stackoverflow.com/questions/4988155/is-there-a-bash-command-that-can-tell-the-size-of-a-shell-variable
    # Scrape kernel-lt (long-term) versions from the elrepo el8 RPM listing,
    # version-sorted ascending.
    elrepo_kernel_version_lt_array=($(wget -qO- https://elrepo.org/linux/kernel/el8/x86_64/RPMS | awk -F'\"kernel-lt-' '/>kernel-lt-[4-9]./{print $2}' | cut -d- -f1 | sort -V))
    # echo ${elrepo_kernel_version_lt_array[@]}
    echo
    if [ ${#elrepo_kernel_version_lt_array[@]} -eq 0 ]; then
        red " 无法获取到 Centos elrepo 源的最新的Linux 内核 kernel-lt 版本号 "
    else
        # echo ${elrepo_kernel_version_lt_array[${#elrepo_kernel_version_lt_array[@]} - 1]}
        # Last element of the version-sorted array is the newest version.
        elrepo_kernel_version_lt=${elrepo_kernel_version_lt_array[${#elrepo_kernel_version_lt_array[@]} - 1]}
        green "Centos elrepo 源的最新的Linux 内核 kernel-lt 版本号为 ${elrepo_kernel_version_lt}"
    fi
    if [ -z $1 ]; then
        # No argument: resolve kernel-ml (mainline) directly from elrepo.
        elrepo_kernel_version_ml_array=($(wget -qO- https://elrepo.org/linux/kernel/el8/x86_64/RPMS | awk -F'>kernel-ml-' '/>kernel-ml-[4-9]./{print $2}' | cut -d- -f1 | sort -V))
        if [ ${#elrepo_kernel_version_ml_array[@]} -eq 0 ]; then
            red " 无法获取到 Centos elrepo 源的最新的Linux 内核 kernel-ml 版本号 "
        else
            elrepo_kernel_version_ml=${elrepo_kernel_version_ml_array[-1]}
            green "Centos elrepo 源的最新的Linux 内核 kernel-ml 版本号为 ${elrepo_kernel_version_ml}"
        fi
    else
        # "manual" mode: use Teddysun's mirror. The *_lts list additionally
        # drops elrepo builds so only Teddysun-compiled kernels remain.
        elrepo_kernel_version_ml_teddysun_ftp_array=($(wget --no-check-certificate -qO- https://fr1.teddyvps.com/kernel/el8 | awk -F'>kernel-ml-' '/>kernel-ml-[4-9]./{print $2}' | cut -d- -f1 | sort -V))
        elrepo_kernel_version_ml_teddysun_ftp_array_lts=($(wget --no-check-certificate -qO- https://fr1.teddyvps.com/kernel/el8 | awk -F'>kernel-ml-' '/>kernel-ml-[4-9]./{print $2}' | grep -v "elrepo" | cut -d- -f1 | sort -V))
        if [ ${#elrepo_kernel_version_ml_teddysun_ftp_array_lts[@]} -eq 0 ]; then
            red " 无法获取到由 Teddysun 编译的 Centos 最新的Linux 5.10 内核 kernel-ml 版本号 "
        else
            elrepo_kernel_version_ml=${elrepo_kernel_version_ml_teddysun_ftp_array[-1]}
            # Derive the previous mainline minor version (e.g. "5.15" from
            # 5.16.x): extract the middle ".NN." number and subtract one.
            # NOTE(review): grep -o can emit multiple matches for some version
            # strings; this assumes a single numeric match — verify upstream data.
            elrepo_kernel_version_ml_Teddysun_number_temp=$(echo ${elrepo_kernel_version_ml} | grep -oe "\.[0-9]*\." | grep -oe "[0-9]*" )
            elrepo_kernel_version_ml_Teddysun_latest_version_middle=$((elrepo_kernel_version_ml_Teddysun_number_temp-1))
            elrepo_kernel_version_ml_Teddysun_latest_version="5.${elrepo_kernel_version_ml_Teddysun_latest_version_middle}"
            # https://stackoverflow.com/questions/229551/how-to-check-if-a-string-contains-a-substring-in-bash
            # Pick the newest 5.10 build and the newest build of the derived
            # previous minor version from the Teddysun-only list.
            for ver in ${elrepo_kernel_version_ml_teddysun_ftp_array_lts[@]}; do
                if [[ ${ver} == *"5.10"* ]]; then
                    # echo "Linux 5.10 kernel version matching the selection: ${ver}"
                    elrepo_kernel_version_ml_Teddysun510=${ver}
                fi
                if [[ ${ver} == *"${elrepo_kernel_version_ml_Teddysun_latest_version}"* ]]; then
                    # echo "Linux kernel version matching the selection: ${ver}, ${elrepo_kernel_version_ml_Teddysun_latest_version}"
                    elrepo_kernel_version_ml_Teddysun_latest=${ver}
                fi
            done
            green "Centos elrepo 源的最新的Linux 内核 kernel-ml 版本号为 ${elrepo_kernel_version_ml}"
            green "由 Teddysun 编译的 Centos 最新的Linux 5.10 内核 kernel-ml 版本号为 ${elrepo_kernel_version_ml_Teddysun510}"
            green "由 Teddysun 编译的 Centos 最新的Linux 5.xx 内核 kernel-ml 版本号为 ${elrepo_kernel_version_ml_Teddysun_latest}"
        fi
    fi
    echo
}
# Install a kernel on CentOS 7/8 from the elrepo yum repository
# (kernel-lt for 5.4, kernel-ml otherwise). Not supported on CentOS 6.
# Reads: linuxKernelToInstallVersion, elrepo_kernel_version_lt/_ml,
#        osKernelVersionBackup, osReleaseVersionNoShort, sudoCmd
# Sets:  linuxKernelToInstallVersionFull. Ends with a reboot prompt.
function installCentosKernelFromRepo(){
    green " =================================================="
    green " 开始通过 elrepo 源安装 linux 内核, 不支持Centos6 "
    green " =================================================="
    if [ -n "${osReleaseVersionNoShort}" ]; then
        # 5.4 maps to the long-term package (kernel-lt); anything else
        # installs the mainline package (kernel-ml).
        if [ "${linuxKernelToInstallVersion}" = "5.4" ]; then
            elrepo_kernel_name="kernel-lt"
            elrepo_kernel_version=${elrepo_kernel_version_lt}
        else
            elrepo_kernel_name="kernel-ml"
            elrepo_kernel_version=${elrepo_kernel_version_ml}
        fi
        # Warn and ask for confirmation when the running kernel already matches.
        if [ "${osKernelVersionBackup}" = "${elrepo_kernel_version}" ]; then
            red "当前系统内核版本已经是 ${osKernelVersionBackup} 无需安装! "
            promptContinueOpeartion
        fi
        linuxKernelToInstallVersionFull=${elrepo_kernel_version}
        if [ "${osReleaseVersionNoShort}" -eq 7 ]; then
            # https://computingforgeeks.com/install-linux-kernel-5-on-centos-7/
            # https://elrepo.org/linux/kernel/
            # https://elrepo.org/linux/kernel/el7/x86_64/RPMS/
            ${sudoCmd} yum install -y yum-plugin-fastestmirror
            ${sudoCmd} yum install -y https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
        elif [ "${osReleaseVersionNoShort}" -eq 8 ]; then
            # https://elrepo.org/linux/kernel/el8/x86_64/RPMS/
            ${sudoCmd} yum install -y yum-plugin-fastestmirror
            ${sudoCmd} yum install -y https://www.elrepo.org/elrepo-release-8.el8.elrepo.noarch.rpm
        else
            green " =================================================="
            red " 不支持 Centos 7和8 以外的其他版本 安装 linux 内核"
            green " =================================================="
            exit 255
        fi
        # Remove old auxiliary kernel packages (devel/headers/tools) first.
        removeCentosKernelMulti
        listAvailableLinuxKernel
        echo
        green " =================================================="
        green " 开始安装 linux 内核版本: ${linuxKernelToInstallVersionFull}"
        echo
        ${sudoCmd} yum -y --enablerepo=elrepo-kernel install ${elrepo_kernel_name}
        ${sudoCmd} yum -y --enablerepo=elrepo-kernel install ${elrepo_kernel_name}-{devel,headers,tools,tools-libs}
        green " =================================================="
        green " 安装 linux 内核 ${linuxKernelToInstallVersionFull} 成功! "
        red " 请根据以下信息 检查新内核是否安装成功,无新内核不要重启! "
        green " =================================================="
        echo
        showLinuxKernelInfo
        listInstalledLinuxKernel
        # Second pass: remove the old "kernel" packages themselves, keeping
        # the freshly installed version (filtered inside removeCentosKernel).
        removeCentosKernelMulti "kernel"
        listInstalledLinuxKernel
        rebootSystem
    fi
}
# Manually download kernel RPMs and force-install them on CentOS 7/8
# (not CentOS 6). The source is chosen from the requested version/type:
#   elrepo  - 5.x mainline/lt RPMs from elrepo or Teddysun mirrors
#   altarch - pinned 4.14 / 4.19 / 5.4 RPMs from the CentOS altarch tree
#   cx9208  - legacy bbrplus 4.14.129 RPMs (CentOS 7 only)
#   UJX6N   - current bbrplus RPMs from GitHub releases
# Installs with "rpm -ivh --force --nodeps", regenerates grub, then offers
# to remove old kernels and reboot.
function installCentosKernelManual(){
    green " =================================================="
    green " 开始手动安装 linux 内核, 不支持Centos6 "
    green " =================================================="
    echo
    yum install -y linux-firmware
    mkdir -p ${userHomePath}
    cd ${userHomePath}
    # First digit of the requested version ("4" or "5") selects elrepo vs altarch.
    kernelVersionFirstletter=${linuxKernelToInstallVersion:0:1}
    echo
    if [ "${linuxKernelToBBRType}" = "bbrplus" ]; then
        linuxKernelByUser="UJX6N"
        # The legacy 4.14.129 bbrplus build comes from cx9208, not UJX6N.
        if [ "${linuxKernelToInstallVersion}" = "4.14.129" ]; then
            linuxKernelByUser="cx9208"
        fi
        green " 准备从 ${linuxKernelByUser} github 网站下载 bbrplus ${linuxKernelToInstallVersion} 的linux内核并安装 "
    else
        linuxKernelByUserTeddysun=""
        if [ "${kernelVersionFirstletter}" = "5" ]; then
            linuxKernelByUser="elrepo"
            # These 5.x versions are served from Teddysun-hosted mirrors.
            if [[ "${linuxKernelToInstallVersion}" == "5.10" || "${linuxKernelToInstallVersion}" == "5.15" || "${linuxKernelToInstallVersion}" == "5.16" ]]; then
                linuxKernelByUserTeddysun="Teddysun"
            fi
        else
            linuxKernelByUser="altarch"
        fi
        if [ "${linuxKernelByUserTeddysun}" = "Teddysun" ]; then
            green " 准备从 Teddysun 网站下载 linux ${linuxKernelByUser} 内核并安装 "
        else
            green " 准备从 ${linuxKernelByUser} 网站下载linux内核并安装 "
        fi
    fi
    echo
    if [ "${linuxKernelByUser}" = "elrepo" ]; then
        # elrepo: pick package name, version, filename infix and mirror per version.
        if [ "${linuxKernelToInstallVersion}" = "5.4" ]; then
            elrepo_kernel_name="kernel-lt"
            elrepo_kernel_version=${elrepo_kernel_version_lt}
            elrepo_kernel_filename="elrepo."
            ELREPODownloadUrl="https://elrepo.org/linux/kernel/el${osReleaseVersionNoShort}/x86_64/RPMS"
            # https://elrepo.org/linux/kernel/el7/x86_64/RPMS/
            # https://elrepo.org/linux/kernel/el7/x86_64/RPMS/kernel-lt-5.4.105-1.el7.elrepo.x86_64.rpm
            # https://elrepo.org/linux/kernel/el7/x86_64/RPMS/kernel-lt-tools-5.4.109-1.el7.elrepo.x86_64.rpm
            # https://elrepo.org/linux/kernel/el7/x86_64/RPMS/kernel-lt-tools-libs-5.4.109-1.el7.elrepo.x86_64.rpm
        elif [ "${linuxKernelToInstallVersion}" = "5.10" ]; then
            elrepo_kernel_name="kernel-ml"
            elrepo_kernel_version=${elrepo_kernel_version_ml_Teddysun510}
            elrepo_kernel_filename=""
            ELREPODownloadUrl="https://dl.lamp.sh/kernel/el${osReleaseVersionNoShort}"
            # https://dl.lamp.sh/kernel/el7/kernel-ml-5.10.23-1.el7.x86_64.rpm
            # https://dl.lamp.sh/kernel/el7/kernel-ml-5.10.37-1.el7.x86_64.rpm
            # https://dl.lamp.sh/kernel/el8/kernel-ml-5.10.27-1.el8.x86_64.rpm
            # https://dl.lamp.sh/kernel/el8/kernel-ml-5.10.27-1.el8.x86_64.rpm
        elif [ "${linuxKernelToInstallVersion}" = "${elrepo_kernel_version_ml_Teddysun_latest_version}" ]; then
            elrepo_kernel_name="kernel-ml"
            elrepo_kernel_version=${elrepo_kernel_version_ml_Teddysun_latest}
            elrepo_kernel_filename=""
            ELREPODownloadUrl="https://fr1.teddyvps.com/kernel/el${osReleaseVersionNoShort}"
            # https://fr1.teddyvps.com/kernel/el7/kernel-ml-5.12.14-1.el7.x86_64.rpm
        else
            elrepo_kernel_name="kernel-ml"
            elrepo_kernel_version=${elrepo_kernel_version_ml}
            elrepo_kernel_filename="elrepo."
            ELREPODownloadUrl="https://fr1.teddyvps.com/kernel/el${osReleaseVersionNoShort}"
            # https://fr1.teddyvps.com/kernel/el7/kernel-ml-5.13.0-1.el7.elrepo.x86_64.rpm
        fi
        linuxKernelToInstallVersionFull=${elrepo_kernel_version}
        mkdir -p ${userHomePath}/${linuxKernelToInstallVersionFull}
        cd ${userHomePath}/${linuxKernelToInstallVersionFull}
        echo
        echo "+++++++++++ elrepo_kernel_version ${elrepo_kernel_version}"
        echo
        if [ "${osReleaseVersionNoShort}" -eq 7 ]; then
            downloadFile ${ELREPODownloadUrl}/${elrepo_kernel_name}-${elrepo_kernel_version}-1.el7.${elrepo_kernel_filename}x86_64.rpm
            downloadFile ${ELREPODownloadUrl}/${elrepo_kernel_name}-devel-${elrepo_kernel_version}-1.el7.${elrepo_kernel_filename}x86_64.rpm
            downloadFile ${ELREPODownloadUrl}/${elrepo_kernel_name}-headers-${elrepo_kernel_version}-1.el7.${elrepo_kernel_filename}x86_64.rpm
            downloadFile ${ELREPODownloadUrl}/${elrepo_kernel_name}-tools-${elrepo_kernel_version}-1.el7.${elrepo_kernel_filename}x86_64.rpm
            downloadFile ${ELREPODownloadUrl}/${elrepo_kernel_name}-tools-libs-${elrepo_kernel_version}-1.el7.${elrepo_kernel_filename}x86_64.rpm
        else
            # el8 additionally needs the -core and -modules packages.
            downloadFile ${ELREPODownloadUrl}/${elrepo_kernel_name}-${elrepo_kernel_version}-1.el8.${elrepo_kernel_filename}x86_64.rpm
            downloadFile ${ELREPODownloadUrl}/${elrepo_kernel_name}-devel-${elrepo_kernel_version}-1.el8.${elrepo_kernel_filename}x86_64.rpm
            downloadFile ${ELREPODownloadUrl}/${elrepo_kernel_name}-headers-${elrepo_kernel_version}-1.el8.${elrepo_kernel_filename}x86_64.rpm
            downloadFile ${ELREPODownloadUrl}/${elrepo_kernel_name}-core-${elrepo_kernel_version}-1.el8.${elrepo_kernel_filename}x86_64.rpm
            downloadFile ${ELREPODownloadUrl}/${elrepo_kernel_name}-modules-${elrepo_kernel_version}-1.el8.${elrepo_kernel_filename}x86_64.rpm
            downloadFile ${ELREPODownloadUrl}/${elrepo_kernel_name}-tools-${elrepo_kernel_version}-1.el8.${elrepo_kernel_filename}x86_64.rpm
            downloadFile ${ELREPODownloadUrl}/${elrepo_kernel_name}-tools-libs-${elrepo_kernel_version}-1.el8.${elrepo_kernel_filename}x86_64.rpm
        fi
        removeCentosKernelMulti
        echo
        green " =================================================="
        green " 开始安装 linux 内核版本: ${linuxKernelToInstallVersionFull}"
        echo
        # On el8 the -core package must be installed before the main package.
        if [ "${osReleaseVersionNoShort}" -eq 8 ]; then
            rpm -ivh --force --nodeps ${elrepo_kernel_name}-core-${elrepo_kernel_version}-*.rpm
        fi
        rpm -ivh --force --nodeps ${elrepo_kernel_name}-${elrepo_kernel_version}-*.rpm
        rpm -ivh --force --nodeps ${elrepo_kernel_name}-*.rpm
    elif [ "${linuxKernelByUser}" = "altarch" ]; then
        # altarch: pinned versions from the CentOS vault/mirror.
        if [ "${linuxKernelToInstallVersion}" = "4.14" ]; then
            altarch_kernel_version="4.14.119-200"
            altarchDownloadUrl="https://vault.centos.org/altarch/7.6.1810/kernel/x86_64/Packages"
            # https://vault.centos.org/altarch/7.6.1810/kernel/x86_64/Packages/kernel-4.14.119-200.el7.x86_64.rpm
        elif [ "${linuxKernelToInstallVersion}" = "4.19" ]; then
            altarch_kernel_version="4.19.113-300"
            altarchDownloadUrl="https://vault.centos.org/altarch/7.8.2003/kernel/x86_64/Packages"
            # https://vault.centos.org/altarch/7.8.2003/kernel/x86_64/Packages/kernel-4.19.113-300.el7.x86_64.rpm
        else
            altarch_kernel_version="5.4.105"
            altarchDownloadUrl="http://mirror.centos.org/altarch/7/kernel/x86_64/Packages"
            # http://mirror.centos.org/altarch/7/kernel/x86_64/Packages/kernel-5.4.96-200.el7.x86_64.rpm
        fi
        # Strip the "-NNN" release suffix to get the bare version number.
        linuxKernelToInstallVersionFull=$(echo ${altarch_kernel_version} | cut -d- -f1)
        mkdir -p ${userHomePath}/${linuxKernelToInstallVersionFull}
        cd ${userHomePath}/${linuxKernelToInstallVersionFull}
        # NOTE(review): ${altarch_kernel_name} is never assigned in this function;
        # it is presumably set elsewhere in the script (likely "kernel") — verify.
        if [ "${osReleaseVersionNoShort}" -eq 7 ]; then
            if [ "$kernelVersionFirstletter" = "5" ]; then
                # 5.x packages carry an explicit "-200" release in the file name.
                # http://mirror.centos.org/altarch/7/kernel/x86_64/Packages/
                downloadFile ${altarchDownloadUrl}/${altarch_kernel_name}-${altarch_kernel_version}-200.el7.x86_64.rpm
                downloadFile ${altarchDownloadUrl}/${altarch_kernel_name}-core-${altarch_kernel_version}-200.el7.x86_64.rpm
                downloadFile ${altarchDownloadUrl}/${altarch_kernel_name}-devel-${altarch_kernel_version}-200.el7.x86_64.rpm
                downloadFile ${altarchDownloadUrl}/${altarch_kernel_name}-headers-${altarch_kernel_version}-200.el7.x86_64.rpm
                downloadFile ${altarchDownloadUrl}/${altarch_kernel_name}-modules-${altarch_kernel_version}-200.el7.x86_64.rpm
                downloadFile ${altarchDownloadUrl}/${altarch_kernel_name}-tools-${altarch_kernel_version}-200.el7.x86_64.rpm
                downloadFile ${altarchDownloadUrl}/${altarch_kernel_name}-tools-libs-${altarch_kernel_version}-200.el7.x86_64.rpm
            else
                # 4.x packages already include the release in altarch_kernel_version.
                # https://vault.centos.org/altarch/7.6.1810/kernel/x86_64/Packages/
                # https://vault.centos.org/altarch/7.6.1810/kernel/x86_64/Packages/kernel-4.14.119-200.el7.x86_64.rpm
                # https://vault.centos.org/altarch/7.8.2003/kernel/x86_64/Packages/
                # https://vault.centos.org/altarch/7.8.2003/kernel/i386/Packages/kernel-4.19.113-300.el7.i686.rpm
                # https://vault.centos.org/altarch/7.8.2003/kernel/x86_64/Packages/kernel-4.19.113-300.el7.x86_64.rpm
                # http://ftp.iij.ad.jp/pub/linux/centos-vault/altarch/7.8.2003/kernel/i386/Packages/kernel-4.19.113-300.el7.i686.rpm
                downloadFile ${altarchDownloadUrl}/${altarch_kernel_name}-${altarch_kernel_version}.el7.x86_64.rpm
                downloadFile ${altarchDownloadUrl}/${altarch_kernel_name}-core-${altarch_kernel_version}.el7.x86_64.rpm
                downloadFile ${altarchDownloadUrl}/${altarch_kernel_name}-devel-${altarch_kernel_version}.el7.x86_64.rpm
                downloadFile ${altarchDownloadUrl}/${altarch_kernel_name}-headers-${altarch_kernel_version}.el7.x86_64.rpm
                downloadFile ${altarchDownloadUrl}/${altarch_kernel_name}-modules-${altarch_kernel_version}.el7.x86_64.rpm
                downloadFile ${altarchDownloadUrl}/${altarch_kernel_name}-tools-${altarch_kernel_version}.el7.x86_64.rpm
                downloadFile ${altarchDownloadUrl}/${altarch_kernel_name}-tools-libs-${altarch_kernel_version}.el7.x86_64.rpm
            fi
        else
            red "从 altarch 源没有找到 Centos 8 的 ${linuxKernelToInstallVersion} Kernel "
            exit 255
        fi
        removeCentosKernelMulti
        echo
        green " =================================================="
        green " 开始安装 linux 内核版本: ${linuxKernelToInstallVersionFull}"
        echo
        # -core first, then everything else.
        rpm -ivh --force --nodeps ${altarch_kernel_name}-core-${altarch_kernel_version}*
        rpm -ivh --force --nodeps ${altarch_kernel_name}-*
        # yum install -y kernel-*
    elif [ "${linuxKernelByUser}" = "cx9208" ]; then
        # cx9208: legacy bbrplus 4.14.129 build, CentOS 7 only.
        linuxKernelToInstallVersionFull="4.14.129-bbrplus"
        if [ "${osReleaseVersionNoShort}" -eq 7 ]; then
            mkdir -p ${userHomePath}/${linuxKernelToInstallVersionFull}
            cd ${userHomePath}/${linuxKernelToInstallVersionFull}
            # https://raw.githubusercontent.com/chiakge/Linux-NetSpeed/master/bbrplus/centos/7/kernel-4.14.129-bbrplus.rpm
            # https://raw.githubusercontent.com/chiakge/Linux-NetSpeed/master/bbrplus/centos/7/kernel-headers-4.14.129-bbrplus.rpm
            bbrplusDownloadUrl="https://raw.githubusercontent.com/cx9208/Linux-NetSpeed/master/bbrplus/centos/7"
            downloadFile ${bbrplusDownloadUrl}/kernel-${linuxKernelToInstallVersionFull}.rpm
            downloadFile ${bbrplusDownloadUrl}/kernel-headers-${linuxKernelToInstallVersionFull}.rpm
            removeCentosKernelMulti
            echo
            green " =================================================="
            green " 开始安装 linux 内核版本: ${linuxKernelToInstallVersionFull}"
            echo
            rpm -ivh --force --nodeps kernel-${linuxKernelToInstallVersionFull}.rpm
            rpm -ivh --force --nodeps kernel-headers-${linuxKernelToInstallVersionFull}.rpm
        else
            red "从 cx9208 的 github 网站没有找到 Centos 8 的 ${linuxKernelToInstallVersion} Kernel "
            exit 255
        fi
    elif [ "${linuxKernelByUser}" = "UJX6N" ]; then
        # UJX6N: current bbrplus builds from GitHub releases; the 4.14 series
        # lives in the repo named just "bbrplus".
        linuxKernelToInstallVersionFull="${bbrplusKernelVersion}-bbrplus"
        mkdir -p ${userHomePath}/${linuxKernelToInstallVersionFull}
        cd ${userHomePath}/${linuxKernelToInstallVersionFull}
        if [ "${linuxKernelToInstallVersion}" = "4.14" ]; then
            bbrplusDownloadUrl="https://github.com/UJX6N/bbrplus/releases/download/${linuxKernelToInstallVersionFull}"
        else
            bbrplusDownloadUrl="https://github.com/UJX6N/bbrplus-${linuxKernelToInstallVersion}/releases/download/${linuxKernelToInstallVersionFull}"
        fi
        if [ "${osReleaseVersionNoShort}" -eq 7 ]; then
            # https://github.com/UJX6N/bbrplus-5.14/releases/download/5.14.15-bbrplus/CentOS-7_Required_kernel-bbrplus-5.14.15-1.bbrplus.el7.x86_64.rpm
            # https://github.com/UJX6N/bbrplus-5.10/releases/download/5.10.76-bbrplus/CentOS-7_Required_kernel-bbrplus-5.10.76-1.bbrplus.el7.x86_64.rpm
            # https://github.com/UJX6N/bbrplus-5.10/releases/download/5.10.27-bbrplus/CentOS-7_Optional_kernel-bbrplus-devel-5.10.27-1.bbrplus.el7.x86_64.rpm
            # https://github.com/UJX6N/bbrplus-5.10/releases/download/5.10.27-bbrplus/CentOS-7_Optional_kernel-bbrplus-headers-5.10.27-1.bbrplus.el7.x86_64.rpm
            downloadFile ${bbrplusDownloadUrl}/CentOS-7_Required_kernel-bbrplus-${bbrplusKernelVersion}-1.bbrplus.el7.x86_64.rpm
            downloadFile ${bbrplusDownloadUrl}/CentOS-7_Optional_kernel-bbrplus-devel-${bbrplusKernelVersion}-1.bbrplus.el7.x86_64.rpm
            downloadFile ${bbrplusDownloadUrl}/CentOS-7_Optional_kernel-bbrplus-headers-${bbrplusKernelVersion}-1.bbrplus.el7.x86_64.rpm
            removeCentosKernelMulti
            echo
            green " =================================================="
            green " 开始安装 linux 内核版本: ${linuxKernelToInstallVersionFull}"
            echo
            rpm -ivh --force --nodeps CentOS-7_Required_kernel-bbrplus-${bbrplusKernelVersion}-1.bbrplus.el7.x86_64.rpm
            rpm -ivh --force --nodeps *.rpm
        else
            # CentOS 8 builds only exist for the 5.x series.
            if [ "${kernelVersionFirstletter}" = "5" ]; then
                echo
            else
                red "从 UJX6N 的 github 网站没有找到 Centos 8 的 ${linuxKernelToInstallVersion} Kernel "
                exit 255
            fi
            # https://github.com/UJX6N/bbrplus-5.14/releases/download/5.14.18-bbrplus/CentOS-8_Required_kernel-bbrplus-core-5.14.18-1.bbrplus.el8.x86_64.rpm
            # https://github.com/UJX6N/bbrplus-5.14/releases/download/5.14.18-bbrplus/CentOS-8_Required_kernel-bbrplus-modules-5.14.18-1.bbrplus.el8.x86_64.rpm
            # https://github.com/UJX6N/bbrplus-5.14/releases/download/5.14.18-bbrplus/CentOS-8_Optional_kernel-bbrplus-5.14.18-1.bbrplus.el8.x86_64.rpm
            # https://github.com/UJX6N/bbrplus-5.14/releases/download/5.14.18-bbrplus/CentOS-8_Optional_kernel-bbrplus-devel-5.14.18-1.bbrplus.el8.x86_64.rpm
            # https://github.com/UJX6N/bbrplus-5.14/releases/download/5.14.18-bbrplus/CentOS-8_Optional_kernel-bbrplus-headers-5.14.18-1.bbrplus.el8.x86_64.rpm
            # https://github.com/UJX6N/bbrplus-5.10/releases/download/5.10.27-bbrplus/CentOS-8_Optional_kernel-bbrplus-modules-5.10.27-1.bbrplus.el8.x86_64.rpm
            # https://github.com/UJX6N/bbrplus-5.14/releases/download/5.14.18-bbrplus/CentOS-8_Optional_kernel-bbrplus-modules-extra-5.14.18-1.bbrplus.el8.x86_64.rpm
            downloadFile ${bbrplusDownloadUrl}/CentOS-8_Required_kernel-bbrplus-core-${bbrplusKernelVersion}-1.bbrplus.el8.x86_64.rpm
            downloadFile ${bbrplusDownloadUrl}/CentOS-8_Required_kernel-bbrplus-modules-${bbrplusKernelVersion}-1.bbrplus.el8.x86_64.rpm
            downloadFile ${bbrplusDownloadUrl}/CentOS-8_Optional_kernel-bbrplus-${bbrplusKernelVersion}-1.bbrplus.el8.x86_64.rpm
            downloadFile ${bbrplusDownloadUrl}/CentOS-8_Optional_kernel-bbrplus-devel-${bbrplusKernelVersion}-1.bbrplus.el8.x86_64.rpm
            downloadFile ${bbrplusDownloadUrl}/CentOS-8_Optional_kernel-bbrplus-headers-${bbrplusKernelVersion}-1.bbrplus.el8.x86_64.rpm
            downloadFile ${bbrplusDownloadUrl}/CentOS-8_Optional_kernel-bbrplus-modules-${bbrplusKernelVersion}-1.bbrplus.el8.x86_64.rpm
            downloadFile ${bbrplusDownloadUrl}/CentOS-8_Optional_kernel-bbrplus-modules-extra-${bbrplusKernelVersion}-1.bbrplus.el8.x86_64.rpm
            removeCentosKernelMulti
            echo
            green " =================================================="
            green " 开始安装 linux 内核版本: ${linuxKernelToInstallVersionFull}"
            echo
            rpm -ivh --force --nodeps CentOS-8_Required_kernel-bbrplus-core-${bbrplusKernelVersion}-1.bbrplus.el8.x86_64.rpm
            rpm -ivh --force --nodeps *.rpm
        fi
    fi;
    updateGrubConfig
    green " =================================================="
    green " 安装 linux 内核 ${linuxKernelToInstallVersionFull} 成功! "
    red " 请根据以下信息 检查新内核是否安装成功,无新内核不要重启! "
    green " =================================================="
    echo
    showLinuxKernelInfo
    removeCentosKernelMulti "kernel"
    listInstalledLinuxKernel
    rebootSystem
}
# Interactively remove old CentOS kernel packages (the version being
# installed is kept — see the filter inside removeCentosKernel).
# $1: empty                 -> remove auxiliary packages (devel/headers/tools/modules)
#     "kernel" (non-empty)  -> remove the old kernel packages themselves
# Fix: quote "$1" in the -z tests and the user input in [[ ]] — the unquoted
# forms only worked by accident of one-argument test semantics (SC2086).
function removeCentosKernelMulti(){
    listInstalledLinuxKernel

    if [ -z "$1" ]; then
        red " 开始准备删除 kernel-header kernel-devel kernel-tools kernel-tools-libs 内核, 建议删除 "
    else
        red " 开始准备删除 kernel 内核, 建议删除 "
    fi
    red " 注意: 删除内核有风险, 可能会导致VPS无法启动, 请先做好备份! "

    # Default answer is Yes when the user just presses Enter.
    read -p "是否删除内核? 直接回车默认删除内核, 请输入[Y/n]:" isContinueDelKernelInput
    isContinueDelKernelInput=${isContinueDelKernelInput:-Y}
    echo

    if [[ "${isContinueDelKernelInput}" == [Yy] ]]; then
        if [ -z "$1" ]; then
            removeCentosKernel "kernel-devel"
            removeCentosKernel "kernel-header"
            removeCentosKernel "kernel-tools"

            removeCentosKernel "kernel-ml-devel"
            removeCentosKernel "kernel-ml-header"
            removeCentosKernel "kernel-ml-tools"

            removeCentosKernel "kernel-lt-devel"
            removeCentosKernel "kernel-lt-header"
            removeCentosKernel "kernel-lt-tools"

            removeCentosKernel "kernel-bbrplus-devel"
            removeCentosKernel "kernel-bbrplus-headers"
            removeCentosKernel "kernel-bbrplus-modules"
        else
            removeCentosKernel "kernel"
        fi
    fi
    echo
}
# Force-remove (rpm -e --nodeps) every installed RPM whose name matches $1,
# excluding packages containing the version currently being installed
# (derived from linuxKernelToInstallVersionFull) and noarch packages.
# $1: package name prefix to remove, e.g. "kernel-ml-devel".
function removeCentosKernel(){
    # Background: after "yum localinstall kernel-ml-*", removing by short name
    # with "rpm -e --nodeps kernel-headers" fails ("package not found") —
    # rpm needs the full installed package name, e.g.
    # kernel-ml-headers-5.10.16-1.el7.elrepo.x86_64, which can be listed with
    # "rpm -qa | grep kernel"; then force-remove with
    # "rpm -e --nodeps kernel-ml-headers-5.10.16-1.el7.elrepo.x86_64".
    # ${sudoCmd} yum remove kernel-ml kernel-ml-{devel,headers,perf}
    # ${sudoCmd} rpm -e --nodeps kernel-headers
    # ${sudoCmd} rpm -e --nodeps kernel-ml-headers-${elrepo_kernel_version}-1.el7.elrepo.x86_64
    removeKernelNameText="kernel"
    removeKernelNameText=$1
    # Bare version (before the first "-") of the kernel being installed;
    # packages containing it are protected from removal.
    grepExcludelinuxKernelVersion=$(echo ${linuxKernelToInstallVersionFull} | cut -d- -f1)
    # echo "rpm -qa | grep ${removeKernelNameText} | grep -v ${grepExcludelinuxKernelVersion} | grep -v noarch | wc -l"
    rpmOldKernelNumber=$(rpm -qa | grep "${removeKernelNameText}" | grep -v "${grepExcludelinuxKernelVersion}" | grep -v "noarch" | wc -l)
    rpmOLdKernelNameList=$(rpm -qa | grep "${removeKernelNameText}" | grep -v "${grepExcludelinuxKernelVersion}" | grep -v "noarch")
    # echo "${rpmOLdKernelNameList}"
    # https://stackoverflow.com/questions/29269259/extract-value-of-column-from-a-line-variable
    if [ "${rpmOldKernelNumber}" -gt "0" ]; then
        yellow "========== 准备开始删除旧内核 ${removeKernelNameText} ${osKernelVersionBackup}, 当前要安装新内核版本为: ${grepExcludelinuxKernelVersion}"
        red " 当前系统的旧内核 ${removeKernelNameText} ${osKernelVersionBackup} 有 ${rpmOldKernelNumber} 个需要删除"
        echo
        # Remove the matched packages one by one, line N of the list per pass.
        for((integer = 1; integer <= ${rpmOldKernelNumber}; integer++)); do
            rpmOLdKernelName=$(awk "NR==${integer}" <<< "${rpmOLdKernelNameList}")
            green "+++++ 开始卸载第 ${integer} 个内核: ${rpmOLdKernelName}. 命令: rpm --nodeps -e ${rpmOLdKernelName}"
            rpm --nodeps -e ${rpmOLdKernelName}
            green "+++++ 已卸载第 ${integer} 个内核 ${rpmOLdKernelName} +++++"
            echo
        done
        yellow "========== 共 ${rpmOldKernelNumber} 个旧内核 ${removeKernelNameText} ${osKernelVersionBackup} 已经卸载完成"
        echo
    else
        red " 当前需要卸载的系统旧内核 ${removeKernelNameText} ${osKernelVersionBackup} 数量为0 !"
    fi
    echo
}
# Regenerate the bootloader configuration (grub.cfg) after a kernel install
# and make the first menu entry the default, then print the menu and the
# current default so the user can confirm the new kernel before rebooting.
updateGrubConfig(){
    if [[ "${osRelease}" == "centos" ]]; then
        # Legacy grub1 handling, kept for reference:
        # if [ ! -f "/boot/grub/grub.conf" ]; then
        #     red "File '/boot/grub/grub.conf' not found, 没找到该文件"
        # else
        #     sed -i 's/^default=.*/default=0/g' /boot/grub/grub.conf
        #     grub2-set-default 0
        #
        #     awk -F\' '$1=="menuentry " {print i++ " : " $2}' /boot/grub2/grub.cfg
        #     egrep ^menuentry /etc/grub2.cfg | cut -f 2 -d \'
        #     grub2-editenv list
        # fi
        # https://blog.51cto.com/foxhound/2551477
        # Check whether the newest kernel (e.g. 5.10.16) sits first, i.e. at
        # index 0; if so run "grub2-set-default 0" and verify with
        # "grub2-editenv list".
        green " =================================================="
        echo
        if [[ ${osReleaseVersionNoShort} = "6" ]]; then
            red " 不支持 Centos 6"
            exit 255
        else
            # Pick whichever grub.cfg exists: BIOS, CentOS EFI, or RedHat EFI.
            if [ -f "/boot/grub2/grub.cfg" ]; then
                grub2-mkconfig -o /boot/grub2/grub.cfg
                grub2-set-default 0
            elif [ -f "/boot/efi/EFI/centos/grub.cfg" ]; then
                grub2-mkconfig -o /boot/efi/EFI/centos/grub.cfg
                grub2-set-default 0
            elif [ -f "/boot/efi/EFI/redhat/grub.cfg" ]; then
                grub2-mkconfig -o /boot/efi/EFI/redhat/grub.cfg
                grub2-set-default 0
            else
                red " /boot/grub2/grub.cfg 没找到该文件,请检查."
                exit
            fi
            echo
            green " 查看当前 grub 菜单启动项列表, 确保新安装的内核 ${linuxKernelToInstallVersionFull} 是否在第一项 "
            # grubby --info=ALL|awk -F= '$1=="kernel" {print i++ " : " $2}'
            # NOTE(review): always reads /boot/grub2/grub.cfg here even when an
            # EFI grub.cfg was regenerated above — may print nothing on EFI systems.
            awk -F\' '$1=="menuentry " {print i++ " : " $2}' /boot/grub2/grub.cfg
            echo
            green " 查看当前 grub 启动顺序是否已设置为第一项 "
            echo "grub2-editenv list"
            grub2-editenv list
            green " =================================================="
            echo
        fi
    elif [[ "${osRelease}" == "debian" || "${osRelease}" == "ubuntu" ]]; then
        echo
        echo "/usr/sbin/update-grub"
        /usr/sbin/update-grub
    fi
}
# Discover the newest Ubuntu mainline (kernel-ppa) kernel matching the
# requested version and collect the amd64 .deb file names to download.
# Reads: linuxKernelToInstallVersion
# Sets:  ubuntuKernelLatestVersion, ubuntuKernelVersion, ubuntuDownloadUrl,
#        ubuntuKernelDownloadUrlArray
function getLatestUbuntuKernelVersion(){
    # All mainline release directories v4.x..v9.x, release builds only
    # ("grep -v -" drops -rc and similar tags), version-sorted ascending.
    ubuntuKernelLatestVersionArray=($(wget -qO- https://kernel.ubuntu.com/~kernel-ppa/mainline/ | awk -F'\"v' '/v[4-9]\./{print $2}' | cut -d/ -f1 | grep -v - | sort -V))
    # Last element of the sorted array is the newest version.
    ubuntuKernelLatestVersion=${ubuntuKernelLatestVersionArray[${#ubuntuKernelLatestVersionArray[@]} - 1]}
    echo
    green "Ubuntu mainline 最新的Linux 内核 kernel 版本号为 ${ubuntuKernelLatestVersion}"
    # Keep the last (newest) entry containing the requested version substring.
    for ver in ${ubuntuKernelLatestVersionArray[@]}; do
        if [[ ${ver} == *"${linuxKernelToInstallVersion}"* ]]; then
            # echo "Linux kernel version matching the selection: ${ver}"
            ubuntuKernelVersion=${ver}
        fi
    done
    green "即将安装的内核版本: ${ubuntuKernelVersion}"
    ubuntuDownloadUrl="https://kernel.ubuntu.com/~kernel-ppa/mainline/v${ubuntuKernelVersion}/amd64"
    echo
    echo "wget -qO- ${ubuntuDownloadUrl} | awk -F'>' '/-[4-9]\./{print \$7}' | cut -d'<' -f1 | grep -v lowlatency"
    # The .deb file names for this version (lowlatency builds excluded).
    ubuntuKernelDownloadUrlArray=($(wget -qO- ${ubuntuDownloadUrl} | awk -F'>' '/-[4-9]\./{print $7}' | cut -d'<' -f1 | grep -v lowlatency ))
    # echo "${ubuntuKernelDownloadUrlArray[*]}"
    echo
}
# Install a Linux kernel on Debian/Ubuntu. Two top-level modes:
#   isInstallFromRepo=yes -> install from a repository: the XanMod apt repo,
#       or Debian buster-backports (pinned 5.10.0).
#   otherwise             -> manual .deb download/install from Ubuntu
#       kernel-ppa mainline, cx9208 (bbrplus 4.14.129) or UJX6N (bbrplus).
# Ends with grub regeneration, old-kernel cleanup and a reboot prompt.
function installDebianUbuntuKernel(){
    ${sudoCmd} apt-get clean
    ${sudoCmd} apt-get update
    ${sudoCmd} apt-get install -y dpkg
    # https://kernel.ubuntu.com/~kernel-ppa/mainline/
    # https://unix.stackexchange.com/questions/545601/how-to-upgrade-the-debian-10-kernel-from-backports-without-recompiling-it-from-s
    # https://askubuntu.com/questions/119080/how-to-update-kernel-to-the-latest-mainline-version-without-any-distro-upgrade
    # https://sypalo.com/how-to-upgrade-ubuntu
    if [ "${isInstallFromRepo}" = "yes" ]; then
        if [ "${linuxKernelToBBRType}" = "xanmod" ]; then
            green " =================================================="
            green " 开始通过 XanMod 官方源安装 linux 内核 ${linuxKernelToInstallVersion}"
            green " =================================================="
            # https://xanmod.org/
            # Register the XanMod apt repository and its signing key.
            echo 'deb http://deb.xanmod.org releases main' > /etc/apt/sources.list.d/xanmod-kernel.list
            wget -qO - https://dl.xanmod.org/gpg.key | sudo apt-key --keyring /etc/apt/trusted.gpg.d/xanmod-kernel.gpg add -
            ${sudoCmd} apt update
            listAvailableLinuxKernel "xanmod"
            echo
            green " =================================================="
            green " 开始安装 linux 内核版本: ${linuxKernelToInstallVersionFull}"
            echo
            if [ "${linuxKernelToInstallVersion}" = "5.10" ]; then
                ${sudoCmd} apt install -y linux-xanmod-lts-5.10-generic
            else
                ${sudoCmd} apt install -y linux-xanmod
            fi
            rebootSystem
        else
            # Debian path: pinned 5.10.0 kernel from buster-backports.
            debianKernelVersion="5.10.0"
            green " =================================================="
            green " 开始通过 Debian 官方源安装 linux 内核 ${debianKernelVersion}"
            green " =================================================="
            if [ "${osKernelVersionBackup}" = "${debianKernelVersion}" ]; then
                red "当前系统内核版本已经是 ${osKernelVersionBackup} 无需安装! "
                promptContinueOpeartion
            fi
            linuxKernelToInstallVersionFull=${debianKernelVersion}
            echo "deb http://deb.debian.org/debian buster-backports main contrib non-free" > /etc/apt/sources.list.d/buster-backports.list
            echo "deb-src http://deb.debian.org/debian buster-backports main contrib non-free" >> /etc/apt/sources.list.d/buster-backports.list
            ${sudoCmd} apt update
            listAvailableLinuxKernel
            ${sudoCmd} apt install -y -t buster-backports linux-image-amd64
            ${sudoCmd} apt install -y -t buster-backports firmware-linux firmware-linux-nonfree
            echo
            echo "dpkg --get-selections | grep linux-image-${debianKernelVersion} | awk '/linux-image-[4-9]./{print \$1}' | awk -F'linux-image-' '{print \$2}' "
            # Resolve the exact installed package suffix (e.g. 5.10.0-9-amd64)
            # so the matching linux-headers package can be installed.
            debianKernelVersionPackageName=$(dpkg --get-selections | grep "${debianKernelVersion}" | awk '/linux-image-[4-9]./{print $1}' | awk -F'linux-image-' '{print $2}')
            echo
            green " Debian 官方源安装 linux 内核版本: ${debianKernelVersionPackageName}"
            green " 开始安装 linux-headers 命令为: apt install -y linux-headers-${debianKernelVersionPackageName}"
            echo
            ${sudoCmd} apt install -y linux-headers-${debianKernelVersionPackageName}
            # ${sudoCmd} apt-get -y dist-upgrade
        fi
    else
        green " =================================================="
        green " 开始手动安装 linux 内核 "
        green " =================================================="
        echo
        mkdir -p ${userHomePath}
        cd ${userHomePath}
        # Empty means "plain mainline from Ubuntu kernel-ppa".
        linuxKernelByUser=""
        if [ "${linuxKernelToBBRType}" = "bbrplus" ]; then
            linuxKernelByUser="UJX6N"
            # The legacy 4.14.129 bbrplus build comes from cx9208, not UJX6N.
            if [ "${linuxKernelToInstallVersion}" = "4.14.129" ]; then
                linuxKernelByUser="cx9208"
            fi
            green " 准备从 ${linuxKernelByUser} github 网站下载 bbr plus 的linux内核并安装 "
        else
            green " 准备从 Ubuntu kernel-ppa mainline 网站下载linux内核并安装 "
        fi
        echo
        # Ubuntu 16.04 needs a newer libssl1.1 before installing mainline kernels.
        if [[ "${osRelease}" == "ubuntu" && ${osReleaseVersionNo} == "16.04" ]]; then
            if [ -f "${userHomePath}/libssl1.1_1.1.0g-2ubuntu4_amd64.deb" ]; then
                green "文件已存在, 不需要下载, 文件原下载地址: http://archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.0g-2ubuntu4_amd64.deb "
            else
                green "文件下载中... 下载地址: http://archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.0g-2ubuntu4_amd64.deb "
                wget -P ${userHomePath} http://archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.0g-2ubuntu4_amd64.deb
            fi
            ${sudoCmd} dpkg -i libssl1.1_1.1.0g-2ubuntu4_amd64.deb
        fi
        if [ "${linuxKernelByUser}" = "" ]; then
            # Ubuntu kernel-ppa mainline: download every .deb discovered by
            # getLatestUbuntuKernelVersion.
            # https://kernel.ubuntu.com/~kernel-ppa/mainline/v5.11.12/amd64/
            # https://kernel.ubuntu.com/~kernel-ppa/mainline/v5.11.12/amd64/linux-image-unsigned-5.11.12-051112-generic_5.11.12-051112.202104071432_amd64.deb
            # https://kernel.ubuntu.com/~kernel-ppa/mainline/v5.11.12/amd64/linux-modules-5.11.12-051112-generic_5.11.12-051112.202104071432_amd64.deb
            getLatestUbuntuKernelVersion
            linuxKernelToInstallVersionFull=${ubuntuKernelVersion}
            mkdir -p ${userHomePath}/${linuxKernelToInstallVersionFull}
            cd ${userHomePath}/${linuxKernelToInstallVersionFull}
            for file in ${ubuntuKernelDownloadUrlArray[@]}; do
                downloadFile ${ubuntuDownloadUrl}/${file}
            done
        elif [ "${linuxKernelByUser}" = "cx9208" ]; then
            linuxKernelToInstallVersionFull="4.14.129-bbrplus"
            mkdir -p ${userHomePath}/${linuxKernelToInstallVersionFull}
            cd ${userHomePath}/${linuxKernelToInstallVersionFull}
            # https://raw.githubusercontent.com/chiakge/Linux-NetSpeed/master/bbrplus/debian-ubuntu/x64/linux-headers-4.14.129-bbrplus.deb
            # https://raw.githubusercontent.com/chiakge/Linux-NetSpeed/master/bbrplus/debian-ubuntu/x64/linux-image-4.14.129-bbrplus.deb
            # https://github.com/cx9208/Linux-NetSpeed/raw/master/bbrplus/debian-ubuntu/x64/linux-headers-4.14.129-bbrplus.deb
            # https://github.com/cx9208/Linux-NetSpeed/raw/master/bbrplus/debian-ubuntu/x64/linux-image-4.14.129-bbrplus.deb
            # https://raw.githubusercontent.com/cx9208/Linux-NetSpeed/master/bbrplus/debian-ubuntu/x64/linux-headers-4.14.129-bbrplus.deb
            # https://raw.githubusercontent.com/cx9208/Linux-NetSpeed/master/bbrplus/debian-ubuntu/x64/linux-image-4.14.129-bbrplus.deb
            bbrplusDownloadUrl="https://raw.githubusercontent.com/cx9208/Linux-NetSpeed/master/bbrplus/debian-ubuntu/x64"
            downloadFile ${bbrplusDownloadUrl}/linux-image-${linuxKernelToInstallVersionFull}.deb
            downloadFile ${bbrplusDownloadUrl}/linux-headers-${linuxKernelToInstallVersionFull}.deb
        elif [ "${linuxKernelByUser}" = "UJX6N" ]; then
            # UJX6N bbrplus: the 4.14 series lives in the repo named just "bbrplus".
            linuxKernelToInstallVersionFull="${bbrplusKernelVersion}-bbrplus"
            mkdir -p ${userHomePath}/${linuxKernelToInstallVersionFull}
            cd ${userHomePath}/${linuxKernelToInstallVersionFull}
            if [ "${linuxKernelToInstallVersion}" = "4.14" ]; then
                bbrplusDownloadUrl="https://github.com/UJX6N/bbrplus/releases/download/${linuxKernelToInstallVersionFull}"
                downloadFile ${bbrplusDownloadUrl}/Debian-Ubuntu_Required_linux-image-${bbrplusKernelVersion}-bbrplus_${bbrplusKernelVersion}-bbrplus-1_amd64.deb
                downloadFile ${bbrplusDownloadUrl}/Debian-Ubuntu_Required_linux-headers-${bbrplusKernelVersion}-bbrplus_${bbrplusKernelVersion}-bbrplus-1_amd64.deb
            else
                bbrplusDownloadUrl="https://github.com/UJX6N/bbrplus-${linuxKernelToInstallVersion}/releases/download/${linuxKernelToInstallVersionFull}"
                downloadFile ${bbrplusDownloadUrl}/Debian-Ubuntu_Required_linux-image-${bbrplusKernelVersion}-bbrplus_${bbrplusKernelVersion}-bbrplus-1_amd64.deb
                downloadFile ${bbrplusDownloadUrl}/Debian-Ubuntu_Required_linux-headers-${bbrplusKernelVersion}-bbrplus_${bbrplusKernelVersion}-bbrplus-1_amd64.deb
            fi
            # https://github.com/UJX6N/bbrplus-5.10/releases/download/5.10.76-bbrplus/Debian-Ubuntu_Required_linux-image-5.10.76-bbrplus_5.10.76-bbrplus-1_amd64.deb
            # https://github.com/UJX6N/bbrplus-5.10/releases/download/5.10.27-bbrplus/Debian-Ubuntu_Required_linux-headers-5.10.27-bbrplus_5.10.27-bbrplus-1_amd64.deb
            # https://github.com/UJX6N/bbrplus-5.9/releases/download/5.9.16-bbrplus/Debian-Ubuntu_Required_linux-image-5.9.16-bbrplus_5.9.16-bbrplus-1_amd64.deb
            # https://github.com/UJX6N/bbrplus-5.4/releases/download/5.4.109-bbrplus/Debian-Ubuntu_Required_linux-image-5.4.109-bbrplus_5.4.109-bbrplus-1_amd64.deb
            # https://github.com/UJX6N/bbrplus-4.19/releases/download/4.19.184-bbrplus/Debian-Ubuntu_Required_linux-image-4.19.184-bbrplus_4.19.184-bbrplus-1_amd64.deb
        fi
        removeDebianKernelMulti
        echo
        green " =================================================="
        green " 开始安装 linux 内核版本: ${linuxKernelToInstallVersionFull}"
        echo
        # Install every .deb downloaded into the version directory above.
        ${sudoCmd} dpkg -i *.deb
        updateGrubConfig
    fi
    echo
    green " =================================================="
    green " 安装 linux 内核 ${linuxKernelToInstallVersionFull} 成功! "
    red " 请根据以下信息 检查新内核是否安装成功,无新内核不要重启! "
    green " =================================================="
    echo
    showLinuxKernelInfo
    removeDebianKernelMulti "linux-image"
    listInstalledLinuxKernel
    rebootSystem
}
# Interactively remove old Debian/Ubuntu kernel packages.
# $1 (optional): when set (e.g. "linux-image"), removal starts with the
#                linux-image packages; when empty, modules/headers first.
# Asks for confirmation (default Yes) before deleting anything; the actual
# per-package removal is delegated to removeDebianKernel.
function removeDebianKernelMulti(){
    listInstalledLinuxKernel

    echo
    # Fix: quote "$1" so the test is well-formed even when no argument is given.
    if [ -z "$1" ]; then
        red "===== 开始准备删除 linux-headers linux-modules 内核, 建议删除 "
    else
        red "===== 开始准备删除 linux-image 内核, 建议删除 "
    fi
    red " 注意: 删除内核有风险, 可能会导致VPS无法启动, 请先做好备份! "
    read -p "是否删除内核? 直接回车默认删除内核, 请输入[Y/n]:" isContinueDelKernelInput
    isContinueDelKernelInput=${isContinueDelKernelInput:-Y}
    echo

    if [[ $isContinueDelKernelInput == [Yy] ]]; then
        if [ -z "$1" ]; then
            removeDebianKernel "linux-modules-extra"
            removeDebianKernel "linux-modules"
            removeDebianKernel "linux-headers"
            removeDebianKernel "linux-image"
        else
            # Remove the image packages first when explicitly requested.
            removeDebianKernel "linux-image"
            removeDebianKernel "linux-modules-extra"
            removeDebianKernel "linux-modules"
            removeDebianKernel "linux-headers"
            # ${sudoCmd} apt -y --purge autoremove
        fi
    fi
    echo
}
# Uninstall every installed Debian/Ubuntu package whose name matches $1,
# excluding the kernel version currently being installed and the "-amd64"
# meta package.
# $1: package name prefix to remove, e.g. "linux-image" or "linux-headers".
function removeDebianKernel(){
removeKernelNameText="linux-image"
removeKernelNameText=$1
# Base version (e.g. "5.10.76") of the kernel being installed; packages
# containing this version string are excluded from removal.
grepExcludelinuxKernelVersion=$(echo ${linuxKernelToInstallVersionFull} | cut -d- -f1)
# echo "dpkg --get-selections | grep ${removeKernelNameText} | grep -Ev '${grepExcludelinuxKernelVersion}|${removeKernelNameText}-amd64' | awk '{print \$1}' "
# Count and list the matching installed packages (same pipeline twice).
rpmOldKernelNumber=$(dpkg --get-selections | grep "${removeKernelNameText}" | grep -Ev "${grepExcludelinuxKernelVersion}|${removeKernelNameText}-amd64" | wc -l)
rpmOLdKernelNameList=$(dpkg --get-selections | grep "${removeKernelNameText}" | grep -Ev "${grepExcludelinuxKernelVersion}|${removeKernelNameText}-amd64" | awk '{print $1}' )
# echo "$rpmOLdKernelNameList"
# https://stackoverflow.com/questions/16212656/grep-exclude-multiple-strings
# https://stackoverflow.com/questions/29269259/extract-value-of-column-from-a-line-variable
# https://askubuntu.com/questions/187888/what-is-the-correct-way-to-completely-remove-an-application
if [ "${rpmOldKernelNumber}" -gt "0" ]; then
yellow "========== 准备开始删除旧内核 ${removeKernelNameText} ${osKernelVersionBackup}, 当前要安装新内核版本为: ${grepExcludelinuxKernelVersion}"
red " 当前系统的旧内核 ${removeKernelNameText} ${osKernelVersionBackup} 有 ${rpmOldKernelNumber} 个需要删除"
echo
# Remove the old packages one at a time; awk "NR==i" picks the i-th name
# out of the newline-separated list.
for((integer = 1; integer <= ${rpmOldKernelNumber}; integer++)); do
rpmOLdKernelName=$(awk "NR==${integer}" <<< "${rpmOLdKernelNameList}")
green "+++++ 开始卸载第 ${integer} 个内核: ${rpmOLdKernelName}. 命令: apt remove --purge ${rpmOLdKernelName}"
${sudoCmd} apt remove -y --purge ${rpmOLdKernelName}
${sudoCmd} apt autoremove -y ${rpmOLdKernelName}
green "+++++ 已卸载第 ${integer} 个内核 ${rpmOLdKernelName} +++++"
echo
done
yellow "========== 共 ${rpmOldKernelNumber} 个旧内核 ${removeKernelNameText} ${osKernelVersionBackup} 已经卸载完成"
echo
else
red " 当前需要卸载的系统旧内核 ${removeKernelNameText} ${osKernelVersionBackup} 数量为0 !"
fi
echo
}
# Download the Netflix unlock check script into the current directory,
# make it executable and run it immediately.
function vps_netflix_jin(){
    # wget -qN --no-check-certificate -O ./nf.sh https://raw.githubusercontent.com/jinwyp/SimpleNetflix/dev/nf.sh && chmod +x ./nf.sh
    local netflixCheckScriptUrl="https://raw.githubusercontent.com/jinwyp/one_click_script/master/netflix_check.sh"
    wget -qN --no-check-certificate -O ./nf.sh "${netflixCheckScriptUrl}" \
        && chmod +x ./nf.sh \
        && ./nf.sh
}
# Install a daily cron job that runs the Netflix unlock check script and
# refreshes the WARP IP when Netflix is not unlocked, then run it once now.
function vps_netflix_jin_auto(){
# wget -qN --no-check-certificate -O ./nf.sh https://raw.githubusercontent.com/jinwyp/SimpleNetflix/dev/nf.sh && chmod +x ./nf.sh
cd ${HOME}
wget -qN --no-check-certificate -O ./nf.sh https://raw.githubusercontent.com/jinwyp/one_click_script/master/netflix_check.sh && chmod +x ./nf.sh
echo
green " =================================================="
green " 通过Cron定时任务 每天自动检测Netflix是否解锁非自制剧"
green " 如果检测到Netflix没有解锁 会自动刷新 WARP IP, 默认尝试刷新20次"
green " 刷新日志 log 在 /root/warp_refresh.log"
green " Auto refresh Cloudflare WARP IP to unlock Netflix non-self produced drama"
green " =================================================="
echo
# Append the 05:10 daily entry; sort|uniq keeps the crontab free of
# duplicates when this function is run more than once.
(crontab -l ; echo "10 5 * * 0,1,2,3,4,5,6 /root/nf.sh auto >> /root/warp_refresh.log ") | sort - | uniq - | crontab -
echo
./nf.sh auto
}
# Print the most recent tag of GitHub repository $1 (e.g. "ViRb3/wgcf")
# with its first character (the leading "v") stripped, e.g. "2.2.11".
function getGithubLatestReleaseVersion(){
    # https://github.com/p4gefau1t/trojan-go/issues/63
    # Query the tags API, keep the first "name" line, pull out the quoted
    # value, then drop the leading "v".
    wget --no-check-certificate -qO- "https://api.github.com/repos/$1/tags" | grep 'name' | head -1 | cut -d\" -f4 | cut -b 2-
}
# https://unix.stackexchange.com/questions/8656/usr-bin-vs-usr-local-bin-on-linux
# Fallback wgcf version; overwritten at install time with the latest GitHub release.
versionWgcf="2.2.11"
downloadFilenameWgcf="wgcf_${versionWgcf}_linux_amd64"
# Install location for the wgcf binary.
configWgcfBinPath="/usr/local/bin"
# Working folder holding the wgcf account file and generated profile.
configWgcfConfigFolderPath="${HOME}/wireguard"
configWgcfAccountFilePath="${configWgcfConfigFolderPath}/wgcf-account.toml"
configWgcfProfileFilePath="${configWgcfConfigFolderPath}/wgcf-profile.conf"
# File persisting the SOCKS5 port chosen for the official WARP client.
configWARPPortFilePath="${configWgcfConfigFolderPath}/warp-port"
# Where the active wg-quick config for the wgcf interface lives.
configWireGuardConfigFileFolder="/etc/wireguard"
configWireGuardConfigFilePath="/etc/wireguard/wgcf.conf"
# Backup of /etc/resolv.conf taken before this script rewrites DNS servers.
configWireGuardDNSBackupFilePath="/etc/resolv_warp_bak.conf"
# Default WARP client SOCKS5 proxy port.
configWarpPort="40000"
# Install the official Cloudflare WARP client and run it as a local SOCKS5
# proxy: adds the vendor repo for Debian/Ubuntu or CentOS, registers the
# client, sets proxy mode and port (default 40000, optionally random),
# connects with Always-On, and installs a daily warp-svc restart cron job.
function installWARPClient(){
# https://developers.cloudflare.com/warp-client/setting-up/linux
echo
green " =================================================="
green " Prepare to install Cloudflare WARP Official client "
green " Cloudflare WARP Official client only support Debian 10/11、Ubuntu 20.04/16.04、CentOS 8"
green " =================================================="
echo
if [[ "${osRelease}" == "debian" || "${osRelease}" == "ubuntu" ]]; then
# Remove stale Cloudflare signing keys before adding the current one.
${sudoCmd} apt-key del 835b8acb
${sudoCmd} apt-key del 8e5f9a5d
${sudoCmd} apt install -y gnupg
${sudoCmd} apt install -y apt-transport-https
curl https://pkg.cloudflareclient.com/pubkey.gpg | ${sudoCmd} apt-key add -
echo "deb http://pkg.cloudflareclient.com/ $osReleaseVersionCodeName main" | sudo tee /etc/apt/sources.list.d/cloudflare-client.list
${sudoCmd} apt-get update
${sudoCmd} apt install -y cloudflare-warp
elif [[ "${osRelease}" == "centos" ]]; then
${sudoCmd} rpm -e gpg-pubkey-835b8acb-*
${sudoCmd} rpm -e gpg-pubkey-8e5f9a5d-*
if [ "${osReleaseVersionNoShort}" -eq 7 ]; then
${sudoCmd} rpm -ivh http://pkg.cloudflareclient.com/cloudflare-release-el7.rpm
${sudoCmd} rpm -ivh http://pkg.cloudflare.com/cloudflare-release-latest.el7.rpm
# red "Cloudflare WARP Official client is not supported on Centos 7"
else
${sudoCmd} rpm -ivh --replacepkgs --replacefiles https://pkg.cloudflareclient.com/cloudflare-release-el8.rpm
# ${sudoCmd} rpm -ivh http://pkg.cloudflareclient.com/cloudflare-release-el8.rpm
fi
${sudoCmd} yum install -y cloudflare-warp
fi
# NOTE(review): this checks /bin/warp-cli while removeWARP checks
# /usr/bin/warp-cli — equivalent only on usr-merged systems; confirm.
if [[ ! -f "/bin/warp-cli" ]]; then
green " =================================================="
red " ${osInfo}${osReleaseVersionNoShort} ${osReleaseVersionCodeName} is not supported ! "
green " =================================================="
exit
fi
echo
echo
read -p "是否生成随机的WARP SOCKS5 端口号? 直接回车默认 40000 不生成随机端口号, 请输入[y/N]:" isWarpPortInput
isWarpPortInput=${isWarpPortInput:-n}
if [[ $isWarpPortInput == [Nn] ]]; then
echo
else
# Random port in 10000-42767 ($RANDOM yields 0-32767).
configWarpPort="$(($RANDOM + 10000))"
fi
# Persist the chosen port so checkWarpClientStatus can read it later.
mkdir -p ${configWgcfConfigFolderPath}
echo "${configWarpPort}" > "${configWARPPortFilePath}"
${sudoCmd} systemctl enable warp-svc
# Accept the registration prompt non-interactively.
yes | warp-cli register
echo
echo "warp-cli set-mode proxy"
warp-cli set-mode proxy
echo
echo "warp-cli --accept-tos set-proxy-port ${configWarpPort}"
warp-cli --accept-tos set-proxy-port ${configWarpPort}
echo
echo "warp-cli --accept-tos connect"
warp-cli --accept-tos connect
echo
echo "warp-cli --accept-tos enable-always-on"
warp-cli --accept-tos enable-always-on
echo
checkWarpClientStatus
# (crontab -l ; echo "10 6 * * 0,1,2,3,4,5,6 warp-cli disable-always-on ") | sort - | uniq - | crontab -
# (crontab -l ; echo "11 6 * * 0,1,2,3,4,5,6 warp-cli disconnect ") | sort - | uniq - | crontab -
# Restart the WARP daemon daily at 06:12; sort|uniq avoids duplicate entries.
(crontab -l ; echo "12 6 * * 0,1,2,3,4,5,6 systemctl restart warp-svc ") | sort - | uniq - | crontab -
# (crontab -l ; echo "16 6 * * 0,1,2,3,4,5,6 warp-cli connect ") | sort - | uniq - | crontab -
# (crontab -l ; echo "17 6 * * 0,1,2,3,4,5,6 warp-cli enable-always-on ") | sort - | uniq - | crontab -
echo
green " ================================================== "
green " Cloudflare 官方 WARP Client 安装成功 !"
green " WARP SOCKS5 端口号 ${configWarpPort} "
echo
green " WARP 停止命令: warp-cli disconnect , 停止Always-On命令: warp-cli disable-always-on "
green " WARP 启动命令: warp-cli connect , 开启Always-On命令(保持一直连接WARP): warp-cli enable-always-on "
green " WARP 查看日志: journalctl -n 100 -u warp-svc"
green " WARP 查看运行状态: warp-cli status"
green " WARP 查看连接信息: warp-cli warp-stats"
green " WARP 查看设置信息: warp-cli settings"
green " WARP 查看账户信息: warp-cli account"
echo
green " 用本脚本安装v2ray或xray 可以选择是否 解锁 Netflix 限制 和 避免弹出 Google reCAPTCHA 人机验证 !"
echo
green " 其他脚本安装的v2ray或xray 请自行替换 v2ray或xray 配置文件!"
green " ================================================== "
}
# Install WireGuard for Debian/Ubuntu or CentOS, then chain into installWGCF.
# Kernel 5.6+ ships WireGuard in-tree, so only wireguard-tools is needed;
# older kernels get the dkms/kmod module packages instead.
# Aborts early if a wgcf WireGuard config already exists.
function installWireguard(){
if [[ -f "${configWireGuardConfigFilePath}" ]]; then
green " =================================================="
green " 已安装过 Wireguard, 如需重装 可以选择卸载 Wireguard 后重新安装! "
green " =================================================="
exit
fi
green " =================================================="
green " 准备安装 WireGuard "
echo
red " 安装前建议用本脚本升级linux内核到5.6以上 例如5.10 LTS内核. 也可以不升级内核, 具体请看下面说明"
red " 如果是新的干净的没有换过内核的系统(例如没有安装过BBR Plus内核), 可以不用退出安装其他内核, 直接继续安装 WireGuard"
red " 如果安装过其他内核(例如安装过BBR Plus内核), 建议先安装高于5.6以上的内核, 低于5.6的内核也可以继续安装, 但有几率无法启动 WireGuard"
red " 如遇到 WireGuard 启动失败, 建议重做新系统后, 升级系统到5.10内核, 然后安装WireGuard. 或者重做新系统后不要更换其他内核, 直接安装WireGuard"
green " =================================================="
echo
# Decide whether the running kernel already contains the WireGuard module.
isKernelSupportWireGuardVersion="5.6"
isKernelBuildInWireGuardModule="no"
if versionCompareWithOp "${isKernelSupportWireGuardVersion}" "${osKernelVersionShort}" ">"; then
red " 当前系统内核为 ${osKernelVersionShort}, 低于5.6的系统内核没有内置 WireGuard Module !"
isKernelBuildInWireGuardModule="no"
else
green " 当前系统内核为 ${osKernelVersionShort}, 系统内核已内置 WireGuard Module"
isKernelBuildInWireGuardModule="yes"
fi
read -p "是否继续操作? 请确认linux内核已正确安装 直接回车默认继续操作, 请输入[Y/n]:" isContinueInput
isContinueInput=${isContinueInput:-Y}
if [[ ${isContinueInput} == [Yy] ]]; then
echo
green " =================================================="
green " 开始安装 WireGuard "
green " =================================================="
else
green " 建议请先用本脚本安装 linux kernel 5.6 以上的内核 !"
exit
fi
echo
echo
if [[ "${osRelease}" == "debian" || "${osRelease}" == "ubuntu" ]]; then
${sudoCmd} apt --fix-broken install -y
${sudoCmd} apt-get update
${sudoCmd} apt install -y openresolv
# ${sudoCmd} apt install -y resolvconf
${sudoCmd} apt install -y net-tools iproute2 dnsutils
echo
if [[ ${isKernelBuildInWireGuardModule} == "yes" ]]; then
green " 当前系统内核版本高于5.6, 直接安装 wireguard-tools "
echo
${sudoCmd} apt install -y wireguard-tools
else
# Installing wireguard-dkms on Ubuntu 20 also pulls in a 5.4.0-71 kernel.
green " 当前系统内核版本低于5.6, 直接安装 wireguard wireguard"
echo
${sudoCmd} apt install -y wireguard
# ${sudoCmd} apt install -y wireguard-tools
fi
# if [[ ! -L "/usr/local/bin/resolvconf" ]]; then
# ln -s /usr/bin/resolvectl /usr/local/bin/resolvconf
# fi
${sudoCmd} systemctl enable systemd-resolved.service
${sudoCmd} systemctl start systemd-resolved.service
elif [[ "${osRelease}" == "centos" ]]; then
${sudoCmd} yum install -y epel-release elrepo-release
${sudoCmd} yum install -y net-tools
${sudoCmd} yum install -y iproute
echo
if [[ ${isKernelBuildInWireGuardModule} == "yes" ]]; then
green " 当前系统内核版本高于5.6, 直接安装 wireguard-tools "
echo
if [ "${osReleaseVersionNoShort}" -eq 7 ]; then
${sudoCmd} yum install -y yum-plugin-elrepo
fi
${sudoCmd} yum install -y wireguard-tools
else
# Pre-5.6 kernels: pick kmod-wireguard for stock distro kernels,
# wireguard-dkms for anything else.
if [ "${osReleaseVersionNoShort}" -eq 7 ]; then
if [[ ${osKernelVersionBackup} == *"3.10."* ]]; then
green " 当前系统内核版本为原版Centos 7 ${osKernelVersionBackup} , 直接安装 kmod-wireguard "
${sudoCmd} yum install -y yum-plugin-elrepo
${sudoCmd} yum install -y kmod-wireguard wireguard-tools
else
green " 当前系统内核版本低于5.6, 安装 wireguard-dkms "
${sudoCmd} yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
${sudoCmd} curl -o /etc/yum.repos.d/jdoss-wireguard-epel-7.repo https://copr.fedorainfracloud.org/coprs/jdoss/wireguard/repo/epel-7/jdoss-wireguard-epel-7.repo
${sudoCmd} yum install -y wireguard-dkms wireguard-tools
fi
else
if [[ ${osKernelVersionBackup} == *"4.18."* ]]; then
green " 当前系统内核版本为原版Centos 8 ${osKernelVersionBackup} , 直接安装 kmod-wireguard "
${sudoCmd} yum install -y kmod-wireguard wireguard-tools
else
green " 当前系统内核版本低于5.6, 安装 wireguard-dkms "
${sudoCmd} yum config-manager --set-enabled PowerTools
${sudoCmd} yum copr enable jdoss/wireguard
${sudoCmd} yum install -y wireguard-dkms wireguard-tools
fi
fi
fi
fi
green " ================================================== "
green " Wireguard 安装成功 !"
green " ================================================== "
installWGCF
}
# Download and install the wgcf command line tool, register a Cloudflare
# WARP account, optionally apply a WARP+ license key, generate a WireGuard
# profile, test-start the tunnel once over IPv6, then enable wg-quick@wgcf
# at boot.
function installWGCF(){
versionWgcf=$(getGithubLatestReleaseVersion "ViRb3/wgcf")
downloadFilenameWgcf="wgcf_${versionWgcf}_linux_amd64"
echo
green " =================================================="
green " 开始安装 Cloudflare WARP 命令行工具 Wgcf ${versionWgcf}"
green " =================================================="
echo
mkdir -p ${configWgcfConfigFolderPath}
mkdir -p ${configWgcfBinPath}
mkdir -p ${configWireGuardConfigFileFolder}
cd ${configWgcfConfigFolderPath}
# https://github.com/ViRb3/wgcf/releases/download/v2.2.10/wgcf_2.2.10_linux_amd64
wget -O ${configWgcfConfigFolderPath}/wgcf --no-check-certificate "https://github.com/ViRb3/wgcf/releases/download/v${versionWgcf}/${downloadFilenameWgcf}"
if [[ -f ${configWgcfConfigFolderPath}/wgcf ]]; then
green " Cloudflare WARP 命令行工具 Wgcf ${versionWgcf} 下载成功!"
echo
else
red " Wgcf ${versionWgcf} 下载失败!"
exit 255
fi
${sudoCmd} chmod +x ${configWgcfConfigFolderPath}/wgcf
cp ${configWgcfConfigFolderPath}/wgcf ${configWgcfBinPath}
# ${configWgcfConfigFolderPath}/wgcf register --config "${configWgcfAccountFilePath}"
# Register a new WARP account only when no account file exists yet.
if [[ -f ${configWgcfAccountFilePath} ]]; then
echo
else
yes | ${configWgcfConfigFolderPath}/wgcf register
fi
echo
echo
green " =================================================="
yellow " 没有购买过WARP+ 订阅请直接按回车跳过此步, Press enter to continue without WARP+"
echo
yellow " 如已购买过 WARP+ subscription 订阅, 可以填入 license key 启用WARP+"
green " 查看方法: 手机打开 open Cloudflare 1.1.1.1 app, 点击右上菜单 click hamburger menu button on the top-right corner "
green " Navigate to: Account > Key, 选择 Account 菜单里的key 就是 license key"
echo
read -p "请填写 license key? 直接回车默认跳过此步, 请输入:" isWARPLicenseKeyInput
isWARPLicenseKeyInput=${isWARPLicenseKeyInput:-n}
# "n" (the default) means the user skipped the WARP+ license key prompt.
if [[ ${isWARPLicenseKeyInput} == [Nn] ]]; then
echo
else
sed -i "s/license_key =.*/license_key = \"${isWARPLicenseKeyInput}\"/g" ${configWgcfAccountFilePath}
WGCF_LICENSE_KEY="${isWARPLicenseKeyInput}" wgcf update
fi
# Generate the WireGuard profile only if it does not exist yet.
if [[ -f ${configWgcfProfileFilePath} ]]; then
echo
else
yes | ${configWgcfConfigFolderPath}/wgcf generate
fi
cp ${configWgcfProfileFilePath} ${configWireGuardConfigFilePath}
enableWireguardIPV6OrIPV4
echo
green " 开始临时启动 Wireguard, 用于测试是否启动正常, 运行命令: wg-quick up wgcf"
${sudoCmd} wg-quick up wgcf
echo
green " 开始验证 Wireguard 是否启动正常, 检测是否使用 Cloudflare 的 ipv6 访问 !"
echo
echo "curl -6 ip.p3terx.com"
curl -6 ip.p3terx.com
echo
# Non-empty when the IPv6 egress is attributed to CLOUDFLARENET (WARP works).
isWireguardIpv6Working=$(curl -6 ip.p3terx.com | grep CLOUDFLARENET )
echo
if [[ -n "$isWireguardIpv6Working" ]]; then
green " Wireguard 启动正常, 已成功通过 Cloudflare WARP 提供的 IPv6 访问网络! "
else
green " ================================================== "
red " Wireguard 通过 curl -6 ip.p3terx.com, 检测使用CLOUDFLARENET的IPV6 访问失败"
red " 请检查linux 内核安装是否正确"
red " 安装会继续运行, 也有可能安装成功, 只是IPV6 没有使用"
red " 检查 WireGuard 是否启动成功, 可运行查看运行状态命令: systemctl status wg-quick@wgcf"
red " 如果 WireGuard 启动失败, 可运行查看日志命令 寻找原因: journalctl -n 50 -u wg-quick@wgcf"
red " 如遇到 WireGuard 启动失败, 建议重做新系统后, 不要更换其他内核, 直接安装WireGuard"
green " ================================================== "
fi
echo
green " 关闭临时启动用于测试的 Wireguard, 运行命令: wg-quick down wgcf "
${sudoCmd} wg-quick down wgcf
echo
${sudoCmd} systemctl daemon-reload
# Enable the tunnel at boot.
${sudoCmd} systemctl enable wg-quick@wgcf
# Start the tunnel now.
${sudoCmd} systemctl start wg-quick@wgcf
checkWireguardBootStatus
echo
green " ================================================== "
green " Wireguard 和 Cloudflare WARP 命令行工具 Wgcf ${versionWgcf} 安装成功 !"
green " Cloudflare WARP 申请的账户配置文件路径: ${configWgcfAccountFilePath} "
green " Cloudflare WARP 生成的 Wireguard 配置文件路径: ${configWireGuardConfigFilePath} "
echo
green " Wireguard 停止命令: systemctl stop wg-quick@wgcf 启动命令: systemctl start wg-quick@wgcf 重启命令: systemctl restart wg-quick@wgcf"
green " Wireguard 查看日志: journalctl -n 50 -u wg-quick@wgcf"
green " Wireguard 查看运行状态: systemctl status wg-quick@wgcf"
echo
green " 用本脚本安装v2ray或xray 可以选择是否 解锁 Netflix 限制 和 避免弹出 Google reCAPTCHA 人机验证 !"
echo
green " 其他脚本安装的v2ray或xray 请自行替换 v2ray或xray 配置文件!"
green " 可参考 如何使用 IPv6 访问 Netflix 的教程 https://ybfl.xyz/111.html 或 https://toutyrater.github.io/app/netflix.html"
green " ================================================== "
}
# Rewrite the wgcf WireGuard config so WARP provides either IPv6 (default)
# or IPv4 connectivity, adjusting AllowedIPs, the endpoint address, the DNS
# servers in the config and /etc/resolv.conf.
# $1: when non-empty, restart wg-quick@wgcf afterwards; when empty, fall
#     through to the IPv4/IPv6 preference setup (preferIPV4).
function enableWireguardIPV6OrIPV4(){
# https://p3terx.com/archives/use-cloudflare-warp-to-add-extra-ipv4-or-ipv6-network-support-to-vps-servers-for-free.html
${sudoCmd} systemctl stop wg-quick@wgcf
# Back up resolv.conf, then remove nameservers this script may have added
# in a previous run so they are not duplicated below.
cp /etc/resolv.conf ${configWireGuardDNSBackupFilePath}
sed -i '/nameserver 2a00\:1098\:2b\:\:1/d' /etc/resolv.conf
sed -i '/nameserver 8\.8/d' /etc/resolv.conf
sed -i '/nameserver 9\.9/d' /etc/resolv.conf
sed -i '/nameserver 1\.1\.1\.1/d' /etc/resolv.conf
echo
green " ================================================== "
yellow " 请选择为服务器添加 IPv6 网络 还是 IPv4 网络支持: "
echo
green " 1 添加 IPv6 网络 (用于解锁 Netflix 限制 和避免弹出 Google reCAPTCHA 人机验证)"
green " 2 添加 IPv4 网络 (用于给只有 IPv6 的 VPS主机添加 IPv4 网络支持)"
echo
read -p "请选择添加 IPv6 还是 IPv4 网络支持? 直接回车默认选1 , 请输入[1/2]:" isAddNetworkIPv6Input
isAddNetworkIPv6Input=${isAddNetworkIPv6Input:-1}
if [[ ${isAddNetworkIPv6Input} == [2] ]]; then
# Add IPv4 network support for an IPv6-only server: route 0.0.0.0/0
# through WARP, switch the endpoint to its IPv6 address, use IPv6 DNS.
sed -i 's/^AllowedIPs = \:\:\/0/# AllowedIPs = \:\:\/0/g' ${configWireGuardConfigFilePath}
sed -i 's/# AllowedIPs = 0\.0\.0\.0/AllowedIPs = 0\.0\.0\.0/g' ${configWireGuardConfigFilePath}
sed -i 's/engage\.cloudflareclient\.com/\[2606\:4700\:d0\:\:a29f\:c001\]/g' ${configWireGuardConfigFilePath}
sed -i 's/162\.159\.192\.1/\[2606\:4700\:d0\:\:a29f\:c001\]/g' ${configWireGuardConfigFilePath}
sed -i 's/^DNS = 1\.1\.1\.1/DNS = 2620:fe\:\:10,2001\:4860\:4860\:\:8888,2606\:4700\:4700\:\:1111/g' ${configWireGuardConfigFilePath}
sed -i 's/^DNS = 8\.8\.8\.8,1\.1\.1\.1,9\.9\.9\.10/DNS = 2620:fe\:\:10,2001\:4860\:4860\:\:8888,2606\:4700\:4700\:\:1111/g' ${configWireGuardConfigFilePath}
echo "nameserver 2a00:1098:2b::1" >> /etc/resolv.conf
echo
green " Wireguard 已成功切换到 对VPS服务器的 IPv4 网络支持"
else
# Add IPv6 network support for an IPv4-only server: route ::/0 through
# WARP, switch the endpoint to its IPv4 address, use IPv4 DNS.
sed -i 's/^AllowedIPs = 0\.0\.0\.0/# AllowedIPs = 0\.0\.0\.0/g' ${configWireGuardConfigFilePath}
sed -i 's/# AllowedIPs = \:\:\/0/AllowedIPs = \:\:\/0/g' ${configWireGuardConfigFilePath}
sed -i 's/engage\.cloudflareclient\.com/162\.159\.192\.1/g' ${configWireGuardConfigFilePath}
sed -i 's/\[2606\:4700\:d0\:\:a29f\:c001\]/162\.159\.192\.1/g' ${configWireGuardConfigFilePath}
sed -i 's/^DNS = 1\.1\.1\.1/DNS = 8\.8\.8\.8,1\.1\.1\.1,9\.9\.9\.10/g' ${configWireGuardConfigFilePath}
sed -i 's/^DNS = 2620:fe\:\:10,2001\:4860\:4860\:\:8888,2606\:4700\:4700\:\:1111/DNS = 8\.8\.8\.8,1\.1\.1\.1,9\.9\.9\.10/g' ${configWireGuardConfigFilePath}
echo "nameserver 8.8.8.8" >> /etc/resolv.conf
echo "nameserver 8.8.4.4" >> /etc/resolv.conf
echo "nameserver 1.1.1.1" >> /etc/resolv.conf
echo "nameserver 9.9.9.9" >> /etc/resolv.conf
echo "nameserver 9.9.9.10" >> /etc/resolv.conf
echo
green " Wireguard 已成功切换到 对VPS服务器的 IPv6 网络支持"
fi
green " ================================================== "
echo
green " Wireguard 配置信息如下 配置文件路径: ${configWireGuardConfigFilePath} "
cat ${configWireGuardConfigFilePath}
green " ================================================== "
echo
# -n : non-empty — with an argument just restart the tunnel; without one,
# continue to the IPv4/IPv6 preference configuration.
if [[ -n $1 ]]; then
${sudoCmd} systemctl start wg-quick@wgcf
else
preferIPV4
fi
}
# Configure /etc/gai.conf address selection so the system prefers IPv4 or
# IPv6 for outgoing connections.
# $1: when empty, silently set IPv4 preference; when non-empty, show an
#     interactive menu (1 IPv4 / 2 IPv6 / 3 reset to system default).
function preferIPV4(){
# Remove preference lines added by previous runs so choices don't stack.
if [[ -f "/etc/gai.conf" ]]; then
sed -i '/^precedence \:\:ffff\:0\:0/d' /etc/gai.conf
sed -i '/^label 2002\:\:\/16/d' /etc/gai.conf
fi
# -z : empty
if [[ -z $1 ]]; then
echo "precedence ::ffff:0:0/96 100" >> /etc/gai.conf
echo
green " VPS服务器已成功设置为 IPv4 优先访问网络"
else
green " ================================================== "
yellow " 请为服务器设置 IPv4 还是 IPv6 优先访问: "
echo
green " 1 优先 IPv4 访问网络 (用于 给只有 IPv6 的 VPS主机添加 IPv4 网络支持)"
green " 2 优先 IPv6 访问网络 (用于 解锁 Netflix 限制 和避免弹出 Google reCAPTCHA 人机验证)"
green " 3 删除 IPv4 或 IPv6 优先访问的设置, 还原为系统默认配置"
echo
red " 注意: 选2后 优先使用 IPv6 访问网络 可能造成无法访问某些不支持IPv6的网站! "
red " 注意: 解锁Netflix限制和避免弹出Google人机验证 一般不需要选择2设置IPv6优先访问, 可以在V2ray的配置中单独设置对Netfile和Google使用IPv6访问 "
red " 注意: 由于 trojan 或 trojan-go 不支持配置 使用IPv6优先访问Netfile和Google, 可以选择2 开启服务器优先IPv6访问, 解决 trojan-go 解锁Netfile和Google人机验证问题"
echo
read -p "请选择 IPv4 还是 IPv6 优先访问? 直接回车默认选1, 请输入[1/2/3]:" isPreferIPv4Input
isPreferIPv4Input=${isPreferIPv4Input:-1}
if [[ ${isPreferIPv4Input} == [2] ]]; then
# Prefer IPv6
echo "label 2002::/16 2" >> /etc/gai.conf
echo
green " VPS服务器已成功设置为 IPv6 优先访问网络 "
elif [[ ${isPreferIPv4Input} == [3] ]]; then
# Option 3: nothing to add — the reset already happened at the top
# of this function when the old preference lines were deleted.
echo
green " VPS服务器 已删除 IPv4 或 IPv6 优先访问的设置, 还原为系统默认配置 "
else
# Prefer IPv4
echo "precedence ::ffff:0:0/96 100" >> /etc/gai.conf
echo
green " VPS服务器已成功设置为 IPv4 优先访问网络 "
fi
green " ================================================== "
echo
yellow " 验证 IPv4 或 IPv6 访问网络优先级测试, 命令: curl ip.p3terx.com "
echo
curl ip.p3terx.com
echo
green " 上面信息显示 如果是IPv4地址 则VPS服务器已设置为 IPv4优先访问. 如果是IPv6地址则已设置为 IPv6优先访问 "
green " ================================================== "
fi
echo
}
# Uninstall WireGuard and the wgcf command line tool.
# Stops and disables the wg-quick@wgcf unit, removes the distro packages,
# optionally keeps the wgcf account file for later reuse, deletes installed
# binaries/config, restores the DNS backup and unloads the kernel module.
function removeWireguard(){
    green " ================================================== "
    red " 准备卸载 Wireguard 和 Cloudflare WARP 命令行工具 Wgcf "
    green " ================================================== "
    if [[ -f "${configWgcfBinPath}/wgcf" || -f "${configWgcfConfigFolderPath}/wgcf" || -f "/wgcf" ]]; then
        ${sudoCmd} systemctl stop wg-quick@wgcf.service
        ${sudoCmd} systemctl disable wg-quick@wgcf.service
        # Bring the interface down if still up. Fix: the original also ran
        # "wg-quick disable wgcf", which is not a wg-quick subcommand and
        # always failed; boot-time disabling is handled by systemctl above.
        ${sudoCmd} wg-quick down wgcf

        if [[ "${osRelease}" == "debian" || "${osRelease}" == "ubuntu" ]]; then
            $osSystemPackage -y remove wireguard-tools
            $osSystemPackage -y remove wireguard
        elif [[ "${osRelease}" == "centos" ]]; then
            $osSystemPackage -y remove kmod-wireguard
            $osSystemPackage -y remove wireguard-dkms
            $osSystemPackage -y remove wireguard-tools
        fi

        echo
        read -p "是否删除Wgcf申请的账号文件, 默认不删除, 方便以后不用在重新注册, 请输入[y/N]:" isWgcfAccountFileRemoveInput
        isWgcfAccountFileRemoveInput=${isWgcfAccountFileRemoveInput:-n}
        echo
        if [[ $isWgcfAccountFileRemoveInput == [Yy] ]]; then
            rm -rf ${configWgcfConfigFolderPath}
            green " Wgcf申请的账号信息文件 ${configWgcfAccountFilePath} 已删除!"
        else
            # Keep the account file; only remove the generated profile.
            rm -f ${configWgcfProfileFilePath}
            green " Wgcf申请的账号信息文件 ${configWgcfAccountFilePath} 已保留! "
        fi

        rm -f ${configWgcfBinPath}/wgcf
        rm -rf ${configWireGuardConfigFileFolder}
        rm -f ${osSystemMdPath}wg-quick@wgcf.service
        rm -f /usr/bin/wg
        rm -f /usr/bin/wg-quick
        rm -f /usr/share/man/man8/wg.8
        rm -f /usr/share/man/man8/wg-quick.8
        # Fix: the original '("rm -rf /etc/wireguard")' tried to execute a
        # command literally named "rm -rf /etc/wireguard" and never worked.
        [ -d "/etc/wireguard" ] && rm -rf "/etc/wireguard"

        # Restore the DNS configuration only when the backup actually exists,
        # so a missing backup does not surface a spurious cp error.
        if [[ -f "${configWireGuardDNSBackupFilePath}" ]]; then
            cp -f ${configWireGuardDNSBackupFilePath} /etc/resolv.conf
        fi

        sleep 2
        # Unload the WireGuard kernel module if it was loaded.
        modprobe -r wireguard

        green " ================================================== "
        green " Wireguard 和 Cloudflare WARP 命令行工具 Wgcf 卸载完毕 !"
        green " ================================================== "
    else
        red " 系统没有安装 Wireguard 和 Wgcf, 退出卸载"
        echo
    fi
}
# Uninstall the official Cloudflare WARP client: disconnect, stop warp-svc,
# remove the package and its repo entry, the saved port file, and the cron
# jobs this script installed for warp-cli/warp-svc.
function removeWARP(){
green " ================================================== "
red " 准备卸载 Cloudflare WARP 官方 linux client "
green " ================================================== "
# NOTE(review): checks /usr/bin/warp-cli while installWARPClient checks
# /bin/warp-cli — equivalent only on usr-merged systems; confirm.
if [[ -f "/usr/bin/warp-cli" ]]; then
${sudoCmd} warp-cli disable-always-on
${sudoCmd} warp-cli disconnect
${sudoCmd} systemctl stop warp-svc
sleep 5s
if [[ "${osRelease}" == "debian" || "${osRelease}" == "ubuntu" ]]; then
${sudoCmd} apt purge -y cloudflare-warp
rm -f /etc/apt/sources.list.d/cloudflare-client.list
elif [[ "${osRelease}" == "centos" ]]; then
yum remove -y cloudflare-warp
fi
rm -f ${configWARPPortFilePath}
# Strip all warp-related entries from the crontab.
crontab -l | grep -v 'warp-cli' | crontab -
crontab -l | grep -v 'warp-svc' | crontab -
green " ================================================== "
green " Cloudflare WARP linux client 卸载完毕 !"
green " ================================================== "
else
red " 系统没有安装 Cloudflare WARP linux client, 退出卸载"
echo
fi
}
# Report whether the wg-quick@wgcf systemd unit is running; when it is,
# also run "wgcf trace" to show the WARP connection details.
function checkWireguardBootStatus(){
    echo
    green " ================================================== "
    # Non-empty only when systemctl reports the unit as "Active: active".
    isWireguardBootSuccess=$(systemctl status wg-quick@wgcf | grep -E "Active: active")

    if [[ -n "${isWireguardBootSuccess}" ]]; then
        green " 状态显示-- Wireguard 已启动成功! "
        echo
        echo "wgcf trace"
        echo
        wgcf trace
        echo
    else
        green " 状态显示-- Wireguard 已启动${Red_font_prefix}失败${Green_font_prefix}! 请查看 Wireguard 运行日志, 寻找错误后重启 Wireguard "
    fi
    green " ================================================== "
    echo
}
# Cloudflare trace endpoint; its "warp=" field tells whether traffic goes through WARP.
cloudflare_Trace_URL='https://www.cloudflare.com/cdn-cgi/trace'
# Report whether warp-svc is running and whether its SOCKS5 proxy works.
# Reads the proxy port saved by installWARPClient (falls back to the
# configWarpPort default) and queries the Cloudflare trace endpoint through
# the proxy; "warp=on" / "warp=plus" means the proxy is working.
function checkWarpClientStatus(){
if [[ -f "${configWARPPortFilePath}" ]]; then
configWarpPort=$(cat ${configWARPPortFilePath})
fi
echo
green " ================================================== "
sleep 2s
# Empty when warp-svc is active ("inactive" absent from is-active output).
isWarpClientBootSuccess=$(systemctl is-active warp-svc | grep -E "inactive")
if [[ -z "${isWarpClientBootSuccess}" ]]; then
green " 状态显示-- WARP 已启动成功! "
echo
# Extract the warp= field value (off/on/plus) via the SOCKS5 proxy.
isWarpClientMode=$(curl -sx "socks5h://127.0.0.1:${configWarpPort}" ${cloudflare_Trace_URL} --connect-timeout 20 | grep warp | cut -d= -f2)
sleep 3s
case ${isWarpClientMode} in
on)
green " 状态显示-- WARP SOCKS5 代理已启动成功, 端口号 ${configWarpPort} ! "
;;
plus)
green " 状态显示-- WARP+ SOCKS5 代理已启动成功, 端口号 ${configWarpPort} ! "
;;
*)
green " 状态显示-- WARP SOCKS5 代理启动${Red_font_prefix}失败${Green_font_prefix}! "
;;
esac
green " ================================================== "
echo
echo "curl -x 'socks5h://127.0.0.1:${configWarpPort}' ${cloudflare_Trace_URL}"
echo
curl -x "socks5h://127.0.0.1:${configWarpPort}" ${cloudflare_Trace_URL}
else
green " 状态显示-- WARP 已启动${Red_font_prefix}失败${Green_font_prefix}! 请查看 WARP 运行日志, 寻找错误后重启 WARP "
fi
green " ================================================== "
echo
}
# Restart the WireGuard systemd unit (wg-quick@wgcf), echoing the command
# before running it and confirming afterwards.
function restartWireguard(){
    local wgQuickUnit="wg-quick@wgcf"
    echo
    echo "systemctl restart ${wgQuickUnit}"
    systemctl restart "${wgQuickUnit}"
    green " Wireguard 已重启 !"
    echo
}
# Start the WARP daemon, then connect and pin Always-On mode.
# Each command is echoed before it runs, matching the script's style.
function startWARP(){
    local warpStartCmd
    for warpStartCmd in "systemctl start warp-svc" "warp-cli connect" "warp-cli enable-always-on"; do
        echo
        echo "${warpStartCmd}"
        ${warpStartCmd}
    done
    green " WARP SOCKS5 代理 已启动 !"
}
# Stop the WARP SOCKS5 proxy: drop Always-On, disconnect, stop the daemon.
# Each command is echoed before it runs, matching the script's style.
function stopWARP(){
    local warpStopCmd
    for warpStopCmd in "warp-cli disable-always-on" "warp-cli disconnect" "systemctl stop warp-svc"; do
        echo
        echo "${warpStopCmd}"
        ${warpStopCmd}
    done
    green " WARP SOCKS5 代理 已停止 !"
}
# Restart the WARP SOCKS5 proxy: disconnect cleanly, restart warp-svc,
# wait for the user to confirm, then reconnect with Always-On.
function restartWARP(){
    local warpCmd
    for warpCmd in "warp-cli disable-always-on" "warp-cli disconnect" "systemctl restart warp-svc"; do
        echo
        echo "${warpCmd}"
        ${warpCmd}
    done
    sleep 5s
    echo
    # Give the daemon time to come back up before reconnecting.
    read -p "Press enter to continue"
    echo
    for warpCmd in "warp-cli connect" "warp-cli enable-always-on"; do
        echo "${warpCmd}"
        ${warpCmd}
        echo
    done
    green " WARP SOCKS5 代理 已重启 !"
    echo
}
# Interactive diagnostics menu for WireGuard and the WARP client: view
# kernel/status/logs, start/stop/restart either service, and inspect the
# wgcf / warp-cli status, stats, settings and account information.
# Re-displays itself (recursion) on invalid input.
function checkWireguard(){
echo
green " =================================================="
echo
green " 1. 查看当前系统内核版本, 检查是否因为装了多个版本内核导致 Wireguard 启动失败"
echo
green " 2. 查看 Wireguard 和 WARP SOCKS5 代理运行状态"
echo
green " 3. 查看 Wireguard 运行日志, 如果 Wireguard 启动失败 请用此项查找问题"
green " 4. 启动 Wireguard "
green " 5. 停止 Wireguard "
green " 6. 重启 Wireguard "
green " 7. 查看 Wireguard 和 WARP 运行状态 wgcf status "
green " 8. 查看 Wireguard 配置文件 ${configWireGuardConfigFilePath} "
green " 9. 用VI 编辑 Wireguard 配置文件 ${configWireGuardConfigFilePath} "
echo
green " 11. 查看 WARP SOCKS5 运行日志, 如果 WARP 启动失败 请用此项查找问题"
green " 12. 启动 WARP SOCKS5 代理"
green " 13. 停止 WARP SOCKS5 代理"
green " 14. 重启 WARP SOCKS5 代理"
echo
green " 15. 查看 WARP SOCKS5 运行状态 warp-cli status"
green " 16. 查看 WARP SOCKS5 连接信息 warp-cli warp-stats"
green " 17. 查看 WARP SOCKS5 设置信息 warp-cli settings"
green " 18. 查看 WARP SOCKS5 账户信息 warp-cli account"
green " =================================================="
green " 0. 退出脚本"
echo
read -p "请输入数字:" menuNumberInput
case "$menuNumberInput" in
1 )
showLinuxKernelInfo
listInstalledLinuxKernel
;;
2 )
echo
#echo "systemctl status wg-quick@wgcf"
#systemctl status wg-quick@wgcf
#red " 请查看上面 Active: 一行信息, 如果文字是绿色 active 则为启动正常, 否则启动失败"
checkWireguardBootStatus
checkWarpClientStatus
;;
3 )
echo
echo "journalctl -n 100 -u wg-quick@wgcf"
journalctl -n 100 -u wg-quick@wgcf
red " 请查看上面包含 Error 的信息行, 查找启动失败的原因 "
;;
4 )
echo
echo "systemctl start wg-quick@wgcf"
systemctl start wg-quick@wgcf
echo
green " Wireguard 已启动 !"
checkWireguardBootStatus
;;
5 )
echo
echo "systemctl stop wg-quick@wgcf"
systemctl stop wg-quick@wgcf
echo
green " Wireguard 已停止 !"
checkWireguardBootStatus
;;
6 )
restartWireguard
checkWireguardBootStatus
;;
7 )
echo
green "Running command 'wgcf status' to check device status :"
echo
wgcf status
echo
echo
green "Running command 'wgcf trace' to verify WARP/WARP+ works :"
echo
wgcf trace
echo
;;
8 )
echo
echo "cat ${configWireGuardConfigFilePath}"
cat ${configWireGuardConfigFilePath}
;;
9 )
echo
echo "vi ${configWireGuardConfigFilePath}"
vi ${configWireGuardConfigFilePath}
;;
11 )
echo
echo "journalctl --no-pager -u warp-svc "
journalctl --no-pager -u warp-svc
red " 请查看上面包含 Error 的信息行, 查找启动失败的原因 "
;;
12 )
startWARP
checkWarpClientStatus
;;
13 )
stopWARP
checkWarpClientStatus
;;
14 )
restartWARP
checkWarpClientStatus
;;
15 )
echo
echo "warp-cli status"
warp-cli status
;;
16 )
echo
echo "warp-cli warp-stats"
warp-cli warp-stats
;;
17 )
echo
echo "warp-cli settings"
warp-cli settings
;;
18 )
echo
echo "warp-cli account"
warp-cli account
;;
0 )
exit 1
;;
* )
# Invalid input: clear the screen and show this menu again.
clear
red "请输入正确数字 !"
sleep 2s
checkWireguard
;;
esac
}
function start_menu(){
# Main interactive menu: prints the current kernel/BBR status and every
# available action, then dispatches on the user's numeric choice.
# "$1" == "first" triggers one-time OS detection and base-package install.
clear
if [[ $1 == "first" ]] ; then
getLinuxOSRelease
installSoftDownload
fi
showLinuxKernelInfoNoDisplay
# Render the menu in the configured language (cn or en); both branches show
# the same options, only the wording differs.
if [[ ${configLanguage} == "cn" ]] ; then
green " =================================================="
green " Linux 内核 一键安装脚本 | 2022-1-27 | By jinwyp | 系统支持:centos7+ / debian10+ / ubuntu16.04+"
green " Linux 内核 4.9 以上都支持开启BBR, 如要开启BBR Plus 则需要安装支持BBR Plus的内核 "
red " 在任何生产环境中请谨慎使用此脚本, 升级内核有风险, 请做好备份!在某些VPS会导致无法启动! "
green " =================================================="
if [[ -z ${osKernelBBRStatus} ]]; then
echo -e " 当前系统内核: ${osKernelVersionBackup} (${virtual}) ${Red_font_prefix}未安装 BBR 或 BBR Plus ${Font_color_suffix} 加速内核, 请先安装4.9以上内核 "
else
if [ ${systemBBRRunningStatus} = "no" ]; then
echo -e " 当前系统内核: ${osKernelVersionBackup} (${virtual}) ${Green_font_prefix}已安装 ${osKernelBBRStatus}${Font_color_suffix} 加速内核, ${Red_font_prefix}${systemBBRRunningStatusText}${Font_color_suffix} "
else
echo -e " 当前系统内核: ${osKernelVersionBackup} (${virtual}) ${Green_font_prefix}已安装 ${osKernelBBRStatus}${Font_color_suffix} 加速内核, ${Green_font_prefix}${systemBBRRunningStatusText}${Font_color_suffix} "
fi
fi
echo -e " 当前拥塞控制算法: ${Green_font_prefix}${net_congestion_control}${Font_color_suffix} ECN: ${Green_font_prefix}${systemECNStatusText}${Font_color_suffix} 当前队列算法: ${Green_font_prefix}${net_qdisc}${Font_color_suffix} "
echo
green " 1. 查看当前系统内核版本, 检查是否支持BBR / BBR2 / BBR Plus"
green " 2. 开启 BBR 或 BBR2 加速, 开启 BBR2 需要安装 XanMod 内核"
green " 3. 开启 BBR Plus 加速"
green " 4. 优化 系统网络配置"
red " 5. 删除 系统网络优化配置"
echo
green " 6. 查看 Wireguard 运行状态"
green " 7. 重启 Wireguard "
green " 8. 查看 WARP SOCKS5 代理运行状态"
green " 9. 重启 WARP SOCKS5"
green " 10. 查看 WireGuard 和 WARP SOCKS5 运行状态, 错误日志, 如果WireGuard启动失败 请选该项排查错误"
echo
green " 11. 安装官方 Cloudflare WARP Client 启动SOCKS5代理, 用于解锁 Netflix 限制"
green " 12. 安装 WireGuard 和 Cloudflare WARP 工具 Wgcf ${versionWgcf}, 启动 IPv4或IPv6, 用于避免弹出Google人机验证"
green " 13. 同时安装 官方 Cloudflare WARP Client, WireGuard 和 命令行工具 Wgcf, 不推荐 "
red " 14. 卸载 WireGuard 和 Cloudflare WARP linux client"
green " 15. 切换 WireGuard 对VPS服务器的 IPv6 和 IPv4 的网络支持"
green " 16. 设置 VPS 服务器 IPv4 还是 IPv6 网络优先访问"
green " 21. 测试 VPS 是否支持 Netflix 非自制剧解锁 支持 WARP SOCKS5 测试 强烈推荐使用 "
green " 22. 自动刷新WARP IP 检测支持 Netflix 非自制剧解锁 "
echo
if [[ "${osRelease}" == "centos" ]]; then
green " 31. 安装 最新版本内核 5.16, 通过elrepo源安装"
green " 32. 安装 LTS内核 5.4 LTS, 通过elrepo源安装"
green " 33. 安装 内核 4.14 LTS, 从 altarch网站 下载安装"
green " 34. 安装 内核 4.19 LTS, 从 altarch网站 下载安装"
green " 35. 安装 内核 5.4 LTS, 从 elrepo网站 下载安装"
echo
green " 36. 安装 内核 5.10 LTS, Teddysun 编译 推荐安装此内核"
green " 37. 安装 内核 5.15, Teddysun 编译"
green " 38. 安装 内核 5.16, 下载安装. (安装最新版内核 可能会高于5.16) "
else
if [[ "${osRelease}" == "debian" ]]; then
green " 41. 安装 最新版本LTS内核 5.10 LTS, 通过 Debian 官方源安装"
echo
fi
green " 42. 安装 最新版本内核 5.16, 通过 Ubuntu kernel mainline 安装"
green " 43. 安装 内核 4.19 LTS, 通过 Ubuntu kernel mainline 安装"
green " 44. 安装 内核 5.4 LTS, 通过 Ubuntu kernel mainline 安装"
green " 45. 安装 内核 5.10 LTS, 通过 Ubuntu kernel mainline 安装"
echo
green " 51. 安装 XanMod Kernel 内核 5.10 LTS, 官方源安装 "
green " 52. 安装 XanMod Kernel 内核 5.14, 官方源安装 "
fi
echo
green " 61. 安装 BBR Plus 内核 4.14.129 LTS, cx9208 编译的 dog250 原版, 推荐使用"
green " 62. 安装 BBR Plus 内核 4.9 LTS, UJX6N 编译"
green " 63. 安装 BBR Plus 内核 4.14 LTS, UJX6N 编译"
green " 64. 安装 BBR Plus 内核 4.19 LTS, UJX6N 编译"
green " 65. 安装 BBR Plus 内核 5.4 LTS, UJX6N 编译"
green " 66. 安装 BBR Plus 内核 5.10 LTS, UJX6N 编译"
green " 67. 安装 BBR Plus 内核 5.14, UJX6N 编译"
echo
green " 0. 退出脚本"
else
# English menu (same options as the Chinese branch above).
green " =================================================="
green " Linux kernel install script | 2022-1-27 | By jinwyp | OS support:centos7+ / debian10+ / ubuntu16.04+"
green " Enable bbr require linux kernel higher than 4.9. Enable bbr plus require special bbr plus kernel "
red " Please use this script with caution in production. Backup your data first! Upgrade linux kernel will cause VPS unable to boot sometimes."
green " =================================================="
if [[ -z ${osKernelBBRStatus} ]]; then
echo -e " Current Kernel: ${osKernelVersionBackup} (${virtual}) ${Red_font_prefix}Not install BBR / BBR Plus ${Font_color_suffix} , Please install kernel which is higher than 4.9"
else
if [ ${systemBBRRunningStatus} = "no" ]; then
echo -e " Current Kernel: ${osKernelVersionBackup} (${virtual}) ${Green_font_prefix}installed ${osKernelBBRStatus}${Font_color_suffix} kernel, ${Red_font_prefix}${systemBBRRunningStatusText}${Font_color_suffix} "
else
echo -e " Current Kernel: ${osKernelVersionBackup} (${virtual}) ${Green_font_prefix}installed ${osKernelBBRStatus}${Font_color_suffix} kernel, ${Green_font_prefix}${systemBBRRunningStatusText}${Font_color_suffix} "
fi
fi
echo -e " Congestion Control Algorithm: ${Green_font_prefix}${net_congestion_control}${Font_color_suffix} ECN: ${Green_font_prefix}${systemECNStatusText}${Font_color_suffix} Network Queue Algorithm: ${Green_font_prefix}${net_qdisc}${Font_color_suffix} "
echo
green " 1. Show current linux kernel version, check supoort BBR / BBR2 / BBR Plus or not"
green " 2. enable bbr / bbr2 acceleration, (bbr2 require XanMod kernel)"
green " 3. enable bbr plus acceleration"
green " 4. Optimize system network configuration"
red " 5. Remove system network optimization configuration"
echo
green " 6. Show Wireguard working status"
green " 7. restart Wireguard "
green " 8. Show WARP SOCKS5 proxy working status"
green " 9. restart WARP SOCKS5 proxy"
green " 10. Show WireGuard and WARP SOCKS5 working status, error log, etc."
echo
green " 11. Install official Cloudflare WARP linux client SOCKS5 proxy, in order to unlock Netflix geo restriction "
green " 12. Install WireGuard and Cloudflare WARP tool Wgcf ${versionWgcf}, enable IPv4 or IPv6, avoid Google reCAPTCHA"
green " 13. Install official Cloudflare WARP linux client, WireGuard and WARP toll Wgcf, not recommended "
red " 14. Remove WireGuard 和 Cloudflare WARP linux client"
green " 15. Switch WireGuard using IPv6 or IPv4 for your VPS"
green " 16. Set VPS using IPv4 or IPv6 firstly to access network"
green " 21. Netflix region and non-self produced drama unlock test, support WARP SOCKS5 proxy and IPv6"
green " 22. Auto refresh Cloudflare WARP IP to unlock Netflix non-self produced drama"
echo
if [[ "${osRelease}" == "centos" ]]; then
green " 31. Install latest linux kernel, 5.16, from elrepo yum repository"
green " 32. Install LTS linux kernel, 5.4 LTS, from elrepo yum repository"
green " 33. Install linux kernel 4.14 LTS, download and install from altarch website"
green " 34. Install linux kernel 4.19 LTS, download and install from altarch website"
green " 35. Install linux kernel 5.4 LTS, download and install from elrepo website"
echo
green " 36. Install linux kernel 5.10 LTS, compile by Teddysun. Recommended"
green " 37. Install linux kernel 5.15, compile by Teddysun."
green " 38. Install linux kernel 5.16, download and install (will install latest kernel, maybe higher than 5.16) "
else
if [[ "${osRelease}" == "debian" ]]; then
green " 41. Install latest LTS linux kernel, 5.10 LTS, from Debian repository source"
echo
fi
green " 42. Install latest linux kernel 5.16, download and install from Ubuntu kernel mainline"
green " 43. Install linux kernel 4.19 LTS, download and install from Ubuntu kernel mainline"
green " 44. Install linux kernel 5.4 LTS, download and install from Ubuntu kernel mainline"
green " 45. Install linux kernel 5.10 LTS, download and install from Ubuntu kernel mainline"
echo
green " 51. Install XanMod kernel 5.10 LTS, from XanMod repository source "
green " 52. Install XanMod kernel 5.14, from XanMod repository source "
fi
echo
green " 61. Install BBR Plus kernel 4.14.129 LTS, compile by cx9208 from original dog250 source code. Recommended"
green " 62. Install BBR Plus kernel 4.9 LTS, compile by UJX6N"
green " 63. Install BBR Plus kernel 4.14 LTS, compile by UJX6N"
green " 64. Install BBR Plus kernel 4.19 LTS, compile by UJX6N"
green " 65. Install BBR Plus kernel 5.4 LTS, compile by UJX6N"
green " 66. Install BBR Plus kernel 5.10 LTS, compile by UJX6N"
green " 67. Install BBR Plus kernel 5.14, compile by UJX6N"
echo
green " 0. exit"
fi
echo
read -p "Please input number:" menuNumberInput
# Dispatch the chosen action. Options 31-45 select a kernel version (and
# whether to install from a repo), 51-52 XanMod, 61-67 BBR Plus builds.
case "$menuNumberInput" in
1 )
showLinuxKernelInfo
listInstalledLinuxKernel
;;
2 )
enableBBRSysctlConfig "bbr"
;;
3 )
enableBBRSysctlConfig "bbrplus"
;;
4 )
addOptimizingSystemConfig
;;
5 )
removeOptimizingSystemConfig
sysctl -p
;;
6 )
checkWireguardBootStatus
;;
7 )
restartWireguard
checkWireguardBootStatus
;;
8 )
checkWarpClientStatus
;;
9 )
restartWARP
checkWarpClientStatus
;;
10 )
checkWireguard
;;
11 )
installWARPClient
;;
12 )
installWireguard
;;
13 )
installWireguard
installWARPClient
;;
14 )
removeWireguard
removeWARP
;;
15 )
enableWireguardIPV6OrIPV4 "redo"
;;
16 )
preferIPV4 "redo"
;;
21 )
vps_netflix_jin
;;
22 )
vps_netflix_jin_auto
;;
31 )
linuxKernelToInstallVersion="5.16"
isInstallFromRepo="yes"
installKernel
;;
32 )
linuxKernelToInstallVersion="5.4"
isInstallFromRepo="yes"
installKernel
;;
33 )
linuxKernelToInstallVersion="4.14"
installKernel
;;
34 )
linuxKernelToInstallVersion="4.19"
installKernel
;;
35 )
linuxKernelToInstallVersion="5.4"
installKernel
;;
36 )
linuxKernelToInstallVersion="5.10"
installKernel
;;
37 )
linuxKernelToInstallVersion="5.15"
installKernel
;;
38 )
linuxKernelToInstallVersion="5.16"
installKernel
;;
41 )
linuxKernelToInstallVersion="5.10"
isInstallFromRepo="yes"
installKernel
;;
42 )
linuxKernelToInstallVersion="5.16.14"
installKernel
;;
43 )
linuxKernelToInstallVersion="4.19"
installKernel
;;
44 )
linuxKernelToInstallVersion="5.4"
installKernel
;;
45 )
linuxKernelToInstallVersion="5.10.105"
installKernel
;;
51 )
linuxKernelToInstallVersion="5.10"
linuxKernelToBBRType="xanmod"
isInstallFromRepo="yes"
installKernel
;;
52 )
linuxKernelToInstallVersion="5.14"
linuxKernelToBBRType="xanmod"
isInstallFromRepo="yes"
installKernel
;;
61 )
linuxKernelToInstallVersion="4.14.129"
linuxKernelToBBRType="bbrplus"
installKernel
;;
62 )
linuxKernelToInstallVersion="4.9"
linuxKernelToBBRType="bbrplus"
installKernel
;;
63 )
linuxKernelToInstallVersion="4.14"
linuxKernelToBBRType="bbrplus"
installKernel
;;
64 )
linuxKernelToInstallVersion="4.19"
linuxKernelToBBRType="bbrplus"
installKernel
;;
65 )
linuxKernelToInstallVersion="5.4"
linuxKernelToBBRType="bbrplus"
installKernel
;;
66 )
linuxKernelToInstallVersion="5.10"
linuxKernelToBBRType="bbrplus"
installKernel
;;
67 )
linuxKernelToInstallVersion="5.14"
linuxKernelToBBRType="bbrplus"
installKernel
;;
# 88 is an undocumented helper entry (not shown in the menu text above).
88 )
getLatestUbuntuKernelVersion
;;
0 )
exit 1
;;
* )
clear
red "请输入正确数字 !"
sleep 2s
start_menu
;;
esac
}
function setLanguage(){
    # Ask the user which display language to use, persist the choice to
    # ${configLanguageFilePath}, and re-enter the menu. Invalid input loops
    # back into this prompt.
    echo
    green " =================================================="
    green " Please choose your language"
    green " 1. 中文"
    green " 2. English"
    echo
    read -p "Please input your language:" languageInput

    if [ "${languageInput}" = "1" ]; then
        echo "cn" > ${configLanguageFilePath}
        showMenu
    elif [ "${languageInput}" = "2" ]; then
        echo "en" > ${configLanguageFilePath}
        showMenu
    else
        red " Please input the correct number !"
        setLanguage
    fi
}
# File persisting the user's language choice (shared with the author's
# v2ray/trojan scripts, hence the name).
configLanguageFilePath="${HOME}/language_setting_v2ray_trojan.md"
# Default UI language; overwritten by showMenu when a saved choice exists.
configLanguage="cn"
function showMenu(){
    # Entry point: load the saved language (if any) and open the main menu;
    # fall back to the language chooser when no valid choice is stored.
    if [ -f "${configLanguageFilePath}" ]; then
        configLanguage=$(cat ${configLanguageFilePath})
        case "${configLanguage}" in
            cn | en )
                start_menu "first"
                ;;
            * )
                setLanguage
                ;;
        esac
    else
        setLanguage
    fi
}
# Script entry point.
showMenu
|
#!/bin/bash
#
# Copyright 2017-2020 O2 Czech Republic, a.s.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Idle forever so the enclosing container/service stays alive
# (':' is the shell no-op builtin and always succeeds, like /bin/true).
while :; do
    sleep 20
done
|
<filename>src/ml/association/HashTree.java
package ml.association;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
/**
 * A hash tree for storing itemsets of a fixed size, as used in Apriori-style
 * association rule mining. Internal nodes fan out over {@code numBranches}
 * children keyed by the hash of the item at the current depth; leaf nodes
 * (at depth {@code itemsetSize}) hold buckets of itemsets.
 */
public class HashTree<E> {

    /** Default fan-out of an internal node. */
    static final int DEFAULT_NUM_BRANCHES = 3;

    /** Tree node: leaves carry a bucket of itemsets, internal nodes carry children. */
    static class Node<E> {
        List<ItemSet<E>> bucket;
        HashMap<Integer, Node<E>> children;

        public Node() {
            children = new HashMap<Integer, Node<E>>();
        }

        /** Lazily creates the bucket and appends {@code elem}. */
        public void addToBucket(ItemSet<E> elem) {
            if (bucket == null) {
                bucket = new ArrayList<ItemSet<E>>();
            }
            bucket.add(elem);
        }
    }

    Node<E> root;

    /**
     * The number of branches for an internal node. Default is 3
     */
    int numBranches;

    /**
     * The size of an itemset stored in this tree. This implies that all itemsets should contain
     * equal number of items.
     */
    int itemsetSize;

    /**
     * Number of elements in the tree.
     */
    private int size;

    public HashTree(int itemsetSize) {
        initialize(itemsetSize, DEFAULT_NUM_BRANCHES);
    }

    public HashTree(int itemsetSize, int numBranches) {
        initialize(itemsetSize, numBranches);
    }

    private void initialize(int itemsetSize, int numBranches) {
        this.itemsetSize = itemsetSize;
        this.numBranches = numBranches;
    }

    /** Inserts {@code itemset}, creating nodes along its hash path as needed. */
    public void insert(ItemSet<E> itemset) {
        root = insert(root, itemset, 0);
    }

    private Node<E> insert(Node<E> curr, ItemSet<E> itemset, int depth) {
        // A missing node is created lazily at both leaf and internal levels.
        if (curr == null) {
            curr = new Node<E>();
        }
        if (depth == itemsetSize) {
            // Leaf level: store the itemset in this node's bucket.
            curr.addToBucket(itemset);
            size++;
        } else {
            // Internal level: descend into the child selected by the item at this depth.
            int key = computeKey(itemset, depth);
            Node<E> child = curr.children.get(key);
            child = insert(child, itemset, depth + 1);
            curr.children.put(key, child);
        }
        return curr;
    }

    /**
     * Branch key for {@code itemset} at {@code depth}.
     *
     * <p>Fix: uses the configured {@link #numBranches} — the original always used
     * {@link #DEFAULT_NUM_BRANCHES}, silently ignoring the branching factor passed
     * to the two-argument constructor — and {@link Math#floorMod} so negative
     * {@code hashCode()} values map into {@code [0, numBranches)} instead of
     * producing negative keys.
     */
    public int computeKey(ItemSet<E> itemset, int depth) {
        return Math.floorMod(itemset.getItemAt(depth).hashCode(), numBranches);
    }

    /** Builds a tree of the given itemset size from {@code collection}. */
    public static <T> HashTree<T> makeTree(List<ItemSet<T>> collection, int itemsetSize) {
        HashTree<T> tree = new HashTree<T>(itemsetSize);
        for (ItemSet<T> itemset : collection) {
            tree.insert(itemset);
        }
        return tree;
    }

    /** @return the number of itemsets stored in the tree */
    public int size() {
        return size;
    }

    /** Looks up an itemset equal to {@code itemset}, or {@code null} if absent. */
    public ItemSet<E> get(ItemSet<E> itemset) {
        return get(root, itemset, 0);
    }

    private ItemSet<E> get(Node<E> curr, ItemSet<E> itemset, int depth) {
        if (curr == null) {
            return null;
        }
        if (depth != itemsetSize) {
            int key = computeKey(itemset, depth);
            return get(curr.children.get(key), itemset, depth + 1);
        }
        // Leaf: guard against a node whose bucket was never populated.
        if (curr.bucket == null) {
            return null;
        }
        int index = curr.bucket.indexOf(itemset);
        if (index == -1) {
            return null;
        }
        return curr.bucket.get(index);
    }
}
|
/*
* Description: Tools for creating the databases
* License: Apache-2.0
* Copyright: x-file.xyz
*/
package tools;
/**
* Date: 2021-06-16
* Place: Zwingenberg, Germany
* @author brito
*/
public class DatabaseTools {
    // Intentionally empty: placeholder for the database-creation helpers
    // described in the file header. No behavior yet.
}
|
from fontTools.pens.basePen import BasePen
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables._g_l_y_f import Glyph
from string import ascii_letters
class TTGlyphPen(BasePen):
    """Pen that records moveTo/lineTo/closePath calls and replays them onto a
    fontTools ``Glyph``.

    NOTE(review): ``fontTools.ttLib.tables._g_l_y_f.Glyph`` does not expose
    ``moveTo``/``lineTo``/``endPath`` methods in mainline fontTools, so
    ``glyph()`` likely raises AttributeError at runtime — confirm against the
    fontTools version this code targets (fontTools ships its own TTGlyphPen
    in ``fontTools.pens.ttGlyphPen``).
    """

    def __init__(self, glyphSet):
        super(TTGlyphPen, self).__init__(glyphSet)
        # Recorded drawing commands as ('name', args...) tuples, in call order.
        self.points = []

    def _moveTo(self, pt):
        self.points.append(('moveTo', pt))

    def _lineTo(self, pt):
        self.points.append(('lineTo', pt))

    def _closePath(self):
        self.points.append(('closePath',))

    def glyph(self):
        # Replay the recorded commands onto a fresh Glyph object.
        glyph = Glyph()
        for action in self.points:
            if action[0] == 'moveTo':
                glyph.moveTo(*action[1])
            elif action[0] == 'lineTo':
                glyph.lineTo(*action[1])
            elif action[0] == 'closePath':
                glyph.endPath()
        return glyph
def build_font_glyphs(glyph_order):
    """Draw a single 500x500 square glyph and map every name in
    *glyph_order* to that same glyph object."""
    square_pen = TTGlyphPen(glyphSet=None)
    square_pen.moveTo((0, 0))
    for corner in ((0, 500), (500, 500), (500, 0)):
        square_pen.lineTo(corner)
    square_pen.closePath()
    square = square_pen.glyph()
    return {name: square for name in glyph_order}
#!/usr/bin/env bash
#
# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Abort on the first failing command.
set -o errexit
# If gcloud is not available make it a no-op, not an error.
which gcloud &> /dev/null || gcloud() { echo "[ignore-gcloud $*]" 1>&2; }
# Publish every test image under ./test_images with ko, optionally also
# tagging each published image with $1 (via ko for docker.io, via gcloud
# digest lookup for other registries, e.g. GCR).
# NOTE: ${image_dirs} is deliberately left unquoted in the for-loop so the
# newline-separated find output word-splits into individual directories.
function upload_test_images() {
echo ">> Publishing test images"
local image_dirs="$(find $(dirname $0)/test_images -mindepth 1 -maxdepth 1 -type d)"
local docker_tag=$1
# Registry host, i.e. KO_DOCKER_REPO with its last path segment removed.
local registry=${KO_DOCKER_REPO%/*}
for image_dir in ${image_dirs}; do
local image_name="$(basename ${image_dir})"
local image="knative.dev/eventing/test/test_images/${image_name}"
ko publish -B ${image}
if [ -n "$docker_tag" ]; then
if [ "$registry" = "docker.io" ]; then
# Docker Hub: ko can apply the tag directly on publish.
ko publish -B -t ${docker_tag} ${image}
else
# Other registries: look up the just-published digest and add the tag.
image=$KO_DOCKER_REPO/${image_name}
local digest=$(gcloud container images list-tags --filter="tags:latest" --format='get(digest)' ${image})
echo "Tagging ${image}@${digest} with $docker_tag"
gcloud -q container images add-tag ${image}@${digest} ${image}:$docker_tag
fi
fi
done
}
# Fail fast with a helpful message when the target registry is not configured.
: ${KO_DOCKER_REPO:?"You must set 'KO_DOCKER_REPO', see DEVELOPMENT.md"}
# Fix: quote "$@" so the optional tag argument is forwarded intact even if it
# contains whitespace (unquoted $@ re-splits arguments).
upload_test_images "$@"
|
#!/bin/bash
# Download resources.tar.gz from Google Drive using the two-step flow that
# handles Drive's "large file" confirmation token, then unpack and clean up.
curl -sc /tmp/cookie "https://drive.google.com/uc?export=download&id=1Pad37X7GjZF30DqSlESqQoh_mW-JWSy0" > /dev/null
# Extract the confirmation code Google stores in the download-warning cookie.
CODE="$(awk '/_warning_/ {print $NF}' /tmp/cookie)"
curl -Lb /tmp/cookie "https://drive.google.com/uc?export=download&confirm=${CODE}&id=1Pad37X7GjZF30DqSlESqQoh_mW-JWSy0" -o resources.tar.gz
tar -zxvf resources.tar.gz
rm resources.tar.gz
echo Download finished.
|
<reponame>Juanitoclement/FE-SmartHome<gh_stars>1-10
import axios from "axios/index";
import { AC_ON, AC_OFF, GET_AC, GET_AC_STATUS, SET_TIMER, SET_TEMPERATURE } from "./actionType";
// Base URL of the AC device backend.
const apiUrl = "http://api.myhomie.me:8000/homie/device/AC/";
// Shared axios request config for the simple GET endpoints.
// NOTE(review): "application/form-data" is not a standard MIME type
// (multipart/form-data?), and the token is read from localStorage once at
// module load — confirm both against the backend's expectations.
const httpOptions = {
  headers: {
    "Content-type": "application/form-data",
    mandatory: localStorage.getItem("token")
  },
  params: {
    accessToken: localStorage.getItem("token")
  }
};
/**
 * Redux action creator: turn on the AC identified by deviceID.
 * The payload promise resolves with the response body.
 *
 * Fix: the original wrapped the axios call in `new Promise(resolve => ...)`
 * (promise-constructor antipattern) and never called reject, so any
 * network/API error left the payload promise pending forever. Chaining the
 * axios promise directly preserves the resolved value and lets failures
 * reject normally.
 */
function turnOnAc(deviceID) {
  return {
    type: AC_ON,
    acOnPayload: axios
      .get(apiUrl + "turn-on-ac", {
        headers: {
          "Content-type": "application/form-data",
          mandatory: localStorage.getItem("token")
        },
        params: {
          deviceID: deviceID,
          accessToken: localStorage.getItem("token")
        }
      })
      .then(response => response.data)
  };
}
/**
 * Redux action creator: turn off the AC identified by deviceID.
 * The payload promise resolves with the response body.
 *
 * Fix: removed the promise-constructor antipattern (`new Promise(resolve =>`
 * around an existing promise with no reject path); errors now reject the
 * payload instead of leaving it pending forever.
 */
function turnOffAc(deviceID) {
  return {
    type: AC_OFF,
    acOffPayload: axios
      .get(apiUrl + "turn-off-ac", {
        headers: {
          "Content-type": "application/form-data",
          mandatory: localStorage.getItem("token")
        },
        params: {
          deviceID: deviceID,
          accessToken: localStorage.getItem("token")
        }
      })
      .then(response => response.data)
  };
}
/**
 * Redux action creator: fetch all of the current user's AC devices.
 * The payload promise resolves with the full axios response.
 *
 * Fix: removed the promise-constructor antipattern (no reject path meant a
 * failed request hung the payload forever) and the debug console.log.
 */
function getAc() {
  return {
    type: GET_AC,
    getAcPayload: axios.get(apiUrl + "get-all-users-ac", httpOptions)
  };
}
/**
 * Redux action creator: fetch the status of one AC device by id.
 * The payload promise resolves with the full axios response.
 *
 * Fix: removed the promise-constructor antipattern (no reject path meant a
 * failed request hung the payload forever) and the debug console.log.
 */
function getAcStatus(id) {
  return {
    type: GET_AC_STATUS,
    getAcStatus: axios.get(apiUrl + "get-ac-by-device-id", {
      headers: {
        "Content-type": "application/form-data",
        mandatory: localStorage.getItem("token")
      },
      params: {
        deviceID: id,
        accessToken: localStorage.getItem("token")
      }
    })
  };
}
/**
 * Redux action creator: set an on/off timer window for one AC device.
 * The payload promise resolves with the full axios response.
 *
 * Fix: removed the promise-constructor antipattern (no reject path meant a
 * failed request hung the payload forever) and the debug console.log.
 */
function setTimer(id, start, end) {
  return {
    type: SET_TIMER,
    setACTime: axios.get(apiUrl + "set-timer-ac", {
      headers: {
        "Content-type": "application/form-data",
        mandatory: localStorage.getItem("token")
      },
      params: {
        deviceID: id,
        StringStart: start,
        StringEnd: end,
        accessToken: localStorage.getItem("token")
      }
    })
  };
}
/**
 * Redux action creator: set the target temperature of one AC device.
 * The payload promise resolves with the full axios response.
 *
 * Fix: removed the promise-constructor antipattern (no reject path meant a
 * failed request hung the payload forever) and the debug console.log.
 */
function setAcTemperature(id, temperature) {
  return {
    type: SET_TEMPERATURE,
    setTemperaturePayload: axios.get(apiUrl + "set-temperature", {
      headers: {
        "Content-type": "application/form-data",
        mandatory: localStorage.getItem("token")
      },
      params: {
        accessToken: localStorage.getItem("token"),
        deviceID: id,
        temperature: temperature
      }
    })
  };
}
// Public AC action creators; the commented-out todo actions are intentionally
// not exported.
export {
  turnOnAc,
  turnOffAc,
  getAc,
  getAcStatus,
  setTimer,
  setAcTemperature
  // newTodoFailure,
  // newTodoSuccess
};
|
#!/bin/bash
#------------------------------------------------------------------
# setup
#------------------------------------------------------------------
# Abort on the first failing command.
set -e
scriptdir=$(cd $(dirname $0) && pwd)
source ${scriptdir}/common.bash
header C#
#------------------------------------------------------------------
# Run the test
# Scaffold a fresh C# CDK app, build it against nuget.org, and synthesize
# the CloudFormation template to verify the init template works end to end.
setup
cdk init -l csharp -t app
dotnet build \
--source https://api.nuget.org/v3/index.json \
src
cdk synth
|
<reponame>suhuanzheng7784877/ParallelExecute
package org.para.distributed.master;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import org.para.distributed.dto.WorkerNode;
import org.para.distributed.util.SortStrategy;
/**
 * Manager of worker resource machines for distributed task execution.
 *
 * @author liuyan
 * @Email:<EMAIL>
 * @version 0.1
 * @Date: 2013-11-16 下午2:48:08
 * @Copyright: 2013 story All rights reserved.
 */
public class WorkerManagers {

    /** Registry of known worker nodes (copy-on-write set for concurrent access). */
    private final static Set<WorkerNode> workerNodes = new CopyOnWriteArraySet<WorkerNode>();

    public static Set<WorkerNode> getWorkernodes() {
        return workerNodes;
    }

    /**
     * Selects the top-N least-loaded worker nodes.
     *
     * @param parallelNum how many nodes to pick; values &lt;= 0 are coerced to 1,
     *                    and values larger than the pool size are capped at it
     * @return the best {@code parallelNum} nodes after re-balancing, or
     *         {@code null} when no node is registered (existing callers depend
     *         on the null contract, so it is preserved)
     */
    public static List<WorkerNode> selectTopFreeWorkerNode(int parallelNum) {
        // Snapshot copy (array-backed): nodes registered after this point are
        // not candidates for this selection.
        List<WorkerNode> list = new ArrayList<WorkerNode>(workerNodes);
        int listSize = list.size();
        if (0 == listSize) {
            return null;
        }
        if (parallelNum <= 0) {
            // Invalid argument: fall back to a single node.
            parallelNum = 1;
        } else if (parallelNum >= listSize) {
            // Cap at the number of available nodes.
            parallelNum = listSize;
        }
        // Re-run load balancing: sort the candidate pool before slicing.
        reSortBlance(list);
        return list.subList(0, parallelNum);
    }

    /**
     * Adds a node, replacing any existing entry that compares equal
     * (a plain Set.add would keep the old instance).
     */
    public static void addOrReplaceWorkerNode(WorkerNode workerNode) {
        if (workerNodes.contains(workerNode)) {
            removeWorkerNode(workerNode);
        }
        workerNodes.add(workerNode);
    }

    /**
     * Re-runs load balancing by sorting the candidate pool in place.
     *
     * @param list the candidate resource pool to sort
     */
    public static void reSortBlance(List<WorkerNode> list) {
        SortStrategy.sortCandidateList(list);
    }

    /** Removes a node from the registry. */
    public static void removeWorkerNode(WorkerNode workerNode) {
        workerNodes.remove(workerNode);
    }

    /** Removes every registered node. */
    public static void clearWorkerNode() {
        workerNodes.clear();
    }

    /** @return the number of registered worker nodes */
    public static int getWorkerNodeSize() {
        return workerNodes.size();
    }

    /**
     * @return whether at least one worker node is available for distributed
     *         execution (fix: return the comparison directly instead of the
     *         if/return-true/return-false anti-idiom)
     */
    public static boolean isHaveWorkerNodeToExecuteDistributed() {
        return getWorkerNodeSize() > 0;
    }
}
|
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.sps;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Iterator;
/**
 * Computes the open {@link TimeRange}s of a day in which a requested meeting
 * can be scheduled, given the attendees' existing events.
 */
public final class FindMeetingQuery {

  /**
   * Converts a sorted, flattened list of busy start/end minutes into the free
   * gaps of the day that are long enough for the request.
   *
   * @param timeRangesArray sorted minutes-of-day marking busy-interval boundaries
   * @param request the meeting request whose duration must fit in each gap
   * @param event the event collection (unused; kept for signature compatibility)
   * @return free ranges of at least the requested duration
   */
  public List<TimeRange> findRange(ArrayList timeRangesArray, MeetingRequest request, Collection<Event> event) {
    List<TimeRange> ranges = new ArrayList<TimeRange>();
    int timeRange = 0;
    int timePlus = 0;
    for (int i = 0; i < timeRangesArray.size(); i++) {
      timeRange = (int) timeRangesArray.get(i);
      if (i == 0) {
        // Gap before the first busy minute; skip when the day starts busy.
        if (timeRange == TimeRange.START_OF_DAY) {
          continue;
        }
        ranges.add(TimeRange.fromStartEnd(TimeRange.START_OF_DAY, timeRange, false));
      } else if (i == (timeRangesArray.size() - 1)) {
        // Gap after the last busy minute; skip when the day ends busy.
        if (timeRange == (TimeRange.END_OF_DAY + 1)) {
          break;
        }
        ranges.add(TimeRange.fromStartEnd(timeRange, TimeRange.END_OF_DAY, true));
      } else {
        timePlus = (int) timeRangesArray.get(++i);
        // Duplicate boundary (back-to-back events): zero-length gap, skip it.
        if (timeRange == timePlus) {
          continue;
        }
        ranges.add(TimeRange.fromStartEnd(timeRange, timePlus, false));
      }
    }
    // Fix: the original removed elements inside an index loop without stepping
    // the index back, which skipped the element following every removal and
    // could leave ranges shorter than the requested duration in the result.
    // removeIf inspects every element exactly once.
    ranges.removeIf(range -> range.duration() < request.getDuration());
    return ranges;
  }

  /**
   * Returns the time ranges in which the requested meeting can take place.
   * Mandatory attendees' events always block time; optional attendees' events
   * block time too, except all-day events, which are ignored.
   */
  public Collection<TimeRange> query(Collection<Event> events, MeetingRequest request) {
    if (request.getDuration() > TimeRange.WHOLE_DAY.duration()) {
      return Arrays.asList();
    }
    if (events.isEmpty()) {
      return Arrays.asList(TimeRange.WHOLE_DAY);
    }
    // Flattened start/end minutes of busy intervals, merged as we scan events.
    ArrayList<Integer> startEndTimes = new ArrayList<Integer>();
    int start = 0;
    int end = 0;
    int startTimePrev = 0;
    int endTimePrev = 0;
    int index = 0;
    for (Event event : events) {
      start = event.getWhen().start();
      end = event.getWhen().end();
      // Mandatory attendees: merge overlapping busy intervals.
      if (event.getAttendees().stream().anyMatch(request.getAttendees()::contains)) {
        if (event.getWhen().start() < endTimePrev && event.getWhen().start() > startTimePrev) {
          if (event.getWhen().end() > endTimePrev) {
            // Partial overlap with the previous interval: extend its end.
            if (startEndTimes.contains(endTimePrev)) {
              startEndTimes.set(index, end);
            }
          } else if (endTimePrev > event.getWhen().end()) {
            // Fully nested interval: nothing to add, just advance trackers.
            index += 1;
            startTimePrev = start;
            endTimePrev = end;
            continue;
          }
        } else {
          // No overlap: record a fresh busy interval.
          startEndTimes.add(start);
          startEndTimes.add(end);
        }
      }
      // Optional attendees: same merging logic, but an all-day event means the
      // optional attendee is simply skipped rather than blocking the whole day.
      if (event.getAttendees().stream().anyMatch(request.getOptionalAttendees()::contains)) {
        if (event.getWhen().equals(TimeRange.WHOLE_DAY)) {
          index += 1;
          startTimePrev = start;
          endTimePrev = end;
          continue;
        }
        if (event.getWhen().start() < endTimePrev && event.getWhen().start() > startTimePrev) {
          if (event.getWhen().end() > endTimePrev) {
            // Overlap confirmed: extend the previous interval's end.
            if (startEndTimes.contains(endTimePrev)) {
              startEndTimes.set(index, end);
            }
          } else if (endTimePrev > event.getWhen().end()) {
            // Nested overlap: do nothing, update trackers and index.
            index += 1;
            startTimePrev = start;
            endTimePrev = end;
            continue;
          }
        } else {
          // No overlap or nesting: add as a normal busy interval.
          startEndTimes.add(start);
          startEndTimes.add(end);
        }
      }
      index += 1;
      startTimePrev = start;
      endTimePrev = end;
    }
    Collections.sort(startEndTimes);
    if (startEndTimes.isEmpty()) {
      return Arrays.asList(TimeRange.WHOLE_DAY);
    }
    return findRange(startEndTimes, request, events);
  }
}
|
package com.drugbox.Bean.CommentInfo;
import com.drugbox.Bean.IBeanOperation;
/**
* Created by 44247 on 2016/2/22 0022.
*/
public class CommentZanIBean extends IBeanOperation {
    // Identifier of the comment being liked ("zan").
    private int commentId;

    public int getCommentId() {
        return commentId;
    }

    public void setCommentId(int commentId) {
        this.commentId = commentId;
    }
}
|
<filename>build/esm/shaders/glsl/point.alpha.generic.js
// GLSL chunk providing the generic alpha term for point rendering: alpha is
// the interpolated pixel size scaled by the inverted mask. The template
// literal below is emitted verbatim into a shader, so its text is unchanged.
export default /* glsl */ `varying float vPixelSize;
float getGenericAlpha(float mask) {
return vPixelSize * 2.0 * (1.0 - mask);
}
`;
|
<gh_stars>0
import { IsString } from "class-validator";
// DTO validated by class-validator for posting a reaction on a piece of content.
export class ReactionContentDto {
  // Reaction kind — presumably like/dislike etc.; confirm against the API.
  @IsString()
  type: string;

  // Username of the reacting user.
  @IsString()
  username: string;

  // Identifier of the content being reacted to.
  @IsString()
  id_content: string;
}
# When you want a script to create files with specific permissions.
umask 077
# Write the echo output into a temp file that ends up with mode 600.
echo "ID: abcd123456" > idinfo.tmp
# Files created by sh get a default mode of 666
# (directories get 777).
# Every bit set in the umask is cleared from that default:
# 666 --> 1 1 0 1 1 0 1 1 0
# 022 --> 0 0 0 0 1 0 0 1 0
# result> 1 1 0 1 0 0 1 0 0
# So with umask 077 the resulting permission is 600.
|
#!/bin/sh
# Balandroid bootstrap: confirm with the user, then run the setup script in
# the current shell so anything it exports persists in this session.
# NOTE(review): 'read -r -p' is also a bashism; some strict POSIX shells
# (e.g. dash) reject -p — confirm the target shells before relying on it.
echo "Balandroid v2.0 By Kevin N. Omyonga"
SCRIPT_PATH="./scripts/setup.sh"
read -r -p "Begin the setup?[y/N] " choice
case "$choice" in
[yY][eE][sS]|[yY])
# Fix: 'source' is a bash-ism; under the #!/bin/sh shebang only the POSIX
# '.' (dot) command is guaranteed to exist.
. "$SCRIPT_PATH"
;;
*)
echo "Program Terminated"
;;
esac
|
<filename>zillowAPI/ZillowError.py
class NetworkRequestFail(Exception):
    """Raised when the HTTP request to the Zillow API cannot be completed."""
    pass
class ZillowRequestError(Exception):
    """Error code and description returned by the Zillow API.

    See: https://www.zillow.com/howto/api/GetZestimate.htm
    """

    def __init__(self, code, message):
        """
        :param code: integer status code; 0 means a successful request
        :param message: detailed description from Zillow
        """
        self.code = code
        self.message = message

    def __str__(self):
        return f"Zillow Request Error: {self.code}, {self.message}"
#! /bin/bash
# Test-harness setup: force CWD to tests/, locate the gsocket tools, pick a
# working netcat variant and its flags, and define the list of tests to run.
#export GSOCKET_IP=127.0.0.1
#export GSOCKET_PORT=31337
# Simulate bad network
# https://medium.com/@docler/network-issues-simulation-how-to-test-against-bad-network-conditions-b28f651d8a96
# DEV=wlan0
# tc qdisc add dev ${DEV} root netem loss 1%
# tc qdisc change dev ${DEV} root netem corrupt 2%
# tc qdisc change dev ${DEV} root netem duplicate 1%
# tc qdisc change dev ${DEV} root netem delay 50ms reorder 25%
# # delete all after use:
# tc qdisc del dev ${DEV} root
# depend on: md5sum, bc, rsync, netstat, netcat, dd, ssh, sshd
# Debian packaging: Force CWD to ./tests/
BASEDIR="$(cd "$(dirname "${0}")" || exit; pwd)"
cd "$BASEDIR"
# Sleep for connection time (CT). On localhost this can be 0.1
SLEEP_CT=0.5
if [ x"$GSOCKET_IP" == "x127.0.0.1" ]; then
SLEEP_CT=0.1
fi
# Locate the gsocket binaries: default to ../tools unless GS_PREFIX is set.
if [[ -z $GS_PREFIX ]]; then
GS_PREFIX="$(cd ${BASEDIR}/../tools || exit; pwd)"
GS_BINDIR="$GS_PREFIX"
else
GS_BINDIR="${GS_PREFIX}/bin/"
fi
PATH=${GS_BINDIR}:/usr/local/bin:$PATH
# printf "#! /bin/bash\nexec nc\n" >gs_nc
SLEEP_WD=20 # Max seconds to wait for a process to finish receiving...
# Define MD5() portably: BSD 'md5 -q' or GNU 'md5sum' (second wins if both exist).
command -v md5 >/dev/null 2>&1 && MD5(){ md5 -q "${1}";}
command -v md5sum >/dev/null 2>&1 && MD5() { md5sum "${1}" | cut -f1 -d' ';}
command -v bc >/dev/null 2>&1 || { echo >&2 "bc not installed. apt-get install bc."; exit 255; }
command -v rsync >/dev/null 2>&1 || { echo >&2 "rsync not installed. apt-get install rsync."; exit 255; }
command -v netstat >/dev/null 2>&1 || { echo >&2 "netstat not installed. apt-get install net-tools."; exit 255; }
# Use traditional netcat that supports "netcat -nlp" for cross-platform comp.
# on CentOS there is only nmap's netcat as 'nc' but we are expecting 'netcat()'.
if [[ "$(nc --version 2>&1)" =~ Ncat ]]; then
NC=nc
NC_EOF_ARG=" " #not empty
NC_LISTEN_ARG="-nlp"
else
# Try traditional netcat first.
if command -v netcat >/dev/null 2>&1; then
NC=netcat
else
if command -v nc >/dev/null 2>&1; then
NC=nc #cygwin
else
echo >&2 "netcat not installed. apt-get install netcat."; exit 255;
fi
fi
fi
# Different OSes use different netcats:
# Probe the chosen netcat's help text for its close-on-EOF flag.
if [[ -z "$NC_EOF_ARG" ]]; then
# HERE: Not Ncat
if [[ $($NC --help 2>&1) =~ "close connection on EOF" ]]; then
NC_EOF_ARG="-c"
elif [[ $($NC --help 2>&1) =~ "w timeout" ]]; then
NC_EOF_ARG="-w1"
else
NC_EOF_ARG="-q1"
fi
fi
# Probe for the listen-mode flag variant (-nl vs -nlp).
if [[ -z "$NC_LISTEN_ARG" ]]; then
if [[ $($NC --help 2>&1) =~ "source_port" ]]; then
# apple default : usage: nc [-46AacCDdEFhklMnOortUuvz] [-K tc] [-b boundif] [-i interval] [-p source_port]
# fbsd default : [-P proxy_username] [-p source_port] [-s source] [-T ToS]
# cygwin default: [-P proxy_username] [-p source_port] [-s source] [-T ToS]
NC_LISTEN_ARG="-nl"
else
NC_LISTEN_ARG="-nlp"
fi
fi
export NC
export NC_EOF_ARG
export NC_LISTEN_ARG
export NC_LISTEN="${NC} ${NC_LISTEN_ARG}"
export NC_LISTEN_EOF="${NC} ${NC_LISTEN_ARG} ${NC_EOF_ARG}"
sleep 0.1 &>/dev/null || { echo >&2 "sleep not accepting 0.1. PATH set correct?"; exit 255; }
# Colored status banners used by the test helpers below.
OK="....[\033[1;32mOK\033[0m]"
FAIL="[\033[1;31mFAILED\033[0m]"
SKIP="[\033[1;33mskipping\033[0m]"
ECHO="echo -e"
# NETSTATTCP(): list TCP sockets portably (Linux vs Solaris/BSD syntax).
NETSTATTCP(){ netstat -ant;}
[[ x"$OSTYPE" == "xsolaris"* ]] && NETSTATTCP(){ netstat -an -f inet; }
[[ x"$OSTYPE" == *BSD* ]] && NETSTATTCP(){ netstat -an -f inet; }
# Default test selection; overridden by command-line arguments below.
tests="1.1 "
tests+="2.1 2.2 "
tests+="3.1 "
tests+="4.1 4.2 "
tests+="5.1 5.2 5.3 5.4 "
#tests+="5.5 " # cleartext
tests+="6.1 6.2 6.3 6.4 6.5 6.6 " # gs-netcat
#tests+="6.7 " # cleartext
tests+="6.8 " # TOR
tests+="7.1 7.2 7.3 7.4 "
tests+="8.1 8.2 8.3 "
tests+="9.1 9.2 9.3 9.4 "
tests+="10.1 10.2 10.3 " # blitz, gs-sftp
tests+="10.4 " # gs-mount
tests+="10.5 " # gsocket nc
tests+="10.6 " # gsocket socat
tests+="10.7 " # gsocket ssh
if [ x"$1" != x ]; then
tests="$@ "
fi
mk_dummy()
{
	# mk_dummy <file> <kb>: create <file> with <kb> KiB of random data,
	# unless a previous run already left it behind.
	if [ ! -f "$1" ]; then
		dd if=/dev/urandom of="$1" bs=1024 count=$2 2>/dev/null
	fi
}
# Create the random test payloads of various sizes (idempotent).
mk_dummy test1k.dat 1
mk_dummy test4k.dat 4
mk_dummy test50k.dat 50
mk_dummy test1M.dat 1024
mk_dummy test50Mx.dat 51200
# QUICK mode substitutes a 50k file for the 50MB transfer tests.
if [[ -n "$QUICK" ]]; then
	cp test50k.dat test50M.dat
else
	cp test50Mx.dat test50M.dat
fi
echo "Fubar" >>test50M.dat # Make it an odd length
# Pre-compute reference checksums (MD5 is a helper defined earlier in this
# script — presumably wrapping md5sum/md5; not visible in this chunk).
MD50MB="$(MD5 test50M.dat)"
MD1MB="$(MD5 test1M.dat)"
MDHELLOW="$(echo "Hello World" | MD5 /dev/stdin)"
# test_start [msg...]: wipe the output/error files of the previous test,
# print the test banner (if given) and make sure a shared secret exists.
test_start()
{
	rm -f client_out.dat server_out.dat server_err.txt client_err.txt server[123]_out.dat client[12]_out.dat server[123]_err.txt client[12]_err.txt nc[123]_out.dat nc[123]_err.txt
	[[ x"$1" != x ]] && $ECHO $*
	[[ -s id_sec.txt ]] || new_id
}
# fail <code...>: print FAILED plus the failure code(s) and abort the run.
fail()
{
	$ECHO "${FAIL}"-$*
	exit 255
}
# skip <reason...>: print a yellow 'skipping' marker but keep going.
skip()
{
	$ECHO "${SKIP}" $*
}
# code file1 file2
# md5fail: abort with <code> when the two files' MD5 sums differ.
md5fail()
{
	[[ "$(MD5 ${2})" != "$(MD5 ${3})" ]] && fail $1;
}
# Wait until a process has terminated or kill it after SLEEP_WD seconds..
# waitkp <pid>: poll in 0.1s steps; if the watchdog expires, kill -9 the
# process and abort the whole test run with exit 255.
waitkp()
{
	x=0;
	rounds=`bc <<<"$SLEEP_WD / 0.1"`
	while :; do
		kill -0 $1 &>/dev/null
		if [ $? -ne 0 ]; then
			# Break if process is not running.
			return
		fi
		sleep 0.1
		x=$(($x + 1))
		if [ $x -gt $rounds ]; then
			break;
		fi
	done
	echo "Killing hanging process...."
	kill -9 $1 &>/dev/null
	exit 255
}
waitk()
{
	# Apply waitkp (wait-or-kill) to every PID passed as an argument.
	while [ $# -gt 0 ]; do
		waitkp "$1"
		shift
	done
}
# Wait for 2 files to become identical...
# waitf <file1> <file2>: poll MD5s in 0.1s steps for up to SLEEP_WD seconds.
# Only warns on timeout — the caller's own MD5 comparison decides pass/fail.
waitf()
{
	x=0;
	rounds=`bc <<<"$SLEEP_WD / 0.1"`
	while :; do
		if [ "$(MD5 $1)" == "$(MD5 $2)" ]; then
			return
		fi
		sleep 0.1
		x=$(($x + 1))
		if [ $x -gt $rounds ]; then
			break;
		fi
	done
	echo "Oops. files not identical...."
}
# Wait for file to match hash
# waitfhash <file> <md5>: poll until MD5(<file>) equals the expected hash,
# or give up after SLEEP_WD seconds. Only warns on timeout — the caller's
# own comparison decides pass/fail.
waitfhash()
{
	x=0;
	rounds=`bc <<<"$SLEEP_WD / 0.1"`
	while :; do
		if [ "$(MD5 $1)" == "$2" ]; then
			return
		fi
		sleep 0.1
		x=$(($x + 1))
		if [ $x -gt $rounds ]; then
			break;
		fi
	done
	# Fixed message: this helper compares a file against an expected hash;
	# the old "files not identical" text was copy/pasted from waitf.
	echo "Oops. file does not match expected hash...."
}
# waittcp <port>: wait (up to SLEEP_WD seconds, 0.1s polling) until a local
# TCP socket matching <port> shows up in LISTEN state; warn on timeout.
waittcp()
{
	x=0;
	rounds=`bc <<<"$SLEEP_WD / 0.1"`
	while :; do
		NETSTATTCP 2>/dev/null | grep LISTEN | grep "$1" &>/dev/null
		if [ $? -eq 0 ]; then
			return
		fi
		sleep 0.1
		x=$(($x + 1))
		if [ $x -gt $rounds ]; then
			break;
		fi
	done
	echo "Oops. TCP $1 not listening...."
}
sleep_ct()
{
	# Pause for SLEEP_CT seconds so a backgrounded peer can reach its
	# listen/connect state before the foreground side starts.
	sleep "${SLEEP_CT}"
}
# new_id: generate a fresh shared secret into id_sec.txt (used by -k in
# every test below).
new_id()
{
	# Create a random secret for all tests
	../tools/gs-helloworld -g 2>/dev/null >id_sec.txt
}
# killall -9 gs-helloworld gs-pipe gs-full-pipe gs-netcat &>/dev/null
# [[ -f id_sec.txt ]] || new_id
if [[ "$tests" =~ '1.1 ' ]]; then
	### 1 - Hello World
	# Server listens in the background; client sends two lines; the client's
	# received output must match a known MD5 of the expected transcript.
	test_start -n "Running: Hello World #1.1 ................................"
	GSPID="$(sh -c '../tools/gs-helloworld -k id_sec.txt -l 2>server_err.txt >server_out.dat & echo ${!}')"
	# sleep 0.5 required or otherwise kernel will send both strings in single
	# tcp and that would result in a single read() call on other side.
	sleep_ct && (echo "Hello World"; sleep 1; echo "That's the end") | ../tools/gs-helloworld -w -k id_sec.txt 2>client_err.txt >client_out.dat
	waitk $GSPID
	if [ "$(MD5 client_out.dat)" != "628eca04c4cb6c8f539381be1c5cd325" ]; then fail 1; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '2.1 ' ]]; then
	### 2 - Pipe
	# Normal (server listening, client connecting)
	test_start -n "Running: pipe #2.1 ......................................."
	GSPID="$(sh -c '../tools/gs-pipe -k id_sec.txt -l 2>server_err.txt >server_out.dat & echo ${!}')"
	sleep_ct && ../tools/gs-pipe -k id_sec.txt <test50k.dat 2>client_err.txt >client_out.dat
	waitk $GSPID
	if [ "$(MD5 test50k.dat)" != "$(MD5 server_out.dat)" ]; then fail 1; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '2.2 ' ]]; then
	### Waiting client test
	# Reversed start order: client (-w) starts first and waits for the server.
	test_start -n "Running: pipe #2.2 (waiting for server)..................."
	GSPID="$(sh -c '../tools/gs-pipe -k id_sec.txt -w <test50k.dat 2>client_err.txt >client_out.dat & echo ${!}')"
	sleep_ct && ../tools/gs-pipe -k id_sec.txt -l 2>server_err.txt >server_out.dat
	waitk $GSPID
	if [ "$(MD5 test50k.dat)" != "$(MD5 server_out.dat)" ]; then fail 1; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '3.1 ' ]]; then
	### Impersonate 'listen'
	# Two servers share the auth token; a third with a wrong token must be
	# rejected (exit 255) while both good servers still receive the data.
	test_start -n "Running: pipe #3.1 (auth token)..........................."
	GS1PID="$(sh -c '../tools/gs-pipe -k id_sec.txt -l -a player-alice 2>server1_err.txt >server1_out.dat & echo ${!}')"
	GS2PID="$(sh -c '../tools/gs-pipe -k id_sec.txt -l -a player-alice 2>server2_err.txt >server2_out.dat & echo ${!}')"
	# Next server should not be allowed to listen (wrong -a key)
	sleep_ct
	../tools/gs-pipe -k id_sec.txt -l -a player-mallory 2>server3_err.txt >server3_out.dat
	RET=$?
	if [ $RET -ne 255 ]; then fail 1; fi
	# Here: Two servers are still running...
	../tools/gs-pipe -k id_sec.txt <test50k.dat 2>client_err.txt >client_out.dat
	../tools/gs-pipe -k id_sec.txt <test50k.dat 2>client_err.txt >client_out.dat
	waitk $GS1PID $GS2PID &>/dev/null
	if [ "$(MD5 test50k.dat)" != "$(MD5 server1_out.dat)" ]; then fail 2; fi
	if [ "$(MD5 test50k.dat)" != "$(MD5 server2_out.dat)" ]; then fail 3; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '4.1' ]]; then
	### Client to become a server if no server is listening
	test_start -n "Running: pipe #4.1 (become server if possible)............"
	GSPID="$(sh -c '../tools/gs-pipe -k id_sec.txt -A <test1k.dat 2>server_err.txt >server_out.dat & echo ${!}')"
	sleep_ct
	../tools/gs-pipe -k id_sec.txt -A <test50k.dat 2>client_err.txt >client_out.dat
	waitk $GSPID
	# gs-pipe transfers in one direction only: exactly one of the two
	# comparisons below is expected to mismatch, so FC must end up at 1.
	# BUGFIX: the increments used the never-set variable FX ($((FX+1)) is
	# always 1), so two mismatches were indistinguishable from one; count
	# with FC itself instead.
	FC=0
	[[ "$(MD5 test50k.dat)" != "$(MD5 server_out.dat)" ]] && FC=$((FC+1))
	[[ "$(MD5 test1k.dat)" != "$(MD5 client_out.dat)" ]] && FC=$((FC+1))
	[[ "$FC" != 1 ]] && fail 1
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '4.2' ]]; then
	# Client already waiting. 2nd client to become server (if no server available)
	test_start -n "Running: pipe #4.2 (..while client waiting)..............."
	GSPID="$(sh -c '../tools/gs-pipe -k id_sec.txt -w <test50k.dat 2>client_err.txt >client_out.dat & echo ${!}')"
	sleep_ct
	../tools/gs-pipe -k id_sec.txt -A 2>server_err.txt >server_out.dat
	waitk $GSPID
	if [ "$(MD5 test50k.dat)" != "$(MD5 server_out.dat)" ]; then fail 1; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '5.1' ]]; then
	# full-pipe: bi-directional — both sides must receive the other's data.
	test_start -n "Running: full-pipe #5.1..................................."
	GSPID="$(sh -c '../tools/gs-full-pipe -k id_sec.txt -A <test50k.dat 2>client_err.txt >client_out.dat & echo ${!}')"
	sleep_ct
	../tools/gs-full-pipe -k id_sec.txt -A <test50k.dat 2>server_err.txt >server_out.dat
	waitk $GSPID
	if [ "$(MD5 test50k.dat)" != "$(MD5 server_out.dat)" ]; then fail 1; fi
	if [ "$(MD5 test50k.dat)" != "$(MD5 client_out.dat)" ]; then fail 2; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '5.2' ]]; then
	# Same as 5.1 but with the (possibly QUICK-shrunk) 50MB payload.
	test_start -n "Running: full-pipe #5.2 (50MB)............................"
	GSPID="$(sh -c '../tools/gs-full-pipe -k id_sec.txt -A <test50M.dat 2>client_err.txt >client_out.dat & echo ${!}')"
	sleep_ct
	../tools/gs-full-pipe -k id_sec.txt -A <test50M.dat 2>server_err.txt >server_out.dat
	waitk $GSPID
	if [ "$MD50MB" != "$(MD5 server_out.dat)" ]; then fail 1; fi
	if [ "$MD50MB" != "$(MD5 client_out.dat)" ]; then fail 2; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '5.3' ]]; then
	# Asymmetric payloads: the side started first sends the larger file.
	test_start -n "Running: full-pipe #5.3 (assym sizes, large server)......."
	GSPID="$(sh -c '../tools/gs-full-pipe -k id_sec.txt -A <test1M.dat 2>server_err.txt >server_out.dat & echo ${!}')"
	sleep_ct
	sleep 1
	../tools/gs-full-pipe -A -k id_sec.txt <test50k.dat 2>client_err.txt >client_out.dat
	waitk $GSPID &>/dev/null
#	if [ "$MD50MB" != "$(MD5 server_out.dat)" ]; then fail 1; fi
	if [ "$(MD5 test1M.dat)" != "$(MD5 client_out.dat)" ]; then fail 1; fi
	if [ "$(MD5 test50k.dat)" != "$(MD5 server_out.dat)" ]; then fail 2; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '5.4' ]]; then
	# Mirror of 5.3: the side started first sends the smaller file.
	test_start -n "Running: full-pipe #5.4 (assym sizes, small server)......."
	GSPID="$(sh -c '../tools/gs-full-pipe -k id_sec.txt -A <test50k.dat 2>server_err.txt >server_out.dat & echo ${!}')"
	sleep_ct
	sleep 1
	../tools/gs-full-pipe -A -k id_sec.txt <test1M.dat 2>client_err.txt >client_out.dat
	waitk $GSPID &>/dev/null
#	if [ "$MD50MB" != "$(MD5 server_out.dat)" ]; then fail 1; fi
	if [ "$(MD5 test1M.dat)" != "$(MD5 server_out.dat)" ]; then fail 1; fi
	if [ "$(MD5 test50k.dat)" != "$(MD5 client_out.dat)" ]; then fail 2; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '5.5' ]]; then
	# Cleartext (-C) variant; disabled in the default test list above.
	test_start -n "Running: full-pipe #5.5 (assymetric sizes, clear)........."
	GSPID="$(sh -c '../tools/gs-full-pipe -k id_sec.txt -AC <test1M.dat 2>server_err.txt >server_out.dat & echo ${!}')"
	sleep_ct
	../tools/gs-full-pipe -k id_sec.txt -AC <test50k.dat 2>client_err.txt >client_out.dat
	waitk $GSPID
	if [ "$MD1MB" != "$(MD5 client_out.dat)" ]; then fail 1; fi
	if [ "$(MD5 test50k.dat)" != "$(MD5 server_out.dat)" ]; then fail 2; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '6.1' ]]; then
	# gs-netcat stdin relay, 1MB in both directions.
	test_start -n "Running: netcat #6.1 (stdin, 1MB)........................."
	GSPID="$(sh -c '../tools/gs-netcat -k id_sec.txt -w <test1M.dat 2>client_err.txt >client_out.dat & echo ${!}')"
	sleep_ct
	../tools/gs-netcat -k id_sec.txt -l <test1M.dat 2>server_err.txt >server_out.dat
	waitk $GSPID
	if [ "$MD1MB" != "$(MD5 server_out.dat)" ]; then fail 1; fi
	if [ "$MD1MB" != "$(MD5 client_out.dat)" ]; then fail 2; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '6.2' ]]; then
	# Asymmetric payloads: client sends 50k, server sends 1MB.
	test_start -n "Running: netcat #6.2 (stdin, assymetric sizes)............"
#	GSPID="$(sh -c '../tools/gs-netcat -k id_sec.txt -w <test1M.dat 2>client_err.txt >client_out.dat & echo ${!}')"
	GSPID="$(sh -c '../tools/gs-netcat -k id_sec.txt -w <test50k.dat 2>client_err.txt >client_out.dat & echo ${!}')"
	sleep_ct
#	../tools/gs-netcat -k id_sec.txt -l <test50k.dat 2>server_err.txt >server_out.dat
	../tools/gs-netcat -k id_sec.txt -l <test1M.dat 2>server_err.txt >server_out.dat
	waitk $GSPID
	if [ "$MD1MB" != "$(MD5 client_out.dat)" ]; then fail 1; fi
	if [ "$(MD5 test50k.dat)" != "$(MD5 server_out.dat)" ]; then fail 2; fi
#	if [ "$MD1MB" != "$(MD5 server_out.dat)" ]; then fail 1; fi
#	if [ "$(MD5 test50k.dat)" != "$(MD5 client_out.dat)" ]; then fail 2; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '6.3' ]]; then
	# Both sides keep their stdin open (sleep 30); once the data has crossed
	# the client is killed and the server must terminate cleanly via waitk.
	test_start -n "Running: netcat #6.3 (stdin, assym sizes, kill client)...."
	GSPID1="$(sh -c '(cat test4k.dat; sleep 30) | ../tools/gs-netcat -k id_sec.txt -w 2>client_err.txt >client_out.dat & echo ${!}')"
	GSPID2="$(sh -c '(cat test1k.dat; sleep 30) | ../tools/gs-netcat -k id_sec.txt -l 2>server_err.txt >server_out.dat & echo ${!}')"
#	sleep_ct
	waitf test4k.dat server_out.dat
	waitf test1k.dat client_out.dat
	kill -9 $GSPID1 &>/dev/null
	waitk $GSPID2
	if [ "$(MD5 test4k.dat)" != "$(MD5 server_out.dat)" ]; then fail 1; fi
	if [ "$(MD5 test1k.dat)" != "$(MD5 client_out.dat)" ]; then fail 2; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '6.4' ]]; then
	# Mirror of 6.3: kill the server and expect the client to terminate.
	test_start -n "Running: netcat #6.4 (stdin, assym sizes, kill server)...."
	GSPID1="$(sh -c '(cat test4k.dat; sleep 30) | ../tools/gs-netcat -k id_sec.txt -w 2>client_err.txt >client_out.dat & echo ${!}')"
	GSPID2="$(sh -c '(cat test1k.dat; sleep 30) | ../tools/gs-netcat -k id_sec.txt -l 2>server_err.txt >server_out.dat & echo ${!}')"
	waitf test4k.dat server_out.dat
	waitf test1k.dat client_out.dat
	kill -9 $GSPID2 &>/dev/null
	waitk $GSPID1
	if [ "$(MD5 test4k.dat)" != "$(MD5 server_out.dat)" ]; then fail 1; fi
	if [ "$(MD5 test1k.dat)" != "$(MD5 client_out.dat)" ]; then fail 2; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '6.5' ]]; then
	# Client sends nothing (/dev/null); only the server->client direction
	# must transfer and the server must receive zero bytes.
	test_start -n "Running: netcat #6.5 (/dev/null C2S)......................"
	GSPID="$(sh -c '../tools/gs-netcat -k id_sec.txt -w </dev/null 2>client_err.txt >client_out.dat & echo ${!}')"
	sleep_ct
	../tools/gs-netcat -k id_sec.txt -l <test4k.dat 2>server_err.txt >server_out.dat
	waitk $GSPID
	if [ -s server_out.dat ]; then fail 1; fi
	if [ "$(MD5 test4k.dat)" != "$(MD5 client_out.dat)" ]; then fail 2; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '6.6' ]]; then
	# Mirror of 6.5: server sends nothing.
	test_start -n "Running: netcat #6.6 (/dev/null S2C)......................"
	GSPID="$(sh -c '../tools/gs-netcat -k id_sec.txt -w <test4k.dat 2>client_err.txt >client_out.dat & echo ${!}')"
	sleep_ct
	../tools/gs-netcat -k id_sec.txt -l </dev/null 2>server_err.txt >server_out.dat
	waitk $GSPID
	if [ -s client_out.dat ]; then fail 1; fi
	if [ "$(MD5 test4k.dat)" != "$(MD5 server_out.dat)" ]; then fail 2; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '6.7' ]]; then
	# Cleartext variant; disabled in the default test list above.
	test_start -n "Running: netcat #6.7 (stdin, assymetric sizes, clear)....."
	GSPID="$(sh -c '../tools/gs-netcat -k id_sec.txt -wC <test1M.dat 2>client_err.txt >client_out.dat & echo ${!}')"
	sleep_ct
	../tools/gs-netcat -k id_sec.txt -lC <test50k.dat 2>server_err.txt >server_out.dat
	waitk $GSPID
	if [ "$MD1MB" != "$(MD5 server_out.dat)" ]; then fail 1; fi
	if [ "$(MD5 test50k.dat)" != "$(MD5 client_out.dat)" ]; then fail 2; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '6.8' ]]; then
	# TOR variant (-T); skipped unless a local TOR SOCKS proxy listens on 9050.
	test_start -n "Running: netcat #6.8 (stdin, assymetric sizes, TOR)......."
	NETSTATTCP 2>/dev/null | grep LISTEN | grep 9050 &>/dev/null
	if [ $? -ne 0 ]; then
		skip "(no TOR)"
	elif [[ "$GSOCKET_IP" =~ 192\.168\. ]]; then
		skip "$GSOCKET_IP"
	else
		GSPID="$(sh -c '../tools/gs-netcat -k id_sec.txt -wT <test4k.dat 2>client_err.txt >client_out.dat & echo ${!}')"
		sleep_ct
		../tools/gs-netcat -k id_sec.txt -l <test50k.dat 2>server_err.txt >server_out.dat
		waitk $GSPID
		if [ "$(MD5 test4k.dat)" != "$(MD5 server_out.dat)" ]; then fail 1; fi
		if [ "$(MD5 test50k.dat)" != "$(MD5 client_out.dat)" ]; then fail 2; fi
		$ECHO "${OK}"
	fi
fi
if [[ "$tests" =~ '7.1' ]]; then
	# -e: server runs a command per connection; three clients connect and
	# each must receive the same "Hello World" output.
	test_start -n "Running: netcat #7.1 (cmd, multi connect)................."
	GSPID1="$(sh -c '../tools/gs-netcat -k id_sec.txt -l -e "echo Hello World && sleep 1" 2>server_err.txt >server_out.dat & echo ${!}')"
	GSPID2="$(sh -c '../tools/gs-netcat -k id_sec.txt -w </dev/null 2>client2_err.txt >client2_out.dat & echo ${!}')"
	GSPID3="$(sh -c '../tools/gs-netcat -k id_sec.txt -w </dev/null 2>client3_err.txt >client3_out.dat & echo ${!}')"
	../tools//gs-netcat -k id_sec.txt -w </dev/null 2>client_err.txt >client_out.dat
	waitk $GSPID2 $GSPID3
	kill -9 $GSPID1 &>/dev/null
	if [ "${MDHELLOW}" != "$(MD5 client_out.dat)" ]; then fail 1; fi
	if [ "${MDHELLOW}" != "$(MD5 client2_out.dat)" ]; then fail 2; fi
	if [ "${MDHELLOW}" != "$(MD5 client3_out.dat)" ]; then fail 3; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '7.2' ]]; then
	# Non-pty shell; last line of the client transcript must be "Hello World".
	test_start -n "Running: netcat #7.2 (shell, exit)........................"
	GSPID1="$(sh -c '../tools/gs-netcat -k id_sec.txt -l -e /bin/sh 2>server_err.txt >server_out.dat & echo ${!}')"
	echo "date; echo Hello World; exit" | ../tools/gs-netcat -k id_sec.txt -w 2>client_err.txt >client_out.dat
	sleep_ct
	kill $GSPID1
	if [ "${MDHELLOW}" != "$(tail -1 client_out.dat | MD5 /dev/stdin)" ]; then fail 1; fi
	$ECHO "${OK}"
fi
# FreeBSD sends ansi-request to determine screen size. Wait for it to timeout and
# then send our command string.
XCMD="printf \"date && echo Hello World && exit\n\""
if [[ "$OSTYPE" == *"BSD"* ]]; then
	XCMD="sleep 3; ${XCMD}"
fi
if [[ "$tests" =~ '7.3' ]]; then
	test_start -n "Running: netcat #7.3 (pty shell, exit)...................."
	GSPID1="$(sh -c '../tools/gs-netcat -k id_sec.txt -l -i 2>server_err.txt >server_out.dat & echo ${!}')"
	# Can not start Client with -i because it's not a tty. Must 'fake' the terminal response.
	sh -c "$XCMD" | ../tools/gs-netcat -k id_sec.txt -w 2>client_err.txt >client_out.dat
#	(printf "date; echo Hello World; exit\n") | ../tools/gs-netcat -k id_sec.txt -w 2>client_err.txt >client_out.dat
	sleep_ct
	kill $GSPID1
	tail -2 client_out.dat | grep 'Hello World' &>/dev/null
	if [ $? -ne 0 ]; then fail 1; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '7.4' ]]; then
	# Three concurrent pty clients against one interactive (-i) server.
	test_start -n "Running: netcat #7.4 (multi pty shell, exit).............."
	GSPID1="$(sh -c '../tools/gs-netcat -k id_sec.txt -l -i 2>server_err.txt >server_out.dat & echo ${!}')"
	GSPID2=$(sh -c "($XCMD) | ../tools/gs-netcat -k id_sec.txt -w 2>client1_err.txt >client1_out.dat & echo \${!}")
	GSPID3=$(sh -c "($XCMD) | ../tools/gs-netcat -k id_sec.txt -w 2>client2_err.txt >client2_out.dat & echo \${!}")
	GSPID4=$(sh -c "($XCMD) | ../tools/gs-netcat -k id_sec.txt -w 2>client3_err.txt >client3_out.dat & echo \${!}")
	waitk $GSPID2 $GSPID3 $GSPID4
	kill $GSPID1
	if [ x"$(tail -2 client1_out.dat | grep 'Hello World')" == x ]; then fail 1; fi
	if [ x"$(tail -2 client2_out.dat | grep 'Hello World')" == x ]; then fail 2; fi
	if [ x"$(tail -2 client3_out.dat | grep 'Hello World')" == x ]; then fail 3; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '8.1' ]]; then
	# Server-side port forward: gs-netcat server forwards to a local nc
	# listener on 12345; data piped into the gs client must arrive there.
	test_start -n "Running: netcat #8.1 (port forward server side)..........."
	GSPID1="$(sh -c '../tools/gs-netcat -k id_sec.txt -l -d 127.0.0.1 -p 12345 2>server_err.txt >server_out.dat & echo ${!}')"
	GSPID2="$(sh -c '(sleep 10) | $NC_LISTEN 12345 >nc1_out.dat 2>nc1_err.txt & echo ${!}')"
	waittcp 12345
	GSPID3="$(sh -c '../tools/gs-netcat -k id_sec.txt -w <test50k.dat 2>client_err.txt >client_out.dat & echo ${!}')"
	waitf test50k.dat nc1_out.dat
	kill -9 $GSPID1 $GSPID2 $GSPID3 &>/dev/null
	if [ "$(MD5 test50k.dat)" != "$(MD5 nc1_out.dat)" ]; then fail 1; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '8.2' ]]; then
	# nc -> Port 12344 -> GS-NET -> Port 12345 -> nc -ln
	test_start -n "Running: netcat #8.2 (port forward both sides)............"
	GSPID1="$(sh -c '../tools/gs-netcat -k id_sec.txt -l -d 127.0.0.1 -p 12345 2>server_err.txt >server_out.dat & echo ${!}')"
	GSPID2="$(sh -c '(sleep 1) | $NC_LISTEN 12345 >nc1_out.dat 2>nc1_err.txt & echo ${!}')"
	GSPID3="$(sh -c '../tools/gs-netcat -k id_sec.txt -w -p 12344 2>server2_err.txt >server2_out.dat & echo ${!}')"
	waittcp 12344
	waittcp 12345
	GSPID4="$(sh -c '(cat test50k.dat; sleep 15) | $NC -vn 127.0.0.1 12344 >nc2_out.dat 2>nc2_err.txt & echo ${!}')"
	waitf test50k.dat nc1_out.dat
	kill -9 $GSPID1 $GSPID2 $GSPID3 $GSPID4 &>/dev/null
	if [ "$(MD5 test50k.dat)" != "$(MD5 nc1_out.dat)" ]; then fail 1; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '8.3' ]]; then
	# nc -> Port 12344 -> GS-NET -> Port 12345 -> nc -ln
	# Bi-Directional
	test_start -n "Running: netcat #8.3 (port forward both sides, bi-dir)...."
	GSPID1="$(sh -c '../tools/gs-netcat -k id_sec.txt -l -d 127.0.0.1 -p 12345 2>server1_err.txt >server1_out.dat & echo ${!}')"
	GSPID2="$(sh -c '(cat test4k.dat; sleep 15) | $NC_LISTEN 12345 >nc1_out.dat 2>nc1_err.txt & echo ${!}')"
	GSPID3="$(sh -c '../tools/gs-netcat -k id_sec.txt -w -p 12344 2>client_err.txt >client_out.dat & echo ${!}')"
	waittcp 12344
	waittcp 12345
	GSPID4="$(sh -c '(cat test50k.dat; sleep 15) | $NC -vn 127.0.0.1 12344 >nc2_out.dat 2>nc2_err.txt & echo ${!}')"
	waitf test50k.dat nc1_out.dat
	waitf test4k.dat nc2_out.dat
	kill -9 $GSPID1 $GSPID2 $GSPID3 $GSPID4 &>/dev/null
	if [ "$(MD5 test50k.dat)" != "$(MD5 nc1_out.dat)" ]; then fail 1; fi
	if [ "$(MD5 test4k.dat)" != "$(MD5 nc2_out.dat)" ]; then fail 2; fi
	$ECHO "${OK}"
fi
if [[ "$tests" =~ '9.1' ]]; then
	# SOCKS test socat -> port 1085 -> GS-NET -> Port 12345 -> nc -ln
	# Requires a socat build with SOCKS5 support (socat2); skipped otherwise.
	test_start -n "Running: netcat #9.1 (socat/socks5)......................."
	socat -h 2>/dev/null | grep socks5 &>/dev/null
	if [ $? -ne 0 ]; then
		skip "(no socat2)"
	else
		GSPID1="$(sh -c '../tools/gs-netcat -k id_sec.txt -lS 2>server1_err.txt >server1_out.dat & echo ${!}')"
		GSPID2="$(sh -c '(cat test4k.dat; sleep 15) | $NC_LISTEN 12345 >nc1_out.dat 2>nc1_err.txt & echo ${!}')"
		GSPID3="$(sh -c '../tools/gs-netcat -k id_sec.txt -w -p 1085 2>client_err.txt >client_out.dat & echo ${!}')"
		waittcp 1085
		waittcp 12345
		GSPID4="$(sh -c '(cat test50k.dat; sleep 15) | socat - "SOCKS5:localhost:12345 | TCP:127.1:1085" >nc2_out.dat 2>nc2_err.txt & echo ${!}')"
		waitf test50k.dat nc1_out.dat
		waitf test4k.dat nc2_out.dat
		kill -9 $GSPID1 $GSPID2 $GSPID3 $GSPID4 &>/dev/null
		if [ "$(MD5 test50k.dat)" != "$(MD5 nc1_out.dat)" ]; then fail 1; fi
		if [ "$(MD5 test4k.dat)" != "$(MD5 nc2_out.dat)" ]; then fail 2; fi
		$ECHO "${OK}"
	fi
fi
if [[ "$tests" =~ '9.2' ]]; then
	# SOCKS test socat -> port 1085 -> GS-NET -> Port 12345 -> nc -ln
	# Same flow as 9.1 but over the SOCKS4 protocol.
	test_start -n "Running: netcat #9.2 (socat/socks4)......................."
	socat -h 2>/dev/null | grep socks4 &>/dev/null
	if [ $? -ne 0 ]; then
		skip "(no socat)"
	else
		GSPID1="$(sh -c '../tools/gs-netcat -k id_sec.txt -lS 2>server1_err.txt >server1_out.dat & echo ${!}')"
		GSPID2="$(sh -c '(cat test4k.dat; sleep 15) | $NC_LISTEN 12345 >nc1_out.dat 2>nc1_err.txt & echo ${!}')"
		GSPID3="$(sh -c '../tools/gs-netcat -k id_sec.txt -p 1085 2>client_err.txt >client_out.dat & echo ${!}')"
		waittcp 1085
		waittcp 12345
		GSPID4="$(sh -c '(cat test50k.dat; sleep 15) | socat - "SOCKS4:127.0.0.1:127.0.0.1:12345,socksport=1085" >nc2_out.dat 2>nc2_err.txt & echo ${!}')"
		waitf test50k.dat nc1_out.dat
		waitf test4k.dat nc2_out.dat
		kill -9 $GSPID1 $GSPID2 $GSPID3 $GSPID4 &>/dev/null
		if [ "$(MD5 test50k.dat)" != "$(MD5 nc1_out.dat)" ]; then fail 1; fi
		if [ "$(MD5 test4k.dat)" != "$(MD5 nc2_out.dat)" ]; then fail 2; fi
		$ECHO "${OK}"
	fi
fi
if [[ "$tests" =~ '9.3' ]]; then
	# SOCKS test socat -> port 1085 -> GS-NET -> Port 12345 -> nc -ln
	# SOCKS4a variant (hostname resolved by the proxy).
	test_start -n "Running: netcat #9.3 (socat/socks4a)......................"
	socat -h 2>/dev/null | grep socks4 &>/dev/null
	if [ $? -ne 0 ]; then
		skip "(no socat)"
	else
		GSPID1="$(sh -c '../tools/gs-netcat -k id_sec.txt -lS 2>server1_err.txt >server1_out.dat & echo ${!}')"
		GSPID2="$(sh -c '(cat test4k.dat; sleep 15) | $NC_LISTEN 12345 >nc1_out.dat 2>nc1_err.txt & echo ${!}')"
		GSPID3="$(sh -c '../tools/gs-netcat -k id_sec.txt -p 1085 2>client_err.txt >client_out.dat & echo ${!}')"
		waittcp 1085
		waittcp 12345
		GSPID4="$(sh -c '(cat test50k.dat; sleep 15) | socat - "SOCKS4a:127.0.0.1:127.0.0.1:12345,socksport=1085" >nc2_out.dat 2>nc2_err.txt & echo ${!}')"
		waitf test50k.dat nc1_out.dat
		waitf test4k.dat nc2_out.dat
		kill -9 $GSPID1 $GSPID2 $GSPID3 $GSPID4 &>/dev/null
		if [ "$(MD5 test50k.dat)" != "$(MD5 nc1_out.dat)" ]; then fail 1; fi
		if [ "$(MD5 test4k.dat)" != "$(MD5 nc2_out.dat)" ]; then fail 2; fi
		$ECHO "${OK}"
	fi
fi
if [[ "$tests" =~ '9.4' ]]; then
	# SOCKS test with cUrl
	# Two concurrent HTTPS downloads through the SOCKS5 proxy; needs network.
	test_start -n "Running: netcat #9.4 (curl/socks5, multi)................."
	curl --help all 2>/dev/null | grep socks5-hostname &>/dev/null
	if [ $? -ne 0 ]; then
		skip "(no curl)"
	else
		GSPID1="$(sh -c '../tools/gs-netcat -k id_sec.txt -lS 2>server_err.txt >server_out.dat & echo ${!}')"
		GSPID3="$(sh -c '../tools/gs-netcat -k id_sec.txt -w -p 1085 2>client_err.txt >client_out.dat & echo ${!}')"
		waittcp 1085
		touch testmp3.dat testmp3-2.dat
		GSPID4="$(sh -c 'curl --socks5-hostname 127.0.0.1:1085 --output testmp3.dat https://raw.githubusercontent.com/hackerschoice/thc-art/master/deep-phreakin.mp3 >client1_out.dat 2>client1_err.txt & echo ${!}')"
		GSPID5="$(sh -c 'curl --socks5-hostname 127.0.0.1:1085 --output testmp3-2.dat https://raw.githubusercontent.com/hackerschoice/thc-art/master/deep-phreakin.mp3 >client2_out.dat 2>client2_err.txt & echo ${!}')"
		waitk $GSPID4 $GSPID5
		kill -9 $GSPID1 $GSPID3 &>/dev/null
		if [ "$(MD5 testmp3.dat)" != "171a9952951484d020ce1bef52b9eef5" ]; then fail 1; fi
		if [ "$(MD5 testmp3-2.dat)" != "171a9952951484d020ce1bef52b9eef5" ]; then fail 2; fi
		$ECHO "${OK}"
	fi
fi
if [[ "${tests}" =~ '10.1' ]]; then
	# blitz (rsync over gsocket): directory tree with symlinks, a fifo and a
	# /dev/zero link; only regular files/symlinks/dirs may arrive.
	test_start -n "Running: blitz #10.1 ....................................."
	rm -rf test_server test_client
	mkdir -p test_server test_client/foo/bar test_client/empty
	cp test4k.dat test_client/foo/bar/test4k.dat
	cp test1k.dat test_client/foo/bar/test1k.dat
	cp test1k.dat test_client/test1k.dat
	mkfifo test_client/fifo.io
	ln -s foo/bar/test4k.dat test_client/test4k.dat
	ln -s /etc/hosts test_client/etc-hosts
	ln -s /dev/zero test_client/zero
	GSPID1="$(sh -c 'blitz -k id_sec.txt -w -o "RSOPT=--bwlimit=100 -v" test_client/./ 2>client1_err.txt >client1_out.dat & echo ${!}')"
	cd test_server
	GSPID2="$(sh -c 'blitz -k ../id_sec.txt -l 2>../server1_err.txt >../server1_out.dat & echo ${!}')"
	cd ..
	waitk $GSPID1
	kill $GSPID2
	(cd test_client; find . -type f | while read x; do md5fail 1 ../test_server/${x} ${x}; done)
	md5fail 2 test_server/test4k.dat test4k.dat
	[[ -e test_server/fifo.io ]] && fail 3
	[[ -e test_server/zero ]] && fail 4
	[[ -e test_server/etc-hosts ]] && fail 5
	[[ -L test_server/test4k.dat ]] || fail 6
	[[ -d test_server/empty ]] || fail 7
	rm -rf test_server test_client
	$ECHO "${OK}"
fi
if [[ "${tests}" =~ '10.2' ]]; then
	# blitz with the file list supplied on stdin (-f -).
	test_start -n "Running: blitz #10.2 (stdin).............................."
	rm -rf test_client
	mkdir -p test_client
	GSPID1="$(sh -c '(echo test1k.dat; echo test4k.dat) | blitz -k id_sec.txt -w -o "RSOPT=--bwlimit=100 -v" -f - 2>client1_err.txt >client1_out.dat & echo ${!}')"
	cd test_client
	GSPID2="$(sh -c 'blitz -k ../id_sec.txt -l 2>../server1_err.txt >../server1_out.dat & echo ${!}')"
	cd ..
	waitk $GSPID1
	kill $GSPID2
	md5fail 1 test1k.dat test_client/test1k.dat
	md5fail 2 test4k.dat test_client/test4k.dat
	rm -rf test_client
	$ECHO "${OK}"
fi
if [[ "${tests}" =~ '10.3' ]]; then
	# gs-sftp: scripted get/put session driven via stdin.
	test_start -n "Running: gs-sftp #10.3 ..................................."
	rm -rf test_client
	mkdir -p test_client
	GSPID1="$(bash -c '(echo -en "lcd test_client\nget test4k.dat\nlcd ..\ncd test_client\nput test1k.dat\nls\nquit\n") | gs-sftp -k id_sec.txt -w 2>client1_err.txt >client1_out.dat & echo ${!}')"
	GSPID2="$(sh -c 'gs-sftp -k id_sec.txt -l 2>server1_err.txt >server1_out.dat & echo ${!}')"
	waitk $GSPID1
	kill $GSPID2
	md5fail 1 test1k.dat test_client/test1k.dat
	md5fail 2 test4k.dat test_client/test4k.dat
#	rm -rf test_client
	$ECHO "${OK}"
fi
if [[ "${tests}" =~ '10.4' ]]; then
	# gs-mount (sshfs over gsocket); skipped when sshfs is unavailable.
	test_start -n "Running: gs-mount #10.4 .................................."
	command -v sshfs >/dev/null 2>&1
	if [ $? -ne 0 ]; then
		skip "(no sshfs)"
	else
		rm -rf test_client &>/dev/null
		rmdir test_mnt &>/dev/null
		mkdir -p test_client test_mnt &>/dev/null
		cp test1k.dat test4k.dat test_client
		GSPID1="$(sh -c 'gs-mount -k id_sec.txt -w test_mnt 2>client1_err.txt >client1_out.dat & echo ${!}')"
		GSPID2="$(sh -c 'cd test_client; gs-mount -k ../id_sec.txt -l 2>../server1_err.txt >../server1_out.dat & echo ${!}')"
		waitk $GSPID1
		md5fail 1 test_mnt/test1k.dat test_client/test1k.dat
		md5fail 2 test_mnt/test4k.dat test_client/test4k.dat
		if command -v fusermount >/dev/null 2>&1; then
			fusermount -zu test_mnt
		else
			# archLinux -f flag needs superuser (bug in umount)
			umount test_mnt &>/dev/null
			umount -f test_mnt &>/dev/null
		fi
		kill $GSPID2
		rm -rf test_client
		rmdir test_mnt
		$ECHO "${OK}"
	fi
fi
if [[ "${tests}" =~ '10.5' ]]; then
	# Run plain nc under the gsocket LD_PRELOAD wrapper.
	test_start -n "Running: gsocket nc #10.5 (stdin)........................."
	# Can not use nc here because nc does not terminate on EOF from stdin.
	# Socat can be configured to terminate 1 second after EOF has been received.
	# need sleep 3 on RPI (slow system)
	GSPID1="$(sh -c '(cat test4k.dat; sleep 3) | gsocket -k id_sec.txt $NC $NC_EOF_ARG $NC_LISTEN_ARG 31337 2>server_err.txt >server_out.dat & echo ${!}')"
	GSPID2="$(sh -c '(cat test1k.dat; sleep 3) | GSOCKET_ARGS=-w gsocket -k id_sec.txt $NC $NC_EOF_ARG -v gsocket 31337 2>client_err.txt >client_out.dat & echo ${!}')"
	waitk $GSPID2
	kill $GSPID1 &>/dev/null
	md5fail 1 test1k.dat server_out.dat
	md5fail 2 test4k.dat client_out.dat
	$ECHO "${OK}"
fi
if [[ "${tests}" =~ '10.6' ]]; then
	# Run socat under the gsocket wrapper (terminates 1s after EOF via -T1).
	test_start -n "Running: gsocket socat #10.6 (stdin)......................"
	if ! socat -h 2>/dev/null | grep socks4 &>/dev/null; then
		skip "(no socat)"
	elif [[ "$OSTYPE" =~ solaris ]]; then
		# On Solaris the socat binary is 32 bit (but our gs_so lib is 64).
		# Loader wont work.
		skip "(32-bit)"
	else
		# Can not use nc here because nc does not terminate on EOF from stdin.
		# Socat can be configured to terminate 1 second after EOF has been received.
		GSPID1="$(sh -c 'gsocket -k id_sec.txt socat -T1 -,ignoreeof TCP-LISTEN:31337 <test4k.dat 2>server_err.txt >server_out.dat & echo ${!}')"
		GSPID2="$(sh -c 'GSOCKET_ARGS=-w gsocket -k id_sec.txt socat -T1 -,ignoreeof TCP:gsocket:31337 <test1k.dat 2>client_err.txt >client_out.dat & echo ${!}')"
		waitk $GSPID2
		kill $GSPID1 &>/dev/null
		md5fail 1 test1k.dat server_out.dat
		md5fail 2 test4k.dat client_out.dat
		$ECHO "${OK}"
	fi
fi
if [[ "${tests}" =~ '10.7' ]]; then
	# Run a throw-away sshd under the gsocket wrapper and ssh through it;
	# temporarily appends a test key to ~/.ssh/authorized_keys (restored after).
	test_start -n "Running: gsocket ssh #10.7 (stdin)........................"
	if [[ "$OSTYPE" =~ solaris ]]; then
		# Solaris SSHD does not work unless it's run as root (some PAM shit)
		# Also needs -4 flag to run as IPv4 only (still, PAM shit afterwards)
		skip "(needs root)"
	elif [[ "$OSTYPE" =~ darwin ]]; then
		# Can not reliably ld-intercept /usr/sbin/sshd on OSX.
		# https://github.com/hackerschoice/gsocket/issues/26
		skip "(OSX BUG)"
	else
		[[ -f ssh_host_rsa_key ]] || ssh-keygen -q -N "" -t rsa -b 2048 -f ssh_host_rsa_key
		[[ -d ~/.ssh ]] || mkdir ~/.ssh
		[[ -f id_rsa ]] || ssh-keygen -q -N "" -t rsa -b 2048 -f id_rsa
		[[ -f ~/.ssh/authorized_keys ]] && cp -a ~/.ssh/authorized_keys ~/.ssh/authorized_keys-backup
		cat id_rsa.pub >>~/.ssh/authorized_keys
		SSHD_BIN=$(which sshd 2>/dev/null)
		[[ -z $SSHD_BIN ]] && SSHD_BIN="/usr/sbin/sshd"
		[[ -z $SSHD_BIN ]] && SSHD_BIN="/usr/lib/ssh/sshd"
		export SSHD_BIN
		[[ -f "$SSHD_BIN" ]] || { echo >&2 "sshd not found"; exit 255; }
		GSPID1="$(sh -c 'gsocket -k id_sec.txt $SSHD_BIN -f /dev/null -o HostKey=${PWD}/ssh_host_rsa_key -p 31338 -D 2>server_err.txt >server_out.dat & echo ${!}')"
		GSPID2="$(sh -c 'GSOCKET_ARGS=-w gsocket -k id_sec.txt ssh -i id_rsa -o StrictHostKeyChecking=no -p 31338 ${LOGNAME}@gsocket echo Hello World 2>client_err.txt >client_out.dat & echo ${!}')"
		waitk $GSPID2
		kill $GSPID1 &>/dev/null
		[[ -f ~/.ssh/authorized_keys-backup ]] && cp -a ~/.ssh/authorized_keys-backup ~/.ssh/authorized_keys
#		rm ~/.ssh/authorized_keys-backup
		if [ "${MDHELLOW}" != "$(MD5 client_out.dat)" ]; then fail 1; fi
		$ECHO "${OK}"
	fi
fi
# When run without an explicit test selection, wipe the last test's
# output files before exiting.
if [ x"$1" == x ]; then
	### Clean-up
	test_start ""
fi
exit 0
|
"""
Detect the semantic similarity between two sentences using NLTK and WordNet.
"""
import nltk
from nltk.corpus import wordnet
def compute_similarity(sentence1, sentence2):
    """
    Computes the similarity between two sentences.

    Averages the Wu-Palmer similarity of the first WordNet synset of every
    token pair over all token pairs.

    Args:
        sentence1: The first sentence.
        sentence2: The second sentence.

    Returns:
        The similarity between the two sentences (value between 0 and 1).
        Returns 0.0 when either sentence tokenizes to nothing.
    """
    tokens1 = nltk.word_tokenize(sentence1)
    tokens2 = nltk.word_tokenize(sentence2)
    # Guard against ZeroDivisionError on empty input.
    if not tokens1 or not tokens2:
        return 0.0
    similarity = 0
    for token1 in tokens1:
        for token2 in tokens2:
            syn1 = wordnet.synsets(token1)
            syn2 = wordnet.synsets(token2)
            if len(syn1) > 0 and len(syn2) > 0:
                # wup_similarity() returns None when the two synsets share no
                # common ancestor; the original code crashed with a TypeError
                # ("None + int") in that case. Treat it as zero similarity.
                pair_sim = wordnet.wup_similarity(syn1[0], syn2[0])
                if pair_sim is not None:
                    similarity += pair_sim
    return similarity / (len(tokens1) * len(tokens2))
similarity = compute_similarity("This is a sentence.", "This sentence is similar.")
print(similarity) |
#!/bin/bash
# Generate kubelet client certificates and kubeconfigs for worker nodes.
# Counts down from index $1 to (but not including) $2.
# $3 is the public address of the Kubernetes API server.
i=$1
current=$2
while [ $i -gt $current ]
do
instance=worker
# CSR for this node: the node authorizer requires CN "system:node:<name>"
# and O "system:nodes".
cat > ${instance}-$i-csr.json <<EOF
{
  "CN": "system:node:${instance}-$i",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:nodes",
      "OU": "clinco Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
# if [ "$(uname)" = "Darwin" ]
# then
# KUBERNETES_PUBLIC_ADDRESS=$(hostname)
# elif [ "$(uname)" = "Linux" ]
# then
# KUBERNETES_PUBLIC_ADDRESS=$(hostname -i)
# fi
KUBERNETES_PUBLIC_ADDRESS=$3
EXTERNAL_IP=${KUBERNETES_PUBLIC_ADDRESS} # 172.172.1.$i
INTERNAL_IP=172.172.1.$i # 127.0.0.1
# Sign the worker certificate; SANs cover hostname plus both IPs.
cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -hostname=${instance}-$i,${EXTERNAL_IP},${INTERNAL_IP} \
  -profile=kubernetes \
  ${instance}-$i-csr.json | cfssljson -bare ${instance}-$i
# Build a per-node kubeconfig embedding the CA and the freshly minted certs.
kubectl config set-cluster clinco-the-hard-way \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443 \
  --kubeconfig=${instance}-$i.kubeconfig
kubectl config set-credentials system:node:${instance}-$i \
  --client-certificate=${instance}-$i.pem \
  --client-key=${instance}-$i-key.pem \
  --embed-certs=true \
  --kubeconfig=${instance}-$i.kubeconfig
kubectl config set-context default \
  --cluster=clinco-the-hard-way \
  --user=system:node:${instance}-$i \
  --kubeconfig=${instance}-$i.kubeconfig
kubectl config use-context default --kubeconfig=${instance}-$i.kubeconfig
i=$((i-1))
done
|
#!/bin/bash
#
# Copyright (c) 2017-2021 VMware, Inc. or its affiliates
# SPDX-License-Identifier: Apache-2.0
set -eux -o pipefail
# NOTE: All these steps need to be done in the same task since each task is run
# in its own isolated container with no shared state. Thus, installing the RPM,
# and making isolation2 needs to be done in the same task/container.
export GPHOME_SOURCE=/usr/local/greenplum-db-source
export GPHOME_TARGET=/usr/local/greenplum-db-target
# ci-helpers.bash presumably provides install_*_GPDB_rpm_and_symlink and
# create_source_cluster used in main() below — verify against that file.
source gpupgrade_src/ci/scripts/ci-helpers.bash
make_pg_isolation2_regress_for_the_target_GPDB_version() {
# setup_configure_vars and configure expect GPHOME=/usr/local/greenplum-db-devel
# Thus, symlink the target version to /usr/local/greenplum-db-devel.
# Alternatively, refactor common.bash to use $GPHOME. However, due to unforeseen
# consequences and stability concerns we cannot do that.
ln -s "$GPHOME_TARGET" /usr/local/greenplum-db-devel
set +u
source gpdb_src/concourse/scripts/common.bash
setup_configure_vars
export LDFLAGS="-L$GPHOME_TARGET/ext/python/lib $LDFLAGS"
configure
set -u
source "${GPHOME_TARGET}"/greenplum_path.sh
make -j "$(nproc)" -C gpdb_src
make -j "$(nproc)" -C gpdb_src/src/test/isolation2 install
}
run_pg_upgrade_tests() {
chown -R gpadmin:gpadmin gpupgrade_src
time su gpadmin -c '
set -eux -o pipefail
export TERM=linux
export ISOLATION2_PATH=$(readlink -e gpdb_src/src/test/isolation2)
cd gpupgrade_src
make pg-upgrade-tests
'
}
main() {
echo "Installing BATS..."
./bats/install.sh /usr/local
echo "Installing gpupgrade rpm..."
yum install -y enterprise_rpm/gpupgrade-*.rpm
echo "Setting up gpadmin user..."
mkdir -p gpdb_src
./gpdb_src_source/concourse/scripts/setup_gpadmin_user.bash "centos"
echo "Installing the source GPDB rpm and symlink..."
install_source_GPDB_rpm_and_symlink
echo "Installing the target GPDB rpm and symlink..."
install_target_GPDB_rpm_and_symlink
echo "Making pg_isolation2_regress for the target GPDB version..."
make_pg_isolation2_regress_for_the_target_GPDB_version
echo "Creating the source demo cluster..."
create_source_cluster
echo "Running tests..."
run_pg_upgrade_tests
}
main
|
(defn first-ten
  "Prints (at most) the first ten elements of coll as a seq."
  [coll]
  (->> coll (take 10) println))
(first-ten [1 2 3 4 5 6 7 8 9 10 11 12])
# Output: (1 2 3 4 5 6 7 8 9 10) |
# Creates a compact diff between two reflect json files
#
# Usage: $0 <old-reflect-config.json> <new-reflect-config.json>
#
# Fix: quote $0/$1/$2 so paths containing spaces survive word splitting.
# The tools-jar glob must stay OUTSIDE the quotes so the shell still expands
# the version wildcard.
java -classpath "$(dirname "$0")"/../spring-graalvm-native-feature/target/spring-graalvm-native-tools-*.jar org.springframework.graalvm.support.ReflectionJsonComparator "$1" "$2"
|
#!/usr/bin/env bash
# Posts a JSON file of QC masks to the signal-detection service.
#
# Usage: $0 <path-to-json-file>
# Requires: SD_URL environment variable with the service base URL.
# Exit on errors
set -e
if [ -z "$1" ]; then
echo "Usage requires one argument which is location of the input file"
exit 1
fi
# (Fix: removed unused `declare serviceHost` left over from an earlier version.)
if [ -z "${SD_URL}" ]; then
echo "Require variable SD_URL to be set"
exit 1
fi
echo "Signal detection Service URL: ${SD_URL}"
# Check file exists
file=$1
if [ ! -f "$file" ]; then
echo "File $file does not exist"
exit 1
fi
url="${SD_URL}/coi/qc-masks"
echo "Posting file $file to ${url}"
# Fix: quote the URL and file so spaces/metacharacters in either cannot be
# word-split or glob-expanded by the shell.
curl --fail -w "\n" -i -H "Content-Type: application/json" -H "Accept: application/json" -X POST "${url}" -d @"${file}"
|
package model
import "github.com/ungerik/go-start/reflection"
// ConversionIterator returns an Iterator that calls conversionFunc
// for every from.Next() result and returns the result
// of conversionFunc at every Next().
// fromResultPtr must be a pointer value suitable for passing to from.Next().
// (Comment previously said "ConvertIterator", mismatching the exported name.)
func ConversionIterator(from Iterator, fromResultPtr interface{}, conversionFunc func(interface{}) interface{}) Iterator {
	return &conversionIterator{from, fromResultPtr, conversionFunc}
}
// conversionIterator wraps an embedded Iterator and converts each of its
// results with conversionFunc. Constructed via ConversionIterator.
type conversionIterator struct {
	Iterator                     // the underlying source iterator
	FromResultPtr  interface{}   // scratch destination for the source's Next
	conversionFunc func(interface{}) interface{}
}
// Next advances the wrapped iterator into FromResultPtr, converts that value
// with conversionFunc, and copies the converted result into resultRef via
// reflection.SmartCopy. It reports false once the source is exhausted.
func (self *conversionIterator) Next(resultRef interface{}) bool {
	if !self.Iterator.Next(self.FromResultPtr) {
		return false
	}
	reflection.SmartCopy(self.conversionFunc(self.FromResultPtr), resultRef)
	return true
}
|
// Create an OpenCL buffer for the vector to be updated
viennacl::ocl::context & ctx = viennacl::ocl::get_context(0); // NOTE(review): ctx is currently unused
viennacl::vector<NumericT> result(vec_size);
viennacl::ocl::handle<cl_mem> result_mem = viennacl::ocl::current_context().create_memory(CL_MEM_READ_WRITE, vec_size * sizeof(NumericT));
// Set the arguments for the "bicgstab_vector_update" kernel
// NOTE(review): args 0/1 are bound to result_mem here, yet the enqueue below
// passes `result`'s handle and vec_size again as call arguments — confirm
// which buffer the kernel is actually meant to update.
k.arg(0, result_mem);
k.arg(1, vec_size);
// Enqueue the kernel for execution with appropriate global and local work sizes
viennacl::ocl::enqueue(k(viennacl::traits::opencl_handle(result), vec_size));
// Retrieve the updated vector from the device and store it in the result variable
viennacl::backend::finish(); // block until the enqueued kernel has completed
viennacl::copy(result_mem, result); |
#!/bin/bash
# $@ - all command line params
# $ 1 - first param (no space)
# $# - number of command line params
# Exports the display so you can run remotely, kills any existing instances, turns off screen saver, runs the app.
#
# Should be run on the remote machine. You can use a command like the following
# on your local machine to run it on the remote machine.
#
# ssh pi@botfly ./runchesslr.sh
#
# Since you need to run as root to allow access to i2c ports, make java run as root.
# sudo chmod 4755 /etc/alternatives/java
export DISPLAY=:0
killall java
# Fix: abort if the app directory is missing instead of running java from the
# wrong working directory.
cd chesslr || exit 1
xset s off
# $1 is intentionally unquoted: when no argument is given, an empty quoted ""
# would be passed to java as a bogus first argument.
java $1 -classpath ../lib/'*':target/classes com.axorion.chesslr.hardware.BoardDiagnostics
|
<filename>server/src/intercept/server/components/NewStubCommand.java
package intercept.server.components;
import intercept.configuration.StubRequest;
import intercept.framework.Command;
import intercept.server.WebContext;
public class NewStubCommand {
    /**
     * Builds a stub-request document from the incoming web request, registers
     * (or updates) that stub on the server, then redirects the client back to
     * the server's landing page.
     */
    public void executeCommand(WebContext context) {
        final StubRequestDocument document = new StubRequestDocument();
        context.fillRequestDocument(document);
        context.createOrUpdateStub(document.getRequest());
        final String serverName = context.getServerConfig().getName();
        context.redirectTo("/" + serverName);
    }
}
|
#!/bin/bash
# Force-kill any running lightpulse process. The "[l]ightpulse" pattern keeps
# grep from matching its own command line in the ps output.
kill -s SIGKILL $(ps axg | grep "[l]ightpulse" | awk '{print $1}')
# Command string written to the serial-attached LED controller; presumably
# sets all LEDs to RGB 128,128,128 — TODO confirm against the firmware protocol.
echo "LED128,128,128;" > /dev/ttyAMA0
python server.py |
#
# Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# Utilities for shell tests
#
# Default TESTSRC/TESTCLASSES to the current directory when the jtreg
# harness has not exported them.
: ${TESTSRC=.} ${TESTCLASSES=.}
# Resolve JDK tools relative to TESTJAVA when it is set; otherwise rely on PATH.
java="${TESTJAVA+${TESTJAVA}/bin/}java"
javac="${TESTJAVA+${TESTJAVA}/bin/}javac"
jar="${TESTJAVA+${TESTJAVA}/bin/}jar"
rmic="${TESTJAVA+${TESTJAVA}/bin/}rmic"
# Platform dispatch: WindowsOnly/UnixOnly run their arguments only on the
# matching OS; PS is the platform's classpath separator.
case `uname -s` in
Windows*|CYGWIN*)
WindowsOnly() { "$@"; }
UnixOnly() { :; }
PS=";" ;;
*)
UnixOnly() { "$@"; }
WindowsOnly() { :; }
PS=":";;
esac
# Accumulates one '.' per failed test; non-empty means overall failure.
failed=""
# Record a test failure without aborting the run.
Fail() { echo "FAIL: $1"; failed="${failed}."; }
# Print a message and abort the whole script.
Die() { printf "%s\n" "$*"; exit 1; }
# Echo a command, run it, and Die if it exits non-zero.
Sys() {
printf "%s\n" "$*"; "$@"; rc="$?";
test "$rc" -eq 0 || Die "Command \"$*\" failed with exitValue $rc";
}
# Die unless every named file exists and is readable.
CheckFiles() {
for f in "$@"; do test -r "$f" || Die "File $f not found"; done
}
# Compare an expected outcome ("success"/"failure") against an exit code,
# recording PASS or Fail accordingly.
Report() {
test "$#" != 2 && Die "Usage: Report success|failure rc"
if test "$1" = "success" -a "$2" = 0; then
echo "PASS: succeeded as expected"
elif test "$1" = "failure" -a "$2" != 0; then
echo "PASS: failed as expected"
elif test "$1" = "success" -a "$2" != 0; then
Fail "test failed unexpectedly"
elif test "$1" = "failure" -a "$2" = 0; then
Fail "test succeeded unexpectedly"
else
Die "Usage: Report success|failure rc"
fi
}
# Write a MANIFEST.MF whose Class-Path is the given arguments.
MkManifestWithClassPath() {
(echo "Manifest-Version: 1.0"; echo "Class-Path: $*") > MANIFEST.MF
}
# Print a visual separator between test cases.
HorizontalRule() {
echo "-----------------------------------------------------------------"
}
# Run a command and Report whether it matched the expected outcome
# (first argument: "success" or "failure").
Test() {
HorizontalRule
expectedResult="$1"; shift
printf "%s\n" "$*"
"$@"
Report "$expectedResult" "$?"
}
# Convenience wrappers around Test with a fixed expectation.
Failure() { Test failure "$@"; }
Success() { Test success "$@"; }
# Final summary: exits 1 if any test failed, 0 otherwise.
# Must be invoked exactly as: Bottom Line
Bottom() {
test "$#" = 1 -a "$1" = "Line" || Die "Usage: Bottom Line"
if test -n "$failed"; then
count=`printf "%s" "$failed" | wc -c | tr -d ' '`
echo "FAIL: $count tests failed"
exit 1
else
echo "PASS: all tests gave expected results"
exit 0
fi
}
# Overwrite each named file with junk so it is no longer a valid jar.
BadJarFile() {
for jarfilename in "$@"; do pwd > "$jarfilename"; done
}
#----------------------------------------------------------------
# Usage: BCP=`DefaultBootClassPath`
# Returns default bootclasspath, discarding non-existent entries
#----------------------------------------------------------------
DefaultBootClassPath() {
echo 'public class B {public static void main(String[] a) {
System.out.println(System.getProperty("sun.boot.class.path"));}}' > B.java
"$javac" B.java
_BCP_=""
# Split the reported path on the platform separator and keep readable entries.
for elt in `"$java" B | tr "${PS}" " "`; do
test -r "$elt" -a -n "$elt" && _BCP_="${_BCP_:+${_BCP_}${PS}}${elt}"
done
rm -f B.java B.class
printf "%s" "$_BCP_" # Don't use echo -- unsafe on Windows
}
#----------------------------------------------------------------
# Foil message localization
#----------------------------------------------------------------
DiagnosticsInEnglishPlease() {
LANG="C" LC_ALL="C" LC_MESSAGES="C"; export LANG LC_ALL LC_MESSAGES
}
|
def convert_to_hex_escaped(input_string):
    """Return *input_string* with non-ASCII characters expanded into their
    UTF-8 byte values written as ``\\xNN`` escapes (control characters become
    their usual escapes, e.g. ``\\n``).

    Works in two passes: first re-read the UTF-8 bytes as latin-1 so each byte
    becomes one character, then apply the ``unicode-escape`` codec.
    """
    utf8_as_latin1 = input_string.encode("utf-8").decode("latin-1")
    escaped_bytes = utf8_as_latin1.encode("unicode-escape")
    return escaped_bytes.decode("latin-1")
#!/bin/bash
# Deploys the generated _site/ to TARGET_BRANCH of $TARGET_REPO from Travis CI.
# Expects TARGET_REPO, COMMIT_AUTHOR_EMAIL and the encrypted key variables to
# be provided by the CI environment.
set -e # Exit with nonzero exit code if anything fails
SOURCE_BRANCH="master"
TARGET_BRANCH="master"
SHA=`git rev-parse --verify --short HEAD`
# Pull requests and commits to other branches shouldn't try to deploy, just build to verify
if [ "$TRAVIS_PULL_REQUEST" != "false" -o "$TRAVIS_BRANCH" != "$SOURCE_BRANCH" ]; then
echo "Skipping deploy; just doing a build."
exit 0
fi
# Keys
# Decrypt the Travis-encrypted tarball of deploy keys, unpack, then remove it.
openssl aes-256-cbc -K $encrypted_6c0bd54c9dd4_key -iv $encrypted_6c0bd54c9dd4_iv -in scripts/deploy/keys.tar.enc -out scripts/deploy/keys.tar -d
tar xvf scripts/deploy/keys.tar -C scripts/deploy/
rm scripts/deploy/keys.tar
echo "TRAVIS_TAG = " $TRAVIS_TAG
# Clone the existing gh-pages for this repo into out/
# Create a new empty branch if gh-pages doesn't exist yet (should only happen on first deploy)
chmod 600 ./scripts/deploy/deploy_key
chmod 600 ./scripts/deploy/deploy_key_ds
eval `ssh-agent -s`
ssh-add scripts/deploy/deploy_key
ssh-add scripts/deploy/deploy_key_ds
git clone $TARGET_REPO out
cd out
git checkout $TARGET_BRANCH || git checkout --orphan $TARGET_BRANCH
cd ..
# Overwrite contents with _site
rm -rf out/**/* || exit 0
cp -r _site/* out/
# Now let's go have some fun with the cloned repo
cd out
git config user.name "Travis CI"
git config user.email "$COMMIT_AUTHOR_EMAIL"
# Be careful with that key!
echo "*deploy_key*" >> .gitignore
echo "*keys.tar" >> .gitignore
echo "scripts/" >> .gitignore
# If there are no changes to the compiled out (e.g. this is a README update) then just bail.
if git diff --quiet; then
echo "No changes to the output on this push; exiting."
exit 0
fi
# Commit the "changes", i.e. the new version.
# The delta will show diffs between new and old versions.
git add -A .
# Keep the key-hiding .gitignore entries out of the published commit.
git reset .gitignore
git commit -m "Deploy ${SHA} from branch ${TRAVIS_BRANCH}"
# Now that we're all set up, we can push.
git push $TARGET_REPO $TARGET_BRANCH
|
#!/bin/sh
# Test:
# Cli tool: test listing hooks from local shared repos
# Install githooks first; the rest of the test depends on it.
if ! sh /var/lib/githooks/install.sh; then
echo "! Failed to execute the install script"
exit 1
fi
# Shared-repo fixture holding a single pre-commit hook file (example-01).
mkdir -p /tmp/test117/shared/.githooks/pre-commit &&
cd /tmp/test117/shared/.githooks/pre-commit &&
touch example-01 ||
exit 2
# generate the shared folder name (from the cli.sh)
REPO_LOCATION="ssh://git@github.com/test/repo1.git"
INSTALL_DIR=$(git config --global --get githooks.installDir)
[ -n "$INSTALL_DIR" ] || INSTALL_DIR=~/".githooks"
# Folder name mirrors cli.sh: "<sha-of-url>-<sanitized tail of url>".
SHA_HASH=$(echo "$REPO_LOCATION" | git hash-object --stdin 2>/dev/null)
NAME=$(echo "$REPO_LOCATION" | tail -c 48 | sed -E "s/[^a-zA-Z0-9]/-/g")
SHARED_ROOT="$INSTALL_DIR/shared/$SHA_HASH-$NAME"
# Pre-populate the shared clone with a pre-push hook (example-03) and
# register the ssh URL as a global shared repo.
mkdir -p "$SHARED_ROOT/pre-push" &&
cd "$SHARED_ROOT" &&
touch pre-push/example-03 &&
git init &&
git remote add origin "$REPO_LOCATION" &&
git hooks shared add --global "$REPO_LOCATION" ||
exit 3
# Local repo with its own commit-msg hook (example-02) plus the local shared
# folder registered globally.
mkdir -p /tmp/test117/repo/.githooks/commit-msg &&
cd /tmp/test117/repo &&
git init &&
touch .githooks/commit-msg/example-02 &&
git hooks shared add --global /tmp/test117/shared ||
exit 4
cd /tmp/test117/repo || exit 5
OUTPUT=$(git hooks list 2>&1)
# The listing must show all three hooks: ssh-shared, local, and path-shared.
if ! echo "$OUTPUT" | grep 'example-01'; then
echo "$OUTPUT" >&2
echo "! Missing shared hook in the output" >&2
exit 11
fi
if ! echo "$OUTPUT" | grep 'example-02'; then
echo "$OUTPUT" >&2
echo "! Missing local hook in the output" >&2
exit 12
fi
if ! echo "$OUTPUT" | grep 'example-03'; then
echo "$OUTPUT" >&2
echo "! Missing shared hook in the output" >&2
exit 13
fi
|
#!/bin/bash
# Invoke geopmread's --cache mode — presumably pre-generates its platform
# topology cache so later reads skip discovery; confirm against geopmread(1).
geopmread --cache
|
<filename>vault_test.go
package sshvault
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strings"
"syscall"
"testing"
"github.com/kr/pty"
"github.com/ssh-vault/crypto"
"github.com/ssh-vault/crypto/aead"
)
// These are done in one function to avoid declaring global variables in a test
// file.
// TestVaultFunctions exercises a full vault round trip against a temp file:
// password prompt (driven through a pty), key setup, encrypt+Close, View,
// Edit with EDITOR=cat (a no-op edit), Close again, and View again.
func TestVaultFunctions(t *testing.T) {
	dir, err := ioutil.TempDir("", "vault")
	if err != nil {
		t.Error(err)
	}
	defer os.RemoveAll(dir) // clean up
	tmpfile := filepath.Join(dir, "vault")
	vault, err := New("", "test_data/id_rsa.pub", "", "create", tmpfile)
	if err != nil {
		t.Error(err)
	}
	keyPw := string("<PASSWORD>")
	pty, tty, err := pty.Open()
	if err != nil {
		t.Errorf("Unable to open pty: %s", err)
	}
	// File Descriptor magic. GetPasswordPrompt() reads the password
	// from stdin. For the test, we save stdin to a spare FD,
	// point stdin at the file, run the system under test, and
	// finally restore the original stdin
	oldStdin, _ := syscall.Dup(int(syscall.Stdin))
	oldStdout, _ := syscall.Dup(int(syscall.Stdout))
	syscall.Dup2(int(tty.Fd()), int(syscall.Stdin))
	syscall.Dup2(int(tty.Fd()), int(syscall.Stdout))
	// Feed the password into the pty while the prompt reads it.
	go PtyWriteback(pty, keyPw)
	keyPwTest, err := vault.GetPasswordPrompt()
	syscall.Dup2(oldStdin, int(syscall.Stdin))
	syscall.Dup2(oldStdout, int(syscall.Stdout))
	if err != nil {
		t.Error(err)
	}
	if string(strings.Trim(keyPw, "\n")) != string(keyPwTest) {
		t.Errorf("password prompt: expected %s, got %s\n", keyPw, keyPwTest)
	}
	PKCS8, err := vault.PKCS8()
	if err != nil {
		t.Error(err)
	}
	vault.PublicKey, err = vault.GetRSAPublicKey(PKCS8)
	if err != nil {
		t.Error(err)
	}
	vault.Fingerprint, err = vault.GenFingerprint(PKCS8)
	if err != nil {
		t.Error(err)
	}
	if vault.Password, err = crypto.GenerateNonce(32); err != nil {
		t.Error(err)
	}
	// Skip vault.Create because we don't need/want to interact with an editor
	// for tests.
	in := []byte("The quick brown fox jumps over the lazy dog")
	out, err := aead.Encrypt(vault.Password, in, []byte(vault.Fingerprint))
	if err != nil {
		t.Error(err)
	}
	if err = vault.Close(out); err != nil {
		t.Error(err)
	}
	enc1, err := ioutil.ReadFile(tmpfile)
	if err != nil {
		t.Error(err)
	}
	// View() must decrypt the on-disk vault back to the original plaintext.
	plaintext, err := vault.View()
	if err != nil {
		t.Error(err)
	}
	if !bytes.Equal(in, plaintext) {
		t.Error("in != out")
	}
	// EDITOR=cat makes Edit a pass-through, so content is unchanged...
	os.Setenv("EDITOR", "cat")
	edited, err := vault.Edit(plaintext)
	if err != nil {
		t.Error(err)
	}
	out, err = aead.Encrypt(vault.Password, edited, []byte(vault.Fingerprint))
	if err != nil {
		t.Error(err)
	}
	if err = vault.Close(out); err != nil {
		t.Error(err)
	}
	plaintext, err = vault.View()
	if err != nil {
		t.Error(err)
	}
	enc2, err := ioutil.ReadFile(tmpfile)
	if err != nil {
		t.Error(err)
	}
	if !bytes.Equal(edited, plaintext) {
		t.Error("edited != plaintext ")
	}
	// ...but the two ciphertexts must still differ (presumably a fresh
	// nonce per Encrypt — confirm in aead.Encrypt).
	if bytes.Equal(enc1, enc2) {
		t.Error("Expecting different encrypted outputs")
	}
}
// TestVaultFunctionsSTDOUT covers the "no vault file" path: with an empty
// vault path, Close() writes the encrypted vault to stdout. The test captures
// stdout via an os.Pipe, writes the capture to a temp file, and checks that
// View() decrypts it back to the original plaintext.
func TestVaultFunctionsSTDOUT(t *testing.T) {
	dir, err := ioutil.TempDir("", "vault")
	if err != nil {
		t.Error(err)
	}
	defer os.RemoveAll(dir) // clean up
	vault, err := New("", "test_data/id_rsa.pub", "", "create", "")
	if err != nil {
		t.Error(err)
	}
	PKCS8, err := vault.PKCS8()
	if err != nil {
		t.Error(err)
	}
	vault.PublicKey, err = vault.GetRSAPublicKey(PKCS8)
	if err != nil {
		t.Error(err)
	}
	vault.Fingerprint, err = vault.GenFingerprint(PKCS8)
	if err != nil {
		t.Error(err)
	}
	if vault.Password, err = crypto.GenerateNonce(32); err != nil {
		t.Error(err)
	}
	// Skip vault.Create because we don't need/want to interact with an editor
	in := []byte("The quick brown fox jumps over the lazy dog")
	out, err := aead.Encrypt(vault.Password, in, []byte(vault.Fingerprint))
	if err != nil {
		t.Error(err)
	}
	rescueStdout := os.Stdout // keep backup of the real stdout
	r, w, _ := os.Pipe()
	os.Stdout = w
	if err = vault.Close(out); err != nil {
		t.Error(err)
	}
	w.Close()
	outStdout, _ := ioutil.ReadAll(r)
	os.Stdout = rescueStdout
	// Persist the captured stdout and point the vault at it for View().
	tmpfile, err := ioutil.TempFile("", "stdout")
	if err != nil {
		t.Error(err)
	}
	tmpfile.Write([]byte(outStdout))
	vault.vault = tmpfile.Name()
	plaintext, err := vault.View()
	if err != nil {
		t.Error(err)
	}
	if !bytes.Equal(in, plaintext) {
		t.Error("in != out")
	}
}
// TestVaultNew: New() accepts a public key fetched over HTTP; the stub server
// returns one ssh-rsa key and asserts the "ssh-vault" User-agent header.
func TestVaultNew(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		expect(t, "ssh-vault", r.Header.Get("User-agent"))
		fmt.Fprintln(w, "ssh-rsa ABC")
	}))
	defer ts.Close()
	_, err := New("", "", ts.URL, "view", "")
	if err != nil {
		t.Error(err)
	}
}

// TestVaultNewNoKey: an HTTP key endpoint that returns an empty body must fail.
func TestVaultNewNoKey(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		expect(t, "ssh-vault", r.Header.Get("User-agent"))
	}))
	defer ts.Close()
	_, err := New("", "", ts.URL, "view", "")
	if err == nil {
		t.Error("Expecting error")
	}
}

// TestVaultNoKey: an unreadable local key path must fail.
func TestVaultNoKey(t *testing.T) {
	_, err := New("", "/dev/null/none", "", "", "")
	if err == nil {
		t.Error("Expecting error")
	}
}

// TestExistingVault: "create" must refuse to overwrite an existing file.
func TestExistingVault(t *testing.T) {
	_, err := New("", "test_data/id_rsa.pub", "", "create", "LICENSE")
	if err == nil {
		t.Error("Expecting error")
	}
}

// TestPKCS8: converting a non-existent key file must fail.
func TestPKCS8(t *testing.T) {
	v := &vault{
		key: "/dev/null/non-existent",
	}
	_, err := v.PKCS8()
	if err == nil {
		t.Error("Expecting error")
	}
}

// TestKeyHTTPNotFound: the key URL (second argument) serving no key content
// must fail.
func TestKeyHTTPNotFound(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		expect(t, "ssh-vault", r.Header.Get("User-agent"))
	}))
	defer ts.Close()
	_, err := New("", ts.URL, "", "view", "")
	if err == nil {
		t.Error("Expecting error")
	}
}
|
'use strict';
const axios = require('axios');
const fs = require('fs');
const { get } = require('lodash');
const { services } = require('coinstac-common');
const config = require('./config');
// Point every axios call in this module at the configured API server.
axios.defaults.baseURL = `${config.protocol}://${config.apiServer}:${config.port}`;
/**
 * Authenticates against the COINSTAC API server, validates the local
 * ./compspec.json against the "computation" schema, and uploads it through
 * the GraphQL endpoint.
 *
 * Fixes: replaced the Promise-constructor-with-async-executor antipattern
 * with a plain async function (same Promise-returning interface), and merged
 * the duplicated 401/403 and ENOTFOUND/ECONNREFUSED switch cases.
 *
 * @param {string} username API username.
 * @param {string} password API password.
 * @param {Object} logger   Logger exposing an `info` method.
 * @returns {Promise<void>} resolves on success, rejects with a
 *   human-readable Error otherwise.
 */
const compspecUpload = async (username, password, logger) => {
  try {
    const { data } = await axios.post('/authenticate', { username, password });
    const json = JSON.parse(fs.readFileSync('./compspec.json'));
    const validationResult = services.validator.validate(json, 'computation');
    if (validationResult.error) {
      let err = 'Computation schema is not valid:';
      validationResult.error.details.forEach((error) => {
        err += ` ${error.path}: ${error.message}\n`;
      });
      throw new Error(err);
    }
    const payload = JSON.stringify({
      operationName: config.graphql.operationName,
      query: config.graphql.query,
      variables: {
        computationSchema: json,
      },
    });
    const headers = {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${data.id_token}`,
    };
    const { data: createdData } = await axios.post('/graphql', payload, { headers });
    if (createdData.errors) {
      throw new Error(get(createdData, 'errors.0.message', 'Failed to upload computation'));
    }
    logger.info('Successfully uploaded computation schema');
  } catch (error) {
    // JSON.parse failures get their own, more specific message.
    if (error.message && error.message.includes('JSON')) {
      throw new Error(`Compspec JSON parsing failed\n ${error.message}`);
    }
    let message;
    const code = error.code || (error.response ? error.response.status : '');
    switch (code) {
      case 401:
      case 403:
        message = get(error, 'response.data.message');
        break;
      case 'ENOTFOUND':
      case 'ECONNREFUSED':
        message = 'Could not contact coinstac servers';
        break;
      default:
        message = error.message || error;
    }
    throw new Error(message);
  }
};
// Public API of this module.
module.exports = {
  compspecUpload,
};
|
<reponame>IzaacBaptista/ads-senac
import java.util.ArrayList;
public class Materia {
int id;
String descricao;
ArrayList<Professor> professores;
ArrayList<Aluno> alunos;
public Materia(int id, String descricao) {
this.id = id;
this.descricao = descricao;
this.professores = new ArrayList<>();
this.alunos = new ArrayList<>();
}
public void addProfessor(Professor professor) {
this.professores.add(professor);
}
public void imprimirMateria() {
System.out.println("Descrição " + this.descricao);
for (Professor professor : professores) {
System.out.println("Nome " + professor.nome + " Escolaridade " + professor.escolaridade);
}
}
} |
package com.ergdyne.tasktimer;
import android.app.AlertDialog;
import android.app.DatePickerDialog;
import android.app.TimePickerDialog;
import android.content.DialogInterface;
import android.database.Cursor;
import android.os.Bundle;
import android.support.v7.app.ActionBar;
import android.support.v7.app.AppCompatActivity;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ArrayAdapter;
import android.widget.AutoCompleteTextView;
import android.widget.DatePicker;
import android.widget.TextView;
import android.widget.TimePicker;
import com.ergdyne.lib.AppConstants;
import com.ergdyne.lib.DBMap;
import com.ergdyne.lib.EpicDate;
import com.ergdyne.lib.ErgAlert;
import com.ergdyne.lib.ErgFormats;
import java.util.ArrayList;
import java.util.List;
/**
* Created by j on 3/30/17.
*/
public class EditEventActivity extends AppCompatActivity {
/**********************/
//Variable definitions
/**********************/
    // Intent extra keys used by callers to launch this activity.
    public static final String EVENT_ID = "eventID";
    public static final String IS_CURRENT = "isRunning";
    private static final String TAG = "EditEventActivity";
    private long eventID;        // row id of the event being edited
    private boolean isRunning; //is currently running...
    // Views bound in onCreate.
    private TextView eventName;
    private TextView durationDisplay;
    private AutoCompleteTextView reviseEvent;   // task-name entry with suggestions
    private TextView setStartTime;
    private TextView setEndTime;
    private TextView setStartDate;
    private TextView setEndDate;
    // Values as loaded from the database...
    private String currentTaskName;
    private EpicDate currentStart;
    private EpicDate currentEnd;
    private String currentDurationDisplay;
    // ...and the (possibly picker-modified) values pending confirmation.
    private EpicDate newStart;
    private EpicDate newEnd;
    private DBHelper dbHelper;
/**********************/
//Activity lifecycle
/**********************/
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
//Change the toolbar to cancel/confirm.
{
try {
final LayoutInflater inflater = (LayoutInflater) getSupportActionBar().getThemedContext()
.getSystemService(LAYOUT_INFLATER_SERVICE);
final View customActionBarView = inflater.inflate(
R.layout.actionbar_edit, null);
customActionBarView.findViewById(R.id.actionbar_cancel).setOnClickListener(
new View.OnClickListener() {
@Override
public void onClick(View v) {
finish();
}
});
customActionBarView.findViewById(R.id.actionbar_confirm).setOnClickListener(
new View.OnClickListener() {
@Override
public void onClick(View v) {
inputProcessing(dbHelper, isRunning, eventID, reviseEvent.getText().toString(), currentTaskName, newStart, currentStart, newEnd, currentEnd);
}
}
);
final ActionBar actionBar = getSupportActionBar();
actionBar.setDisplayOptions(
ActionBar.DISPLAY_SHOW_CUSTOM,
ActionBar.DISPLAY_SHOW_CUSTOM | ActionBar.DISPLAY_SHOW_HOME
| ActionBar.DISPLAY_HOME_AS_UP);
actionBar.setCustomView(customActionBarView,
new ActionBar.LayoutParams(
ViewGroup.LayoutParams.MATCH_PARENT,
ViewGroup.LayoutParams.MATCH_PARENT));
}catch(NullPointerException e){
Log.e(TAG,"getThemedContext returned null");
}
}
setContentView(R.layout.activity_edit_event);
//Link variables to views.
{
eventName = (TextView) this.findViewById(R.id.textView_edit_event);
durationDisplay = (TextView) this.findViewById(R.id.textView_edit_duration);
reviseEvent = (AutoCompleteTextView) this.findViewById(R.id.autoCompleteTextView_revise_event);
setStartTime = (TextView) this.findViewById(R.id.textView_set_start_time);
setEndTime = (TextView) this.findViewById(R.id.textView_set_end_time);
setStartDate = (TextView) this.findViewById(R.id.textView_set_start_date);
setEndDate = (TextView) this.findViewById(R.id.textView_set_end_date);
}
dbHelper = new DBHelper(this);
isRunning = getIntent().getBooleanExtra(IS_CURRENT,false);
eventID = getIntent().getLongExtra(EVENT_ID, DBMap.SettingsTable.defaultID);
//Load the major variables needed to display and edit.
{
if(isRunning){
//If it is the current event, the end is not yet set. We go to the Settings table for information.
newEnd = new EpicDate(dbHelper.rightNow());
currentEnd = new EpicDate(newEnd.sEpoch);
Cursor settings = dbHelper.getSettings();
settings.moveToFirst();
currentTaskName = dbHelper.getCurrentTaskName();
currentStart = new EpicDate(dbHelper.getCurrentStart());
long currentDuration = newEnd.sEpoch - currentStart.sEpoch;
currentDurationDisplay = ErgFormats.durationHMS(currentDuration);
settings.close();
}else {
//Normal event is being edited, so information comes from the event table.
Cursor event = dbHelper.getByID(DBMap.EventTable.table, eventID);
event.moveToFirst();
long taskID = event.getLong(event.getColumnIndex(DBMap.EventTable.taskID));
currentTaskName = dbHelper.getTaskName(taskID);
currentStart = new EpicDate(event.getLong(event.getColumnIndex(DBMap.EventTable.start)));
currentEnd = new EpicDate(event.getLong(event.getColumnIndex(DBMap.EventTable.end)));
currentDurationDisplay = event.getString(event.getColumnIndex(DBMap.EventTable.durationDisplay));
event.close();
newEnd = new EpicDate(currentEnd.sEpoch);
}
newStart = new EpicDate(currentStart.sEpoch);
}
//Push values to the views.
{
//I don't like the way this whole section flows. Like so much similar code that there should be a better way.
ArrayAdapter<String> adapter = new ArrayAdapter<String>(this, android.R.layout.simple_list_item_1,dbHelper.getTaskList());
//Same information is filled for current as others except for the End Time/Date
eventName.setText(currentTaskName);
durationDisplay.setText(currentDurationDisplay);
setStartTime.setText(currentStart.toTimeString(true,AppConstants.HOUR_FORMAT));
setEndTime.setText((isRunning)?"N/A":currentEnd.toTimeString(true,AppConstants.HOUR_FORMAT));
setStartDate.setText(currentStart.toDateString());
setEndDate.setText((isRunning)?"":currentEnd.toDateString());
reviseEvent.setThreshold(AppConstants.SUGGESTION_THRESHOLD);
reviseEvent.setAdapter(adapter);
/**********************/
//Set on click listeners. Date and times are separate.
/**********************/
setStartTime.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
TimePickerDialog startPicker;
startPicker = new TimePickerDialog(EditEventActivity.this, new TimePickerDialog.OnTimeSetListener() {
@Override
public void onTimeSet(TimePicker view, int hourOfDay, int minute) {
if(minute != currentStart.minute || hourOfDay != currentStart.hourOfDay ) {
newStart.setHourOfDay(hourOfDay);
newStart.setMinute(minute);
setStartTime.setText(EpicDate.toTimeString(hourOfDay, minute, AppConstants.HOUR_FORMAT));
}
}
},currentStart.hourOfDay,currentStart.minute,false); //true 24hour time
startPicker.setTitle(getResources().getString(R.string.hint_set_start_time));
startPicker.show();
}
});
setStartDate.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
DatePickerDialog startPicker;
startPicker = new DatePickerDialog(EditEventActivity.this, new DatePickerDialog.OnDateSetListener(){
@Override
public void onDateSet(DatePicker datePicker, int year, int month, int dayOfMonth){
if(dayOfMonth != currentStart.dayOfMonth ||
month != currentStart.month || year != currentStart.year){
newStart.setYear(year);
newStart.setMonth(month);
newStart.setDayOfMonth(dayOfMonth);
setStartDate.setText(EpicDate.toDateString(year,month,dayOfMonth));
}
}
},currentStart.year,currentStart.month,currentStart.dayOfMonth);
startPicker.setTitle(getResources().getString(R.string.hint_set_start_date));
startPicker.show();
}
});
if(!isRunning) {
setEndTime.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
TimePickerDialog startPicker;
startPicker = new TimePickerDialog(EditEventActivity.this, new TimePickerDialog.OnTimeSetListener() {
@Override
public void onTimeSet(TimePicker view, int hourOfDay, int minute) {
if (minute != currentEnd.minute || hourOfDay != currentEnd.hourOfDay) {
newEnd.setHourOfDay(hourOfDay);
newEnd.setMinute(minute);
setEndTime.setText(EpicDate.toTimeString(hourOfDay, minute,AppConstants.HOUR_FORMAT));
}
}
}, currentEnd.hourOfDay, currentEnd.minute, false); //true 24hour time
startPicker.setTitle(getResources().getString(R.string.hint_set_end_time));
startPicker.show();
}
});
setEndDate.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
DatePickerDialog startPicker;
startPicker = new DatePickerDialog(EditEventActivity.this, new DatePickerDialog.OnDateSetListener() {
@Override
public void onDateSet(DatePicker datePicker, int year, int month, int dayOfMonth) {
if (dayOfMonth != currentEnd.dayOfMonth ||
month != currentEnd.month || year != currentEnd.year) {
newEnd.setYear(year);
newEnd.setMonth(month);
newEnd.setDayOfMonth(dayOfMonth);
setEndDate.setText(EpicDate.toDateString(year, month, dayOfMonth));
}
}
}, currentEnd.year, currentEnd.month, currentEnd.dayOfMonth);
startPicker.setTitle(getResources().getString(R.string.hint_set_end_date));
startPicker.show();
}
});
}
}
}
/**********************/
//Input processing
/**********************/
//This is probably the most complex section of code in the app
//Whats with all the inputs?
//I was trying to make this look more functional.
//i.e. this does not follow good Object Oriented Programming Principles.
//The dialogs get in the way of some of the functional nature, so it is all a bit more messy than required.
private void inputProcessing(DBHelper db, Boolean running, long id,
String newName, String currentName,
EpicDate nStart, EpicDate cStart,
EpicDate nEnd, EpicDate cEnd){
List<String> errors = new ArrayList<>();
boolean changeStart = false;
boolean changeEnd = false;
//If we have any changes then reprocess the new epochs to work in seconds since epoch.
if(!cStart.isEqual(nStart)){
nStart.setSEpoch();
changeStart = true;
}
if(!cEnd.isEqual(nEnd)){
nEnd.setSEpoch();
changeEnd = true;
}
if(changeStart || changeEnd){
//Check for errors...
if(!running && nEnd.sEpoch <= nStart.sEpoch){
errors.add(getResources().getString(R.string.err_end_before_start));
}
if(!running && nEnd.sEpoch > db.rightNow()){
errors.add(getResources().getString(R.string.err_end_future));
}
if(nStart.sEpoch > db.rightNow()){
errors.add(getResources().getString(R.string.err_start_future));
}
if(errors.size()==0){
//Inputs look good; we have a change of some sort, so run the eventAdjustments.
eventAdjustments(db, running,id, newName, currentName, nStart.sEpoch,cStart.sEpoch,nEnd.sEpoch,cEnd.sEpoch);
}else{
//We have errors, so show them.
if(errors.size()>0){
ErgAlert.alert(EditEventActivity.this,errors); }
}
}else{
//No events to edit, just check if name has changed and go.
updateEvent(db, running, newName,currentName, id, cStart.sEpoch, cEnd.sEpoch);
}
}
private void updateEvent(DBHelper db, boolean running, String newName, String currentName, long id, long newStartEpoch, long newEndEpoch){
//This function updates the Event or current Event.
String name = (!newName.equals(currentName) && newName.length() >0)? newName:currentName;
if(!running){
db.updateEvent(id,
newStartEpoch,
newEndEpoch,
db.findOrInsertTask(name),
newEndEpoch-newStartEpoch,
ErgFormats.durationHMS(newEndEpoch-newStartEpoch)
);
}else{
db.updateSettings(newStartEpoch, db.findOrInsertTask(name));
}
finish();
}
    /**
     * Applies start/end changes, first asking for confirmation when the move
     * would swallow (delete) neighbouring events.
     *
     * NOTE(review): the positive-button callback ignores the method
     * parameters and re-reads the activity fields (dbHelper, newStart,
     * currentStart, newEnd, currentEnd, ...) because the listener outlives
     * this call — confirm the fields and parameters are always in sync here.
     */
    private void eventAdjustments(DBHelper db, boolean running, long id, String newName, String currentName, long newStartEpoch, long currentStartEpoch, long newEndEpoch, long currentEndEpoch) {
        //Some changes are being made and we may need confirmation before deleting something.
        int numberDeleted = getEndDeleted(db, newEndEpoch, currentEndEpoch).size() + getStartDeleted(db, newStartEpoch, currentStartEpoch).size();
        if (numberDeleted > 0) {
            AlertDialog.Builder b = new AlertDialog.Builder(EditEventActivity.this);
            b.setTitle(getResources().getString(R.string.warn_events_deleted_title));
            b.setMessage(getResources().getString(R.string.warn_events_deleted) + numberDeleted);
            b.setPositiveButton(android.R.string.yes, new DialogInterface.OnClickListener() {
                @Override
                public void onClick(DialogInterface dialog, int which) {
                    adjustAndDelete(dbHelper, newStart.sEpoch, currentStart.sEpoch, newEnd.sEpoch, currentEnd.sEpoch);
                    updateEvent(dbHelper, isRunning, reviseEvent.getText().toString(), currentTaskName, eventID, newStart.sEpoch, newEnd.sEpoch);
                }
            });
            b.setNegativeButton(android.R.string.no, new DialogInterface.OnClickListener() {
                @Override
                public void onClick(DialogInterface dialog, int which) {
                    // User declined: leave everything untouched.
                }
            });
            b.setIcon(android.R.drawable.ic_dialog_alert);
            b.show();
        } else {
            //Only need to adjust so no confirmation.
            adjustStartSide(db, currentStartEpoch, newStartEpoch);
            adjustEndSide(db, currentEndEpoch, newEndEpoch);
            updateEvent(db, running, newName, currentName, id, newStartEpoch, newEndEpoch);
        }
    }
//The expanded event swallows entire neighbouring events: delete them, then
//patch up the surviving neighbour on each side of the edited event.
private void adjustAndDelete(DBHelper db, long newStartEpoch, long currentStartEpoch, long newEndEpoch, long currentEndEpoch){
    //Start side: either a simple boundary move, or delete-then-adjust.
    List<Long> startIds = getStartDeleted(db, newStartEpoch, currentStartEpoch);
    if(startIds.isEmpty()){
        adjustStartSide(db, currentStartEpoch, newStartEpoch);
    }else{
        adjustStartSide(db, startIds, newStartEpoch, currentEndEpoch);
    }
    //End side: same treatment in the other direction.
    List<Long> endIds = getEndDeleted(db, newEndEpoch, currentEndEpoch);
    if(endIds.isEmpty()){
        adjustEndSide(db, currentEndEpoch, newEndEpoch);
    }else{
        adjustEndSide(db, endIds, currentStartEpoch, newEndEpoch);
    }
}
//Simple (no-delete) adjusters: exactly one neighbour is impacted on each side;
//locate it by its matching boundary epoch and move that boundary.
private boolean adjustStartSide(DBHelper db, long currentStartEpoch, long newStartEpoch){//ok
    //Exactly one impacted. The event that has an end that matches the start.
    //Update the previous event.
    return updateStartSide(db, db.getByLongField(DBMap.EventTable.table,DBMap.EventTable.end,currentStartEpoch),newStartEpoch);
}
private boolean adjustEndSide(DBHelper db, long currentEndEpoch, long newEndEpoch){//ok
    //Exactly one impacted and easy to find because it is the one with a start that matches the end.
    //Update the next event.
    return updateEndSide(db, db.getByLongField(DBMap.EventTable.table,DBMap.EventTable.start,currentEndEpoch),newEndEpoch);
}
//Then overloaded versions that include deletes.
//After deleting the swallowed events, the single surviving neighbour is the
//only event whose boundary still lies inside the affected range.
private boolean adjustStartSide(DBHelper db, List<Long> impactedIDs, long newStartEpoch, long currentEndEpoch){
    db.deleteByID(DBMap.EventTable.table,impactedIDs);
    //Find the one remaining event whose end falls inside the edited span.
    return updateStartSide(db, db.getInRange(
            DBMap.EventTable.table,
            null,
            DBMap.EventTable.end,
            newStartEpoch,
            currentEndEpoch-1
    ),newStartEpoch);
}
private boolean adjustEndSide(DBHelper db, List<Long> impactedIDs, long currentStartEpoch, long newEndEpoch){
    db.deleteByID(DBMap.EventTable.table,impactedIDs);
    //We don't know exactly where this one starts, but we know the range in which its start lies.
    //It is the only one in range because we deleted the others.
    return updateEndSide(db, db.getInRange(
            DBMap.EventTable.table,
            null,
            DBMap.EventTable.start,
            currentStartEpoch+1,
            newEndEpoch
    ),newEndEpoch);
}
//Rewrite the event immediately BEFORE the edited one so its end lines up
//with the new start.  Returns true if an event was updated.
//Fix: the Cursor was previously leaked; it is now always closed here
//(callers construct the cursor inline and never reuse it).
private boolean updateStartSide(DBHelper db, Cursor impacted, long newStartEpoch){
    try{
        if(impacted.moveToFirst()){
            long impactedStart = impacted.getLong(impacted.getColumnIndex(DBMap.EventTable.start));
            long adjustedStart = impactedStart;
            long adjustedEnd = newStartEpoch;
            long adjustedDuration = adjustedEnd - adjustedStart;
            db.updateEvent(impacted.getLong(impacted.getColumnIndex(DBMap._ID)),
                    adjustedStart,
                    adjustedEnd,
                    impacted.getLong(impacted.getColumnIndex(DBMap.EventTable.taskID)),
                    adjustedDuration,
                    ErgFormats.durationHMS(adjustedDuration));
            return true;
        }else{
            //This is probably not an issue as it would only happen if adjusting the first event or extending behind the first event.
            //So it is not an issue if it happens.
            Log.w(getResources().getString(R.string.app_name),"EditEventActivity - Start cursor not found");
            return false;
        }
    }finally{
        impacted.close();
    }
}
//Rewrite the event immediately AFTER the edited one so its start lines up
//with the new end.  If no following event exists the edit abuts the
//currently-running event, so its start is moved via the settings row instead.
//Fix: the Cursor was previously leaked; it is now always closed here.
private boolean updateEndSide(DBHelper db, Cursor impacted, long newEndEpoch){
    long adjustedStart = newEndEpoch;
    try{
        if(impacted.moveToFirst()){
            long impactedEnd = impacted.getLong(impacted.getColumnIndex(DBMap.EventTable.end));
            long adjustedEnd = impactedEnd;
            long adjustedDuration = adjustedEnd - adjustedStart;
            db.updateEvent(impacted.getLong(impacted.getColumnIndex(DBMap._ID)),
                    adjustedStart,
                    adjustedEnd,
                    impacted.getLong(impacted.getColumnIndex(DBMap.EventTable.taskID)),
                    adjustedDuration,
                    ErgFormats.durationHMS(adjustedDuration));
            return true;
        }else{
            //No following event: shift the running event's start instead.
            db.updateSettings(adjustedStart,db.getCurrentTaskID());
            return true;
        }
    }finally{
        impacted.close();
    }
}
//Find which events will be deleted by an expansion of the edited event
private List<Long> getStartDeleted(DBHelper db, long newStartEpoch, long currentStartEpoch){
    //the deleted items have newStart < itemStart < currentStart-1
    //(presumably an inclusive range query; empty when the start moved later,
    //not earlier - TODO confirm getImpactedEvents bounds semantics)
    return db.getImpactedEvents(DBMap.EventTable.start,newStartEpoch,(currentStartEpoch-1));
}
//Mirror image: events whose end lies between the old and the new (later) end.
private List<Long> getEndDeleted(DBHelper db, long newEndEpoch, long currentEndEpoch){
    return db.getImpactedEvents(DBMap.EventTable.end, (currentEndEpoch + 1),newEndEpoch);
}
} |
def sum_list(numbers):
    """Return the sum of all numbers in the list.

    Raises:
        TypeError: if ``numbers`` is not a list.
    """
    # Guard clause first: reject non-list input before touching it.
    if not isinstance(numbers, list):
        raise TypeError("Input must be a list of numbers!")
    total = 0
    for value in numbers:
        total += value
    return total
#!/bin/bash
# Bump every workspace crate (and intra-workspace dependency pin) to the
# version given as the first argument, then show the resulting diff.
# unset variables are errors
set -o nounset;
# any failed commands are errors
set -o errexit;
# this will bail with "unbound variable" if no arg provided
VERSION="$1";
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )";
ROOT="${DIR}/../../";

# Bump each crate's own `version = "..."` line.
# Fix: use the conventional `sed -i -e SCRIPT FILE` argument order - putting
# the file before -e only worked because GNU sed permutes its arguments.
for crate in flirt core pylancelot pyflirt bin; do
    sed -i -e "s/^version = \"\([^\"]*\)\"/version = \"$VERSION\"/g" "$ROOT/$crate/Cargo.toml";
done

# Bump the intra-workspace dependency pins to the same version.
sed -i -e "s/\(lancelot-flirt.*\)version = \"[^\"]*\"\(.*\)$/\1version = \"$VERSION\"\2/g" "$ROOT/core/Cargo.toml";
sed -i -e "s/\(lancelot.*\)version = \"[^\"]*\"\(.*\)$/\1version = \"$VERSION\"\2/g" "$ROOT/bin/Cargo.toml";
sed -i -e "s/\(lancelot.*\)version = \"[^\"]*\"\(.*\)$/\1version = \"$VERSION\"\2/g" "$ROOT/pylancelot/Cargo.toml";
sed -i -e "s/\(lancelot-flirt.*\)version = \"[^\"]*\"\(.*\)$/\1version = \"$VERSION\"\2/g" "$ROOT/pyflirt/Cargo.toml";
exec git diff;
|
import testSubject from './myAtoi'

/**
 * @file Unit Tests - myAtoi
 * @module myAtoi/tests
 */
describe('08/myAtoi', () => {
  // Each entry maps a case number to an input string and its expected result.
  const cases = {
    1: { expected: 42, s: '42' },
    2: { expected: -42, s: ' -42' },
    3: { expected: 4193, s: '4193 with words' },
    4: { expected: 0, s: 'words and 987' },
    5: { expected: (-2) ** 31, s: '-91283472332' }
  }

  // Human-readable label for a single test case.
  const describeResult = (s: string, expected: number): string => {
    return `${expected} given '${s}'`
  }

  Object.entries(cases).forEach(([num, { expected, s }]) => {
    describe(`case ${num}`, () => {
      it(`should return ${describeResult(s, expected)}`, () => {
        expect(testSubject(s)).toBe(expected)
      })
    })
  })
})
|
#!/bin/bash
# Build the SGX agent: resolve prerequisites, fetch/install SGX components,
# build the agent, and bundle everything into sgx_agent.tar (+ sha2 checksum).
SGX_AGENT_DIR=sgx_agent
TAR_NAME=$(basename $SGX_AGENT_DIR)

# Detect OS distribution and version from os-release, stripping quotes.
# Fix: the old `tr -d 'VERSION_ID="'` deleted every character in that set
# from the value (fragile); cut on '=' and drop quotes instead.
OS=$(grep ^ID= /etc/os-release | cut -d'=' -f2 | tr -d '"')
VER=$(grep ^VERSION_ID= /etc/os-release | cut -d'=' -f2 | tr -d '"')
OS_FLAVOUR="$OS""$VER"

# Bundle deployment scripts with the built agent and emit tar + checksum.
create_sgx_agent_tar()
{
	\cp -pf ../deploy_scripts/*.sh $SGX_AGENT_DIR
	\cp -pf ../deploy_scripts/README.install $SGX_AGENT_DIR
	\cp -pf ../deploy_scripts/agent.conf $SGX_AGENT_DIR
	tar -cf $TAR_NAME.tar -C $SGX_AGENT_DIR . --remove-files
	sha256sum $TAR_NAME.tar > $TAR_NAME.sha2
	echo "sgx_agent.tar file and sgx_agent.sha2 checksum file created"
}

# Start from a clean local SGX repo config on RHEL.
if [ "$OS" == "rhel" ]
then
	rm -f /etc/yum.repos.d/*sgx_rpm_local_repo.repo
fi

# Each step is sourced (it may export variables for later steps).
# Fix: a bare `exit` after echo returned the echo's status (0), so
# failures exited successfully; every failure path now exits 1.
source build_prerequisites.sh
if [ $? -ne 0 ]
then
	echo "failed to resolve package dependencies"
	exit 1
fi

source download_dcap_driver.sh
if [ $? -ne 0 ]
then
	echo "sgx dcap driver download failed"
	exit 1
fi

source install_sgxsdk.sh
if [ $? -ne 0 ]
then
	echo "sgxsdk install failed"
	exit 1
fi

source download_sgx_psw_qgl.sh
if [ $? -ne 0 ]
then
	echo "sgx psw, qgl rpms download failed"
	exit 1
fi

source download_mpa_uefi_rpm.sh
if [ $? -ne 0 ]
then
	echo "mpa uefi rpm download failed"
	exit 1
fi

source build_pckretrieval_tool.sh
if [ $? -ne 0 ]
then
	echo "pckretrieval tool build failed"
	exit 1
fi

source build_sgx_agent.sh
if [ $? -ne 0 ]
then
	echo "sgx agent build failed"
	exit 1
fi

create_sgx_agent_tar
|
#!/bin/bash
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -xe
set -o pipefail

# K8s Namespace that all resources deployed to
NAMESPACE=kubeflow

usage()
{
    echo "usage: run_test.sh
    --results-gcs-dir GCS directory for the test results. Usually gs://<project-id>/<commit-sha>/initialization_test
    [--namespace k8s namespace where ml-pipelines is deployed. The tests run against the instance in this namespace]
    [-h help]"
}

# Parse command-line flags.
while [ "$1" != "" ]; do
    case $1 in
        --results-gcs-dir ) shift
                            RESULTS_GCS_DIR=$1
                            ;;
        --namespace )       shift
                            NAMESPACE=$1
                            ;;
        -h | --help )       usage
                            exit
                            ;;
        * )                 usage
                            exit 1
    esac
    shift
done

if [ -z "$RESULTS_GCS_DIR" ]; then
    usage
    exit 1
fi

if [[ ! -z "${GOOGLE_APPLICATION_CREDENTIALS}" ]]; then
    gcloud auth activate-service-account --key-file="${GOOGLE_APPLICATION_CREDENTIALS}"
fi

GITHUB_REPO=kubeflow/pipelines
BASE_DIR=/go/src/github.com/${GITHUB_REPO}
JUNIT_TEST_RESULT=junit_InitializationTestOutput.xml
TEST_DIR=backend/test/initialization

cd "${BASE_DIR}/${TEST_DIR}"

# turn on go module
export GO111MODULE=on

echo "Run Initialization test..."
LOG_FILE=$(mktemp)
# Note, "set -o pipefail" at top of file is required to catch exit code of the pipe.
TEST_EXIT_CODE=0 # reference for how to save exit code: https://stackoverflow.com/a/18622662
# Fix: quote all variable expansions so paths/namespaces with unusual
# characters cannot word-split or glob.
go test -v ./... -namespace "${NAMESPACE}" -args -runIntegrationTests=true |& tee "$LOG_FILE" || TEST_EXIT_CODE=$?

# Convert test result to junit.xml
< "$LOG_FILE" go-junit-report > "${JUNIT_TEST_RESULT}"

echo "Copy test result to GCS ${RESULTS_GCS_DIR}/${JUNIT_TEST_RESULT}"
gsutil cp "${JUNIT_TEST_RESULT}" "${RESULTS_GCS_DIR}/${JUNIT_TEST_RESULT}"

exit $TEST_EXIT_CODE
|
<reponame>jmccrae/saffron<filename>web/src/main/java/org/insightcentre/saffron/web/Executor.java
package org.insightcentre.saffron.web;
import com.fasterxml.jackson.databind.ObjectWriter;
import org.insightcentre.nlp.saffron.run.InclusionList;
import java.io.*;
import java.net.URL;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.io.FileUtils;
import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.server.handler.AbstractHandler;
import org.insightcentre.nlp.saffron.config.Configuration;
import org.insightcentre.nlp.saffron.data.*;
import org.insightcentre.nlp.saffron.data.connections.AuthorAuthor;
import org.insightcentre.nlp.saffron.data.connections.AuthorTerm;
import org.insightcentre.nlp.saffron.data.connections.TermTerm;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.flogger.FluentLogger;
import org.insightcentre.nlp.saffron.data.connections.DocumentTerm;
import org.insightcentre.nlp.saffron.run.RunConfiguration;
import org.insightcentre.nlp.saffron.run.SaffronPipeline;
import org.insightcentre.nlp.saffron.run.SaffronRunListener;
/**
*
* @author <NAME> <<EMAIL>>
*/
public class Executor extends AbstractHandler {

    //Backing store for runs and their results.
    private final SaffronDataSource data;
    //Directory under which each run writes its output folder.
    private final File parentDirectory;
    //In-memory status of each run, keyed by dataset name.
    private final Map<String, Status> statuses;
    //Configuration for new runs; falls back to defaults when config.json is missing.
    private Configuration defaultConfig;
    //Optional file that run logs are appended to (may be null).
    private final File logFile;
    //NOTE(review): read from the environment but not referenced in this class -
    //confirm external use before removing.
    static String storeCopy = System.getenv("STORE_LOCAL_COPY");
    private static final FluentLogger logger = FluentLogger.forEnclosingClass();
/**
 * Create the executor handler.
 *
 * @param data      backing store for runs
 * @param directory parent directory for run output folders
 * @param logFile   shared execution log file (may be null to disable file logging)
 */
public Executor(SaffronDataSource data, File directory, File logFile) {
    this.data = data;
    this.parentDirectory = directory;
    this.statuses = new HashMap<>();
    try {
        //Prefer the user's config.json under ${saffron.home}; fall back to defaults.
        this.defaultConfig = new ObjectMapper().readValue(new SaffronPath("${saffron.home}/configs/config.json").toFile(), Configuration.class);
    } catch (IOException x) {
        this.defaultConfig = new Configuration();
        logger.atInfo().log("Could not load config.json in models folder... using default configuration (" + x.getMessage() + ")");
    }
    this.logFile = logFile;
}
/**
 * Initialize a new Saffron Dataset.
 *
 * @param name The name
 * @return True if a new dataset was created; false if one already exists
 * @throws IOException if the underlying data source fails
 */
public boolean newDataSet(String name) throws IOException {
    if (data.containsKey(name)) {
        return false;
    } else {
        data.addRun(name, new Date(), this.defaultConfig);
        return true;
    }
}

/** @return true while the named run has started (stage &gt; 0) but not yet completed. */
public boolean isExecuting(String name) {
    return statuses.containsKey(name) && statuses.get(name).stage > 0 && !statuses.get(name).completed;
}

/** @return the directory under which per-run output folders are created. */
public File getParentDirectory() {
    return this.parentDirectory;
}
/**
 * Route the /execute endpoints: start a run ({@code /execute}), accept an
 * advanced configuration ({@code /execute/advanced/<name>}), or report a
 * run's status as JSON ({@code /execute/status}).  Any exception is wrapped
 * in a ServletException after being logged.
 */
@Override
public void handle(String target, Request baseRequest, HttpServletRequest hsr,
        HttpServletResponse response) throws IOException, ServletException {
    try {
        if ("/execute".equals(target)) {
            String name = hsr.getParameter("name");
            doExecute(name, response, baseRequest, hsr);
        } else if (target.startsWith("/execute/advanced/")) {
            //The dataset name is the path suffix.
            final String saffronDatasetName = target.substring("/execute/advanced/".length());
            doAdvancedExecute(hsr, saffronDatasetName, response, baseRequest);
            //        } else if (target.startsWith("/api/v1/run/rerun")) {
            //            final String saffronDatasetName = target.substring("/api/v1/run/rerun/".length());
            //            if (saffronDatasetName != null && data.containsKey(saffronDatasetName)) {
            //                doRerun(saffronDatasetName, response, baseRequest);
            //            }
        } else if ("/execute/status".equals(target)) {
            String saffronDatasetName = hsr.getParameter("name");
            Status status = getStatus(saffronDatasetName);
            if (status != null) {
                //Serialize the full Status object (stage, messages, flags).
                response.setContentType("application/json");
                response.setStatus(HttpServletResponse.SC_OK);
                baseRequest.setHandled(true);
                ObjectMapper mapper = new ObjectMapper();
                mapper.writeValue(response.getWriter(), statuses.get(saffronDatasetName));
            } else {
                response.sendError(HttpServletResponse.SC_NOT_FOUND, "No executing run: " + saffronDatasetName);
                baseRequest.setHandled(true);
            }
        }
    } catch (Exception x) {
        x.printStackTrace();
        throw new ServletException(x);
    }
}
/**
 * Look up the live status of a run, or null when none is known.
 * NOTE(review): declared to throw IOException but never does; kept for
 * signature stability with callers that catch it.
 */
public Status getStatus(String saffronDatasetName) throws IOException {
    return statuses.get(saffronDatasetName);
}
// private void doRerun(final String saffronDatasetName, HttpServletResponse response, Request baseRequest) throws IOException, FileNotFoundException, NumberFormatException, JSONException {
// Status _status = makeStatus();
// _status.name = saffronDatasetName;
// statuses.put(saffronDatasetName, _status);
// response.setContentType("text/html");
// response.setStatus(HttpServletResponse.SC_OK);
// baseRequest.setHandled(true);
// FileReader reader = new FileReader(new File(System.getProperty("saffron.home") + "/web/static/executing.html"));
// Writer writer = new StringWriter();
// char[] buf = new char[4096];
// int p = 0;
// while ((p = reader.read(buf)) >= 0) {
// writer.write(buf, 0, p);
// }
// response.getWriter().write(writer.toString().replace("{{name}}", saffronDatasetName));
// String mongoUrl = System.getenv("MONGO_URL");
// if (mongoUrl == null) {
// mongoUrl = "localhost";
// }
// String mongoPort = System.getenv("MONGO_PORT");
// if (mongoPort == null) {
// mongoPort = "27017";
// }
//
// MongoDBHandler mongo = new MongoDBHandler();
// String run = mongo.getRun(saffronDatasetName);
// JSONObject configObj = new JSONObject(run);
// String confJson = (String) configObj.get("config");
// JSONObject config = new JSONObject(confJson);
// JSONObject termExtractionConfig = (JSONObject) config.get("termExtraction");
// JSONObject authorTermConfig = (JSONObject) config.get("authorTerm");
// JSONObject authorSimConfig = (JSONObject) config.get("authorSim");
// JSONObject termSimConfig = (JSONObject) config.get("termSim");
// JSONObject taxonomyConfig = (JSONObject) config.get("taxonomy");
// //JSONObject conceptConsolidation = (JSONObject) config.get("conceptConsolidation");
// final Configuration newConfig = new Configuration();
// TermExtractionConfiguration terms
// = new ObjectMapper().readValue(termExtractionConfig.toString(), TermExtractionConfiguration.class);
// AuthorTermConfiguration authorTerm
// = new ObjectMapper().readValue(authorTermConfig.toString(), AuthorTermConfiguration.class);
// AuthorSimilarityConfiguration authorSimilarityConfiguration
// = new ObjectMapper().readValue(authorSimConfig.toString(), AuthorSimilarityConfiguration.class);
// TermSimilarityConfiguration termSimilarityConfiguration
// = new ObjectMapper().readValue(termSimConfig.toString(), TermSimilarityConfiguration.class);
// TaxonomyExtractionConfiguration taxonomyExtractionConfiguration
// = new ObjectMapper().readValue(taxonomyConfig.toString(), TaxonomyExtractionConfiguration.class);
// //ConceptConsolidationConfiguration conceptConsolidationConfiguration
// // = conceptConsolidation != null ? new ObjectMapper().readValue(conceptConsolidation.toString(), ConceptConsolidationConfiguration.class) : new ConceptConsolidationConfiguration();
// newConfig.authorSim = authorSimilarityConfiguration;
// newConfig.authorTerm = authorTerm;
// newConfig.taxonomy = taxonomyExtractionConfiguration;
// newConfig.termExtraction = terms;
// newConfig.termSim = termSimilarityConfiguration;
// //newConfig.conceptConsolidation = conceptConsolidationConfiguration;
//
// Corpus corpus = mongo.getCorpus(saffronDatasetName);
//
// new Thread(new Runnable() {
//
// @Override
// @SuppressWarnings("UseSpecificCatch")
// public void run() {
// _status.stage = 1;
// try {
// RunConfiguration runConfig = new RunConfiguration(corpus, getInclusionList(saffronDatasetName), false, RunConfiguration.KGMethod.TAXO, false, null);
// getStatus(saffronDatasetName).runConfig = runConfig;
// SaffronPipeline.execute(runConfig, new File(parentDirectory, saffronDatasetName), newConfig, saffronDatasetName, getStatus(saffronDatasetName));
// } catch (Throwable x) {
// Status _status = statuses.get(saffronDatasetName);
// _status.fail(x.getMessage(), x);
// x.printStackTrace();
// }
// }
// }).start();
// }
/**
 * Read a {@link Configuration} as JSON from the request body and launch the
 * pending run with it on a background thread.
 *
 * @return false on success; true when the body could not be parsed.
 *         NOTE(review): on parse failure no error status is sent to the
 *         client and the request is left unhandled - confirm intended.
 */
private boolean doAdvancedExecute(HttpServletRequest hsr, final String saffronDatasetName, HttpServletResponse response, Request baseRequest) throws IOException {
    BufferedReader r = hsr.getReader();
    StringBuilder sb = new StringBuilder();
    final Configuration newConfig;
    try {
        //Accumulate the whole request body before parsing.
        String line;
        while ((line = r.readLine()) != null) {
            sb.append(line).append("\n");
        }
        newConfig = new ObjectMapper().readValue(sb.toString(), Configuration.class);
    } catch (Exception x) {
        x.printStackTrace();
        return true;
    }
    // Clear the 'advanced' status so the system switches to the spinner
    statuses.get(saffronDatasetName).advanced = false;
    new Thread(new Runnable() {
        @Override
        @SuppressWarnings("UseSpecificCatch")
        public void run() {
            try {
                //Run the full pipeline with the user-supplied configuration.
                SaffronPipeline.execute(getStatus(saffronDatasetName).runConfig, new File(parentDirectory, saffronDatasetName), newConfig, saffronDatasetName, getStatus(saffronDatasetName));
            } catch (Exception x) {
                Status _status = statuses.get(saffronDatasetName);
                _status.fail(x.getMessage(), x);
                x.printStackTrace();
            }
        }
    }).start();
    response.setStatus(HttpServletResponse.SC_OK);
    baseRequest.setHandled(true);
    return false;
}
/**
 * Serve either the advanced-configuration page (when the run was started in
 * advanced mode) or the standard "executing" spinner page, with the dataset
 * name and default configuration templated in.
 */
public void doExecute(String name, HttpServletResponse response, Request baseRequest, HttpServletRequest hsr) throws IOException {
    if (statuses.containsKey(name) && statuses.get(name).advanced) {
        response.setContentType("text/html");
        response.setStatus(HttpServletResponse.SC_OK);
        baseRequest.setHandled(true);
        String page = FileUtils.readFileToString(new File(System.getProperty("saffron.home") + "/web/static/advanced.html"));
        page = page.replace("{{config}}", new ObjectMapper().writeValueAsString(defaultConfig));
        page = page.replace("{{name}}", hsr.getParameter("name"));
        response.getWriter().print(page);
    } else {
        final String saffronDatasetName = hsr.getParameter("name");
        response.setContentType("text/html");
        response.setStatus(HttpServletResponse.SC_OK);
        baseRequest.setHandled(true);
        //Fix: the FileReader was never closed; try-with-resources releases
        //the handle even when reading fails.
        StringWriter writer = new StringWriter();
        try (FileReader reader = new FileReader(new File(System.getProperty("saffron.home") + "/web/static/executing.html"))) {
            char[] buf = new char[4096];
            int i;
            while ((i = reader.read(buf)) >= 0) {
                writer.write(buf, 0, i);
            }
        }
        response.getWriter().write(writer.toString().replace("{{name}}", saffronDatasetName));
    }
}
/**
 * Launch a Saffron run over a ZIP corpus on a background thread.
 *
 * @param tmpFile            the uploaded ZIP corpus
 * @param advanced           when true, pause for advanced configuration
 *                           instead of executing immediately
 * @param saffronDatasetName the run/dataset name
 */
public void startWithZip(final File tmpFile, final boolean advanced, final String saffronDatasetName) {
    final Status _status = makeStatus();
    _status.name = saffronDatasetName;
    statuses.put(saffronDatasetName, _status);
    new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                _status.stage = 1;
                _status.setStageStart("Loading corpus " + tmpFile.getPath(), saffronDatasetName);
                try {
                    final RunConfiguration runConfig = new RunConfiguration(tmpFile, RunConfiguration.CorpusMethod.ZIP, getInclusionList(saffronDatasetName), true, RunConfiguration.KGMethod.TAXO, false, null);
                    _status.runConfig = runConfig;
                    if (advanced) {
                        //Wait for the user to submit configuration via /execute/advanced/.
                        _status.advanced = true;
                    } else {
                        SaffronPipeline.execute(runConfig, new File(parentDirectory, saffronDatasetName), defaultConfig, saffronDatasetName, _status);
                    }
                } catch (Throwable x) {
                    _status.fail(x.getMessage(), x);
                    logger.atInfo().log("Error: " + x.getMessage() + ")");
                    x.printStackTrace();
                }
                try {
                    _status.close();
                } catch (IOException x) {
                    //NOTE(review): close failure is deliberately ignored here
                    //(startWithCrawl logs it) - consider unifying.
                }
            } catch(Throwable x) {
                //Last-resort guard so the worker thread never dies silently.
                x.printStackTrace();
            }
        }
    }).start();
}
/**
 * Create a Status for a new run, appending to the shared log file when one
 * is configured; falls back to an unlogged Status if the file cannot be created.
 */
private Status makeStatus() {
    try {
        if (logFile != null && !logFile.exists()) {
            //Touch the file so the append-mode FileWriter below can open it.
            PrintWriter out = new PrintWriter(logFile);
            out.close();
        }
        return new Status(logFile == null ? null : new PrintWriter(new FileWriter(logFile, true)), data, parentDirectory);
    } catch (IOException x) {
        System.err.println("Could not create logging file: " + x.getMessage());
        return new Status(null, data, parentDirectory);
    }
}
/**
 * Launch a Saffron run over a web crawl on a background thread.
 *
 * @param url                seed URL of the crawl
 * @param maxPages           maximum number of pages to fetch
 * @param domain             restrict the crawl to the seed's domain
 * @param advanced           when true, pause for advanced configuration
 *                           instead of executing immediately
 * @param saffronDatasetName the run/dataset name
 */
public void startWithCrawl(final String url, final int maxPages, final boolean domain,
        final boolean advanced, final String saffronDatasetName) {
    final Status _status = makeStatus();
    _status.name = saffronDatasetName;
    statuses.put(saffronDatasetName, _status);
    new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                _status.stage = 1;
                _status.setStageStart("Loading corpus " + url, saffronDatasetName);
                try {
                    //A malformed URL fails here and is reported via _status.fail below.
                    URL url2 = new URL(url);
                    final RunConfiguration runConfig = new RunConfiguration(url2, getInclusionList(saffronDatasetName), true, RunConfiguration.KGMethod.TAXO, maxPages, domain, false);
                    _status.runConfig = runConfig;
                    if (advanced) {
                        //Wait for the user to submit configuration via /execute/advanced/.
                        _status.advanced = true;
                    } else {
                        SaffronPipeline.execute(runConfig, new File(parentDirectory, saffronDatasetName), defaultConfig, saffronDatasetName, _status);
                    }
                } catch (Exception x) {
                    _status.fail(x.getMessage(), x);
                }
                try {
                    _status.close();
                } catch (IOException x) {
                    x.printStackTrace();
                }
            } catch(Throwable t) {
                //Last-resort guard so the worker thread never dies silently.
                t.printStackTrace();
            }
        }
    }).start();
}
/**
 * Launch a Saffron run over a JSON corpus file on a background thread.
 *
 * @param tmpFile            the uploaded JSON corpus
 * @param advanced           when true, pause for advanced configuration
 *                           instead of executing immediately
 * @param saffronDatasetName the run/dataset name
 */
public void startWithJson(final File tmpFile, final boolean advanced, final String saffronDatasetName) {
    final Status _status = makeStatus();
    _status.name = saffronDatasetName;
    statuses.put(saffronDatasetName, _status);
    new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                _status.stage = 1;
                //Fix: message was missing the space after "corpus"
                //(now consistent with startWithZip/startWithCrawl).
                _status.setStageStart("Loading corpus " + tmpFile.getPath(), saffronDatasetName);
                try {
                    final RunConfiguration runConfig = new RunConfiguration(tmpFile, RunConfiguration.CorpusMethod.JSON, getInclusionList(saffronDatasetName), true, RunConfiguration.KGMethod.TAXO, false, null);
                    _status.runConfig = runConfig;
                    if (advanced) {
                        //Wait for the user to submit configuration via /execute/advanced/.
                        _status.advanced = true;
                    } else {
                        SaffronPipeline.execute(runConfig, new File(parentDirectory, saffronDatasetName), defaultConfig, saffronDatasetName, _status);
                    }
                } catch (Exception x) {
                    _status.fail(x.getMessage(), x);
                    x.printStackTrace();
                }
                try {
                    _status.close();
                } catch (IOException x) {
                    //Fix: log instead of silently swallowing (matches startWithCrawl).
                    x.printStackTrace();
                }
            } catch(Throwable t) {
                //Last-resort guard so the worker thread never dies silently.
                t.printStackTrace();
            }
        }
    }).start();
}
/**
 * Cap the term-extraction thread count so each thread has roughly 400 MB of
 * heap available, warning on stderr when the count is reduced.
 * NOTE(review): not referenced from this class - confirm callers before removal.
 */
private void scaleThreads(Configuration config) {
    long heapSize = Runtime.getRuntime().maxMemory();
    //400 MB-per-thread heuristic; mutates config in place.
    if ((long) config.termExtraction.numThreads * 1024 * 1024 * 400 > heapSize) {
        int numThreads = (int) Math.ceil((double) heapSize / 1024 / 1024 / 400);
        System.err.println(String.format("System memory %d MB", heapSize / 1024 / 1024));
        System.err.println(String.format("Insufficient memory for %d threads, reducing to %d", config.termExtraction.numThreads, numThreads));
        System.err.println("Try setting the -Xmx flag to the Java Runtime to improve performance");
        config.termExtraction.numThreads = numThreads;
    }
}
/**
 * Fetch the term inclusion/exclusion list for a run, defaulting to an empty
 * list when none is available.
 */
private InclusionList getInclusionList(String saffronDatasetName) throws JsonParseException, JsonMappingException, IOException {
    InclusionList allowDenyList = extractInclusionList(saffronDatasetName);
    //NOTE(review): extractInclusionList never returns null at present, so
    //this fallback is defensive only.
    if (allowDenyList == null) {
        allowDenyList = new InclusionList<TaxoLink>();
    }
    return allowDenyList;
}

/** Extension hook; the base implementation returns an empty inclusion list. */
public InclusionList extractInclusionList(String datasetName) throws JsonParseException, JsonMappingException, IOException {
    return new InclusionList();
}
/**
 * Per-run execution status: tracks pipeline stage, failure/warning state,
 * and persists intermediate results both to the data source and to JSON
 * files in the run's output folder as the pipeline reports them.
 */
public static class Status implements SaffronRunListener, Closeable {

    //Pipeline stage counter; 0 means not started.
    public int stage = 0;
    public boolean failed = false;
    public boolean completed = false;
    //True while waiting for the user to submit advanced configuration.
    public boolean advanced = false;
    public boolean warning = false;
    //Dataset/run name; also used as the per-run output subfolder name.
    public String name;
    public String statusMessage;
    //Log sink; null when no log file is configured.
    private final PrintWriter out;
    private final SaffronDataSource data;
    public RunConfiguration runConfig;
    private final ObjectMapper mapper;
    private final ObjectWriter writer;
    //Parent of the per-run output folder; null with the two-arg constructor.
    private final File outputFolder;
/** Create a status with no output folder; results are not written to disk. */
public Status(PrintWriter out, SaffronDataSource data) {
    //Delegate to the main constructor for consistency (outputFolder == null).
    this(out, data, null);
}

/**
 * @param out          log sink, or null to disable file logging
 * @param data         backing store updated as results arrive
 * @param outputFolder parent directory for per-run JSON output
 */
public Status(PrintWriter out, SaffronDataSource data, File outputFolder) {
    this.out = out;
    this.data = data;
    this.outputFolder = outputFolder;
    this.mapper = new ObjectMapper();
    this.writer = mapper.writerWithDefaultPrettyPrinter();
}
/** Record and log a progress message for the current stage. */
public void setStatusMessage(String statusMessage) {
    logger.atInfo().log("[STAGE %d] %s\n", stage, statusMessage);
    if (out != null) {
        out.printf("[STAGE %d] %s\n", stage, statusMessage);
        out.flush();
    }
    //Fix: the field was never assigned, so the JSON status endpoint kept
    //reporting a stale message after calls to this setter (the error and
    //warning setters do assign it).
    this.statusMessage = statusMessage;
}
/** Record and log an error message; surfaced via the status JSON. */
public void setErrorMessage(String errorMessage) {
    logger.atSevere().log("[STAGE %d] %s\n", stage, errorMessage);
    this.statusMessage = errorMessage;
}

/** Record and log a warning message; surfaced via the status JSON. */
public void setWarningMessage(String warningMessage) {
    logger.atWarning().log("[STAGE %d] %s\n", stage, warningMessage);
    this.statusMessage = warningMessage;
}
/** Record the start of a pipeline stage and surface the message. */
@Override
public void setStageStart(String statusMessage, String taxonomyId) {
    logger.atInfo().log("[STAGE %d] %s\n", stage, statusMessage);
    //NOTE(review): 'run' is fetched but unused - confirm whether getRun has
    //a needed side effect before removing the call.
    String run = data.getRun(taxonomyId);
    if (out != null) {
        out.printf("[STAGE %d] %s\n", stage, statusMessage);
    }
    if (out != null) {
        out.flush();
    }
    this.statusMessage = statusMessage;
}

/** Advance the stage counter; the completion message itself is not recorded. */
@Override
public void setStageComplete(String statusMessage, String taxonomyId) {
    //NOTE(review): 'run' is fetched but unused here as well.
    String run = data.getRun(taxonomyId);
    stage++;
}
/** Record a non-fatal warning, logging its stack trace to console and file. */
@Override
public void warning(String message, Throwable cause) {
    this.warning = true;
    setWarningMessage("Warning: " + message);
    cause.printStackTrace();
    if (out != null) {
        cause.printStackTrace(out);
    }
    if (out != null) {
        out.flush();
    }
}

/**
 * Mark the run as failed, remove its record from the data source and log
 * the cause (if any) to console and file.
 */
@Override
public void fail(String message, Throwable cause) {
    this.failed = true;
    setErrorMessage("Failed: " + message);
    //A failed run is removed so it can be re-created under the same name.
    data.remove(name);
    if (cause != null) {
        cause.printStackTrace();
        if (out != null) {
            cause.printStackTrace(out);
        }
        logger.atSevere().log("Failed due to " + cause.getClass().getName() + ": " + message);
    } else {
        logger.atSevere().log("Failed: " + message);
    }
    if (out != null) {
        out.flush();
    }
}

/** Forward a free-form pipeline log line to the logger and log file. */
@Override
public void log(String message) {
    logger.atInfo().log("[STAGE %d] %s\n", stage, message);
    if (out != null) {
        out.println(message);
    }
    if (out != null) {
        out.flush();
    }
}
/** Terminate a progress-dot line in the log output. */
@Override
public void endTick() {
    logger.atInfo().log("[STAGE %d] %s\n", stage, "");
    if (out != null) {
        out.println();
        out.flush();
    }
}

/** Emit a single progress dot to the log output. */
@Override
public void tick() {
    logger.atInfo().log("[STAGE %d] %s\n", stage, ".");
    if (out != null) {
        out.print(".");
        out.flush();
    }
}

/** Release the log writer, if one was configured. */
@Override
public void close() throws IOException {
    if (out != null) {
        out.close();
    }
}
/**
 * Store extracted terms in the data source and as terms.json in the run
 * output folder.
 * NOTE(review): these writer callbacks assume outputFolder is non-null
 * (i.e. the three-arg constructor was used) and that the per-run folder
 * already exists - confirm the pipeline guarantees both.
 */
@Override
public void setTerms(String saffronDatasetName, List<Term> terms) {
    data.setTerms(saffronDatasetName, terms);
    try {
        File outputFolder2 = new File(outputFolder.getAbsolutePath() + "/" + name);
        writer.writeValue(new File(outputFolder2, "terms.json"), terms);
    } catch(IOException x) {
        throw new RuntimeException(x);
    }
}

/** Store document-term links in the data source and as doc-terms.json. */
@Override
public void setDocTerms(String saffronDatasetName, List<DocumentTerm> docTerms) {
    data.setDocTerms(saffronDatasetName, docTerms);
    try {
        File outputFolder2 = new File(outputFolder.getAbsolutePath() + "/" + name);
        writer.writeValue(new File(outputFolder2, "doc-terms.json"), docTerms);
    } catch(IOException x) {
        throw new RuntimeException(x);
    }
}

/** Store the corpus in the data source and as corpus.json. */
@Override
public void setCorpus(String saffronDatasetName, Corpus searcher) {
    data.setCorpus(saffronDatasetName, searcher);
    try {
        File outputFolder2 = new File(outputFolder + "/" + name);
        writer.writeValue(new File(outputFolder2, "corpus.json"), searcher);
    } catch(IOException x) {
        throw new RuntimeException(x);
    }
}

/** Store author-term links in the data source and as author-terms.json. */
@Override
public void setAuthorTerms(String saffronDatasetName, Collection<AuthorTerm> authorTerms) {
    data.setAuthorTerms(saffronDatasetName, authorTerms);
    try {
        File outputFolder2 = new File(outputFolder + "/" + name);
        writer.writeValue(new File(outputFolder2, "author-terms.json"), authorTerms);
    } catch(IOException x) {
        throw new RuntimeException(x);
    }
}

/** Store term-term similarities in the data source and as term-sim.json. */
@Override
public void setTermSim(String saffronDatasetName, List<TermTerm> termSimilarity) {
    data.setTermSim(saffronDatasetName, termSimilarity);
    try {
        File outputFolder2 = new File(outputFolder + "/" + name);
        writer.writeValue(new File(outputFolder2, "term-sim.json"), termSimilarity);
    } catch(IOException x) {
        throw new RuntimeException(x);
    }
}

/** Store author-author similarities in the data source and as author-sim.json. */
@Override
public void setAuthorSim(String saffronDatasetName, List<AuthorAuthor> authorSim) {
    data.setAuthorSim(saffronDatasetName, authorSim);
    try {
        File outputFolder2 = new File(outputFolder + "/" + name);
        writer.writeValue(new File(outputFolder2, "author-sim.json"), authorSim);
    } catch(IOException x) {
        throw new RuntimeException(x);
    }
}

/** Store the taxonomy in the data source and as taxonomy.json. */
@Override
public void setTaxonomy(String saffronDatasetName, Taxonomy graph) {
    data.setTaxonomy(saffronDatasetName, graph);
    try {
        File outputFolder2 = new File(outputFolder + "/" + name);
        writer.writeValue(new File(outputFolder2, "taxonomy.json"), graph);
    } catch(IOException x) {
        throw new RuntimeException(x);
    }
}

/** Store the knowledge graph in the data source and as kg.json. */
@Override
public void setKnowledgeGraph(String saffronDatasetName, KnowledgeGraph kGraph) {
    data.setKnowledgeGraph(saffronDatasetName, kGraph);
    try {
        File outputFolder2 = new File(outputFolder + "/" + name);
        writer.writeValue(new File(outputFolder2, "kg.json"), kGraph);
    } catch(IOException x) {
        throw new RuntimeException(x);
    }
}
    @Override
    public void start(String taxonomyId, Configuration config) {
        // Begin a fresh run: drop any previously stored run with this id so the
        // new configuration replaces it, then register the run with a timestamp.
        data.deleteRun(taxonomyId);
        data.addRun(taxonomyId, new Date(), config);
        // Reset the progress flag for the new run.
        this.advanced = false;
    }
    @Override
    public void end(String taxonomyId) {
        // Mark the run as finished. NOTE(review): taxonomyId is ignored here —
        // this instance appears to track a single run; confirm against callers.
        this.completed = true;
    }
    @Override
    public void setDomainModelTerms(String saffronDatasetName, Set<Term> terms) {
        // Intentional no-op: domain-model terms are not available to the Web
        // module, so there is nothing to persist here.
    }
}
}
|
package binary_search;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.StringTokenizer;
/**
 *
 * @author exponential-e
 * Baekjoon (BOJ) problem 18113: "Geureuda Kim-ganom" (그르다 김가놈)
 *
 * @see https://www.acmicpc.net/problem/18113
 *
 */
public class Boj18113 {
    public static void main(String[] args) throws Exception {
        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
        StringTokenizer st = new StringTokenizer(br.readLine());

        int N = Integer.parseInt(st.nextToken()); // number of gim pieces
        int K = Integer.parseInt(st.nextToken()); // length trimmed from an end
        int M = Integer.parseInt(st.nextToken()); // required piece count

        int[] L = new int[N];
        for (int i = 0; i < N; i++) {
            L[i] = Integer.parseInt(br.readLine());
        }
        System.out.println(minLen(N, K, M, L));
    }

    /**
     * Binary-searches the largest piece length such that, after trimming each
     * gim (one end of length {@code k} when shorter than {@code 2k}, both ends
     * when at least {@code 2k}), at least {@code m} pieces of that length can
     * be cut in total.
     *
     * Package-private (widened from private) so it can be unit-tested; the
     * behaviour for existing callers is unchanged.
     *
     * @param n   number of gim pieces
     * @param k   trim length per end
     * @param m   minimum number of pieces required
     * @param arr the raw gim lengths
     * @return the maximum achievable piece length, or -1 if none works
     */
    static int minLen(int n, long k, int m, int[] arr) {
        int res = -1;
        int start = 1, end = 1_000_000_000;
        while (start <= end) {
            // Overflow-safe midpoint: (start + end) / 2 can exceed
            // Integer.MAX_VALUE for larger bounds; this form never does.
            int mid = start + (end - start) / 2;
            long count = 0;
            for (int i = 0; i < n; i++) {
                if (arr[i] <= k) continue; // too short to trim at all
                long usable = (arr[i] < 2 * k)
                        ? arr[i] - k       // trim only one end
                        : arr[i] - 2 * k;  // trim both ends
                count += usable / mid;
            }
            if (count >= m) {
                // mid is feasible; later successful mids are strictly larger,
                // so plain assignment replaces the redundant Math.max.
                res = mid;
                start = mid + 1;
            } else {
                end = mid - 1;
            }
        }
        return res;
    }
}
|
<!DOCTYPE html>
<html>
<head>
    <title>Weather Update</title>
    <!-- Fix: `defer` makes the script run after the DOM is parsed. Without it,
         a script in <head> executes before <div id="weather-info"> exists, so
         document.getElementById("weather-info") returns null. -->
    <script type="text/javascript" src="js/main.js" defer></script>
</head>
<body>
    <h1>Weather Update</h1>
    <div id="weather-info">
        <!-- Weather info is added by JavaScript -->
    </div>
</body>
</html>
// JavaScript
// Render the current temperature for each city into #weather-info.
let locations = ["New York", "London", "Tokyo"];
let weatherInfo = document.getElementById("weather-info");

for (let loc of locations) {
    getWeatherData(loc)
        .then((data) => {
            // Fix: the original template never closed the <p> element,
            // appending malformed HTML on every iteration.
            weatherInfo.innerHTML += `<p>The temperature in ${data.name} is ${data.main.temp}°C</p>`;
        })
        // Fix: a failed request previously surfaced as an unhandled rejection.
        .catch((err) => console.error(`Failed to fetch weather for ${loc}:`, err));
}
// Fetches current weather data for the given location.
// Fix: the caller chains .then(...) on the return value, so this MUST return
// a Promise — the original stub returned undefined, which throws a TypeError
// at the call site.
function getWeatherData(location) {
    // NOTE(review): endpoint and API key are assumed — confirm against the
    // real service configuration before deploying.
    const url = `https://api.openweathermap.org/data/2.5/weather?q=${encodeURIComponent(location)}&units=metric&appid=YOUR_API_KEY`;
    return fetch(url).then((response) => {
        if (!response.ok) {
            throw new Error(`Weather request failed: ${response.status}`);
        }
        return response.json();
    });
}
#!/usr/bin/env bash
# Initialise and start a Label Studio sentiment-analysis project.
# Port defaults to 8200; override via the PORT environment variable.
set -euo pipefail

# NOTE(review): $1 was assigned but never used in the original; kept (with a
# safe default under `set -u`) in case callers still pass it.
example_dir="${1:-}"

python label_studio/server.py init --template=sentiment_analysis sentiment_analysis_project
python label_studio/server.py start sentiment_analysis_project -p "${PORT:-8200}"
|
<reponame>Shock451/devalert
/* eslint-disable no-param-reassign */
var Validator = require('validator');
var isEmpty = require('./is-empty');
// Validates a job-advert payload; returns { errors, isValid }.
const validateQueryText = data => {
    const errors = {};

    // Coerce absent fields to empty strings so the validator can inspect them.
    // (advert_header and location normalisation are intentionally disabled.)
    ['company_name', 'job_title', 'job_link', 'job_description', 'job_category']
        .forEach((field) => {
            data[field] = !isEmpty(data[field]) ? data[field] : '';
        });

    // Required-field checks. Checks for job_link, job_category, advert_header
    // and location are intentionally disabled.
    const required = [
        ['company_name', 'Company Name is required'],
        ['job_title', 'The job title is required'],
        ['job_description', 'Job Description is required'],
    ];
    required.forEach(([field, message]) => {
        if (Validator.isEmpty(data[field])) {
            errors[field] = message;
        }
    });

    return {
        errors,
        isValid: isEmpty(errors),
    };
};
module.exports = validateQueryText;
|
<filename>docs/theme/styles/colors.js
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Color palette constants shared by the documentation theme.
export const white = "#FFFFFF";
export const grayUltraLight = "#FCFBFA";
export const grayExtraLight = "#F5F6F7";
export const grayLight = "#CED4DE";
export const gray = "#7D899C";
export const grayDark = "#2D3747";
export const grayExtraDark = "#1D2330";
export const dark = "#13161F";
export const blueLight = "#e9f2fa";
// Fix: removed a stray trailing space inside the hex value ("#4a90e2 ").
export const blue = "#4a90e2";
export const skyBlue = "#1FB6FF";
export const negative = "#EB4D4B";
export const green = "#37bb9b";
export const greenLight = "#529d8b";
|
#!/usr/bin/env bash
# Submit the CassandraRedisIndexer ETL job to the local Spark installation.
set -euo pipefail

# Fix: `home=\`echo $HOME\`` spawned a useless subshell and was unquoted;
# read the variable directly and quote all expansions.
home="$HOME"
jobJarPath="${home}/etl-jobs-1.0/etl-jobs-1.0.jar"
jobConfPath="${home}/etl-jobs-1.0/resources/cassandraRedis.conf"

spark/bin/spark-submit \
    --conf spark.driver.extraJavaOptions="-Dconfig.file=${jobConfPath}" \
    --class org.ekstep.analytics.jobs.CassandraRedisIndexer \
    "${jobJarPath}"
|
import './registerServiceWorker'
import './filters'
import Vue from 'vue'
import App from './app.vue'
import { isCordova } from './const'
import vuetify from './plugins/vuetify'
import router from './router'
import store from './store'
Vue.config.productionTip = false

// Vuetify emits this specific, harmless warning for `.native` on <svg>;
// suppress it and forward every other warning to the console.
const ignoredMessage = 'The .native modifier for v-on is only valid on components but it was used on <svg>.'
Vue.config.warnHandler = (message, vm, trace) => {
  if (message === ignoredMessage) return
  // eslint-disable-next-line no-console
  console.error(message + trace)
}

// On Cordova builds, swallow the hardware back button so it cannot close the app.
if (isCordova) {
  document.addEventListener('deviceready', () => {
    document.addEventListener('backbutton', (e) => { e.preventDefault() }, false)
  }, false)
}

new Vue({
  router,
  store,
  vuetify,
  render: h => h(App)
}).$mount('#app')
|
<gh_stars>0
#cDCheck: A Python script to check for, and delete duplicate files in a directory
#(C) <NAME> - MIT License
import os #for directory access
import sys #for args
import threading #for threading
import re #for regex matching
#processes the files in range
def processRange(r1, r2, file_dict, dup_file_dict, files):
    """Hash files[r1:r2] by content and record duplicates.

    file_dict maps content-hash -> first file seen with that hash;
    dup_file_dict maps content-hash -> list of every file sharing that hash.
    Both dicts are shared across worker threads by the caller.

    NOTE(review): the dict updates below are not locked; CPython's GIL makes
    each individual operation atomic, but a check-then-set race between
    threads could still miss a duplicate pair — confirm this is acceptable.
    """
    for i in range(r1, r2):
        # Fix: the original left the file handle open (open(...).read());
        # the with-statement closes it deterministically.
        with open(files[i], "rb") as f:
            h = hash(f.read())
        if h in file_dict:
            if h in dup_file_dict:
                dup_file_dict[h].append(files[i])
            else:
                # First duplicate for this hash: record both the new file and
                # the one previously stored in file_dict.
                dup_file_dict[h] = [files[i], file_dict[h]]
        else:
            file_dict[h] = files[i]
#alerts the user to duplicates
def callOutDups(dup_file_dict):
    """Interactively resolve duplicates: for each hash, list its files and let
    the user keep one (by number), skip ('s'), or keep only regex matches ('r').

    NOTE(review): the code below relies on `j` leaking out of the enumerate
    loop as the last index; this assumes every list in dup_file_dict is
    non-empty (true when populated by processRange, which always stores at
    least two entries).
    """
    for i in dup_file_dict:
        print("Duplicate file detected with hash: " + str(i))
        print("Instances:")
        for j, k in enumerate(dup_file_dict[i]):
            print(str(j) + ": " + str(k))
        # Re-prompt until the user gives a usable answer.
        while True:
            c = input("Choose a number for the file you would like to maintain. Other options are:\ns to skip this file\nr to delete all files that DON'T contain a regex match\n")
            # 's' leaves this duplicate group untouched.
            if str(c).lower() == "s":
                break
            # 'r' deletes every file whose path does NOT match the given regex.
            if str(c).lower() == "r":
                r = input("Regex: ")
                try:
                    reg = re.compile(str(r))
                except Exception:
                    print("Unable to compile regex. Please try again.")
                    continue
                for z in range(0, j + 1):
                    # Delete all files in the group that don't match the regex.
                    if not reg.search(dup_file_dict[i][z]):
                        os.remove(dup_file_dict[i][z])
                print("Deleted files that didn't match regex: " + str(r))
                break
            try:
                c = int(c)
            except ValueError:
                print("Invalid input, choose one file (by number) to maintain")
                continue
            # A valid index keeps that file and deletes the rest of the group.
            if c >= 0 and c <= j:
                print("Performing requested action. Maintaining file " + str(c) + ". Deleting others.")
                for z in range(0, j + 1):
                    if z != c:
                        os.remove(dup_file_dict[i][z])
                break
            else:
                print("Invalid input, choose one file (by number) to maintain")
#does the iteration work
def checkPath(path, thread_count=4):
    """Scan the files directly under `path` for duplicates using
    `thread_count` hashing threads, then resolve them via callOutDups().

    Only regular files at the top level are considered; subdirectories are
    not recursed into.
    """
    file_dict = {}
    dup_file_dict = {}
    files = []
    print("Processing files in directory: " + path)
    for entry in os.listdir(path):
        f_path = os.path.join(path, entry)
        if os.path.isfile(f_path):
            files.append(f_path)
    file_count = len(files)
    print("Files found: " + str(file_count))

    # Fix: the original clamped thread_count to file_count and then divided by
    # it, so an empty directory raised ZeroDivisionError. Bail out cleanly.
    if file_count == 0:
        print("Done Processing Directory\n")
        print("Found 0 files with duplicates")
        callOutDups(dup_file_dict)
        return

    # Never start more threads than there are files.
    if thread_count > file_count:
        thread_count = file_count
    per_thread = file_count // thread_count
    # Base share per thread, then spread the remainder one file at a time.
    f_slice = [per_thread] * thread_count
    extra_files = file_count - per_thread * thread_count
    for i in range(len(f_slice)):
        if extra_files == 0:
            break
        f_slice[i] += 1
        extra_files -= 1

    # Each thread hashes a contiguous slice [s1, counter) of `files`.
    threads = []
    counter = 0
    for size in f_slice:
        s1 = counter
        counter += size
        threads.append(threading.Thread(
            target=processRange,
            args=(s1, counter, file_dict, dup_file_dict, files)))
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    print("Done Processing Directory\n")
    print("Found " + str(len(dup_file_dict)) + " files with duplicates")
    callOutDups(dup_file_dict)
#entrant function
def main():
    """CLI entry point.

    Usage: python cDCheck.py folderpath [number of threads, defaults to 4]
    """
    if len(sys.argv) < 2:
        print("Usage: python cDCheck.py folderpath <number of threads, defaults to 4>")
        return
    print("Please do not remove files from the given directory while this is running")
    path = sys.argv[1]
    if not os.path.exists(path):
        print("Given path does not exist, please check and try again")
        return
    if len(sys.argv) == 3:
        # A thread count was supplied; reject non-integers with a message.
        try:
            checkPath(path, int(sys.argv[2]))
            print("cDCheck Completed Successfully!")
        except ValueError:
            print("Number of threads is not an integer, please make it one and try again")
    elif len(sys.argv) == 2:
        checkPath(path)
        print("cDCheck Completed Successfully!")


if __name__ == '__main__':
    main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.