text
stringlengths 1
1.05M
|
|---|
<reponame>openstreetcam/android<filename>app/src/main/java/com/telenav/osv/upload/operation/UploadOperationSequence.java<gh_stars>10-100
package com.telenav.osv.upload.operation;
import androidx.annotation.NonNull;
import androidx.core.util.Consumer;
import com.telenav.osv.common.event.SimpleEventBus;
import com.telenav.osv.data.frame.datasource.local.FrameLocalDataSource;
import com.telenav.osv.data.sequence.datasource.local.SequenceLocalDataSource;
import com.telenav.osv.data.sequence.model.LocalSequence;
import com.telenav.osv.data.sequence.model.details.compression.SequenceDetailsCompressionBase;
import com.telenav.osv.data.sequence.model.details.compression.SequenceDetailsCompressionVideo;
import com.telenav.osv.data.video.datasource.VideoLocalDataSource;
import com.telenav.osv.item.KVFile;
import com.telenav.osv.network.KVApi;
import com.telenav.osv.upload.operation.image.UploadOperationImage;
import com.telenav.osv.upload.operation.video.UploadOperationVideo;
import com.telenav.osv.upload.progress.model.UploadUpdateDisk;
import com.telenav.osv.upload.progress.model.UploadUpdateProgress;
import com.telenav.osv.utils.Log;
import java.util.ArrayList;
import java.util.List;
import io.reactivex.Completable;
import io.reactivex.Flowable;
import io.reactivex.disposables.CompositeDisposable;
import io.reactivex.functions.Action;
import io.reactivex.schedulers.Schedulers;
/**
* The operation which will upload a metadata file to the network.
* <p> This will generate a stream via {@link #getStream()} method which is the only public entry point to this operation.
* @author horatiuf
* @see UploadOperationBase
* @see #getStream()
*/
public class UploadOperationSequence extends UploadOperationBase {

    /**
     * The identifier for the current class.
     */
    private static final String TAG = UploadOperationSequence.class.getSimpleName();

    /**
     * The synchronization object used in updates to provide thread safe operations.
     * <p>Held by every mutation of {@link #localSequence}, its compression details and the
     * progress model, since uploads of videos/frames may complete concurrently.
     */
    private static final Object synchObject = new Object();

    /**
     * The data source for sequence in order to manipulate the sequence information.
     * @see SequenceLocalDataSource
     */
    private SequenceLocalDataSource sequenceLocalDataSource;

    /**
     * The data source for video in order to manipulate the video information.
     * @see VideoLocalDataSource
     */
    private VideoLocalDataSource videoLocalDataSource;

    /**
     * The data source for the frame in order to manipulate the frame information.
     * @see FrameLocalDataSource
     */
    private FrameLocalDataSource frameLocalDataSource;

    /**
     * The sequence identifier which will be used in order to initiate the process to upload the sequence identified by the id.
     */
    private String sequenceId;

    /**
     * The {@code disposable} representing updates regarding progress of the upload operation.
     */
    private CompositeDisposable compositeDisposableUpdate;

    /**
     * Reference to the current sequence loaded from the memory. This will be used in order to give any information required by any operation.
     */
    private LocalSequence localSequence;

    /**
     * Progress update related to sequence.
     */
    private UploadUpdateProgress uploadUpdateProgressSequence;

    /**
     * The update progress consumer to be called in order to inform for progress updates.
     */
    private Consumer<UploadUpdateProgress> updateConsumer;

    /**
     * Default constructor for the current class. Registers on the internal event bus for disk
     * and progress updates published by the child operations.
     */
    public UploadOperationSequence(@NonNull String sequenceId,
                                   @NonNull SequenceLocalDataSource sequenceLocalDataSource,
                                   @NonNull String accessToken,
                                   @NonNull KVApi api,
                                   @NonNull FrameLocalDataSource frameLocalDataSource,
                                   @NonNull VideoLocalDataSource videoLocalDataSource,
                                   @NonNull Consumer<UploadUpdateProgress> updateConsumer) {
        super(accessToken, api, new SimpleEventBus());
        this.sequenceId = sequenceId;
        this.frameLocalDataSource = frameLocalDataSource;
        this.videoLocalDataSource = videoLocalDataSource;
        this.sequenceLocalDataSource = sequenceLocalDataSource;
        this.updateConsumer = updateConsumer;
        compositeDisposableUpdate = new CompositeDisposable();
        compositeDisposableUpdate.add(updateEventBus
                .filteredObservable(UploadUpdateDisk.class)
                .observeOn(Schedulers.io())
                .subscribe(this::processUploadUpdateDisk,
                        throwable -> Log.d(TAG, String.format("constructor updates disk. Error: %s", throwable.getLocalizedMessage()))));
        compositeDisposableUpdate.add(updateEventBus
                .filteredObservable(UploadUpdateProgress.class)
                .observeOn(Schedulers.io())
                .subscribe(this::processUploadUpdateProgress,
                        throwable -> Log.d(TAG, String.format("constructor updates progress. Error: %s", throwable.getLocalizedMessage()))));
    }

    /**
     * @return {@code Completable} composed of:
     * <ul>
     * <li> persistence get of the sequence with reward</li>
     * <li> metadata process - internally via {@link #processSequenceUploadMetadata(LocalSequence)}</li>
     * <li> tagging process - internally via {@link #processSequenceUploadTagging(KVFile, long)}</li>
     * <li> compression process - internally via {@link #processSequenceUploadCompression(boolean)}</li>
     * <li> complete process - internally via {@link #processSequenceUploadComplete()}</li>
     * <li> error logging and handling </li>
     * </ul>
     */
    public Completable getStream() {
        return sequenceLocalDataSource
                .getSequenceWithReward(sequenceId)
                .doOnSuccess(item -> {
                    // persist fields which are required by different operations
                    this.localSequence = item;
                    //setup the current progress update
                    long sequenceSize = localSequence.getLocalDetails().getDiskSize();
                    this.uploadUpdateProgressSequence = new UploadUpdateProgress(0, sequenceSize);
                    if (updateConsumer != null) {
                        updateConsumer.accept(uploadUpdateProgressSequence);
                    }
                    //return upload metadata stream
                })
                .flatMapCompletable(sequence ->
                        processSequenceUploadMetadata(sequence)
                                .andThen(Completable.defer(() -> processSequenceUploadTagging(sequence.getLocalDetails().getFolder(), sequence.getDetails().getOnlineId())))
                                .andThen(Completable.defer(() -> processSequenceUploadCompression(sequence.getCompressionDetails() instanceof SequenceDetailsCompressionVideo)))
                                .andThen(Completable.defer(this::processSequenceUploadComplete)))
                .retryWhen(this::handleDefaultRetryFlowableWithTimer)
                .doOnError(throwable -> Log.d(TAG, String.format("getStream. Status: error. Message: %s.", throwable.getLocalizedMessage())));
    }

    @Override
    public void dispose() {
        if (compositeDisposableUpdate != null && !compositeDisposableUpdate.isDisposed()) {
            compositeDisposableUpdate.dispose();
        }
    }

    /**
     * Process any computation required to update the disk size for the sequence.
     * @param uploadUpdateDisk the new {@code UploadUpdateDisk} received from any operation.
     */
    private void processUploadUpdateDisk(UploadUpdateDisk uploadUpdateDisk) {
        synchronized (synchObject) {
            long oldSize = localSequence.getLocalDetails().getDiskSize();
            long newCurrentDiskSize = oldSize - uploadUpdateDisk.getTotalUnit();
            boolean updateDiskSize = sequenceLocalDataSource.updateDiskSize(sequenceId, newCurrentDiskSize);
            Log.d(TAG,
                    String.format("updateDiskSize. Status: %s. Sequence id: %s. Old disk size: %s. New disk Size: %s.",
                            updateDiskSize,
                            sequenceId,
                            oldSize,
                            newCurrentDiskSize));
            // only mirror the new size in the cache when persistence accepted it
            if (updateDiskSize) {
                localSequence.getLocalDetails().setDiskSize(newCurrentDiskSize);
            }
        }
    }

    /**
     * Process any computation required to update the progress update for the sequence children.
     * @param uploadUpdateProgress the new {@code UploadUpdateProgress} representing a child.
     */
    private void processUploadUpdateProgress(UploadUpdateProgress uploadUpdateProgress) {
        synchronized (synchObject) {
            if (uploadUpdateProgressSequence != null) {
                if (uploadUpdateProgress.isCancel()) {
                    uploadUpdateProgressSequence.removeChild(uploadUpdateProgress);
                } else if (uploadUpdateProgress.isArchive()) {
                    uploadUpdateProgressSequence.archive(uploadUpdateProgress);
                } else {
                    uploadUpdateProgressSequence.addChild(uploadUpdateProgress);
                }
            }
        }
    }

    /**
     * @param videosIds the identifiers for all videos which will be uploaded.
     * @return {@code collection} with all the completable returned by creation and call of {@link UploadOperationVideo#getStream()} correlated to each video identifier given.
     */
    private Flowable<Completable> setUploadVideoStreamCollection(List<String> videosIds) {
        List<Completable> uploadCompressionCompletables = new ArrayList<>();
        for (String videoId : videosIds) {
            uploadCompressionCompletables.add(
                    new UploadOperationVideo(
                            accessToken,
                            api,
                            videoLocalDataSource,
                            videoId,
                            localSequence.getDetails().getOnlineId(),
                            updateEventBus,
                            videoSuccessResponseConsumer(),
                            null)
                            .getStream());
        }
        return Flowable.fromIterable(uploadCompressionCompletables);
    }

    /**
     * @param frameIds the identifiers for all frames which will be uploaded.
     * @return {@code collection} with all the completable returned by creation and call of {@link UploadOperationImage#getStream()} correlated to each frame identifier given.
     */
    private Flowable<Completable> setUploadFrameStreamCollection(List<String> frameIds) {
        List<Completable> uploadCompressionCompletables = new ArrayList<>();
        for (String frameId : frameIds) {
            uploadCompressionCompletables.add(
                    new UploadOperationImage(
                            accessToken,
                            api,
                            frameLocalDataSource,
                            frameId,
                            localSequence.getDetails().getOnlineId(),
                            updateEventBus,
                            frameSuccessResponseAction(),
                            null)
                            .getStream());
        }
        return Flowable.fromIterable(uploadCompressionCompletables);
    }

    /**
     * The action success consumer which will update the frame count for the current sequence in both persistence and cache.
     */
    private Action frameSuccessResponseAction() {
        return () -> {
            synchronized (synchObject) {
                SequenceDetailsCompressionBase compressionBase = localSequence.getCompressionDetails();
                int newLocationsCount = compressionBase.getLocationsCount() - 1;
                boolean updateSizeCount = sequenceLocalDataSource.updateCompressionSizeInfo(localSequence.getID(), newLocationsCount, 0);
                Log.d(TAG,
                        String.format("frameSuccessResponseAction. Status: %s. Message: Updating compression size info. Frame count: %s",
                                updateSizeCount,
                                newLocationsCount));
                compressionBase.setLocationsCount(newLocationsCount);
            }
        };
    }

    /**
     * The video success consumer which will update the frame count and video count for the current sequence in both persistence and cache.
     * <p>The read-modify-write of the compression counts runs under {@link #synchObject}, for
     * consistency with {@link #frameSuccessResponseAction()} and
     * {@link #processUploadUpdateDisk(UploadUpdateDisk)}, since video uploads may complete
     * concurrently (see {@code Completable.mergeDelayError} in
     * {@link #processSequenceUploadCompression(boolean)}).
     */
    private Consumer<Integer> videoSuccessResponseConsumer() {
        return (locationsCount) -> {
            synchronized (synchObject) {
                SequenceDetailsCompressionBase compressionBase = localSequence.getCompressionDetails();
                int newLocationsCount = compressionBase.getLocationsCount() - locationsCount;
                int newVideoCount = compressionBase.getLength() - 1;
                boolean updateSizeCount = sequenceLocalDataSource.updateCompressionSizeInfo(localSequence.getID(), newLocationsCount, newVideoCount);
                Log.d(TAG,
                        String.format("handleVideoSuccessSequenceUpdate. Status: %s. Message: Updating compression size info. Loc count: %s. Video count: %s",
                                updateSizeCount,
                                newLocationsCount,
                                newVideoCount));
                compressionBase.setLength(newVideoCount);
                compressionBase.setLocationsCount(newLocationsCount);
            }
        };
    }

    /**
     * @param localSequence the sequence given from local persistence which will be used to create the upload metadata operation.
     * @return Completable with the logic of {@link UploadOperationMetadata}. Also it will persist before fields which are required by different operations.
     */
    private Completable processSequenceUploadMetadata(LocalSequence localSequence) {
        return new UploadOperationMetadata(
                accessToken,
                api,
                this.localSequence.getLocalDetails().getFolder(),
                this.localSequence.getDetails(),
                updateEventBus,
                this.localSequence.getRewardDetails(),
                (onlineId) -> {
                    boolean updateOnlineId = sequenceLocalDataSource.updateOnlineId(sequenceId, onlineId);
                    Log.d(TAG, String.format("processSequenceUploadMetadata. Status: %s. Message: Persisting the sequence online id in the persistence.", updateOnlineId));
                    localSequence.getDetails().setOnlineId(onlineId);
                },
                null)
                .getStream();
    }

    /**
     * @param sequenceFolder the sequence given from local persistence which will be used to create the upload tagging operation.
     * @return Completable with the logic of {@link UploadOperationTagging}.
     */
    private Completable processSequenceUploadTagging(KVFile sequenceFolder, long onlineId) {
        return new UploadOperationTagging(
                sequenceFolder,
                onlineId,
                accessToken,
                api,
                updateEventBus,
                null)
                .getStream();
    }

    /**
     * @return {@code Completable} composed by:
     * <ul>
     * <li>
     * video/frame local data source search by ids based on the given param.
     * </li>
     * <li>
     * process above ids into a stream by using {@link Completable#mergeDelayError(Iterable)} with the iterable returned by {@link #setUploadVideoStreamCollection(List)} or
     * {@link #setUploadFrameStreamCollection(List)}.
     * </li>
     * </ul>
     */
    private Completable processSequenceUploadCompression(boolean isVideoCompression) {
        if (isVideoCompression) {
            return videoLocalDataSource
                    .getVideoIdsBySequenceId(sequenceId)
                    .doOnSuccess(videoIds -> Log.d(TAG,
                            String.format("processSequenceUploadCompression. Status: success. Count: %s. Message: Videos found for sequence.", videoIds.size())))
                    .flatMapCompletable(videoIds -> Completable
                            .mergeDelayError(setUploadVideoStreamCollection(videoIds), MERGE_DELAY_ERROR_NO_SERIAL)
                            .retryWhen(this::handleDefaultRetryFlowableWithTimer));
        }
        return frameLocalDataSource
                .getFrameIdsBySequenceId(sequenceId)
                .doOnSuccess(frameIds -> Log.d(TAG,
                        String.format("processSequenceUploadCompression. Status: success. Count: %s. Message: Frames found for sequence.", frameIds.size())))
                .flatMapCompletable(framesIds ->
                        Completable
                                .mergeDelayError(setUploadFrameStreamCollection(framesIds), MERGE_DELAY_ERROR_CONCURRENT_NO)
                                .retryWhen(this::handleDefaultRetryFlowableWithTimer));
    }

    /**
     * @return {@code Completable} representing {@link UploadOperationSequenceComplete#getStream()} method for the current sequence.
     */
    private Completable processSequenceUploadComplete() {
        return new UploadOperationSequenceComplete(
                accessToken,
                api,
                localSequence.getDetails().getOnlineId(),
                updateEventBus,
                sequenceCompleteSuccessAction(),
                null)
                .getStream();
    }

    /**
     * The video success action which will remove the sequence from persistence and device.
     * @return action which handles sequence complete operation success behaviour related to sequence.
     */
    private Action sequenceCompleteSuccessAction() {
        return () -> {
            if (localSequence.getLocalDetails().getDiskSize() != 0) {
                boolean updateDiskSize = sequenceLocalDataSource.updateDiskSize(sequenceId, 0);
                Log.d(TAG, String.format("sequenceCompleteSuccessAction. Status: %s. Sequence id: %s. New size: 0", updateDiskSize, localSequence.getID()));
                uploadUpdateProgressSequence.setCurrentUnit(uploadUpdateProgressSequence.getTotalUnit());
            }
            boolean deleteSequenceFromPersistence = sequenceLocalDataSource.deleteSequence(sequenceId);
            Log.d(TAG, String.format(
                    "sequenceCompleteSuccessAction. Status: %s. Sequence id: %s. Message: Attempting to remove sequence from persistence.",
                    deleteSequenceFromPersistence,
                    sequenceId));
            KVFile sequenceFolder = localSequence.getLocalDetails().getFolder();
            // only delete files after the database row is gone, so a failed delete can be retried
            if (deleteSequenceFromPersistence && sequenceFolder.exists()) {
                boolean folderDelete = sequenceFolder.delete();
                Log.w(TAG, String.format("sequenceCompleteSuccessAction. Status: %s. Attempting to remove the folder for the sequence local id: %s.",
                        folderDelete,
                        sequenceId));
            }
            uploadUpdateProgressSequence.complete();
        };
    }
}
|
def get_last_five(my_list):
    """Return the last five elements of ``my_list`` (fewer if it is shorter)."""
    start = max(0, len(my_list) - 5)
    return my_list[start:]
|
#!/usr/bin/env bash
# Provision a server for the profiles-rest-api Django project: clone the
# repo, build a virtualenv, run migrations, then wire up supervisor (uwsgi)
# and nginx. Must run as root; aborts on the first failing command (set -e).
set -e

# TODO: Set to URL of git repo.
PROJECT_GIT_URL='https://github.com/shahabbahrami-project/profiles-rest-api.git'
PROJECT_BASE_PATH='/usr/local/apps/profiles-rest-api'

echo "Installing dependencies..."
apt-get update
# NOTE(review): `python-pip` is the Python 2 package and `sqlite` may be named
# `sqlite3` on newer Ubuntu releases -- confirm against the target distro.
apt-get install -y python3-dev python3-venv sqlite python-pip supervisor nginx git

# Create project directory
mkdir -p "$PROJECT_BASE_PATH"
git clone "$PROJECT_GIT_URL" "$PROJECT_BASE_PATH"

# Create virtual environment
mkdir -p "$PROJECT_BASE_PATH/env"
python3 -m venv "$PROJECT_BASE_PATH/env"

# Install python packages
"$PROJECT_BASE_PATH/env/bin/pip" install -r "$PROJECT_BASE_PATH/requirements.txt"
"$PROJECT_BASE_PATH/env/bin/pip" install uwsgi==2.0.18

# Run migrations and collectstatic
cd "$PROJECT_BASE_PATH"
"$PROJECT_BASE_PATH/env/bin/python" manage.py migrate
"$PROJECT_BASE_PATH/env/bin/python" manage.py collectstatic --noinput

# Configure supervisor
cp "$PROJECT_BASE_PATH/deploy/supervisor_profiles_api.conf" /etc/supervisor/conf.d/profiles_api.conf
supervisorctl reread
supervisorctl update
supervisorctl restart profiles_api

# Configure nginx
cp "$PROJECT_BASE_PATH/deploy/nginx_profiles_api.conf" /etc/nginx/sites-available/profiles_api.conf
# -f: don't abort (set -e) when the default site was already removed on a rerun.
rm -f /etc/nginx/sites-enabled/default
# -sf: replace an existing symlink so reruns don't fail with "File exists".
ln -sf /etc/nginx/sites-available/profiles_api.conf /etc/nginx/sites-enabled/profiles_api.conf
systemctl restart nginx.service

echo "DONE! :)"
|
import json
import requests
import time
def parse_json_data(url):
    """Fetch and parse JSON from ``url``.

    Makes up to three attempts; on a request or decode failure it logs the
    error, sleeps 10 seconds between attempts, and finally returns ``None``
    if every attempt fails.
    """
    max_retries = 3
    for attempt in range(max_retries):
        try:
            response = requests.get(url)
            # Surface 4xx/5xx responses as exceptions so they hit the retry path.
            response.raise_for_status()
            return json.loads(response.text)
        except (requests.RequestException, json.JSONDecodeError) as error:
            print("Error occurred while fetching or parsing JSON data:", error)
            if attempt < max_retries - 1:
                print("Sleeping for some time before retrying...")
                time.sleep(10)
    print("Max retries reached. Unable to fetch and parse JSON data.")
    return None
|
<gh_stars>10-100
/**
* @module
*/
/* eslint-disable require-await */
import * as plugin from "../plugin/vimeo.js";
import { matchPattern } from "../tools/matchpattern.js";
/**
 * Extracts the information needed to play a video on Kodi. Only embedded
 * video URLs are handled, because for URLs pointing directly at videos the
 * <em>opengraph</em> scraper extracts the embedded video URL from the
 * <code>og:video:secure_url</code> metadata.
 *
 * @param {URL} url The URL of an embedded Vimeo video.
 * @returns {Promise<string>} A promise resolving to the <em>file</em> link.
 */
const action = async function ({ pathname, searchParams }) {
    // pathname is "/video/<id>"; slice(7) drops the "/video/" prefix.
    // The optional "h" query parameter is the unlisted-video hash.
    return plugin.generateUrl(pathname.slice(7),
        searchParams.get("h") ?? undefined);
};
export const extract = matchPattern(action, "*://player.vimeo.com/video/*");
|
import { Encrypter, Decrypter, HasherCompare, Hasher } from '@/data/protocols/cryptography'
export const mockHasher = (): Hasher => {
  // Test double: always resolves to the same canned hash, ignoring the input.
  class StubHasher implements Hasher {
    async hash (value: string): Promise<string> {
      return 'hashed_password'
    }
  }
  return new StubHasher()
}
export const mockHasherCompare = (): HasherCompare => {
class HasherCompareStub implements HasherCompare {
async compare (value: string, hash: string): Promise<boolean> {
return Promise.resolve(true)
}
}
return new HasherCompareStub()
}
export const mockEncrypter = (): Encrypter => {
  // Test double: always resolves to the same canned token.
  class StubEncrypter implements Encrypter {
    async encrypt (value: string): Promise<string> {
      return 'encrypt_token'
    }
  }
  return new StubEncrypter()
}
export const mockDecrypter = (): Decrypter => {
  // Test double: always resolves to the same canned plaintext.
  // NOTE(review): 'dencrypt_token' looks like a typo for 'decrypt_token', but
  // existing specs may assert this exact string, so it is preserved verbatim.
  class StubDecrypter implements Decrypter {
    async decrypt (token: string): Promise<string> {
      return 'dencrypt_token'
    }
  }
  return new StubDecrypter()
}
|
/*\
module-type: relinkfilteroperator
Given a title as an operand, returns all non-shadow tiddlers that have any
sort of updatable reference to it.
`relink:backreferences[]`
`relink:references[]`
Returns all tiddlers that reference `fromTiddler` somewhere inside them.
Input is ignored. Maybe it shouldn't do this.
\*/
var LinkedList = $tw.utils.LinkedList;
if (!LinkedList) {
	// The real linked list is unavailable (older TiddlyWiki); substitute a
	// minimal array-backed stand-in exposing the same pushTop/toArray API.
	LinkedList = function() {
		this.array = [];
	};
	LinkedList.prototype.pushTop = function(titles) {
		$tw.utils.pushTop(this.array, titles);
	};
	LinkedList.prototype.toArray = function() {
		return this.array;
	};
}
exports.backreferences = function(source,operator,options) {
var results = new LinkedList();
source(function(tiddler,title) {
results.pushTop(Object.keys(options.wiki.getTiddlerRelinkBackreferences(title,options)));
});
return results.toArray();
};
exports.references = function(source,operator,options) {
var results = new LinkedList();
source(function(tiddler,title) {
var refs = options.wiki.getTiddlerRelinkReferences(title,options);
if (refs) {
results.pushTop(Object.keys(refs));
}
});
return results.toArray();
};
|
import type { IActionHttp, IActorHttpOutput } from '@comunica/bus-http';
import { ActorHttp } from '@comunica/bus-http';
import { KeysHttpProxy } from '@comunica/context-entries';
import type { IActorArgs, IActorTest, Mediator } from '@comunica/core';
import type { IMediatorTypeTime } from '@comunica/mediatortype-time';
import type { IProxyHandler } from './IProxyHandler';
/**
 * A comunica Proxy Http Actor.
 *
 * Routes every HTTP request through the {@link IProxyHandler} found in the
 * action context under {@link KeysHttpProxy.httpProxyHandler}, delegating the
 * (rewritten) request to the wrapped HTTP mediator.
 */
export class ActorHttpProxy extends ActorHttp {
  public readonly mediatorHttp: Mediator<ActorHttp, IActionHttp, IActorTest, IActorHttpOutput>;

  public constructor(args: IActorHttpProxyArgs) {
    super(args);
  }

  public async test(action: IActionHttp): Promise<IMediatorTypeTime> {
    if (!action.context) {
      throw new Error(`Actor ${this.name} could not find a context.`);
    }
    const proxyHandler: IProxyHandler = action.context.get(KeysHttpProxy.httpProxyHandler);
    if (!proxyHandler) {
      throw new Error(`Actor ${this.name} could not find a proxy handler in the context.`);
    }
    if (!await proxyHandler.getProxy(action)) {
      throw new Error(`Actor ${this.name} could not determine a proxy for the given request.`);
    }
    return { time: Number.POSITIVE_INFINITY };
  }

  public async run(action: IActionHttp): Promise<IActorHttpOutput> {
    const requestedUrl = typeof action.input === 'string' ? action.input : action.input.url;
    if (!action.context) {
      throw new Error('Illegal state: missing context');
    }
    const proxyHandler: IProxyHandler = action.context.get(KeysHttpProxy.httpProxyHandler);
    // Guard against a missing handler (mirrors the check in test()) instead of
    // crashing with an unhelpful TypeError on `proxyHandler.getProxy`.
    if (!proxyHandler) {
      throw new Error(`Actor ${this.name} could not find a proxy handler in the context.`);
    }
    // Send a request for the modified request
    const output = await this.mediatorHttp.mediate({
      ...await proxyHandler.getProxy(action),
      context: action.context.delete(KeysHttpProxy.httpProxyHandler),
    });
    // Modify the response URL
    // use defineProperty to allow modification of unmodifiable objects
    Object.defineProperty(output, 'url', {
      configurable: true,
      enumerable: true,
      get: () => output.headers.get('x-final-url') ?? requestedUrl,
    });
    return output;
  }
}
export interface IActorHttpProxyArgs extends IActorArgs<IActionHttp, IActorTest, IActorHttpOutput> {
  /** The HTTP mediator used to perform the actual (proxied) request. */
  mediatorHttp: Mediator<ActorHttp, IActionHttp, IActorTest, IActorHttpOutput>;
}
/**
 * @deprecated Import this constant from @comunica/context-entries.
 */
export const KEY_CONTEXT_HTTPPROXYHANDLER = KeysHttpProxy.httpProxyHandler;
|
#!/bin/bash
# maintainer: zhaoyafei (https://github.com/walkoncross, zhaoyafei0210@gmail.com)
# Trains a ResNet-20 on CIFAR-10 with a large-margin (cosface) loss and a
# cosine learning-rate schedule; checkpoints go to a directory named after
# the hyper-parameter values below.

# Large-margin loss hyper-parameters (forwarded to the matching --loss-* flags).
s=16          # logit scale (--loss-scale)
m=0.5         # margin (--loss-m)
n=0           # --loss-n term
b=0           # --loss-b term
lambda=0      # --loss-lambda weighting
gpu=0         # GPU id(s) (--gpu-ids)
epoch=200     # training epochs (--num-epochs)
loss_type=cosface
python train_large_margin_zyf.py --cifar-dir ./data \
    --net resnet20_cifar10_nofc \
    --gpu-ids ${gpu} \
    --lr-scheduler cosine \
    --lr 0.1 \
    --num-epochs ${epoch} \
    --mom 0.9 \
    --wd 0.0001 \
    --batch-size 256 \
    --data-workers 4 \
    --test-bs 200 \
    --test-dw 4 \
    --loss-type ${loss_type} \
    --loss-scale ${s} \
    --loss-m ${m} \
    --loss-n ${n} \
    --loss-b ${b} \
    --loss-lambda ${lambda} \
    --model-prefix res20-cifar \
    --save-dir checkpoints-res20-cifar-coslr-${epoch}ep-${loss_type}-s${s}-m${m}-n${n}-b${b}-l${lambda}
# Alternative/optional flags kept for reference:
# --save-dir checkpoints-res20-cifar-coslr-200ep-${loss_type}-s32-coslr-200-m0.35-test
# --no-progress-bar \
# --resume \
# --resume-checkpoints checkpoints/ckpt.t7 \
|
import React from 'react';
/** Props accepted by the QR address display component. */
interface Props {
  /** The account address rendered as a QR code. */
  address: string;
  /** The genesis hash of the chain the address belongs to. */
  genesisHash: string;
  className?: string;
  /** Rendered size of the QR code. */
  size?: string | number;
  style?: React.CSSProperties;
}
/** Renders the given address as a QR code; may return null (per its declared type). */
declare function DisplayAddress({ address, className, genesisHash, size, style }: Props): React.ReactElement<Props> | null;
/** Memoized export of {@link DisplayAddress}. */
export declare const QrDisplayAddress: React.MemoExoticComponent<typeof DisplayAddress>;
export {};
|
import matplotlib.pyplot as plt
# Function to create and display the plot based on user input
def create_custom_plot():
    """Interactively build and show a matplotlib figure.

    Prompts for a plot type (line/scatter/bar), comma-separated numeric data
    points, and basic styling (title, axis labels, color), then renders the
    plot with ``plt.show()``. Prints a message and returns early on invalid
    plot types or non-numeric data instead of showing an empty figure or
    crashing with a traceback.
    """
    plot_type = input("Enter the type of plot (line/scatter/bar): ").lower().strip()
    if plot_type not in ('line', 'scatter', 'bar'):
        # Previously an unknown type silently produced an empty figure.
        print("Unsupported plot type: %r. Choose line, scatter or bar." % plot_type)
        return
    # Prompt user for data points
    raw_points = input("Enter the data points (comma-separated values): ").split(',')
    try:
        data_points = [float(point) for point in raw_points]
    except ValueError:
        print("Data points must be numeric, comma-separated values.")
        return
    # Customize plot appearance
    title = input("Enter the plot title: ")
    x_label = input("Enter the label for the x-axis: ")
    y_label = input("Enter the label for the y-axis: ")
    color = input("Enter the color for the plot: ")
    # Generate the plot based on user input
    plt.figure()
    if plot_type == 'line':
        plt.plot(data_points, color=color)
    elif plot_type == 'scatter':
        plt.scatter(range(len(data_points)), data_points, color=color)
    else:  # 'bar' -- guaranteed by the validation above
        plt.bar(range(len(data_points)), data_points, color=color)
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.show()
# Call the function to create and display the custom plot
create_custom_plot()
|
#!/bin/bash
# Copyright 2016 - 2018 Crunchy Data Solutions, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Tear down all Kubernetes resources and host directories created by the
# backrest example. Relies on helpers (echo_info, dir_check_rm) from common.sh.
source ${CCPROOT}/examples/common.sh

echo_info "Cleaning up.."

# Remove the backrest service and pod.
${CCP_CLI?} delete --namespace=${CCP_NAMESPACE?} service backrest
${CCP_CLI?} delete --namespace=${CCP_NAMESPACE?} pod backrest
# Remove the pgBackRest config map and the data/backup volume claims.
${CCP_CLI?} delete --namespace=${CCP_NAMESPACE?} configmap br-pgconf
${CCP_CLI?} delete --namespace=${CCP_NAMESPACE?} pvc br-pgdata br-backups
# Without a storage class the PVs were created manually, so delete them too.
if [ -z "$CCP_STORAGE_CLASS" ]; then
${CCP_CLI?} delete --namespace=${CCP_NAMESPACE?} pv br-pgdata br-backups
fi

# Wait for the pod to terminate, then remove the example's host directories.
$CCPROOT/examples/waitforterm.sh backrest ${CCP_CLI?}
dir_check_rm "backrest"
dir_check_rm "backrest-backups"
dir_check_rm "backrest-spool"
|
require_relative 'sql'
module Sequent
module Migrations
##
# The executor is the implementation of the 3-phase deploy in Sequent.
# is responsible for executing the `Planner::Plan`.
#
class Executor
  include Sql

  # Phase 1 (online): for each table to be replayed, run its SQL file with
  # %SUFFIX% expanded to _<version> (creating the versioned table), then point
  # the record class at the versioned table so the replay writes into it.
  def execute_online(plan)
    plan.replay_tables.each do |migration|
      table = migration.record_class
      sql_file = "#{Sequent.configuration.migration_sql_files_directory}/#{table.table_name}.sql"
      statements = sql_file_to_statements(sql_file) { |raw_sql| raw_sql.gsub('%SUFFIX%', "_#{migration.version}") }
      statements.each(&method(:exec_sql))
      # From here on the record class reads/writes the versioned table.
      table.table_name = "#{table.table_name}_#{migration.version}"
      table.reset_column_information
    end
  end

  # Phase 3 (offline): swap the replayed tables into place -- rename the live
  # table aside (suffixed with the current version) and rename the new
  # versioned table to the live name -- then apply any alter-table SQL files.
  def execute_offline(plan, current_version)
    plan.replay_tables.each do |migration|
      table = migration.record_class
      # table.table_name currently carries the _<version> suffix; strip it to
      # recover the live table name.
      current_table_name = table.table_name.gsub("_#{migration.version}", "")
      # 2 Rename old table
      exec_sql("ALTER TABLE IF EXISTS #{current_table_name} RENAME TO #{current_table_name}_#{current_version}")
      # 3 Rename new table
      exec_sql("ALTER TABLE #{table.table_name} RENAME TO #{current_table_name}")
      # Use new table from now on
      table.table_name = current_table_name
      table.reset_column_information
    end
    plan.alter_tables.each do |migration|
      table = migration.record_class
      sql_file = "#{Sequent.configuration.migration_sql_files_directory}/#{table.table_name}_#{migration.version}.sql"
      statements = sql_file_to_statements(sql_file)
      statements.each(&method(:exec_sql))
    end
  end

  # Point the record classes back at the unversioned (live) tables.
  def reset_table_names(plan)
    plan.replay_tables.each do |migration|
      table = migration.record_class
      table.table_name = table.table_name.gsub("_#{migration.version}", "")
      table.reset_column_information
    end
  end

  # Point the record classes at the versioned tables created in phase 1,
  # failing fast when a versioned table is missing (i.e. migrate_online was
  # never run).
  def set_table_names_to_new_version(plan)
    plan.replay_tables.each do |migration|
      table = migration.record_class
      unless table.table_name.end_with?("_#{migration.version}")
        table.table_name = "#{table.table_name}_#{migration.version}"
        table.reset_column_information
        fail MigrationError.new("Table #{table.table_name} does not exist. Did you run ViewSchema.migrate_online first?") unless table.table_exists?
      end
    end
  end
end
end
end
end
|
/**
 * Returns whether {@code num} reads the same forwards and backwards in
 * decimal notation.
 *
 * @param num the number to check.
 * @return {@code true} if {@code num} is a non-negative decimal palindrome.
 */
public boolean isPalindrome(int num) {
    // Negative numbers are not palindromes: the leading '-' has no mirrored
    // counterpart (e.g. -121 reversed reads "121-"). The original sign-carrying
    // reversal wrongly reported -121 as a palindrome.
    if (num < 0) {
        return false;
    }
    // Accumulate the reversal in a long so e.g. 1999999999 cannot overflow int.
    long rev = 0;
    int temp = num;
    while (temp != 0) {
        rev = rev * 10 + temp % 10;
        temp /= 10;
    }
    return (num == rev);
}
|
# Fetch hourly WRF output files from the OpenStack Swift container
# "wrf_long_run" into data_in/, preserving the original object names.
# Replaces 336 copy-pasted download lines with equivalent loops that emit
# the exact same commands in the exact same order:
#   d01, d02: 2017-12-09 00:00 through 2017-12-14 00:00, hourly
#   d03:      2017-12-09 00:00 through 2017-12-12 21:00, hourly
# (the remaining d03 hours are handled below, unchanged).

# download FILE — fetch a single Swift object to data_in/FILE.
download() {
    local f=$1
    swift download -o "data_in/${f}" wrf_long_run "${f}"
}

# Full five-day hourly range plus the closing 2017-12-14 00:00 snapshot.
for domain in d01 d02; do
    for day in 2017-12-09 2017-12-10 2017-12-11 2017-12-12 2017-12-13; do
        for hour in $(seq -w 0 23); do          # seq -w pads to 00..23
            download "wrfout_${domain}_${day}_${hour}:00:00"
        done
    done
    download "wrfout_${domain}_2017-12-14_00:00:00"
done

# Domain d03: complete days 12-09 through 12-11, then 12-12 up to 21:00.
for day in 2017-12-09 2017-12-10 2017-12-11; do
    for hour in $(seq -w 0 23); do
        download "wrfout_d03_${day}_${hour}:00:00"
    done
done
for hour in $(seq -w 0 21); do
    download "wrfout_d03_2017-12-12_${hour}:00:00"
done
swift download -o data_in/wrfout_d03_2017-12-12_22:00:00 wrf_long_run wrfout_d03_2017-12-12_22:00:00
swift download -o data_in/wrfout_d03_2017-12-12_23:00:00 wrf_long_run wrfout_d03_2017-12-12_23:00:00
swift download -o data_in/wrfout_d03_2017-12-13_00:00:00 wrf_long_run wrfout_d03_2017-12-13_00:00:00
swift download -o data_in/wrfout_d03_2017-12-13_01:00:00 wrf_long_run wrfout_d03_2017-12-13_01:00:00
swift download -o data_in/wrfout_d03_2017-12-13_02:00:00 wrf_long_run wrfout_d03_2017-12-13_02:00:00
swift download -o data_in/wrfout_d03_2017-12-13_03:00:00 wrf_long_run wrfout_d03_2017-12-13_03:00:00
swift download -o data_in/wrfout_d03_2017-12-13_04:00:00 wrf_long_run wrfout_d03_2017-12-13_04:00:00
swift download -o data_in/wrfout_d03_2017-12-13_05:00:00 wrf_long_run wrfout_d03_2017-12-13_05:00:00
swift download -o data_in/wrfout_d03_2017-12-13_06:00:00 wrf_long_run wrfout_d03_2017-12-13_06:00:00
swift download -o data_in/wrfout_d03_2017-12-13_07:00:00 wrf_long_run wrfout_d03_2017-12-13_07:00:00
swift download -o data_in/wrfout_d03_2017-12-13_08:00:00 wrf_long_run wrfout_d03_2017-12-13_08:00:00
swift download -o data_in/wrfout_d03_2017-12-13_09:00:00 wrf_long_run wrfout_d03_2017-12-13_09:00:00
swift download -o data_in/wrfout_d03_2017-12-13_10:00:00 wrf_long_run wrfout_d03_2017-12-13_10:00:00
swift download -o data_in/wrfout_d03_2017-12-13_11:00:00 wrf_long_run wrfout_d03_2017-12-13_11:00:00
swift download -o data_in/wrfout_d03_2017-12-13_12:00:00 wrf_long_run wrfout_d03_2017-12-13_12:00:00
swift download -o data_in/wrfout_d03_2017-12-13_13:00:00 wrf_long_run wrfout_d03_2017-12-13_13:00:00
swift download -o data_in/wrfout_d03_2017-12-13_14:00:00 wrf_long_run wrfout_d03_2017-12-13_14:00:00
swift download -o data_in/wrfout_d03_2017-12-13_15:00:00 wrf_long_run wrfout_d03_2017-12-13_15:00:00
swift download -o data_in/wrfout_d03_2017-12-13_16:00:00 wrf_long_run wrfout_d03_2017-12-13_16:00:00
swift download -o data_in/wrfout_d03_2017-12-13_17:00:00 wrf_long_run wrfout_d03_2017-12-13_17:00:00
swift download -o data_in/wrfout_d03_2017-12-13_18:00:00 wrf_long_run wrfout_d03_2017-12-13_18:00:00
swift download -o data_in/wrfout_d03_2017-12-13_19:00:00 wrf_long_run wrfout_d03_2017-12-13_19:00:00
swift download -o data_in/wrfout_d03_2017-12-13_20:00:00 wrf_long_run wrfout_d03_2017-12-13_20:00:00
swift download -o data_in/wrfout_d03_2017-12-13_21:00:00 wrf_long_run wrfout_d03_2017-12-13_21:00:00
swift download -o data_in/wrfout_d03_2017-12-13_22:00:00 wrf_long_run wrfout_d03_2017-12-13_22:00:00
swift download -o data_in/wrfout_d03_2017-12-13_23:00:00 wrf_long_run wrfout_d03_2017-12-13_23:00:00
swift download -o data_in/wrfout_d03_2017-12-14_00:00:00 wrf_long_run wrfout_d03_2017-12-14_00:00:00
swift download -o data_in/wrfout_d04_2017-12-09_00:00:00 wrf_long_run wrfout_d04_2017-12-09_00:00:00
swift download -o data_in/wrfout_d04_2017-12-09_01:00:00 wrf_long_run wrfout_d04_2017-12-09_01:00:00
swift download -o data_in/wrfout_d04_2017-12-09_02:00:00 wrf_long_run wrfout_d04_2017-12-09_02:00:00
swift download -o data_in/wrfout_d04_2017-12-09_03:00:00 wrf_long_run wrfout_d04_2017-12-09_03:00:00
swift download -o data_in/wrfout_d04_2017-12-09_04:00:00 wrf_long_run wrfout_d04_2017-12-09_04:00:00
swift download -o data_in/wrfout_d04_2017-12-09_05:00:00 wrf_long_run wrfout_d04_2017-12-09_05:00:00
swift download -o data_in/wrfout_d04_2017-12-09_06:00:00 wrf_long_run wrfout_d04_2017-12-09_06:00:00
swift download -o data_in/wrfout_d04_2017-12-09_07:00:00 wrf_long_run wrfout_d04_2017-12-09_07:00:00
swift download -o data_in/wrfout_d04_2017-12-09_08:00:00 wrf_long_run wrfout_d04_2017-12-09_08:00:00
swift download -o data_in/wrfout_d04_2017-12-09_09:00:00 wrf_long_run wrfout_d04_2017-12-09_09:00:00
swift download -o data_in/wrfout_d04_2017-12-09_10:00:00 wrf_long_run wrfout_d04_2017-12-09_10:00:00
swift download -o data_in/wrfout_d04_2017-12-09_11:00:00 wrf_long_run wrfout_d04_2017-12-09_11:00:00
swift download -o data_in/wrfout_d04_2017-12-09_12:00:00 wrf_long_run wrfout_d04_2017-12-09_12:00:00
swift download -o data_in/wrfout_d04_2017-12-09_13:00:00 wrf_long_run wrfout_d04_2017-12-09_13:00:00
swift download -o data_in/wrfout_d04_2017-12-09_14:00:00 wrf_long_run wrfout_d04_2017-12-09_14:00:00
swift download -o data_in/wrfout_d04_2017-12-09_15:00:00 wrf_long_run wrfout_d04_2017-12-09_15:00:00
swift download -o data_in/wrfout_d04_2017-12-09_16:00:00 wrf_long_run wrfout_d04_2017-12-09_16:00:00
swift download -o data_in/wrfout_d04_2017-12-09_17:00:00 wrf_long_run wrfout_d04_2017-12-09_17:00:00
swift download -o data_in/wrfout_d04_2017-12-09_18:00:00 wrf_long_run wrfout_d04_2017-12-09_18:00:00
swift download -o data_in/wrfout_d04_2017-12-09_19:00:00 wrf_long_run wrfout_d04_2017-12-09_19:00:00
swift download -o data_in/wrfout_d04_2017-12-09_20:00:00 wrf_long_run wrfout_d04_2017-12-09_20:00:00
swift download -o data_in/wrfout_d04_2017-12-09_21:00:00 wrf_long_run wrfout_d04_2017-12-09_21:00:00
swift download -o data_in/wrfout_d04_2017-12-09_22:00:00 wrf_long_run wrfout_d04_2017-12-09_22:00:00
swift download -o data_in/wrfout_d04_2017-12-09_23:00:00 wrf_long_run wrfout_d04_2017-12-09_23:00:00
swift download -o data_in/wrfout_d04_2017-12-10_00:00:00 wrf_long_run wrfout_d04_2017-12-10_00:00:00
swift download -o data_in/wrfout_d04_2017-12-10_01:00:00 wrf_long_run wrfout_d04_2017-12-10_01:00:00
swift download -o data_in/wrfout_d04_2017-12-10_02:00:00 wrf_long_run wrfout_d04_2017-12-10_02:00:00
swift download -o data_in/wrfout_d04_2017-12-10_03:00:00 wrf_long_run wrfout_d04_2017-12-10_03:00:00
swift download -o data_in/wrfout_d04_2017-12-10_04:00:00 wrf_long_run wrfout_d04_2017-12-10_04:00:00
swift download -o data_in/wrfout_d04_2017-12-10_05:00:00 wrf_long_run wrfout_d04_2017-12-10_05:00:00
swift download -o data_in/wrfout_d04_2017-12-10_06:00:00 wrf_long_run wrfout_d04_2017-12-10_06:00:00
swift download -o data_in/wrfout_d04_2017-12-10_07:00:00 wrf_long_run wrfout_d04_2017-12-10_07:00:00
swift download -o data_in/wrfout_d04_2017-12-10_08:00:00 wrf_long_run wrfout_d04_2017-12-10_08:00:00
swift download -o data_in/wrfout_d04_2017-12-10_09:00:00 wrf_long_run wrfout_d04_2017-12-10_09:00:00
swift download -o data_in/wrfout_d04_2017-12-10_10:00:00 wrf_long_run wrfout_d04_2017-12-10_10:00:00
swift download -o data_in/wrfout_d04_2017-12-10_11:00:00 wrf_long_run wrfout_d04_2017-12-10_11:00:00
swift download -o data_in/wrfout_d04_2017-12-10_12:00:00 wrf_long_run wrfout_d04_2017-12-10_12:00:00
swift download -o data_in/wrfout_d04_2017-12-10_13:00:00 wrf_long_run wrfout_d04_2017-12-10_13:00:00
swift download -o data_in/wrfout_d04_2017-12-10_14:00:00 wrf_long_run wrfout_d04_2017-12-10_14:00:00
swift download -o data_in/wrfout_d04_2017-12-10_15:00:00 wrf_long_run wrfout_d04_2017-12-10_15:00:00
swift download -o data_in/wrfout_d04_2017-12-10_16:00:00 wrf_long_run wrfout_d04_2017-12-10_16:00:00
swift download -o data_in/wrfout_d04_2017-12-10_17:00:00 wrf_long_run wrfout_d04_2017-12-10_17:00:00
swift download -o data_in/wrfout_d04_2017-12-10_18:00:00 wrf_long_run wrfout_d04_2017-12-10_18:00:00
swift download -o data_in/wrfout_d04_2017-12-10_19:00:00 wrf_long_run wrfout_d04_2017-12-10_19:00:00
swift download -o data_in/wrfout_d04_2017-12-10_20:00:00 wrf_long_run wrfout_d04_2017-12-10_20:00:00
swift download -o data_in/wrfout_d04_2017-12-10_21:00:00 wrf_long_run wrfout_d04_2017-12-10_21:00:00
swift download -o data_in/wrfout_d04_2017-12-10_22:00:00 wrf_long_run wrfout_d04_2017-12-10_22:00:00
swift download -o data_in/wrfout_d04_2017-12-10_23:00:00 wrf_long_run wrfout_d04_2017-12-10_23:00:00
swift download -o data_in/wrfout_d04_2017-12-11_00:00:00 wrf_long_run wrfout_d04_2017-12-11_00:00:00
swift download -o data_in/wrfout_d04_2017-12-11_01:00:00 wrf_long_run wrfout_d04_2017-12-11_01:00:00
swift download -o data_in/wrfout_d04_2017-12-11_02:00:00 wrf_long_run wrfout_d04_2017-12-11_02:00:00
swift download -o data_in/wrfout_d04_2017-12-11_03:00:00 wrf_long_run wrfout_d04_2017-12-11_03:00:00
swift download -o data_in/wrfout_d04_2017-12-11_04:00:00 wrf_long_run wrfout_d04_2017-12-11_04:00:00
swift download -o data_in/wrfout_d04_2017-12-11_05:00:00 wrf_long_run wrfout_d04_2017-12-11_05:00:00
swift download -o data_in/wrfout_d04_2017-12-11_06:00:00 wrf_long_run wrfout_d04_2017-12-11_06:00:00
swift download -o data_in/wrfout_d04_2017-12-11_07:00:00 wrf_long_run wrfout_d04_2017-12-11_07:00:00
swift download -o data_in/wrfout_d04_2017-12-11_08:00:00 wrf_long_run wrfout_d04_2017-12-11_08:00:00
swift download -o data_in/wrfout_d04_2017-12-11_09:00:00 wrf_long_run wrfout_d04_2017-12-11_09:00:00
swift download -o data_in/wrfout_d04_2017-12-11_10:00:00 wrf_long_run wrfout_d04_2017-12-11_10:00:00
swift download -o data_in/wrfout_d04_2017-12-11_11:00:00 wrf_long_run wrfout_d04_2017-12-11_11:00:00
swift download -o data_in/wrfout_d04_2017-12-11_12:00:00 wrf_long_run wrfout_d04_2017-12-11_12:00:00
swift download -o data_in/wrfout_d04_2017-12-11_13:00:00 wrf_long_run wrfout_d04_2017-12-11_13:00:00
swift download -o data_in/wrfout_d04_2017-12-11_14:00:00 wrf_long_run wrfout_d04_2017-12-11_14:00:00
swift download -o data_in/wrfout_d04_2017-12-11_15:00:00 wrf_long_run wrfout_d04_2017-12-11_15:00:00
swift download -o data_in/wrfout_d04_2017-12-11_16:00:00 wrf_long_run wrfout_d04_2017-12-11_16:00:00
swift download -o data_in/wrfout_d04_2017-12-11_17:00:00 wrf_long_run wrfout_d04_2017-12-11_17:00:00
swift download -o data_in/wrfout_d04_2017-12-11_18:00:00 wrf_long_run wrfout_d04_2017-12-11_18:00:00
swift download -o data_in/wrfout_d04_2017-12-11_19:00:00 wrf_long_run wrfout_d04_2017-12-11_19:00:00
swift download -o data_in/wrfout_d04_2017-12-11_20:00:00 wrf_long_run wrfout_d04_2017-12-11_20:00:00
swift download -o data_in/wrfout_d04_2017-12-11_21:00:00 wrf_long_run wrfout_d04_2017-12-11_21:00:00
swift download -o data_in/wrfout_d04_2017-12-11_22:00:00 wrf_long_run wrfout_d04_2017-12-11_22:00:00
swift download -o data_in/wrfout_d04_2017-12-11_23:00:00 wrf_long_run wrfout_d04_2017-12-11_23:00:00
swift download -o data_in/wrfout_d04_2017-12-12_00:00:00 wrf_long_run wrfout_d04_2017-12-12_00:00:00
swift download -o data_in/wrfout_d04_2017-12-12_01:00:00 wrf_long_run wrfout_d04_2017-12-12_01:00:00
swift download -o data_in/wrfout_d04_2017-12-12_02:00:00 wrf_long_run wrfout_d04_2017-12-12_02:00:00
swift download -o data_in/wrfout_d04_2017-12-12_03:00:00 wrf_long_run wrfout_d04_2017-12-12_03:00:00
swift download -o data_in/wrfout_d04_2017-12-12_04:00:00 wrf_long_run wrfout_d04_2017-12-12_04:00:00
swift download -o data_in/wrfout_d04_2017-12-12_05:00:00 wrf_long_run wrfout_d04_2017-12-12_05:00:00
swift download -o data_in/wrfout_d04_2017-12-12_06:00:00 wrf_long_run wrfout_d04_2017-12-12_06:00:00
swift download -o data_in/wrfout_d04_2017-12-12_07:00:00 wrf_long_run wrfout_d04_2017-12-12_07:00:00
swift download -o data_in/wrfout_d04_2017-12-12_08:00:00 wrf_long_run wrfout_d04_2017-12-12_08:00:00
swift download -o data_in/wrfout_d04_2017-12-12_09:00:00 wrf_long_run wrfout_d04_2017-12-12_09:00:00
swift download -o data_in/wrfout_d04_2017-12-12_10:00:00 wrf_long_run wrfout_d04_2017-12-12_10:00:00
swift download -o data_in/wrfout_d04_2017-12-12_11:00:00 wrf_long_run wrfout_d04_2017-12-12_11:00:00
swift download -o data_in/wrfout_d04_2017-12-12_12:00:00 wrf_long_run wrfout_d04_2017-12-12_12:00:00
swift download -o data_in/wrfout_d04_2017-12-12_13:00:00 wrf_long_run wrfout_d04_2017-12-12_13:00:00
swift download -o data_in/wrfout_d04_2017-12-12_14:00:00 wrf_long_run wrfout_d04_2017-12-12_14:00:00
swift download -o data_in/wrfout_d04_2017-12-12_15:00:00 wrf_long_run wrfout_d04_2017-12-12_15:00:00
swift download -o data_in/wrfout_d04_2017-12-12_16:00:00 wrf_long_run wrfout_d04_2017-12-12_16:00:00
swift download -o data_in/wrfout_d04_2017-12-12_17:00:00 wrf_long_run wrfout_d04_2017-12-12_17:00:00
swift download -o data_in/wrfout_d04_2017-12-12_18:00:00 wrf_long_run wrfout_d04_2017-12-12_18:00:00
swift download -o data_in/wrfout_d04_2017-12-12_19:00:00 wrf_long_run wrfout_d04_2017-12-12_19:00:00
swift download -o data_in/wrfout_d04_2017-12-12_20:00:00 wrf_long_run wrfout_d04_2017-12-12_20:00:00
swift download -o data_in/wrfout_d04_2017-12-12_21:00:00 wrf_long_run wrfout_d04_2017-12-12_21:00:00
swift download -o data_in/wrfout_d04_2017-12-12_22:00:00 wrf_long_run wrfout_d04_2017-12-12_22:00:00
swift download -o data_in/wrfout_d04_2017-12-12_23:00:00 wrf_long_run wrfout_d04_2017-12-12_23:00:00
swift download -o data_in/wrfout_d04_2017-12-13_00:00:00 wrf_long_run wrfout_d04_2017-12-13_00:00:00
swift download -o data_in/wrfout_d04_2017-12-13_01:00:00 wrf_long_run wrfout_d04_2017-12-13_01:00:00
swift download -o data_in/wrfout_d04_2017-12-13_02:00:00 wrf_long_run wrfout_d04_2017-12-13_02:00:00
swift download -o data_in/wrfout_d04_2017-12-13_03:00:00 wrf_long_run wrfout_d04_2017-12-13_03:00:00
swift download -o data_in/wrfout_d04_2017-12-13_04:00:00 wrf_long_run wrfout_d04_2017-12-13_04:00:00
swift download -o data_in/wrfout_d04_2017-12-13_05:00:00 wrf_long_run wrfout_d04_2017-12-13_05:00:00
swift download -o data_in/wrfout_d04_2017-12-13_06:00:00 wrf_long_run wrfout_d04_2017-12-13_06:00:00
swift download -o data_in/wrfout_d04_2017-12-13_07:00:00 wrf_long_run wrfout_d04_2017-12-13_07:00:00
swift download -o data_in/wrfout_d04_2017-12-13_08:00:00 wrf_long_run wrfout_d04_2017-12-13_08:00:00
swift download -o data_in/wrfout_d04_2017-12-13_09:00:00 wrf_long_run wrfout_d04_2017-12-13_09:00:00
swift download -o data_in/wrfout_d04_2017-12-13_10:00:00 wrf_long_run wrfout_d04_2017-12-13_10:00:00
swift download -o data_in/wrfout_d04_2017-12-13_11:00:00 wrf_long_run wrfout_d04_2017-12-13_11:00:00
swift download -o data_in/wrfout_d04_2017-12-13_12:00:00 wrf_long_run wrfout_d04_2017-12-13_12:00:00
swift download -o data_in/wrfout_d04_2017-12-13_13:00:00 wrf_long_run wrfout_d04_2017-12-13_13:00:00
swift download -o data_in/wrfout_d04_2017-12-13_14:00:00 wrf_long_run wrfout_d04_2017-12-13_14:00:00
swift download -o data_in/wrfout_d04_2017-12-13_15:00:00 wrf_long_run wrfout_d04_2017-12-13_15:00:00
swift download -o data_in/wrfout_d04_2017-12-13_16:00:00 wrf_long_run wrfout_d04_2017-12-13_16:00:00
swift download -o data_in/wrfout_d04_2017-12-13_17:00:00 wrf_long_run wrfout_d04_2017-12-13_17:00:00
swift download -o data_in/wrfout_d04_2017-12-13_18:00:00 wrf_long_run wrfout_d04_2017-12-13_18:00:00
swift download -o data_in/wrfout_d04_2017-12-13_19:00:00 wrf_long_run wrfout_d04_2017-12-13_19:00:00
swift download -o data_in/wrfout_d04_2017-12-13_20:00:00 wrf_long_run wrfout_d04_2017-12-13_20:00:00
swift download -o data_in/wrfout_d04_2017-12-13_21:00:00 wrf_long_run wrfout_d04_2017-12-13_21:00:00
swift download -o data_in/wrfout_d04_2017-12-13_22:00:00 wrf_long_run wrfout_d04_2017-12-13_22:00:00
swift download -o data_in/wrfout_d04_2017-12-13_23:00:00 wrf_long_run wrfout_d04_2017-12-13_23:00:00
swift download -o data_in/wrfout_d04_2017-12-14_00:00:00 wrf_long_run wrfout_d04_2017-12-14_00:00:00
|
#!/bin/sh
# Package run P02 outputs: compress the gridded xyz files, bundle the
# per-year reported station CSVs into zips, then release the work lock.
DIR=P02
name=p02
PREFIX=/local/Shared

# Compress every gridded xyz file in place (subshell keeps the cwd unchanged;
# && ensures gzip never runs in the wrong directory if cd fails).
(cd "$PREFIX/output/gridded/$DIR" && gzip *xyz)

# For each processed year, zip the per-station ct1 CSVs and remove the
# originals. Globs stay unquoted so they expand; CSVs are only deleted if
# zip succeeded (the original `;` chain deleted them unconditionally).
for year in 1993 2004 2013
do
    (cd "$PREFIX/output/reported/work" && zip "../$DIR/${name}_${year}_ct1.zip" ${name}_${year}_????_ct1.csv && rm -f ${name}_${year}_????_ct1.csv)
done

# Release the lock so other jobs can use the work directory.
rm -f "$PREFIX/output/reported/work/LOCK"
|
<filename>client/src/App.js
import React, { useState, useEffect } from "react";
import GlobalStyle from "./GlobalStyle";
import SplashPage from "./pages/SplashPage";
import StartPage from "./pages/StartPage";
import ChoosingPage from "./pages/ChoosingPage";
import RoulettePage from "./pages/RoulettePage";
import FilterPage from "./pages/FilterPage";
import { PageContainer } from "./components/PageContainer";
import { BrowserRouter as Router, Switch, Route } from "react-router-dom";
function App() {
const [page, setPage] = useState(true);
useEffect(() => {
setTimeout(() => setPage(false), 3500);
}, []);
return (
<Router>
<GlobalStyle />
<PageContainer>
<Switch>
<Route exact path="/">
{page ? <SplashPage /> : <StartPage />}
</Route>
<Route path="/desicion">
<ChoosingPage />
</Route>
<Route path="/random">
<RoulettePage />
</Route>
<Route path="/select">
<FilterPage />
</Route>
</Switch>
</PageContainer>
</Router>
);
}
export default App;
|
// Wraps a string and provides case-conversion helpers.
class StringConverter {
public:
    // Takes the string by const reference and copies it once via the member
    // initializer list (the original copied twice: by-value param + assignment).
    StringConverter(const std::string& str) : m_str(str) {
    }

    // Upper-cases the stored string in place and returns the result.
    std::string toUpper() {
        for (char& c : m_str) {
            // Cast to unsigned char first: passing a negative char value to
            // toupper() is undefined behavior for non-ASCII input.
            c = static_cast<char>(std::toupper(static_cast<unsigned char>(c)));
        }
        return m_str;
    }

private:
    std::string m_str;  // the wrapped string, mutated by toUpper()
};
|
<reponame>amitksingh1490/Proptics<gh_stars>0
package optics
import cats.instances.either._
import cats.syntax.either._
import proptics.Iso
import proptics.Iso._
import proptics.instances.field2._
import proptics.instances.reverse._
import proptics.specs.PropticsSuite
import proptics.std.either._
import proptics.std.list._
import proptics.std.string._
import proptics.std.tuple._
import proptics.syntax.iso._
// Example specs exercising proptics Iso optics: swapping tuples/eithers,
// involuted (self-inverse) isos, and lifting an iso over functors with
// map/bimap/contramap.
class IsoExamples extends PropticsSuite {
// swapTuple is an Iso between (A, B) and (B, A); view applies the forward direction.
test("swap Tuple") {
val tuple = ("Hello", 9)
assertResult(tuple.swap)(swapTuple[String, Int].view(tuple))
}
// swapEither exchanges the Left and Right sides of an Either.
test("swap Either") {
val either: Either[String, Int] = "Hello".asLeft[Int]
assertResult(either.swap)(swapEither[String, Int].view(either))
}
// Composing a swap with itself yields the identity iso.
test("swap Either twice") {
val either: Either[String, Int] = "Hello".asLeft[Int]
val swapTwice: Iso[Either[String, Int], Either[String, Int]] =
swapEither compose swapEither
assertResult(either)(swapTwice.view(either))
}
// involuted builds an Iso from a self-inverse function; here it toggles the
// case of every character, focused on the second tuple element.
test("replace the case of all characters using involuted") {
val composed =
_2[Int, String] compose
stringToChars compose
involuted[List[Char]](_.map(c => if (c.isUpper) c.toLower else c.toUpper)) compose
charsToString
val input = (9, "Hi")
assertResult((9, "camelCase"))(composed.set("CAMELcASE")(input))
}
// map lifts the iso over a functor (Either[Int, *]); a Left passes through untouched.
test("reverse the string of an either using map") {
val composed =
stringToChars compose
reverse[List[Char], List[Char]] compose
charsToString
val input = Right("desrever")
assertResult(Right("reversed"))(composed.map[Either[Int, *]] view input)
assertResult(Left(9))(composed.map[Either[Int, *]] view Left(9))
}
// bimap lifts the iso over both sides of the bifunctor, so Left and Right
// are both reversed.
test("reverse both sides of an either using bimap") {
val composed =
stringToChars compose
reverse[List[Char], List[Char]] compose
charsToString
assertResult(Right("reversed"))(composed.bimap[Either] view Right("desrever"))
assertResult(Left("reversed"))(composed.bimap[Either] view Left("desrever"))
}
// contramap lifts the involuted negation iso over the input of a Boolean => String
// function: the argument is negated before being rendered.
test("using contramap to create a string from boolean") {
val negate = Iso.involuted[Boolean](!_).contramap[* => String]
assertResult("false")(negate.view(_.toString)(true))
}
}
|
<reponame>PinoEire/archi<gh_stars>1-10
/**
* This program and the accompanying materials
* are made available under the terms of the License
* which accompanies this distribution in the file LICENSE.txt
*/
package com.archimatetool.editor.diagram.figures;
import org.eclipse.draw2d.ColorConstants;
import org.eclipse.draw2d.Graphics;
import org.eclipse.draw2d.IFigure;
import org.eclipse.draw2d.ScalableFigure;
import org.eclipse.draw2d.geometry.PointList;
import org.eclipse.draw2d.geometry.Rectangle;
import org.eclipse.swt.graphics.Color;
import org.eclipse.swt.graphics.Device;
import org.eclipse.swt.graphics.Path;
import org.eclipse.swt.graphics.Pattern;
import org.eclipse.swt.widgets.Display;
/**
* Utils for Figures
*
* @author <NAME>
*/
/**
 * Utility methods for Draw2d figures: zoom-aware scale lookup, gradient
 * {@link Pattern} creation and {@link Path} construction from point lists.
 *
 * <p>All members are static; the class is not meant to be instantiated.
 *
 * @author <NAME>
 */
public class FigureUtils {

    /**
     * Extra gradient length as a fraction of the figure size. Gradienting all
     * the way to pure white inside the figure is too much, so the gradient
     * area is extended past the figure by this factor.
     */
    private static final float GRADIENT_DELTA_FACTOR = 0.15f;

    private FigureUtils() {
        // Utility class - no instances.
    }

    /**
     * @param figure a figure, may be {@code null}
     * @return the current zoom drawing scale for the figure, found by walking
     *         up the parent chain to the nearest {@link ScalableFigure};
     *         {@code 1.0} if there is none
     */
    public static double getFigureScale(IFigure figure) {
        if(figure instanceof ScalableFigure) {
            return ((ScalableFigure)figure).getScale();
        }
        return figure == null ? 1.0 : getFigureScale(figure.getParent());
    }

    /**
     * Gradient Direction
     */
    public enum Direction {
        TOP, LEFT, RIGHT, BOTTOM;

        /**
         * @param value a persisted ordinal-style value (0..3)
         * @return the matching direction; any out-of-range value maps to {@link #TOP}
         */
        public static Direction get(int value) {
            switch(value) {
                case 1:
                    return LEFT;
                case 2:
                    return RIGHT;
                case 3:
                    return BOTTOM;
                case 0:
                default:
                    return TOP;
            }
        }
    }

    /**
     * Create a Pattern class with consideration to the scale of the Graphics class.
     * Adapted from https://www.eclipse.org/forums/index.php?t=msg&th=198946&goto=894610&#msg_894610
     *
     * @return a new {@link Pattern} whose coordinates are pre-multiplied by the
     *         current absolute scale of {@code graphics}; callers should dispose of it
     */
    public static Pattern createGradient(Graphics graphics, Device device, float x1, float y1, float x2, float y2, Color color1,
            int alpha1, Color color2, int alpha2) {
        double scale = graphics.getAbsoluteScale();
        return new Pattern(device, (int)(x1 * scale), (int)(y1 * scale), (int)(x2 * scale), (int)(y2 * scale), color1, alpha1, color2,
                alpha2);
    }

    /**
     * Create a Pattern class with consideration to the scale of the Graphics class.
     * Adapted from https://www.eclipse.org/forums/index.php?t=msg&th=198946&goto=894610&#msg_894610
     *
     * @return a new {@link Pattern}; callers should dispose of it
     */
    public static Pattern createGradient(Graphics graphics, Device device, float x1, float y1, float x2, float y2, Color color1,
            Color color2) {
        double scale = graphics.getAbsoluteScale();
        return new Pattern(device, (int)(x1 * scale), (int)(y1 * scale), (int)(x2 * scale), (int)(y2 * scale), color1, color2);
    }

    /**
     * Create a Pattern class with consideration to the scale of the Graphics class
     * using the given gradient direction and default gradient end color (opaque).
     */
    public static Pattern createGradient(Graphics graphics, Rectangle r, Color color, Direction direction) {
        return createGradient(graphics, r, color, 255, direction);
    }

    /**
     * Create a Pattern class with consideration to the scale of the Graphics class
     * using the given gradient direction, default (white) gradient end color and
     * the given alpha transparency.
     *
     * @return the gradient Pattern, or {@code null} if {@code direction} is {@code null};
     *         callers should dispose of it
     */
    public static Pattern createGradient(Graphics graphics, Rectangle r, Color color, int alpha, Direction direction) {
        if(direction == null) {
            return null;
        }

        Color endColor = ColorConstants.white;
        Device device = Display.getDefault();

        switch(direction) {
            case LEFT: {
                int delta = (int)(r.width * GRADIENT_DELTA_FACTOR);
                return createGradient(graphics, device, r.x, r.y, r.getRight().x + delta, r.y, color, alpha, endColor, alpha);
            }
            case RIGHT: {
                int delta = (int)(r.width * GRADIENT_DELTA_FACTOR);
                return createGradient(graphics, device, r.getRight().x, r.y, r.x - delta, r.y, color, alpha, endColor, alpha);
            }
            case BOTTOM: {
                int delta = (int)(r.height * GRADIENT_DELTA_FACTOR);
                return createGradient(graphics, device, r.x, r.getBottom().y, r.x, r.y - delta, color, alpha, endColor, alpha);
            }
            case TOP:
            default: {
                int delta = (int)(r.height * GRADIENT_DELTA_FACTOR);
                return createGradient(graphics, device, r.x, r.y, r.x, r.getBottom().y + delta, color, alpha, endColor, alpha);
            }
        }
    }

    /**
     * Create a Pattern class with consideration to the scale of the Graphics class
     * using the LEFT gradient direction and default gradient end color.
     *
     * @deprecated use {@link #createGradient(Graphics, Rectangle, Color, Direction)} instead
     */
    @Deprecated
    public static Pattern createGradient(Graphics graphics, Rectangle r, Color color) {
        return createGradient(graphics, r, color, 255);
    }

    /**
     * Create a Pattern class with consideration to the scale of the Graphics class
     * using the LEFT direction, default gradient end color and alpha transparency.
     *
     * @deprecated use {@link #createGradient(Graphics, Rectangle, Color, int, Direction)} instead
     */
    @Deprecated
    public static Pattern createGradient(Graphics graphics, Rectangle r, Color color, int alpha) {
        return createGradient(graphics, r, color, alpha, Direction.LEFT);
    }

    /**
     * Create a Path from a points list
     * @param points The points list
     * @return The Path - callers should dispose of it
     */
    public static Path createPathFromPoints(PointList points) {
        return createPathFromPoints(points.toIntArray());
    }

    /**
     * Create a Path from a points list
     * @param points The points as x,y pairs; must contain at least one pair
     * @return The Path - callers should dispose of it
     * @throws IllegalArgumentException if {@code points} is null, empty, or of odd length
     */
    public static Path createPathFromPoints(int[] points) {
        if(points == null || points.length < 2 || points.length % 2 != 0) {
            throw new IllegalArgumentException("points must contain at least one x,y pair");
        }
        Path path = new Path(null);
        path.moveTo(points[0], points[1]);
        for(int i = 2; i < points.length; i += 2) {
            path.lineTo(points[i], points[i + 1]);
        }
        // Close the outline by drawing back to the starting point.
        path.lineTo(points[0], points[1]);
        return path;
    }
}
|
<filename>.local/share/Trash/files/ch17/join.rb<gh_stars>0
# The Book of Ruby - http://www.sapphiresteel.com
# Spawn one printer thread per collection and join each before starting the
# next, so the output order is deterministic: all words, then all numbers.
words = ["hello", "world", "goodbye", "mars"]
numbers = (1..10).to_a
[words, numbers].each do |items|
  printer = Thread.new do
    items.each { |item| puts(item) }
  end
  printer.join
end
|
$(function () {
  // Run a search for the text typed into the input next to the button.
  $('#searchBtn').on('tap', function () {
    var keyword = $(this).next().val();

    // Reject empty or whitespace-only input.
    if (!keyword || $.trim(keyword) === '') {
      alert("请输入关键字");
      return;
    }

    // Persist the keyword under the 'keys' entry in localStorage,
    // creating the list on first use.
    var stored = localStorage.getItem('keys');
    if (stored) {
      var keys = JSON.parse(stored);
      keys.push(keyword);
      localStorage.setItem('keys', JSON.stringify(keys));
    } else {
      localStorage.setItem('keys', JSON.stringify([keyword]));
    }

    location.href = 'search-list.html?key=' + keyword;
  });

  // If there is stored search history, render it (duplicates hidden).
  if (localStorage.getItem('keys')) {
    var history = JSON.parse(localStorage.getItem('keys'));
    // Keep only the first occurrence of each keyword.
    var unique = history.filter(function (key, index) {
      return history.indexOf(key) === index;
    });
    $('#historyBox').html(template('historyTpl', { data: unique }));
  }

  // "Clear history" removes the stored keywords and refreshes the page.
  $('#clearBtn').on('tap', function () {
    localStorage.removeItem('keys');
    location.reload();
  });

  // Tapping a history entry copies its text into the search box.
  $('body').on('tap', '.title', function () {
    $('#searchCon').val($(this).html());
  });
});
|
// converted golang begin
package writer
import(
// "errors"
"fmt"
"encoding/json"
_core "nodejs/core"
JSON "nodejs/json"
"nodejs/console"
. "github.com/byteball/go-byteballcore/types"
)
import(
// _ "lodash"
// "async"
"github.com/byteball/go-byteballcore/constants"
"github.com/byteball/go-byteballcore/conf"
"github.com/byteball/go-byteballcore/storage"
"github.com/byteball/go-byteballcore/db"
objectHash "github.com/byteball/go-byteballcore/object_hash"
"github.com/byteball/go-byteballcore/mutex"
"github.com/byteball/go-byteballcore/main_chain"
Definition "github.com/byteball/go-byteballcore/definition"
eventBus "github.com/byteball/go-byteballcore/event_bus"
"github.com/byteball/go-byteballcore/profiler"
)
// Type aliases and data structures used by the joint writer. The aliases
// re-export db/storage types so this package's signatures stay short.
type(
DBConnT = db.DBConnT
DBParamsT = db.DBParamsT
DBQuerysT = db.DBQuerysT
DBQueryResultT = db.DBQueryResultT
refDBQueryResultT = *DBQueryResultT
// ObjJointT is a unit together with its (optional) ball and skiplist units.
ObjJointT struct{
Unit UnitObjectT `json:"unit"`
Ball BallT `json:"ball"`
Skiplist_units UnitsT `json:"skiplist_units,omitempty"`
}
refObjJointT = *ObjJointT
// ObjValidationStateT carries state produced by validation and consumed when
// saving a joint (extra queries, double-spend info, MC indexes, levels).
ObjValidationStateT struct{
ArrAdditionalQueries DBQuerysT `json:"arrAdditionalQueries"`
ArrDoubleSpendInputs DoubleSpendInputsT `json:"arrDoubleSpendInputs"`
ArrInputKeys []string `json:"arrInputKeys"`
Max_parent_limci MCIndexT `json:"max_parent_limci"`
Last_ball_mci MCIndexT `json:"last_ball_mci"`
Max_known_mci MCIndexT `json:"max_known_mci"`
Witnessed_level LevelT `json:"witnessed_level"`
Best_parent_unit UnitT `json:"best_parent_unit,omitempty"`
ArrAddressesWithForkedPath AddressesT `json:"arrAddressesWithForkedPath"`
Unit_hash_to_Sign interface{} `json:"unit_hash_to_sign"`
// Unit_hash_to_Sign [32]byte `json:"unit_hash_to_sign"`
Sequence SequenceT `json:"sequence,omitempty"`
}
refObjValidationStateT = *ObjValidationStateT
// DoubleSpendInputT identifies one input within a unit by message/input index.
DoubleSpendInputT struct{
message_index MessageIndexT `json:"message_index"`
input_index InputIndexT `json:"input_index"`
}
DoubleSpendInputsT = []DoubleSpendInputT
SequenceT string
// preCommitCallbackT runs against the open connection before the transaction commits.
preCommitCallbackT func (*DBConnT)
// UnitObjectT mirrors the JSON shape of a unit as stored/transmitted.
UnitObjectT struct{
Unit UnitT `json:"unit"`
Version string `json:"version"`
Alt string `json:"alt"`
Witness_list_unit UnitT `json:"witness_list_unit,omitempty"`
Last_ball_unit UnitT `json:"last_ball_unit,omitempty"`
Last_ball BallT `json:"last_ball,omitempty"`
Headers_commission int `json:"headers_commission,omitempty"`
Payload_commission int `json:"payload_commission,omitempty"`
Timestamp int64 `json:"timestamp,omitempty"`
Parent_units UnitsT `json:"parent_units,omitempty"`
Authors AuthorsT `json:"authors,omitempty"`
Messages MessagesT `json:"messages,omitempty"`
// [fyi] field below were not observed
Main_chain_index MCIndexT `json:"main_chain_index,omitempty"`
Witnesses AddressesT `json:"witnesses,omitempty"`
Content_hash *ContentHashT `json:"content_hash,omitempty"`
Earned_headers_commission_recipients EHCRsT `json:"earned_headers_commission_recipients,omitempty"`
}
// EHCRT names an address that earns a share of headers commission.
EHCRT struct{
Address AddressT `json:"address"`
Earned_headers_commission_share EHCST `json:"earned_headers_commission_share"`
}
// [fyi] integer percent
EHCST int
EHCRsT = []EHCRT
refAddressT = *AddressT
XPropsT = storage.XPropsT
)
// Package-level counters. NOTE(review): their consumers are outside this
// chunk — presumably used to throttle periodic re-analysis; confirm before
// documenting further.
var count_writes int = 0
var count_units_in_prev_analyze int = 0
//func saveJoint_sync(objJoint objJointT, objValidationState objValidationStateT, preCommitCallback preCommitCallbackT) ErrorT {
// SaveJoint_sync persists a validated joint (its unit and, when present, its
// ball) to the database inside a single transaction. It first queues all
// INSERT/UPDATE queries on the pooled connection (units row, ball, skiplist,
// parenthoods, witnesses, authors, messages and their inline payloads), then
// takes the global "write" lock, registers the new unit in the in-memory
// unstable-units cache, executes the queued queries inside BEGIN/COMMIT, and
// runs the post-write operations (best parent, level, witnessed level,
// main-chain update and the optional preCommitCallback_sync). On error the
// transaction is rolled back and the unstable-units cache is reset; on success
// a "saved_unit-<unit>" event is emitted. Returns the post-write error, or nil.
//
// NOTE(review): this is a synchronous transliteration of an async/callback JS
// writer; the "//"-prefixed JS fragments throughout are the original source
// lines kept for reference.
func SaveJoint_sync(objJoint refObjJointT, objValidationState refObjValidationStateT, preCommitCallback_sync preCommitCallbackT) ErrorT {
	// Closures declared up front because several of them are mutually ordered:
	// updateWitnessedLevel_sync dispatches to updateWitnessedLevelByWitnesslist_sync,
	// and determineInputAddress (inside addInlinePaymentQueries_sync) falls back to
	// determineInputAddressFromSrcOutput_sync.
	var(
	// determineInputAddressFromSrcOutput_sync func (asset AssetT, denomination denominationT, input inputT) AddressT
	determineInputAddressFromSrcOutput_sync func (asset AssetT, denomination DenominationT, input InputT) refAddressT
	// addInlinePaymentQueries_sync func ()
	addInlinePaymentQueries_sync func () ErrorT
	updateBestParent_sync func () ErrorT
	// determineMaxLevel_sync func ()
	determineMaxLevel_sync func () LevelT
	updateLevel_sync func () ErrorT
	updateWitnessedLevel_sync func () ErrorT
	updateWitnessedLevelByWitnesslist_sync func (arrWitnesses AddressesT) ErrorT
	)
	// objUnit := objJoint.unit
	objUnit := objJoint.Unit
	// console.Log("saving unit " + objUnit.unit)
	console.Log("saving unit %s", objUnit.Unit)
	profiler.Start()
	conn := /* await */
	// db.takeConnectionFromPool_sync()
	db.TakeConnectionFromPool_sync()
	// << flattened continuation for db.takeConnectionFromPool:24:1
	// arrQueries := AsyncFunctorsT{}
	// Queries added via conn.AddQuery/AddQueryCb are buffered on the connection
	// and only executed later by conn.ExecuteAddedQuerys, inside BEGIN/COMMIT.
	conn.ResetAddedQueries()
	// conn.AddQuery(arrQueries, "BEGIN")
	// [fyi] .BeginTransaction is invoked before .ExecuteAddedQueries
	// additional queries generated by the validator, used only when received a doublespend
	// for i := 0; i < len(objValidationState.arrAdditionalQueries); i++ {
	// objAdditionalQuery := objValidationState.arrAdditionalQueries[i]
	for _, objAdditionalQuery := range objValidationState.ArrAdditionalQueries {
		// console.Log("----- applying additional queries: " + objAdditionalQuery.sql)
		// conn.AddQuery(objAdditionalQuery.sql, objAdditionalQuery.params)
		console.Log("----- applying additional queries: %v", objAdditionalQuery.Sql)
		conn.AddQuery(objAdditionalQuery.Sql, objAdditionalQuery.Params)
	}
	// Build the INSERT for the units table; light nodes append two extra
	// columns (main_chain_index, creation_date) below.
	fields := "unit, version, alt, witness_list_unit, last_ball_unit, headers_commission, payload_commission, sequence, content_hash"
	values := "?,?,?,?,?,?,?,?,?"
	/**
	var params = [objUnit.unit,
	objUnit.version,
	objUnit.alt,
	objUnit.witness_list_unit,
	objUnit.last_ball_unit,
	objUnit.headers_commission || 0,
	objUnit.payload_commission || 0,
	objValidationState.sequence,
	objUnit.content_hash];
	**/
	queryParams := DBParamsT{
		objUnit.Unit,
		objUnit.Version,
		objUnit.Alt,
		objUnit.Witness_list_unit.OrNull(),
		objUnit.Last_ball_unit.OrNull(),
		objUnit.Headers_commission,
		objUnit.Payload_commission,
		objValidationState.Sequence,
		objUnit.Content_hash,
	}
	// if conf.bLight {
	if conf.IsLight {
		fields += ", main_chain_index, creation_date"
		// values += ",?," + conn.getFromUnixTime("?")
		values += ",?," + conn.GetFromUnixTime("?")
		queryParams = append(queryParams, objUnit.Main_chain_index)
	}
	conn.AddQuery("INSERT INTO units (" + fields + ") VALUES (" + values + ")", queryParams)
	// Full nodes only: record the ball and drop it from the hash-tree staging
	// table; skiplist entries accompany the ball.
	// if objJoint.ball && ! conf.bLight {
	if ! objJoint.Ball.IsNull() && ! conf.IsLight {
		conn.AddQuery("INSERT INTO balls (ball, unit) VALUES(?,?)", DBParamsT{
			objJoint.Ball,
			objUnit.Unit,
		})
		conn.AddQuery("DELETE FROM hash_tree_balls WHERE ball=? AND unit=?", DBParamsT{
			objJoint.Ball,
			objUnit.Unit,
		})
		// if objJoint.skiplist_units {
		if objJoint.Skiplist_units != nil {
			// if true {
			for i := 0; i < len(objJoint.Skiplist_units); i++ {
				conn.AddQuery("INSERT INTO skiplist_units (unit, skiplist_unit) VALUES (?,?)", DBParamsT{
					objUnit.Unit,
					objJoint.Skiplist_units[i],
				})
			}
		}
	}
	// One parenthood row per parent edge of the new unit.
	// if objUnit.parent_units {
	if objUnit.Parent_units != nil {
		// if true {
		for i := 0; i < len(objUnit.Parent_units); i++ {
			conn.AddQuery("INSERT INTO parenthoods (child_unit, parent_unit) VALUES(?,?)", DBParamsT{
				objUnit.Unit,
				objUnit.Parent_units[i],
			})
		}
	}
	// bGenesis := storage.isGenesisUnit(objUnit.unit)
	bGenesis := storage.IsGenesisUnit(objUnit.Unit)
	if bGenesis {
		// Genesis gets fixed main-chain/level properties straight away.
		conn.AddQuery("UPDATE units SET is_on_main_chain=1, main_chain_index=0, is_stable=1, level=0, witnessed_level=0 \n" +
			" WHERE unit=?", DBParamsT{ objUnit.Unit })
	} else {
		// Non-genesis: the parents are no longer free tips. The callback also
		// mirrors the change into the in-memory unstable-units cache.
		// .. not flattening for conn.AddQuery
		// conn.AddQueryCb(arrQueries, "UPDATE units SET is_free=0 WHERE unit IN(?)", DBParamsT{ objUnit.parent_units }, func (result resultT) {
		queryParams := DBParamsT{}
		pusSql := queryParams.AddUnits(objUnit.Parent_units)
		conn.AddQueryCb("UPDATE units SET is_free=0 WHERE unit IN("+ pusSql +")", queryParams, func (result refDBQueryResultT) {
			// in sqlite3, result.affectedRows actually returns the number of _matched_ rows
			count_consumed_free_units := result.AffectedRows
			// console.log(count_consumed_free_units + " free units consumed")
			console.Log("%d free units consumed", count_consumed_free_units)
			// .. not flattening for Array.forEach
			// for parent_unit, _ := range objUnit.parent_units {
			for _, parent_unit := range objUnit.Parent_units {
				// if storage.AssocUnstableUnits[parent_unit] {
				if _, _exists := storage.AssocUnstableUnits[parent_unit]; _exists {
					// storage.AssocUnstableUnits[parent_unit].is_free = 0
					storage.AssocUnstableUnits[parent_unit].Is_free = 0
				}
			}
		})
	}
	// Explicit witness list (as opposed to one inherited via witness_list_unit):
	// store each witness plus a hash row for witness-list deduplication.
	// if Array.isArray(objUnit.Witnesses) {
	if objUnit.Witnesses != nil {
		// for i := 0; i < len(objUnit.Witnesses); i++ {
		// address := objUnit.Witnesses[i]
		for _, address := range objUnit.Witnesses {
			conn.AddQuery("INSERT INTO unit_witnesses (unit, address) VALUES(?,?)", DBParamsT{
				objUnit.Unit,
				address,
			})
		}
		conn.AddQuery("INSERT " + conn.GetIgnore() + " INTO witness_list_hashes (witness_list_unit, witness_list_hash) VALUES (?,?)", DBParamsT{
			objUnit.Unit,
			// objectHash.getBase64Hash(objUnit.witnesses),
			objectHash.GetBase64Hash(objUnit.Witnesses),
		})
	}
	// Collect author addresses; also used later to resolve input addresses in
	// addInlinePaymentQueries_sync and in the spend-proof fallback below.
	arrAuthorAddresses := AddressesT{}
	// for i := 0; i < len(objUnit.Authors); i++ {
	// author := objUnit.Authors[i]
	for _, author := range objUnit.Authors {
		arrAuthorAddresses = append(arrAuthorAddresses, author.Address)
		definition := author.Definition
		// definition_chash := nil
		definition_chash := CHashT_Null
		// if definition {
		if definition != nil {
			// IGNORE for messages out of sequence
			// console.Log("definition %#v\n", definition)
			{{
				jsdef, _ := json.MarshalIndent(definition, "", "  ")
				console.Log("address %s\ndefinition --[[\n%s\n]]--\n", author.Address, jsdef)
			}}
			// definition_chash = objectHash.getChash160(definition)
			definition_chash = objectHash.GetChash160(definition)
			hasReferences := 0
			// if Definition.hasReferences(definition) { hasReferences = 1 }
			if Definition.HasReferences(definition) { hasReferences = 1 }
			conn.AddQuery("INSERT " + conn.GetIgnore() + " INTO definitions (definition_chash, definition, has_references) VALUES (?,?,?)", DBParamsT{
				definition_chash,
				JSON.Stringify(definition),
				hasReferences,
			})
			// actually inserts only when the address is first used.
			// if we change keys and later send a unit signed by new keys, the address is not inserted.
			// Its definition_chash was updated before when we posted change-definition message.
			// if definition_chash == author.address {
			if definition_chash == CHashT(author.Address) {
				conn.AddQuery("INSERT " + conn.GetIgnore() + " INTO addresses (address) VALUES(?)", DBParamsT{ author.Address })
			}
		} else {
			// if objUnit.Content_hash {
			if objUnit.Content_hash != nil {
				conn.AddQuery("INSERT " + conn.GetIgnore() + " INTO addresses (address) VALUES(?)", DBParamsT{ author.Address })
			}
		}
		conn.AddQuery("INSERT INTO unit_authors (unit, address, definition_chash) VALUES(?,?,?)", DBParamsT{
			objUnit.Unit,
			author.Address,
			definition_chash.OrNull(),
		})
		if bGenesis {
			conn.AddQuery("UPDATE unit_authors SET _mci=0 WHERE unit=?", DBParamsT{ objUnit.Unit })
		}
		// Authentifiers (signatures) are only stored for full units; a stripped
		// unit (one with a content_hash) has no signatures to keep.
		// if ! objUnit.Content_hash {
		if ! (objUnit.Content_hash != nil) {
			// for path := range author.Authentifiers {
			for path := range author.Authentifiers {
				conn.AddQuery("INSERT INTO authentifiers (unit, address, path, authentifier) VALUES(?,?,?,?)", DBParamsT{
					objUnit.Unit,
					author.Address,
					path,
					author.Authentifiers[path],
				})
			}
		}
	}
	// Full (non-stripped) units only: store every message and, for inline
	// payloads, the app-specific rows (polls, votes, attestations, assets,
	// data feeds, ...). Payment inputs/outputs are deferred to
	// addInlinePaymentQueries_sync because resolving the payer address may
	// need a DB read.
	// if ! objUnit.Content_hash {
	if ! (objUnit.Content_hash != nil) {
		// for i := 0; i < len(objUnit.Messages); i++ {
		// message := objUnit.Messages[i]
		for i, message := range objUnit.Messages {
			// text_payload := nil
			text_payload := interface{}(nil)
			if message.App == "text" {
				//text_payload = message.Payload
				text_payload = message.PayloadText
			} else {
				if message.App == "data" || message.App == "profile" || message.App == "attestation" || message.App == "definition_template" {
					// [fyi] no can go back in golang land
					//text_payload = JSON.Stringify(message.Payload)
					text_payload = message.PayloadText
				}
			}
			conn.AddQuery("INSERT INTO messages \n" +
				" (unit, message_index, app, payload_hash, payload_location, payload, payload_uri, payload_uri_hash) VALUES(?,?,?,?,?,?,?,?)", DBParamsT{
				objUnit.Unit,
				i,
				message.App,
				message.Payload_hash,
				message.Payload_location,
				text_payload,
				message.Payload_uri,
				message.Payload_uri_hash,
			})
			if message.Payload_location == "inline" {
				// [*SwitchStatement*]
				switch message.App {
				case "address_definition_change":
					definition_chash := message.Payload.Definition_chash
					address := message.Payload.Address
					// default to the (single) author when the payload names no address
					if address.IsNull() {
						address = objUnit.Authors[0].Address
					}
					conn.AddQuery("INSERT INTO address_definition_changes (unit, message_index, address, definition_chash) VALUES(?,?,?,?)", DBParamsT{
						objUnit.Unit,
						i,
						address,
						definition_chash,
					})
					//break
				case "poll":
					poll := message.Payload
					conn.AddQuery("INSERT INTO polls (unit, message_index, question) VALUES(?,?,?)", DBParamsT{
						objUnit.Unit,
						i,
						poll.Question,
					})
					for j := range poll.Choices {
						conn.AddQuery("INSERT INTO poll_choices (unit, choice_index, choice) VALUES(?,?,?)", DBParamsT{
							objUnit.Unit,
							j,
							poll.Choices[j],
						})
					}
					//break
				case "vote":
					vote := message.Payload
					conn.AddQuery("INSERT INTO votes (unit, message_index, poll_unit, choice) VALUES (?,?,?,?)", DBParamsT{
						objUnit.Unit,
						i,
						vote.Unit,
						vote.Choice,
					})
					//break
				case "attestation":
					attestation := message.Payload
					conn.AddQuery("INSERT INTO attestations (unit, message_index, attestor_address, address) VALUES(?,?,?,?)", DBParamsT{
						objUnit.Unit,
						i,
						objUnit.Authors[0].Address,
						attestation.Address,
					})
					// only persist profile fields short enough for the schema limits
					for field, value := range attestation.Profile {
						if len(field) <= constants.MAX_PROFILE_FIELD_LENGTH && true && len(value) <= constants.MAX_PROFILE_VALUE_LENGTH {
							conn.AddQuery("INSERT INTO attested_fields (unit, message_index, attestor_address, address, field, value) VALUES(?,?, ?,?, ?,?)", DBParamsT{
								objUnit.Unit,
								i,
								objUnit.Authors[0].Address,
								attestation.Address,
								field,
								value,
							})
						}
					}
					//break
				case "asset":
					// helpers to map Go bool / optional object to the SQL column forms
					boolToInt := func (b bool) int {
						if b { return 1 }
						return 0
					}
					objToJsonOrNull := func (obj interface{}) *string {
						if obj == nil { return nil }
						// [tbd] order of fields is not deterministic!!! [or is it?..]
						json := JSON.Stringify(obj)
						return &json
					}
					asset := message.Payload
					conn.AddQuery("INSERT INTO assets (unit, message_index, \n"+
						"cap, is_private, is_transferrable, auto_destroy, fixed_denominations, \n"+
						"issued_by_definer_only, cosigned_by_definer, spender_attested, \n"+
						"issue_condition, transfer_condition) VALUES(?,?,?,?,?,?,?,?,?,?,?,?)", DBParamsT{
						objUnit.Unit,
						i,
						asset.Cap,
						boolToInt(asset.Is_private),
						boolToInt(asset.Is_transferrable),
						boolToInt(asset.Auto_destroy),
						boolToInt(asset.Fixed_denominations),
						boolToInt(asset.Issued_by_definer_only),
						boolToInt(asset.Cosigned_by_definer),
						boolToInt(asset.Spender_attested),
						objToJsonOrNull(asset.Issue_condition),
						objToJsonOrNull(asset.Transfer_condition),
					})
					if asset.Attestors != nil {
						for j, _ := range asset.Attestors {
							conn.AddQuery("INSERT INTO asset_attestors (unit, message_index, asset, attestor_address) VALUES(?,?,?,?)", DBParamsT{
								objUnit.Unit,
								i,
								objUnit.Unit,
								asset.Attestors[j],
							})
						}
					}
					if asset.Denominations != nil {
						for j, _ := range asset.Denominations {
							conn.AddQuery("INSERT INTO asset_denominations (asset, denomination, count_coins) VALUES(?,?,?)", DBParamsT{
								objUnit.Unit,
								asset.Denominations[j].Denomination,
								asset.Denominations[j].Count_coins,
							})
						}
					}
					//break
				case "asset_attestors":
					asset_attestors := message.Payload
					for j := range asset_attestors.Attestors {
						conn.AddQuery("INSERT INTO asset_attestors (unit, message_index, asset, attestor_address) VALUES(?,?,?,?)", DBParamsT{
							objUnit.Unit,
							i,
							asset_attestors.Asset,
							asset_attestors.Attestors[j],
						})
					}
					//break
				case "data_feed":
					// [tbd] must reparse entire incoming message
					// [tbd] in order to get Payload in data feed format
					// string values go to `value`, integer values to int_value
					data := message.Payload.DataFeed
					for feed_name, value := range data {
						field_name := ""
						switch value.(type) {
						case string:
							field_name = "`value`"
						case int:
							field_name = "int_value"
						default:
							panic("Payload.(data_feed): unexpected value type")
						}
						conn.AddQuery("INSERT INTO data_feeds (unit, message_index, feed_name, "+field_name+") VALUES(?,?,?,?)", DBParamsT{
							objUnit.Unit,
							i,
							feed_name,
							value,
						})
					}
					//break
				case "payment":
					// we'll add inputs/outputs later because we need to read the payer address
					// from src outputs, and it's inconvenient to read it synchronously
					//break
				}
				// switch message.app
			}
			// inline
			// Spend proofs apply to both inline and external payloads; a proof
			// without an explicit address defaults to the first author.
			// if "spend_proofs" in message {
			if message.Spend_proofs != nil {
				// for j := 0; j < len(message.Spend_proofs); j++ {
				// objSpendProof := message.Spend_proofs[j]
				for j, objSpendProof := range message.Spend_proofs {
					_address := objSpendProof.Address
					if _address.IsNull() { _address = arrAuthorAddresses[0] }
					conn.AddQuery("INSERT INTO spend_proofs (unit, message_index, spend_proof_index, spend_proof, address) VALUES(?,?,?,?,?)", DBParamsT{
						objUnit.Unit,
						i,
						j,
						objSpendProof.Spend_proof,
						// objSpendProof.Address || arrAuthorAddresses[0],
						_address,
					})
				}
			}
		}
	}
	// if "earned_headers_commission_recipients" in objUnit {
	if objUnit.Earned_headers_commission_recipients != nil {
		// for i := 0; i < len(objUnit.Earned_headers_commission_recipients); i++ {
		// recipient := objUnit.Earned_headers_commission_recipients[i]
		for _, recipient := range objUnit.Earned_headers_commission_recipients {
			conn.AddQuery("INSERT INTO earned_headers_commission_recipients (unit, address, earned_headers_commission_share) VALUES(?,?,?)", DBParamsT{
				objUnit.Unit,
				recipient.Address,
				recipient.Earned_headers_commission_share,
			})
		}
	}
	// my_best_parent_unit := {*init:null*}
	// filled in by updateBestParent_sync, then consumed by updateWitnessedLevelByWitnesslist_sync
	var my_best_parent_unit UnitT
	// In-memory properties for the new unit; registered into
	// storage.AssocUnstableUnits under the write lock below and updated in
	// place by updateLevel_sync / setWitnessedLevel.
	// objNewUnitProps := [*ObjectExpression*]
	objNewUnitProps := XPropsT{
		Unit: objUnit.Unit,
		PropsT: PropsT{
			Level: LevelT_Null,
			Latest_included_mc_index: MCIndexT_Null,
			Main_chain_index: MCIndexT_Null,
			Is_on_main_chain: 0,
			Is_free: 1,
			Is_stable: 0,
			Witnessed_level: LevelT_Null,
		},
		Parent_units: objUnit.Parent_units,
	}
	if bGenesis {
		objNewUnitProps.Level = 0
		objNewUnitProps.Main_chain_index = 0
		objNewUnitProps.Is_on_main_chain = 1
		objNewUnitProps.Is_stable = 1
		objNewUnitProps.Witnessed_level = 0
	}
	// Resolves the spending address of a transfer input by reading its source
	// output. Returns nil only for a light client that lacks the output;
	// otherwise throws (via _core.Throw) on any mismatch.
	// determineInputAddressFromSrcOutput_sync = func (asset AssetT, denomination denominationT, input inputT) AddressT {
	determineInputAddressFromSrcOutput_sync = func (asset AssetT, denomination DenominationT, input InputT) refAddressT {
		/**
		rows := /* await * /
		conn.Query_sync("SELECT address, denomination, asset FROM outputs WHERE unit=? AND message_index=? AND output_index=?", DBParamsT{
			input.Unit,
			input.Message_index,
			input.Output_index,
		})
		**/
		rcvr := db.AddressDenominationAssetsReceiver{}
		conn.MustQuery("SELECT address, denomination, asset FROM outputs WHERE unit=? AND message_index=? AND output_index=?", DBParamsT{
			input.Unit,
			input.Message_index,
			input.Output_index,
		}, &rcvr)
		rows := rcvr.Rows
		// << flattened continuation for conn.query:237:3
		if len(rows) > 1 {
			_core.Throw("multiple src outputs found")
		}
		if len(rows) == 0 {
			// if conf.bLight {
			if conf.IsLight {
				// it's normal that a light client doesn't store the previous output
				// :: flattened return for return handleAddress(null);
				return nil
			} else {
				_core.Throw("src output not found")
			}
		}
		row := rows[0]
		// if ! ! asset && ! row.asset || asset == row.asset {
		if ! (asset.IsNull() && row.Asset.IsNull() || asset == row.Asset) {
			_core.Throw("asset doesn't match")
		}
		// if denomination != row.denomination {
		if denomination != row.Denomination {
			_core.Throw("denomination doesn't match")
		}
		// address := row.address
		address := row.Address
		// if arrAuthorAddresses.indexOf(address) == - 1 {
		if arrAuthorAddresses.IndexOf(address) == - 1 {
			_core.Throw("src output address not among authors")
		}
		// :: flattened return for handleAddress(address);
		// return address
		return &address
		// >> flattened continuation for conn.query:237:3
	}
	// Queues the deferred inputs/outputs INSERTs for every inline payment
	// message, resolving each input's address and marking spent outputs.
	// addInlinePaymentQueries_sync = func () {
	addInlinePaymentQueries_sync = func () ErrorT {
		// :: flattened return for cb(async.forEachOfSeries(objUnit.messages, function (message, i) {
		// ** need 0 return(s) instead of 1
		return (func () ErrorT {
			// :: inlined async.eachOfSeries:263:3
			// for message, i := range objUnit.Messages {
			for i, message := range objUnit.Messages {
				// _err := (func (message messageT, i iT) ErrorT {
				_err := (func (message MessageT, i int) ErrorT {
					if message.Payload_location != "inline" {
						// :: flattened return for return cb2();
						// ** need 1 return(s) instead of 0
						// return
						return nil
					}
					payload := message.Payload
					if message.App != "payment" {
						// :: flattened return for return cb2();
						// ** need 1 return(s) instead of 0
						// return
						return nil
					}
					// denomination := payload.Denomination || 1
					denomination := payload.Denomination
					if denomination.IsNull() { denomination = 1 }
					// NOTE(review): the ErrorT returned by this inner function is
					// discarded — an error while processing an input will not abort
					// the save. Confirm this matches the original async semantics.
					(func () ErrorT {
						// :: inlined async.eachOfSeries:274:5
						// for input, j := range payload.Inputs {
						for j, input := range payload.Inputs {
							// _err := (func (input inputT, j jT) ErrorT {
							_err := (func (input InputT, j int) ErrorT {
								//console.Log("j %d input %#v", j, input)
								var(
								// determineInputAddress_sync func () AddressT
								determineInputAddress_sync func () refAddressT
								)
								/**
								type := input.Type || "transfer"
								src_unit := (type == "transfer" ? input.Unit: nil)
								src_message_index := (type == "transfer" ? input.Message_index: nil)
								src_output_index := (type == "transfer" ? input.Output_index: nil)
								from_main_chain_index := (type == "witnessing" || type == "headers_commission" ? input.From_main_chain_index: nil)
								to_main_chain_index := (type == "witnessing" || type == "headers_commission" ? input.To_main_chain_index: nil)
								**/
								// normalize the per-type optional fields exactly like the JS ternaries above
								_type := input.Type
								src_unit := input.Unit
								src_message_index := input.Message_index
								src_output_index := input.Output_index
								from_main_chain_index := input.From_main_chain_index
								to_main_chain_index := input.To_main_chain_index
								if len(_type) == 0 {
									_type = "transfer"
								}
								if !(_type == "transfer") {
									src_unit = UnitT_Null
									src_message_index = -1
									src_output_index = -1
								}
								if !(_type == "witnessing" || _type == "headers_commission") {
									from_main_chain_index = MCIndexT_Null
									to_main_chain_index = MCIndexT_Null
								}
								// determineInputAddress_sync = func () AddressT {
								determineInputAddress_sync = func () refAddressT {
									if _type == "headers_commission" || _type == "witnessing" || _type == "issue" {
										// :: flattened return for return handleAddress(arrAuthorAddresses.length === 1 ? arrAuthorAddresses[0] : input.Address);
										// if len(arrAuthorAddresses) == 1 { return arrAuthorAddresses[0] }
										if len(arrAuthorAddresses) == 1 { return &arrAuthorAddresses[0] }
										// return input.Address
										return &input.Address
									}
									// hereafter, transfer
									if len(arrAuthorAddresses) == 1 {
										// :: flattened return for return handleAddress(arrAuthorAddresses[0]);
										// return arrAuthorAddresses[0]
										return &arrAuthorAddresses[0]
									}
									// :: flattened return for handleAddress(determineInputAddressFromSrcOutput(payload.asset, denomination, input));
									// return /* await */
									return determineInputAddressFromSrcOutput_sync(payload.Asset, denomination, input)
								}
								address := /* await */
								determineInputAddress_sync()
								// << flattened continuation for determineInputAddress:293:7
								// a doublespend input loses its uniqueness constraint (is_unique becomes NULL)
								bDSIs := false
								// for ds, _ := range objValidationState.arrDoubleSpendInputs {
								for _, ds := range objValidationState.ArrDoubleSpendInputs {
									// if ds.message_index == i && ds.input_index == j { bDSIs = true; break }
									if ds.message_index == MessageIndexT(i) && ds.input_index == InputIndexT(j) { bDSIs = true; break }
								}
								// is_unique := 1
								is_unique := interface{}(1)
								if bDSIs {
									is_unique = nil
								}
								conn.AddQuery("INSERT INTO inputs \n" +
									" (unit, message_index, input_index, type, \n" +
									" src_unit, src_message_index, src_output_index, \n" +
									" from_main_chain_index, to_main_chain_index, \n" +
									" denomination, amount, serial_number, \n" +
									" asset, is_unique, address) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", DBParamsT{
									objUnit.Unit,
									i,
									j,
									_type,
									// src_unit,
									src_unit.OrNull(),
									src_message_index,
									src_output_index,
									// from_main_chain_index,
									from_main_chain_index.OrNull(),
									// to_main_chain_index,
									to_main_chain_index.OrNull(),
									denomination,
									input.Amount,
									// input.Serial_number,
									input.Serial_number.OrNull(),
									// payload.Asset,
									payload.Asset.OrNull(),
									is_unique,
									*address,
								})
								// mark the consumed source output(s) as spent, per input type
								// [*SwitchStatement*]
								switch _type {
								case "transfer":
									conn.AddQuery("UPDATE outputs SET is_spent=1 WHERE unit=? AND message_index=? AND output_index=?", DBParamsT{
										src_unit,
										src_message_index,
										src_output_index,
									})
									//break
								case "headers_commission":
									fallthrough
								case "witnessing":
									table := _type + "_outputs"
									conn.AddQuery("UPDATE "+table+" SET is_spent=1 \n"+
										"WHERE main_chain_index>=? AND main_chain_index<=? AND address=?", DBParamsT{
										from_main_chain_index,
										to_main_chain_index,
										*address,
									})
									//break
								}
								// :: flattened return for cb3();
								// ** need 1 return(s) instead of 0
								// return
								return nil
								// >> flattened continuation for determineInputAddress:293:7
							})(input, j)
							if _err != nil { return _err }
						}
						return nil
					})()
					// << flattened continuation for async.forEachOfSeries:274:5
					for j := 0; j < len(payload.Outputs); j++ {
						output := payload.Outputs[j]
						// we set is_serial=1 for public payments as we check that their inputs are stable and serial before spending,
						// therefore it is impossible to have a nonserial in the middle of the chain (but possible for private payments)
						conn.AddQuery("INSERT INTO outputs \n" +
							" (unit, message_index, output_index, address, amount, asset, denomination, is_serial) VALUES(?,?,?,?,?,?,?,1)", DBParamsT{
							objUnit.Unit,
							i,
							j,
							output.Address,
							// parseInt(output.Amount),
							(output.Amount),
							// payload.Asset,
							payload.Asset.OrNull(),
							denomination,
						})
					}
					// :: flattened return for cb2();
					// ** need 1 return(s) instead of 0
					// return
					return nil
					// >> flattened continuation for async.forEachOfSeries:274:5
				})(message, i)
				if _err != nil { return _err }
			}
			return nil
		})()
	}
	// Recomputes the best parent (witness-list compatibility rule, highest
	// witnessed level winning) and cross-checks it against the validator's
	// choice; throws on disagreement, then persists it on the units row.
	updateBestParent_sync = func () ErrorT {
		/**
		rows := /* await * /
		conn.Query_sync("SELECT unit \n" +
		**/
		rcvr := db.UnitsReceiver{}
		queryParams := DBParamsT{}
		pusSql := queryParams.AddUnits(objUnit.Parent_units)
		queryParams = append(queryParams,
			objUnit.Witness_list_unit,
			objUnit.Unit,
			objUnit.Witness_list_unit,
			constants.COUNT_WITNESSES - constants.MAX_WITNESS_LIST_MUTATIONS)
		conn.MustQuery("SELECT unit \n" +
			" FROM units AS parent_units \n" +
			" WHERE unit IN(" + pusSql + ") \n" +
			" AND (witness_list_unit=? OR ( \n" +
			" SELECT COUNT(*) \n" +
			" FROM unit_witnesses \n" +
			" JOIN unit_witnesses AS parent_witnesses USING(address) \n" +
			" WHERE parent_witnesses.unit IN(parent_units.unit, parent_units.witness_list_unit) \n" +
			" AND unit_witnesses.unit IN(?, ?) \n" +
			" )>=?) \n" +
			" ORDER BY witnessed_level DESC, \n" +
			" level-witnessed_level ASC, \n" +
			" unit ASC \n" +
			" LIMIT 1", queryParams, &rcvr)
		rows := rcvr.Rows
		// << flattened continuation for conn.query:346:3
		if len(rows) != 1 {
			_core.Throw("zero or more than one best parent unit?")
		}
		my_best_parent_unit = rows[0].Unit
		if my_best_parent_unit != objValidationState.Best_parent_unit {
			// throwError("different best parents, validation: " + objValidationState.Best_parent_unit + ", writer: " + my_best_parent_unit)
			throwError(fmt.Sprintf("different best parents, validation: %s, writer: %s", objValidationState.Best_parent_unit, my_best_parent_unit))
		}
		/**
		/* await * /
		conn.Query_sync("UPDATE units SET best_parent_unit=? WHERE unit=?", DBParamsT{
			my_best_parent_unit,
			objUnit.Unit,
		})
		**/
		conn.MustExec("UPDATE units SET best_parent_unit=? WHERE unit=?", DBParamsT{
			my_best_parent_unit,
			objUnit.Unit,
		})
		// << flattened continuation for conn.query:369:5
		// :: flattened return for cb();
		// ** need 1 return(s) instead of 0
		// return
		return nil
		// >> flattened continuation for conn.query:369:5
		// >> flattened continuation for conn.query:346:3
	}
	// Maximum level among the parents, read from the cached static unit props
	// (cross-checked against the SQL MAX(level) in updateLevel_sync).
	determineMaxLevel_sync = func () LevelT {
		// max_level := 0
		max_level := LevelT(0)
		// NOTE(review): the ErrorT returned by this inner function is discarded;
		// a failure while reading a parent's static props is silently ignored.
		(func () ErrorT {
			// :: inlined async.each:376:3 !! [tbd] finish this
			// for parent_unit := range objUnit.parent_units {
			for _, parent_unit := range objUnit.Parent_units {
				_err := (func (parent_unit UnitT) ErrorT {
					props := /* await */
					// storage.readStaticUnitProps_sync(conn, parent_unit)
					storage.ReadStaticUnitProps_sync(conn, parent_unit)
					// << flattened continuation for storage.readStaticUnitProps:379:5
					// if props.level > max_level {
					// max_level = props.level
					if props.Level > max_level {
						max_level = props.Level
					}
					// :: flattened return for cb();
					// ** need 1 return(s) instead of 0
					// return
					return nil
					// >> flattened continuation for storage.readStaticUnitProps:379:5
				})(parent_unit)
				if _err != nil { return _err }
			}
			return nil
		})()
		// << flattened continuation for async.each:376:3
		// :: flattened return for handleMaxLevel(max_level);
		// ** need 0 return(s) instead of 1
		return max_level
		// >> flattened continuation for async.each:376:3
	}
	// Sets the new unit's level to 1 + max(parent levels), verifying the SQL
	// and cached values agree; updates both DB row and objNewUnitProps.
	updateLevel_sync = func () ErrorT {
		/**
		rows := /* await * /
		conn.Query_sync("SELECT MAX(level) AS max_level FROM units WHERE unit IN(?)", DBParamsT{
			objUnit.Parent_units,
		})
		**/
		rcvr := db.MaxLevelsReceiver{}
		queryParams := DBParamsT{}
		pusSql := queryParams.AddUnits(objUnit.Parent_units)
		conn.MustQuery("SELECT MAX(level) AS max_level FROM units WHERE unit IN(" + pusSql + ")", queryParams, &rcvr)
		rows := rcvr.Rows
		// << flattened continuation for conn.query:392:3
		if len(rows) != 1 {
			_core.Throw("not a single max level?")
		}
		max_level := /* await */
		determineMaxLevel_sync()
		// << flattened continuation for determineMaxLevel:395:4
		// if max_level != rows[0].max_level {
		if max_level != rows[0].Max_level {
			// throwError("different max level, sql: " + rows[0].max_level + ", props: " + max_level)
			throwError(fmt.Sprintf("different max level, sql: %d, props: %d", rows[0].Max_level, max_level))
		}
		// objNewUnitProps.level = max_level + 1
		objNewUnitProps.Level = LevelT(max_level + 1)
		/**
		/* await * /
		conn.Query_sync("UPDATE units SET level=? WHERE unit=?", DBParamsT{
			rows[0].Max_level + 1,
			objUnit.Unit,
		})
		**/
		conn.MustExec("UPDATE units SET level=? WHERE unit=?", DBParamsT{
			rows[0].Max_level + 1,
			objUnit.Unit,
		})
		// << flattened continuation for conn.query:399:5
		// :: flattened return for cb();
		// ** need 1 return(s) instead of 0
		// return
		return nil
		// >> flattened continuation for conn.query:399:5
		// >> flattened continuation for determineMaxLevel:395:4
		// >> flattened continuation for conn.query:392:3
	}
	// Dispatches to updateWitnessedLevelByWitnesslist_sync with either the
	// unit's explicit witness list or the one inherited via witness_list_unit.
	updateWitnessedLevel_sync = func () ErrorT {
		// if objUnit.Witnesses {
		if objUnit.Witnesses != nil {
			// if len(objUnit.Witnesses) != 0 {
			// :: flattened return for cb(updateWitnessedLevelByWitnesslist(objUnit.Witnesses));
			// return /* await */
			// updateWitnessedLevelByWitnesslist_sync(objUnit.Witnesses)
			return updateWitnessedLevelByWitnesslist_sync(objUnit.Witnesses)
		} else {
			arrWitnesses := /* await */
			// storage.readWitnessList_sync(conn, objUnit.Witness_list_unit)
			storage.ReadWitnessList_sync(conn, objUnit.Witness_list_unit, false)
			// << flattened continuation for storage.readWitnessList:411:4
			// :: flattened return for cb(updateWitnessedLevelByWitnesslist(arrWitnesses));
			// return /* await */
			// updateWitnessedLevelByWitnesslist_sync(arrWitnesses)
			return updateWitnessedLevelByWitnesslist_sync(arrWitnesses)
			// >> flattened continuation for storage.readWitnessList:411:4
		}
	}
	// The level at which we collect at least 7 distinct witnesses while walking up the main chain from our unit.
	// The unit itself is not counted even if it is authored by a witness
	updateWitnessedLevelByWitnesslist_sync = func (arrWitnesses AddressesT) ErrorT {
		var(
		// setWitnessedLevel func (witnessed_level witnessed_levelT)
		setWitnessedLevel func (witnessed_level LevelT) ErrorT
		// addWitnessesAndGoUp func (start_unit UnitT) (int, UnitT)
		addWitnessesAndGoUp func (start_unit UnitT) ErrorT
		)
		arrCollectedWitnesses := AddressesT{}
		// Persists the computed witnessed level after cross-checking it against
		// the validator's value; also mirrors it into objNewUnitProps.
		// setWitnessedLevel = func (witnessed_level witnessed_levelT) {
		setWitnessedLevel = func (witnessed_level LevelT) ErrorT {
			profiler.Start()
			if witnessed_level != objValidationState.Witnessed_level {
				// throwError("different witnessed levels, validation: " + objValidationState.witnessed_level + ", writer: " + witnessed_level)
				throwError(fmt.Sprintf("different witnessed levels, validation: %d, writer: %d", objValidationState.Witnessed_level, witnessed_level))
			}
			// objNewUnitProps.witnessed_level = witnessed_level
			objNewUnitProps.Witnessed_level = witnessed_level
			/**
			/* await * /
			conn.Query_sync("UPDATE units SET witnessed_level=? WHERE unit=?", DBParamsT{
				witnessed_level,
				objUnit.Unit,
			})
			**/
			conn.MustExec("UPDATE units SET witnessed_level=? WHERE unit=?", DBParamsT{
				witnessed_level,
				objUnit.Unit,
			})
			// << flattened continuation for conn.query:426:4
			profiler.Stop("write-wl-update")
			// :: flattened return for cb();
			// ** need 1 return(s) instead of 0
			// return
			return nil
			// >> flattened continuation for conn.query:426:4
		}
		// Walks up the best-parent chain collecting distinct witness authors;
		// stops (recursion base case) at genesis or once a majority is found.
		// addWitnessesAndGoUp = func (start_unit UnitT) (int, UnitT) {
		addWitnessesAndGoUp = func (start_unit UnitT) ErrorT {
			profiler.Start()
			props := /* await */
			storage.ReadStaticUnitProps_sync(conn, start_unit)
			// << flattened continuation for storage.readStaticUnitProps:434:4
			profiler.Stop("write-wl-select-bp")
			// best_parent_unit := props.best_parent_unit
			best_parent_unit := props.Best_parent_unit
			// level := props.level
			level := props.Level
			// if level == nil {
			if level.IsNull() {
				// if level == -1 {
				_core.Throw("null level in updateWitnessedLevel")
			}
			if level == 0 {
				// genesis
				// setWitnessedLevel(0)
				// return
				return setWitnessedLevel(0)
			}
			profiler.Start()
			arrAuthors := /* await */
			storage.ReadUnitAuthors_sync(conn, start_unit)
			// << flattened continuation for storage.readUnitAuthors:443:5
			profiler.Stop("write-wl-select-authors")
			profiler.Start()
			// for i := 0; i < len(arrAuthors); i++ {
			// address := arrAuthors[i]
			for _, address := range(arrAuthors) {
				// if arrWitnesses.indexOf(address) != - 1 && arrCollectedWitnesses.indexOf(address) == - 1 {
				if arrWitnesses.IndexOf(address) != - 1 && arrCollectedWitnesses.IndexOf(address) == - 1 {
					arrCollectedWitnesses = append(arrCollectedWitnesses, address)
				}
			}
			profiler.Stop("write-wl-search")
			if len(arrCollectedWitnesses) >= constants.MAJORITY_OF_WITNESSES {
				// setWitnessedLevel(level)
				// return
				return setWitnessedLevel(level)
			}
			// addWitnessesAndGoUp(best_parent_unit)
			return addWitnessesAndGoUp(best_parent_unit)
			// >> flattened continuation for storage.readUnitAuthors:443:5
			// >> flattened continuation for storage.readStaticUnitProps:434:4
		}
		profiler.Stop("write-update")
		// addWitnessesAndGoUp(my_best_parent_unit)
		return addWitnessesAndGoUp(my_best_parent_unit)
	}
	// [tbd] move variable declaration up before function declarations
	// objNewUnitProps := [*ObjectExpression*]
	// Everything below runs under the global "write" lock: register the unit
	// in the unstable-units cache, execute the queued queries inside a
	// transaction, run the post-write ops, then commit or roll back.
	unlock := /* await */
	// mutex.lock_sync({*ArrayExpression*})
	mutex.Lock_sync([]string{"write"})
	// << flattened continuation for mutex.lock:477:2
	// console.Log("got lock to write " + objUnit.unit)
	console.Log("got lock to write %s", objUnit.Unit)
	// storage.assocUnstableUnits[objUnit.unit] = objNewUnitProps
	storage.AssocUnstableUnits[objUnit.Unit] = &objNewUnitProps
	/* await */
	addInlinePaymentQueries_sync()
	// << flattened continuation for addInlinePaymentQueries:480:3
	/**
	(func () ErrorT {
		// :: inlined async.series:481:4
		// for _f := range arrQueries {
		for _, _f := range arrQueries {
			if _err := _f() ; _err != nil { return _err }
		}
		return nil
	})()
	**/
	conn.BeginTransaction()
	conn.ExecuteAddedQuerys()
	// << flattened continuation for async.series:481:4
	profiler.Stop("write-raw")
	profiler.Start()
	// Post-write operations, executed in order; the first error aborts the
	// rest and triggers ROLLBACK below.
	arrOps := AsyncFunctorsT{}
	// if objUnit.parent_units {
	if objUnit.Parent_units != nil {
		// if 0 < len(objUnit.Parent_units) {
		// if ! conf.bLight {
		if ! conf.IsLight {
			// arrOps = append(arrOps, updateBestParent)
			arrOps = append(arrOps, updateBestParent_sync)
			// arrOps = append(arrOps, updateLevel)
			arrOps = append(arrOps, updateLevel_sync)
			// arrOps = append(arrOps, updateWitnessedLevel)
			arrOps = append(arrOps, updateWitnessedLevel_sync)
			arrOps = append(arrOps, func () ErrorT {
				// console.Log("updating MC after adding " + objUnit.unit)
				console.Log("updating MC after adding %s", objUnit.Unit)
				/* await */
				// main_chain.updateMainChain_sync(conn, nil, objUnit.unit)
				main_chain.UpdateMainChain_sync(conn, UnitT_Null, objUnit.Unit)
				// << flattened continuation for main_chain.updateMainChain:492:8
				// :: flattened return for cb();
				// ** need 1 return(s) instead of 0
				// return
				return nil
				// >> flattened continuation for main_chain.updateMainChain:492:8
			})
		}
		// if preCommitCallback {
		if preCommitCallback_sync != nil {
			arrOps = append(arrOps, func () ErrorT {
				console.Log("executing pre-commit callback")
				/* await */
				preCommitCallback_sync(conn)
				// << flattened continuation for preCommitCallback:498:8
				// :: flattened return for cb();
				// ** need 1 return(s) instead of 0
				// return
				return nil
				// >> flattened continuation for preCommitCallback:498:8
			})
		}
	}
	err := (func () ErrorT {
		// :: inlined async.series:501:5
		// for _f := range arrOps {
		for _, _f := range arrOps {
			if _err := _f() ; _err != nil { return _err }
		}
		return nil
	})()
	// << flattened continuation for async.series:501:5
	// [!!!] force rollback
	//!! if err == nil { err = errors.New("forced ROLLBACK") }
	profiler.Start()
	/* await */
	// conn.query_sync((err ? "ROLLBACK": "COMMIT"))
	if err == nil {
		// conn.Query_sync("COMMIT")
		//-- conn.MustExec("COMMIT", nil)
		conn.Commit()
	} else {
		// conn.Query_sync("ROLLBACK")
		//-- conn.MustExec("ROLLBACK", nil)
		conn.Rollback()
	}
	// << flattened continuation for conn.query:503:6
	// conn.release()
	conn.Release()
	// console.Log((err ? err + ", therefore rolled back unit ": "committed unit ") + objUnit.unit)
	if err == nil {
		// console.Log("committed unit " + objUnit.unit)
		console.Log("committed unit %s", objUnit.Unit)
	} else {
		// console.Log(err + ", therefore rolled back unit " + objUnit.unit)
		console.Log("%s, therefore rolled back unit %s", err, objUnit.Unit)
	}
	profiler.Stop("write-commit")
	profiler.Increment()
	// On failure the cached unstable-units state is stale (the DB was rolled
	// back), so rebuild it before releasing the write lock.
	// if err {
	if err != nil {
		/* await */
		storage.ResetUnstableUnits_sync()
		// << flattened continuation for storage.resetUnstableUnits:509:8
		unlock()
		// >> flattened continuation for storage.resetUnstableUnits:509:8
	} else {
		unlock()
	}
	// if ! err {
	if err == nil {
		eventBus.Emit(fmt.Sprintf("saved_unit-%s", objUnit.Unit), objJoint)
	}
	// NOTE(review): `if true` (the flattened `if onDone`) always returns here,
	// so the count_writes++ / updateSqliteStats tail below is unreachable —
	// confirm this matches the intended port of the JS `onDone` branch.
	// if onDone {
	if true {
		// :: flattened return for onDone(err);
		return err
	}
	count_writes++
	if conf.Storage == "sqlite" {
		updateSqliteStats()
	}
	// >> flattened continuation for conn.query:503:6
	// >> flattened continuation for async.series:501:5
	// >> flattened continuation for async.series:481:4
	// >> flattened continuation for addInlinePaymentQueries:480:3
	// >> flattened continuation for mutex.lock:477:2
	// >> flattened continuation for db.takeConnectionFromPool:24:1
	return nil
}
// updateSqliteStats is currently a no-op stub. The real implementation
// (periodically running ANALYZE to refresh SQLite's query-planner
// statistics) was machine-converted from JS but does not yet compile;
// it is preserved, disabled, inside the _readCountOfAnalyzedUnits_sync
// raw-string constant below.
func updateSqliteStats() {
}
// _readCountOfAnalyzedUnits_sync parks, as an inert raw string, the
// machine-converted JS implementations of readCountOfAnalyzedUnits_sync
// and the full updateSqliteStats (the ANALYZE-based query-planner
// refresh). The code was stringified because it does not yet compile as
// Go (Date.now, parseInt, untyped counters, broken "/* await * /"
// comment markers). It is reference material only — do not edit the
// string contents, and keep it in sync with the stub above if revived.
const _readCountOfAnalyzedUnits_sync = `
func readCountOfAnalyzedUnits_sync() {
	if count_units_in_prev_analyze {
		// :: flattened return for return handleCount(count_units_in_prev_analyze);
		// ** need 0 return(s) instead of 1
		return count_units_in_prev_analyze
	}
	rows := /* await */
	db.Query_sync("SELECT * FROM sqlite_master WHERE type='table' AND name='sqlite_stat1'")
	// << flattened continuation for db.query:531:1
	if len(rows) == 0 {
		// :: flattened return for return handleCount(0);
		// ** need 0 return(s) instead of 1
		return 0
	}
	// rows := /* await */
	rows = /* await */
	db.Query_sync("SELECT stat FROM sqlite_stat1 WHERE tbl='units' AND idx='sqlite_autoindex_units_1'")
	// << flattened continuation for db.query:534:2
	if len(rows) != 1 {
		console.Log("no stat for sqlite_autoindex_units_1")
		// :: flattened return for return handleCount(0);
		// ** need 0 return(s) instead of 1
		return 0
	}
	// :: flattened return for handleCount(parseInt(rows[0].stat.split(' ')[0]));
	// ** need 0 return(s) instead of 1
	return parseInt(rows[0].stat.split(" ")[0])
	// >> flattened continuation for db.query:534:2
	// >> flattened continuation for db.query:531:1
}
start_time := 0
prev_time := 0
// update stats for query planner
func updateSqliteStats() {
	if count_writes == 1 {
		start_time = Date.now()
		prev_time = Date.now()
	}
	if count_writes % 100 != 0 {
		return
	}
	if count_writes % 1000 == 0 {
		total_time := Date.now() - start_time / 1000
		recent_time := Date.now() - prev_time / 1000
		recent_tps := 1000 / recent_time
		avg_tps := count_writes / total_time
		prev_time = Date.now()
	}
	rows := /* await */
	db.query_sync("SELECT MAX(rowid) AS count_units FROM units")
	// << flattened continuation for db.query:562:1
	count_units := rows[0].count_units
	if count_units > 500000 {
		// the db is too big
		return
	}
	count_analyzed_units := /* await * /
	readCountOfAnalyzedUnits_sync()
	// << flattened continuation for readCountOfAnalyzedUnits:566:2
	console.Log("count analyzed units: %d", count_analyzed_units)
	if count_units < 2 * count_analyzed_units {
		return
	}
	count_units_in_prev_analyze = count_units
	console.Log("will update sqlite stats")
	/* await * /
	db.query_sync("ANALYZE")
	// << flattened continuation for db.query:572:3
	/* await * /
	db.query_sync("ANALYZE sqlite_master")
	// << flattened continuation for db.query:573:4
	console.Log("sqlite stats updated")
	// >> flattened continuation for db.query:573:4
	// >> flattened continuation for db.query:572:3
	// >> flattened continuation for readCountOfAnalyzedUnits:566:2
	// >> flattened continuation for db.query:562:1
}
`
// throwError reports a fatal validation failure via _core.Throw.
//
// The JS original chose between throwing (Node, "typeof window ==
// 'undefined'") and emitting a non-fatal event (browser). After
// conversion the condition is the constant true, so only the throwing
// branch is reachable; the eventBus branch is dead code kept to
// document the original browser behavior.
func throwError(msg string) {
	// if typeof window == "undefined" {
	if true {
		_core.Throw(msg)
	} else {
		// unreachable: browser-only path from the JS original
		// eventBus.emit("nonfatal_error", msg, [*NewExpression*])
		eventBus.Emit("nonfatal_error", msg, nil)
	}
}
//exports.saveJoint = saveJoint
// converted golang end
|
/////////////////////////////////////////////////////////////////////////////
// Name: src/osx/carbon/font.cpp
// Purpose: wxFont class
// Author: <NAME>
// Modified by:
// Created: 1998-01-01
// Copyright: (c) <NAME>
// Licence: wxWindows licence
/////////////////////////////////////////////////////////////////////////////
#include "wx/wxprec.h"
#include "wx/font.h"
#ifndef WX_PRECOMP
#include "wx/string.h"
#include "wx/utils.h"
#include "wx/intl.h"
#include "wx/gdicmn.h"
#include "wx/log.h"
#include "wx/math.h"
#endif
#include "wx/fontutil.h"
#include "wx/graphics.h"
#include "wx/settings.h"
#include "wx/tokenzr.h"
#include "wx/osx/private.h"
#include "wx/osx/private/available.h"
#include <map>
#include <string>
#include <float.h> // for FLT_MAX
#define TRACE_CTFONT "ctfont"
// Reference-counted implementation data behind wxFont.
//
// The portable font description lives in m_info (wxNativeFontInfo);
// the Core Text / Core Graphics objects derived from it (m_ctFont,
// m_ctFontAttributes, m_cgFont) are created lazily by Alloc() and are
// invalidated by Free() whenever a setter changes the description.
class WXDLLEXPORT wxFontRefData : public wxGDIRefData
{
public:
    wxFontRefData(const wxFontInfo& info = wxFontInfo());

    wxFontRefData(const wxFontRefData& data);

    wxFontRefData(const wxNativeFontInfo& info)
        : m_info(info)
    {
    }

    wxFontRefData(CTFontRef font);

    // Plain forwarding accessors for the underlying wxNativeFontInfo.
    double GetFractionalPointSize() const { return m_info.GetFractionalPointSize(); }

    wxFontFamily GetFamily() const { return m_info.GetFamily(); }

    wxFontStyle GetStyle() const { return m_info.GetStyle(); }

    int GetNumericWeight() const { return m_info.GetNumericWeight(); }

    bool GetUnderlined() const { return m_info.GetUnderlined(); }

    bool GetStrikethrough() const { return m_info.GetStrikethrough(); }

    wxString GetFaceName() const { return m_info.GetFaceName(); }

    wxFontEncoding GetEncoding() const { return m_info.GetEncoding(); }

    bool IsFixedWidth() const;

    // Accessors for the native objects; these trigger lazy creation
    // (see AllocIfNeeded()).
    CTFontRef OSXGetCTFont() const;

    CFDictionaryRef OSXGetCTFontAttributes() const;

    CGFontRef OSXGetCGFont() const;

    const wxNativeFontInfo& GetNativeFontInfo() const;

    // Every setter below drops the cached native objects via Free(),
    // but only when the value actually changes, so unchanged
    // assignments stay cheap.
    void SetFractionalPointSize(double size)
    {
        if (GetFractionalPointSize() != size)
        {
            m_info.SetFractionalPointSize(size);
            Free();
        }
    }

    void SetFamily(wxFontFamily family)
    {
        if (m_info.GetFamily() != family)
        {
            m_info.SetFamily(family);
            Free();
        }
    }

    void SetStyle(wxFontStyle style)
    {
        if (m_info.GetStyle() != style)
        {
            m_info.SetStyle(style);
            Free();
        }
    }

    void SetNumericWeight(int weight)
    {
        if (m_info.GetNumericWeight() != weight)
        {
            m_info.SetNumericWeight(weight);
            Free();
        }
    }

    void SetStrikethrough(bool s)
    {
        if (m_info.GetStrikethrough() != s)
        {
            m_info.SetStrikethrough(s);
            Free();
        }
    }

    void SetUnderlined(bool u)
    {
        if (m_info.GetUnderlined() != u)
        {
            m_info.SetUnderlined(u);
            Free();
        }
    }

    void SetFaceName(const wxString& facename)
    {
        if (m_info.GetFaceName() != facename)
        {
            m_info.SetFaceName(facename);
            Free();
        }
    }

    void SetEncoding(wxFontEncoding encoding)
    {
        if (m_info.GetEncoding() != encoding)
        {
            m_info.SetEncoding(encoding);
            Free();
        }
    }

    void Free();

    void Alloc();

protected:
    void SetFont(CTFontRef font);
    void AllocIfNeeded() const;

    // Lazily created native representations of m_info.
    wxCFRef<CTFontRef> m_ctFont;
    wxCFMutableDictionaryRef m_ctFontAttributes;
    wxCFRef<CGFontRef> m_cgFont;
    // The portable description; the single source of truth.
    wxNativeFontInfo m_info;
};
// ============================================================================
// implementation
// ============================================================================
namespace
{

// Mapping between wx numeric font weights (0..1000 in steps of 100) and
// Core Text weight trait values (-1.0 .. 1.0), one entry per step.
//
// Fix: the table has exactly 11 initializers but was declared with
// kCTWeightsCount = 12, so a spurious value-initialized (0.0) twelfth
// element existed and CTWeightToWX() compared against it in its last
// iteration. The count now matches the initializer list; WXWeightToCT()
// still indexes at most gCTWeights[10].
const int kCTWeightsCount = 11;

static CGFloat gCTWeights[kCTWeightsCount] = {
    -1.000, // 0
    -0.800, // 100, NSFontWeightUltraLight
    -0.600, // 200, NSFontWeightThin
    -0.400, // 300, NSFontWeightLight
    0.000, // 400, NSFontWeightRegular
    0.230, // 500, NSFontWeightMedium
    0.300, // 600, NSFontWeightSemibold
    0.400, // 700, NSFontWeightBold
    0.560, // 800, NSFontWeightHeavy
    0.620, // 900, NSFontWeightBlack
    0.750, // 1000
};

// Convert a Core Text weight trait value into the nearest wx numeric
// weight (nearest-neighbour search over the ascending gCTWeights table).
int CTWeightToWX(CGFloat weight)
{
    for (int i = 0; i < kCTWeightsCount - 1; ++i)
    {
        // First step whose distance to 'weight' is smaller than the
        // following step's distance wins.
        if ( (weight - gCTWeights[i]) < (gCTWeights[i+1]-weight) )
            return i * 100;
    }
    return 1000;
}

// Convert a wx numeric weight (clamped to 0..1000) into the Core Text
// weight trait value.
CGFloat WXWeightToCT(int w)
{
    if (w < 0)
        w = 0;
    else if (w > 1000)
        w = 1000;
    return gCTWeights[w / 100];
}

// Default face name used for each generic wx font family.
wxString FamilyToFaceName(wxFontFamily family)
{
    wxString faceName;

    switch (family)
    {
        case wxFONTFAMILY_DEFAULT:
            faceName = wxT("Lucida Grande");
            break;

        case wxFONTFAMILY_SCRIPT:
        case wxFONTFAMILY_ROMAN:
        case wxFONTFAMILY_DECORATIVE:
            faceName = wxT("Times");
            break;

        case wxFONTFAMILY_SWISS:
            faceName = wxT("Helvetica");
            break;

        case wxFONTFAMILY_MODERN:
        case wxFONTFAMILY_TELETYPE:
            faceName = wxT("Courier");
            break;

        default:
            faceName = wxT("Times");
            break;
    }

    return faceName;
}

} // anonymous namespace
// ----------------------------------------------------------------------------
// wxFontRefData
// ----------------------------------------------------------------------------
#define M_FONTDATA ((wxFontRefData*)m_refData)
// Copy constructor: shares the portable description and the already
// created native objects with the source (the wxCFRef members retain
// their payload, so both copies may safely coexist).
wxFontRefData::wxFontRefData(const wxFontRefData& data)
    : wxGDIRefData(),
      m_ctFont(data.m_ctFont),
      m_ctFontAttributes(data.m_ctFontAttributes),
      m_cgFont(data.m_cgFont),
      m_info(data.m_info)
{
}
// Build ref data from a wxFontInfo description. An explicit face name
// takes precedence over the generic family; every Set*() call also
// invalidates any cached native objects (no-ops here since none exist
// yet, but keeps the invariants uniform).
wxFontRefData::wxFontRefData(const wxFontInfo& info)
{
    m_info.Init();
    if ( info.HasFaceName() )
        SetFaceName(info.GetFaceName());
    else
        SetFamily(info.GetFamily());
    m_info.SetSizeOrDefault(info.GetFractionalPointSize());
    SetNumericWeight(info.GetNumericWeight());
    SetStyle(info.GetStyle());
    SetUnderlined(info.IsUnderlined());
    SetStrikethrough(info.IsStrikethrough());
    SetEncoding(info.GetEncoding());
}

// Drop the cached Core Text / Core Graphics objects; they are recreated
// on demand by Alloc().
void wxFontRefData::Free()
{
    m_ctFont.reset();
    m_ctFontAttributes.reset();
    m_cgFont.reset();
}

// Build ref data directly from an existing CTFont, deriving the
// portable description from the native font.
wxFontRefData::wxFontRefData(CTFontRef font)
{
    SetFont(font);
    m_info.InitFromFont(font);
}
// Adopt 'font' (retained) as the native font and rebuild the attribute
// dictionary and the CGFont derived from it.
void wxFontRefData::SetFont(CTFontRef font)
{
    m_ctFont.reset(wxCFRetain(font));

    wxCFMutableDictionaryRef dict;
    dict.SetValue(kCTFontAttributeName, m_ctFont.get());
    // Text drawn with these attributes takes its color from the current
    // graphics context instead of a fixed color in the dictionary.
    dict.SetValue(kCTForegroundColorFromContextAttributeName, kCFBooleanTrue);
    m_ctFontAttributes = dict;

    m_cgFont = CTFontCopyGraphicsFont(m_ctFont, NULL);
}
// Shear transform used to fake an oblique look (11 degree slant) for
// fonts that have no real italic variant (see Alloc()).
static const CGAffineTransform kSlantTransform = CGAffineTransformMake(1, 0, tan(wxDegToRad(11)), 1, 0, 0);

namespace
{

// One entry of the static font cache used by wxFontRefData::Alloc().
struct CachedFontEntry
{
    CachedFontEntry()
    {
        used = false;
    }

    wxCFRef<CTFontRef> font;
    wxCFMutableDictionaryRef fontAttributes;
    wxCFRef<CGFontRef> cgFont;
    bool used; // false until the entry has been filled in
};

} // anonymous namespace

// Lazily create the native font objects on first access; const because
// it is called from the const OSXGet*() accessors.
void wxFontRefData::AllocIfNeeded() const
{
    if (!m_ctFont)
        const_cast<wxFontRefData *>(this)->Alloc();
}
// Create m_ctFont / m_ctFontAttributes / m_cgFont from m_info.
//
// A two-level static cache is keyed on the font description: one key
// includes the point size, the other does not, so an already-known face
// requested at a new size only needs CTFontCreateCopyWithAttributes()
// instead of a full descriptor-based creation.
void wxFontRefData::Alloc()
{
    wxCHECK_RET(m_info.GetPointSize() > 0, wxT("Point size should not be zero."));

    // use font caching, we cache a font with a certain size and a font with just any size for faster creation
    wxString lookupnameNoSize = wxString::Format("%s_%d_%d", m_info.GetPostScriptName(), (int)m_info.GetStyle(), m_info.GetNumericWeight());
    wxString lookupnameWithSize = wxString::Format("%s_%d_%d_%.2f", m_info.GetPostScriptName(), (int)m_info.GetStyle(), m_info.GetNumericWeight(), m_info.GetFractionalPointSize());

    static std::map<wxString, CachedFontEntry> fontcache;
    CachedFontEntry& entryWithSize = fontcache[lookupnameWithSize];
    if (entryWithSize.used)
    {
        // exact hit: this face at this size was created before
        m_ctFont = entryWithSize.font;
        m_ctFontAttributes = entryWithSize.fontAttributes;
    }
    else
    {
        CachedFontEntry& entryNoSize = fontcache[lookupnameNoSize];
        if ( entryNoSize.used )
        {
            // same face at another size: derive the sized font cheaply
            m_ctFont = CTFontCreateCopyWithAttributes(entryNoSize.font, m_info.GetPointSize(), NULL, NULL);
            m_ctFontAttributes = entryNoSize.fontAttributes.CreateCopy();
            m_ctFontAttributes.SetValue(kCTFontAttributeName,m_ctFont.get());
            m_cgFont = CTFontCopyGraphicsFont(m_ctFont, NULL);
            entryWithSize.font = m_ctFont;
            entryWithSize.cgFont = m_cgFont;
            entryWithSize.fontAttributes = m_ctFontAttributes;
            entryWithSize.used = true;
        }
        else
        {
            // full cache miss: create the font from the descriptor

            // emulate slant if necessary, the font descriptor itself carries that information,
            // while the weight can only be determined properly from the generated font itself
            const CGAffineTransform* remainingTransform = NULL;

            if ( m_info.GetStyle() != wxFONTSTYLE_NORMAL && m_info.GetCTSlant(m_info.GetCTFontDescriptor()) < 0.01 )
                remainingTransform = &kSlantTransform;

            wxCFRef<CTFontRef> font = CTFontCreateWithFontDescriptor(m_info.GetCTFontDescriptor(), m_info.GetPointSize(), remainingTransform);

            // emulate weigth if necessary
            int difference = m_info.GetNumericWeight() - CTWeightToWX(wxNativeFontInfo::GetCTWeight(font));

            SetFont(font); // Sets m_ctFont, m_ctFontAttributes, m_cgFont

            if ( difference != 0 )
            {
                if ( difference > 0 )
                {
                    // simulate a bolder weight by stroking the outlines
                    // (negative width = stroke and fill)
                    // TODO: find better heuristics to determine target stroke width
                    CGFloat width = 0;
                    width = -1.0 * (1 + (difference / 100));
                    m_ctFontAttributes.SetValue(kCTStrokeWidthAttributeName, width);
                }
                else
                {
                    // we cannot emulate lighter fonts
                }
            }

            entryWithSize.font = m_ctFont;
            entryWithSize.cgFont = m_cgFont;
            entryWithSize.fontAttributes = m_ctFontAttributes;
            entryWithSize.used = true;

            entryNoSize.font = m_ctFont;
            entryNoSize.fontAttributes = m_ctFontAttributes;
            // no reason to copy cgFont as will have to be regenerated anyway
            entryNoSize.used = true;
        }
    }

    // always (re)generate the CGFont from the possibly cached CTFont
    m_cgFont.reset(CTFontCopyGraphicsFont(m_ctFont, NULL));
}
// A font is fixed-width if its symbolic traits carry the monospace
// flag. NOTE(review): unlike the OSXGet*() accessors this does not call
// AllocIfNeeded(); callers appear to realize the font first (see
// wxFont::IsFixedWidth) — confirm before relying on it elsewhere.
bool wxFontRefData::IsFixedWidth() const
{
    CTFontSymbolicTraits traits = CTFontGetSymbolicTraits(m_ctFont);
    return (traits & kCTFontMonoSpaceTrait) != 0;
}

// The following accessors lazily create the native objects on demand.
CTFontRef wxFontRefData::OSXGetCTFont() const
{
    AllocIfNeeded();
    return m_ctFont;
}

CFDictionaryRef wxFontRefData::OSXGetCTFontAttributes() const
{
    AllocIfNeeded();
    return m_ctFontAttributes;
}

CGFontRef wxFontRefData::OSXGetCGFont() const
{
    AllocIfNeeded();
    return m_cgFont;
}

const wxNativeFontInfo& wxFontRefData::GetNativeFontInfo() const
{
    AllocIfNeeded();
    return m_info;
}
// ----------------------------------------------------------------------------
// wxFont
// ----------------------------------------------------------------------------
// Recreate the font from a native font description.
bool wxFont::Create(const wxNativeFontInfo& info)
{
    UnRef();

    m_refData = new wxFontRefData(info);
    RealizeResource();

    return true;
}

// Construct one of the standard macOS UI fonts through
// CTFontCreateUIFontForLanguage(); size 0.0 selects the default size
// for the given UI font type.
wxFont::wxFont(wxOSXSystemFont font)
{
    wxASSERT(font != wxOSX_SYSTEM_FONT_NONE);
    // fall back to the plain system font for unknown values
    CTFontUIFontType uifont = kCTFontSystemFontType;
    switch (font)
    {
        case wxOSX_SYSTEM_FONT_NORMAL:
            uifont = kCTFontSystemFontType;
            break;
        case wxOSX_SYSTEM_FONT_BOLD:
            uifont = kCTFontEmphasizedSystemFontType;
            break;
        case wxOSX_SYSTEM_FONT_SMALL:
            uifont = kCTFontSmallSystemFontType;
            break;
        case wxOSX_SYSTEM_FONT_SMALL_BOLD:
            uifont = kCTFontSmallEmphasizedSystemFontType;
            break;
        case wxOSX_SYSTEM_FONT_MINI:
            uifont = kCTFontMiniSystemFontType;
            break;
        case wxOSX_SYSTEM_FONT_MINI_BOLD:
            uifont = kCTFontMiniEmphasizedSystemFontType;
            break;
        case wxOSX_SYSTEM_FONT_LABELS:
            uifont = kCTFontLabelFontType;
            break;
        case wxOSX_SYSTEM_FONT_VIEWS:
            uifont = kCTFontViewsFontType;
            break;
        case wxOSX_SYSTEM_FONT_FIXED:
            uifont = kCTFontUIFontUserFixedPitch;
            break;
        default:
            break;
    }
    wxCFRef<CTFontRef> ctfont(CTFontCreateUIFontForLanguage(uifont, 0.0, NULL));
    m_refData = new wxFontRefData(ctfont);
}
#if wxOSX_USE_COCOA
// NSFont is toll-free bridged to CTFontRef.
wxFont::wxFont(WX_NSFont nsfont)
{
    m_refData = new wxFontRefData((CTFontRef)nsfont);
}
#endif

wxFont::wxFont(CTFontRef font)
{
    m_refData = new wxFontRefData(font);
}

// Construct from a serialized description produced by
// wxNativeFontInfo::ToString(); an unparsable string leaves the font
// invalid (no ref data).
wxFont::wxFont(const wxString& fontdesc)
{
    wxNativeFontInfo info;
    if (info.FromString(fontdesc))
        (void)Create(info);
}

wxFont::wxFont(const wxFontInfo& info)
{
    m_refData = new wxFontRefData(info);
    // a pixel size overrides the point size stored in the ref data
    if ( info.IsUsingSizeInPixels() )
        SetPixelSize(info.GetPixelSize());
}

// Legacy int-based constructor kept for backwards compatibility.
wxFont::wxFont(int size,
               int family,
               int style,
               int weight,
               bool underlined,
               const wxString& face,
               wxFontEncoding encoding)
{
    (void)Create(size, (wxFontFamily)family, (wxFontStyle)style,
                 (wxFontWeight)weight, underlined, face, encoding);
}

// Legacy creation path: translate the individual parameters into a
// wxFontInfo via InfoFromLegacyParams().
bool wxFont::Create(int pointSize,
                    wxFontFamily family,
                    wxFontStyle style,
                    wxFontWeight weight,
                    bool underlined,
                    const wxString& faceName,
                    wxFontEncoding encoding)
{
    m_refData = new wxFontRefData(InfoFromLegacyParams(pointSize, family,
                                                       style, weight, underlined,
                                                       faceName, encoding));

    return true;
}

wxFont::~wxFont()
{
}

void wxFont::DoSetNativeFontInfo(const wxNativeFontInfo& info)
{
    UnRef();
    m_refData = new wxFontRefData(info);
}
// Force creation of the native font; non-NULL CTFont means success.
bool wxFont::RealizeResource()
{
    return OSXGetCTFont();
}

// All setters below unshare the ref data first (copy-on-write via
// AllocExclusive()) and then forward to wxFontRefData.
void wxFont::SetEncoding(wxFontEncoding encoding)
{
    AllocExclusive();

    M_FONTDATA->SetEncoding(encoding);
}

wxGDIRefData* wxFont::CreateGDIRefData() const
{
    return new wxFontRefData;
}

wxGDIRefData* wxFont::CloneGDIRefData(const wxGDIRefData* data) const
{
    return new wxFontRefData(*static_cast<const wxFontRefData*>(data));
}

void wxFont::SetFractionalPointSize(double pointSize)
{
    AllocExclusive();

    M_FONTDATA->SetFractionalPointSize(pointSize);
}

void wxFont::SetFamily(wxFontFamily family)
{
    AllocExclusive();

    M_FONTDATA->SetFamily(family);
}

void wxFont::SetStyle(wxFontStyle style)
{
    AllocExclusive();

    M_FONTDATA->SetStyle(style);
}

void wxFont::SetNumericWeight(int weight)
{
    AllocExclusive();

    M_FONTDATA->SetNumericWeight(weight);
}

bool wxFont::SetFaceName(const wxString& faceName)
{
    AllocExclusive();

    M_FONTDATA->SetFaceName(faceName);

    // the base class verifies that the face name is actually available
    return wxFontBase::SetFaceName(faceName);
}

void wxFont::SetUnderlined(bool underlined)
{
    AllocExclusive();

    M_FONTDATA->SetUnderlined(underlined);
}

void wxFont::SetStrikethrough(bool strikethrough)
{
    AllocExclusive();

    M_FONTDATA->SetStrikethrough(strikethrough);
}
// ----------------------------------------------------------------------------
// accessors
// ----------------------------------------------------------------------------
// TODO: insert checks everywhere for M_FONTDATA == NULL!
// Point size as a double; 0 for an invalid font.
double wxFont::GetFractionalPointSize() const
{
    wxCHECK_MSG(IsOk(), 0, wxT("invalid font"));

    return M_FONTDATA->GetFractionalPointSize();
}
// Measure the pixel extent of the glyph "g" with this font on a
// measuring-only graphics context.
wxSize wxFont::GetPixelSize() const
{
#if wxUSE_GRAPHICS_CONTEXT
    // TODO: consider caching the value
    wxGraphicsContext* dc = wxGraphicsContext::CreateFromNative((CGContextRef)NULL);
    dc->SetFont(*this, *wxBLACK);
    // Fix: the original "wxDouble width, height = 0;" only initialized
    // 'height', leaving 'width' indeterminate if GetTextExtent() did
    // not write it; initialize both.
    wxDouble width = 0, height = 0;
    dc->GetTextExtent(wxT("g"), &width, &height, NULL, NULL);
    delete dc;
    return wxSize((int)width, (int)height);
#else
    return wxFontBase::GetPixelSize();
#endif
}
// All accessors below validate the font first and then forward to the
// ref data; the returned defaults on an invalid font are the wx
// "invalid" sentinels (wxFONTSTYLE_MAX, wxFONTWEIGHT_MAX, ...).
bool wxFont::IsFixedWidth() const
{
    wxCHECK_MSG(IsOk(), false, wxT("invalid font"));

    // cast away constness otherwise lazy font resolution is not possible
    const_cast<wxFont*>(this)->RealizeResource();

    return M_FONTDATA->IsFixedWidth();
}

wxFontFamily wxFont::DoGetFamily() const
{
    return M_FONTDATA->GetFamily();
}

wxFontStyle wxFont::GetStyle() const
{
    wxCHECK_MSG(IsOk(), wxFONTSTYLE_MAX, wxT("invalid font"));

    return M_FONTDATA->GetStyle();
}

int wxFont::GetNumericWeight() const
{
    wxCHECK_MSG(IsOk(), wxFONTWEIGHT_MAX, wxT("invalid font"));

    return M_FONTDATA->GetNumericWeight();
}

bool wxFont::GetUnderlined() const
{
    wxCHECK_MSG(IsOk(), false, wxT("invalid font"));

    return M_FONTDATA->GetUnderlined();
}

bool wxFont::GetStrikethrough() const
{
    wxCHECK_MSG(IsOk(), false, wxT("invalid font"));

    return M_FONTDATA->GetStrikethrough();
}

wxString wxFont::GetFaceName() const
{
    wxCHECK_MSG(IsOk(), wxEmptyString, wxT("invalid font"));

    return M_FONTDATA->GetFaceName();
}

wxFontEncoding wxFont::GetEncoding() const
{
    wxCHECK_MSG(IsOk(), wxFONTENCODING_DEFAULT, wxT("invalid font"));

    return M_FONTDATA->GetEncoding();
}
// Native accessors: forward to the ref data, which creates the native
// objects lazily on first use.
CTFontRef wxFont::OSXGetCTFont() const
{
    wxCHECK_MSG(IsOk(), 0, wxT("invalid font"));

    return M_FONTDATA->OSXGetCTFont();
}

CFDictionaryRef wxFont::OSXGetCTFontAttributes() const
{
    wxCHECK_MSG(IsOk(), 0, wxT("invalid font"));

    return M_FONTDATA->OSXGetCTFontAttributes();
}

#if wxOSX_USE_COCOA_OR_CARBON

CGFontRef wxFont::OSXGetCGFont() const
{
    wxCHECK_MSG(IsOk(), 0, wxT("invalid font"));

    return M_FONTDATA->OSXGetCGFont();
}

#endif

const wxNativeFontInfo* wxFont::GetNativeFontInfo() const
{
    return IsOk() ? &(M_FONTDATA->GetNativeFontInfo()) : NULL;
}
// ----------------------------------------------------------------------------
// wxNativeFontInfo
// ----------------------------------------------------------------------------
/* from Core Text Manual Common Operations */
// Reset to the default state: no descriptor, normal style, default
// family, zero size/weight/width, empty names.
void wxNativeFontInfo::Init()
{
    m_descriptor.reset();
    m_underlined = false;
    m_strikethrough = false;
    m_encoding = wxFONTENCODING_UTF8;
    m_ctWeight = 0.0;
    m_ctWidth = 0.0;
    m_style = wxFONTSTYLE_NORMAL;
    m_ctSize = 0.0;
    m_family = wxFONTFAMILY_DEFAULT;
    m_familyName.clear();
    m_postScriptName.clear();
}

// Field-by-field copy (also shares the descriptor reference).
void wxNativeFontInfo::Init(const wxNativeFontInfo& info)
{
    Init();
    m_descriptor = info.m_descriptor;
    m_underlined = info.m_underlined;
    m_strikethrough = info.m_strikethrough;
    m_encoding = info.m_encoding;
    m_ctWeight = info.m_ctWeight;
    m_ctWidth = info.m_ctWidth;
    m_style = info.m_style;
    m_ctSize = info.m_ctSize;
    m_family = info.m_family;
    m_familyName = info.m_familyName;
    m_postScriptName = info.m_postScriptName;
}

// Derive the description from a concrete CTFont via its descriptor.
void wxNativeFontInfo::InitFromFont(CTFontRef font)
{
    Init();
    wxCFRef<CTFontDescriptorRef> desc(CTFontCopyFontDescriptor(font));
    InitFromFontDescriptor( desc );
}
// Populate all fields from a Core Text font descriptor: weight, width
// and slant from the traits dictionary, size from the size attribute,
// and an approximate generic wx family from the symbolic traits.
void wxNativeFontInfo::InitFromFontDescriptor(CTFontDescriptorRef desc)
{
    Init();
    m_descriptor.reset(wxCFRetain(desc));

    m_ctWeight = GetCTWeight(desc);
    m_ctWidth = GetCTwidth(desc);
    // any noticeable positive slant counts as italic
    m_style = GetCTSlant(desc) > 0.01 ? wxFONTSTYLE_ITALIC : wxFONTSTYLE_NORMAL;
    wxCFTypeRef(CTFontDescriptorCopyAttribute(desc, kCTFontSizeAttribute)).GetValue(m_ctSize, CGFloat(0.0));

    // determine approximate family
    CTFontSymbolicTraits symbolicTraits;
    wxCFDictionaryRef traits((CFDictionaryRef)CTFontDescriptorCopyAttribute(desc, kCTFontTraitsAttribute));
    traits.GetValue(kCTFontSymbolicTrait).GetValue((int32_t*)&symbolicTraits, 0);
    if (symbolicTraits & kCTFontMonoSpaceTrait)
        m_family = wxFONTFAMILY_TELETYPE;
    else
    {
        // map the Core Text stylistic class onto the generic wx families
        uint32_t stylisticClass = symbolicTraits & kCTFontClassMaskTrait;

        if (stylisticClass == kCTFontSansSerifClass)
            m_family = wxFONTFAMILY_SWISS;
        else if (stylisticClass == kCTFontScriptsClass)
            m_family = wxFONTFAMILY_SCRIPT;
        else if (stylisticClass == kCTFontOrnamentalsClass)
            m_family = wxFONTFAMILY_DECORATIVE;
        else if (stylisticClass == kCTFontSymbolicClass)
            m_family = wxFONTFAMILY_DECORATIVE;
        else
            m_family = wxFONTFAMILY_ROMAN;
    }

    wxCFTypeRef(CTFontDescriptorCopyAttribute(m_descriptor, kCTFontFamilyNameAttribute)).GetValue(m_familyName);
}
// Invalidate the cached descriptor; it is rebuilt lazily from the
// scalar fields by CreateCTFontDescriptor().
void wxNativeFontInfo::Free()
{
    m_descriptor.reset();
}

// Return the descriptor, creating it on first use (lazy, hence the
// const_cast in the const path).
CTFontDescriptorRef wxNativeFontInfo::GetCTFontDescriptor() const
{
    if ( !m_descriptor )
        const_cast<wxNativeFontInfo *>(this)->CreateCTFontDescriptor();
    return m_descriptor;
}

// Force creation of the descriptor, discarding the result.
void wxNativeFontInfo::RealizeResource() const
{
    (void) GetCTFontDescriptor();
}
// Build m_descriptor from the scalar fields: the PostScript name (or
// family/face name as a fallback), the traits dictionary (italic flag,
// weight, width) and the size. Afterwards the family name is read back
// from the created descriptor so m_familyName reflects what Core Text
// actually resolved.
void wxNativeFontInfo::CreateCTFontDescriptor()
{
    CTFontDescriptorRef descriptor = NULL;
    wxCFMutableDictionaryRef attributes;

    // build all attributes that define our font.

    if ( m_postScriptName.empty() )
    {
        wxString fontname = m_familyName;
        if ( fontname.empty() )
            fontname = FamilyToFaceName(m_family);

        CFDictionaryAddValue(attributes, kCTFontFamilyNameAttribute, wxCFStringRef(fontname));
    }
    else
    {
        CFDictionaryAddValue(attributes, kCTFontNameAttribute, wxCFStringRef(m_postScriptName));
    }

    wxCFMutableDictionaryRef traits;
    if ( m_style != wxFONTSTYLE_NORMAL )
        traits.SetValue(kCTFontSymbolicTrait, kCTFontItalicTrait);

    traits.SetValue(kCTFontWeightTrait,m_ctWeight);
    traits.SetValue(kCTFontWidthTrait,m_ctWidth);

    attributes.SetValue(kCTFontTraitsAttribute,traits.get());
    attributes.SetValue(kCTFontSizeAttribute, m_ctSize);

    // Create the font descriptor with our attributes
    descriptor = CTFontDescriptorCreateWithAttributes(attributes);
    wxASSERT(descriptor != NULL);

    m_descriptor = descriptor;
    // sync the family name with what Core Text resolved
    wxCFTypeRef(CTFontDescriptorCopyAttribute(m_descriptor, kCTFontFamilyNameAttribute)).GetValue(m_familyName);

#if wxDEBUG_LEVEL >= 2
    // for debugging: show all different font names
    wxCFRef<CTFontRef> font = CTFontCreateWithFontDescriptor(m_descriptor, 12, NULL);

    wxString familyname;
    wxCFTypeRef(CTFontDescriptorCopyAttribute(m_descriptor, kCTFontFamilyNameAttribute)).GetValue(familyname);
    wxLogTrace(TRACE_CTFONT,"****** CreateCTFontDescriptor ******");
    wxLogTrace(TRACE_CTFONT,"Descriptor FontFamilyName: %s",familyname.c_str());

    wxString name;
    wxCFTypeRef(CTFontDescriptorCopyAttribute(m_descriptor, kCTFontNameAttribute)).GetValue(name);
    wxLogTrace(TRACE_CTFONT,"Descriptor FontName: %s",name.c_str());

    wxString display;
    wxCFTypeRef(CTFontDescriptorCopyAttribute(m_descriptor, kCTFontDisplayNameAttribute)).GetValue(display);
    wxLogTrace(TRACE_CTFONT,"Descriptor DisplayName: %s",display.c_str());

    wxString style;
    wxCFTypeRef(CTFontDescriptorCopyAttribute(m_descriptor, kCTFontStyleNameAttribute)).GetValue(style);
    wxLogTrace(TRACE_CTFONT,"Descriptor StyleName: %s",style.c_str());

    wxString psname;
    wxCFTypeRef(CTFontCopyPostScriptName(font)).GetValue(psname);
    wxLogTrace(TRACE_CTFONT,"Created Font PostScriptName: %s",psname.c_str());

    wxString fullname;
    wxCFTypeRef(CTFontCopyFullName(font)).GetValue(fullname);
    wxLogTrace(TRACE_CTFONT,"Created Font FullName: %s",fullname.c_str());

    wxLogTrace(TRACE_CTFONT,"************************************");
#endif
}
// Core Text Helpers
// Core Text helpers: read individual trait values out of the
// kCTFontTraitsAttribute dictionary of a font or font descriptor,
// defaulting to 0.0 when the trait is absent.
CGFloat wxNativeFontInfo::GetCTWeight(CTFontRef font)
{
    wxCFDictionaryRef traits((CFDictionaryRef)CTFontCopyAttribute(font, kCTFontTraitsAttribute));
    CGFloat value;
    traits.GetValue(kCTFontWeightTrait).GetValue(&value, CGFloat(0.0));
    return value;
}

CGFloat wxNativeFontInfo::GetCTWeight(CTFontDescriptorRef descr)
{
    wxCFDictionaryRef traits((CFDictionaryRef)CTFontDescriptorCopyAttribute(descr, kCTFontTraitsAttribute));
    CGFloat value;
    traits.GetValue(kCTFontWeightTrait).GetValue(&value, CGFloat(0.0));
    return value;
}

CGFloat wxNativeFontInfo::GetCTwidth(CTFontDescriptorRef descr)
{
    wxCFDictionaryRef traits((CFDictionaryRef)CTFontDescriptorCopyAttribute(descr, kCTFontTraitsAttribute));
    CGFloat value;
    traits.GetValue(kCTFontWidthTrait).GetValue(&value, CGFloat(0.0));
    return value;
}

CGFloat wxNativeFontInfo::GetCTSlant(CTFontDescriptorRef descr)
{
    wxCFDictionaryRef traits((CFDictionaryRef)CTFontDescriptorCopyAttribute(descr, kCTFontTraitsAttribute));
    CGFloat value;
    traits.GetValue(kCTFontSlantTrait).GetValue(&value, CGFloat(0.0));
    return value;
}
// recipe taken from
// https://developer.apple.com/library/archive/documentation/StringsTextFonts/Conceptual/CoreText_Programming/FontOperations/FontOperations.html
// common prefix of plist serializiation, gets removed and readded
// Common XML prolog of a serialized property list; it is stripped by
// ToString() (to keep the stored string short) and re-added by
// FromString() before parsing.
static const wxString& GetPListPrefix()
{
    static const wxString s_plistPrefix = "<?xml version=\"1.0\" encoding=\"UTF-8\"?><!DOCTYPE plist PUBLIC "
        "\"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">";
    return s_plistPrefix;
}
// Deserialize from the string produced by ToString().
//
// Two formats are understood:
//   version 0/1: semicolon-separated scalar fields
//     version;size;family;style;weight;underlined[;strikethrough];face;encoding
//     (version 0 lacks the strikethrough field)
//   version 2:   underlined;strikethrough;encoding followed by an XML
//     property list of the CTFontDescriptor attributes (with the common
//     plist prolog stripped, see GetPListPrefix()).
// Returns false without fully resetting the object if parsing fails.
bool wxNativeFontInfo::FromString(const wxString& s)
{
    double d;
    long l, version;

    Init();

    wxStringTokenizer tokenizer(s, wxT(";"));

    wxString token = tokenizer.GetNextToken();
    if ( !token.ToLong(&l) )
        return false;
    version = l;

    if ( version == 0 || version == 1 )
    {
        token = tokenizer.GetNextToken();
        if ( !token.ToCDouble(&d) )
            return false;
        if ( d < 0 )
            return false;
#ifdef __LP64__
        // CGFloat is just double in this case.
        m_ctSize = d;
#else // !__LP64__
        if ( d > FLT_MAX )
            return false;
        m_ctSize = static_cast<CGFloat>(d);
#endif // __LP64__/!__LP64__

        token = tokenizer.GetNextToken();
        if ( !token.ToLong(&l) )
            return false;
        m_family = (wxFontFamily)l;

        token = tokenizer.GetNextToken();
        if ( !token.ToLong(&l) )
            return false;
        m_style = (wxFontStyle)l;

        token = tokenizer.GetNextToken();
        if ( !token.ToLong(&l) )
            return false;
        // old streams may store the legacy enum weight; convert first
        m_ctWeight = WXWeightToCT(wxFont::ConvertFromLegacyWeightIfNecessary(l));

        token = tokenizer.GetNextToken();
        if ( !token.ToLong(&l) )
            return false;
        m_underlined = l != 0;

        if ( version == 0L )
        {
            m_strikethrough = false;
        }
        else
        {
            token = tokenizer.GetNextToken();
            if ( !token.ToLong(&l) )
                return false;
            m_strikethrough = l != 0;
        }

        // this works correctly via fallback even if this is (backwards compatibility) a font family name
        SetPostScriptName( tokenizer.GetNextToken() );

        RealizeResource();

#ifndef __WXMAC__
        if( !m_familyName )
            return false;
#endif

        token = tokenizer.GetNextToken();
        if ( !token.ToLong(&l) )
            return false;
        m_encoding = (wxFontEncoding)l;

        return true;
    }
    else if ( version == 2 )
    {
        token = tokenizer.GetNextToken();
        if ( !token.ToLong(&l) )
            return false;
        bool underlined = l != 0;

        token = tokenizer.GetNextToken();
        if ( !token.ToLong(&l) )
            return false;
        bool strikethrough = l != 0;

        token = tokenizer.GetNextToken();
        if ( !token.ToLong(&l) )
            return false;
        wxFontEncoding encoding = (wxFontEncoding)l;

        // the rest of the string is the serialized descriptor plist
        wxString xml = tokenizer.GetString();
        xml = GetPListPrefix()+xml;
        wxCFStringRef plist(xml);
        wxCFDataRef listData(CFStringCreateExternalRepresentation(kCFAllocatorDefault,plist,kCFStringEncodingUTF8,0));
        wxCFDictionaryRef attributes((CFDictionaryRef) CFPropertyListCreateWithData(kCFAllocatorDefault, listData, 0, NULL, NULL));
        CTFontDescriptorRef descriptor = NULL;
        if (attributes != NULL)
            descriptor = CTFontDescriptorCreateWithAttributes(attributes);
        if (descriptor != NULL)
        {
            InitFromFontDescriptor(descriptor);
            CFRelease(descriptor);
            // the non-descriptor fields are stored outside the plist
            m_underlined = underlined;
            m_strikethrough = strikethrough;
            m_encoding = encoding;
            return true;
        }
    }
    return false;
}
// Serialize the font description. Preferred output is version 2: the
// non-native attributes (underline, strikethrough, encoding) followed
// by the font descriptor streamed as an XML property list (with the
// common plist prolog and whitespace stripped). If the descriptor's
// attributes cannot be streamed, falls back to the scalar version 1
// format understood by FromString().
wxString wxNativeFontInfo::ToString() const
{
    wxString s;

    // version 2 is a streamed property list of the font descriptor as recommended by Apple
    // prefixed by the attributes that are non-native to the native font ref like underline, strikethrough etc.

    wxCFDictionaryRef attributes(CTFontDescriptorCopyAttributes(GetCTFontDescriptor()));
    if (attributes != NULL)
    {
        CFPropertyListFormat format = kCFPropertyListXMLFormat_v1_0;
        if (CFPropertyListIsValid(attributes, format))
        {
            wxCFDataRef listData(CFPropertyListCreateData(kCFAllocatorDefault, attributes, format, 0, NULL));
            wxCFStringRef cfString( CFStringCreateFromExternalRepresentation( kCFAllocatorDefault, listData, kCFStringEncodingUTF8) );
            wxString xml = cfString.AsString();
            // compact the plist and drop the common prolog (re-added on load)
            xml.Replace("\r",wxEmptyString,true);
            xml.Replace("\t",wxEmptyString,true);
            xml = xml.Mid(xml.Find("<plist"));
            s.Printf("%d;%d;%d;%d;%s",
                     2, // version
                     GetUnderlined(),
                     GetStrikethrough(),
                     (int)GetEncoding(),
                     xml);
        }
    }

    if ( s.empty() )
    {
        // fallback to version 1
        s.Printf(wxT("%d;%s;%d;%d;%d;%d;%d;%s;%d"),
                 1, // version
                 wxString::FromCDouble(GetFractionalPointSize()),
                 GetFamily(),
                 (int)GetStyle(),
                 GetNumericWeight(),
                 GetUnderlined(),
                 GetStrikethrough(),
                 GetPostScriptName().GetData(),
                 (int)GetEncoding());
    }

    return s;
}
// Simple field getters; only GetNumericWeight() and GetPostScriptName()
// involve a conversion.
double wxNativeFontInfo::GetFractionalPointSize() const
{
    return m_ctSize;
}

wxFontStyle wxNativeFontInfo::GetStyle() const
{
    return m_style;
}

// Translate the stored Core Text weight trait back to a wx weight.
int wxNativeFontInfo::GetNumericWeight() const
{
    return CTWeightToWX(m_ctWeight /* GetCTWeight(m_descriptor)*/);
}

bool wxNativeFontInfo::GetUnderlined() const
{
    return m_underlined;
}

wxString wxNativeFontInfo::GetPostScriptName() const
{
    // return user-set PostScript name as-is
    if ( !m_postScriptName.empty() )
        return m_postScriptName;

    // if not explicitly set, obtain it from the font descriptor
    wxString ps;
    wxCFTypeRef(CTFontDescriptorCopyAttribute(GetCTFontDescriptor(), kCTFontNameAttribute)).GetValue(ps);
    if ( WX_IS_MACOS_AVAILABLE(10, 16) )
    {
        // the PostScript names reported in macOS start with a dot for System Fonts, this has to be corrected
        // otherwise round-trips are not possible, resulting in a Times Fallback, therefore we replace these with
        // their official PostScript Name
        wxString rest;
        if ( ps.StartsWith(".SFNS", &rest) )
            return "SFPro" + rest;
    }
    return ps;
}

wxString wxNativeFontInfo::GetFaceName() const
{
    return m_familyName;
}

wxFontFamily wxNativeFontInfo::GetFamily() const
{
    return m_family;
}

wxFontEncoding wxNativeFontInfo::GetEncoding() const
{
    return m_encoding;
}

bool wxNativeFontInfo::GetStrikethrough() const
{
    return m_strikethrough;
}
// changing the font descriptor
void wxNativeFontInfo::SetFractionalPointSize(double pointsize)
{
if (GetFractionalPointSize() != pointsize)
{
m_ctSize = pointsize;
if ( m_descriptor)
{
wxCFMutableDictionaryRef attributes;
attributes.SetValue(kCTFontSizeAttribute, wxCFNumberRef((CGFloat)pointsize));
m_descriptor.reset(CTFontDescriptorCreateCopyWithAttributes(m_descriptor, attributes));
}
}
}
void wxNativeFontInfo::SetStyle(wxFontStyle style_)
{
bool formerIsItalic = GetStyle() != wxFONTSTYLE_NORMAL;
bool newIsItalic = style_ != wxFONTSTYLE_NORMAL;
m_style = style_;
if (formerIsItalic != newIsItalic)
{
if ( m_descriptor )
{
if ( m_style != wxFONTSTYLE_NORMAL )
m_descriptor = CTFontDescriptorCreateCopyWithSymbolicTraits(m_descriptor, kCTFontItalicTrait, kCTFontItalicTrait);
else
m_descriptor = CTFontDescriptorCreateCopyWithSymbolicTraits(m_descriptor, 0, kCTFontItalicTrait);
}
}
}
// Sets the numeric (CSS-like) weight, invalidating cached native resources
// via Free() when it actually changes.
void wxNativeFontInfo::SetNumericWeight(int weight)
{
    if (GetNumericWeight() == weight)
        return;

    Free();
    m_ctWeight = WXWeightToCT(weight);
}
// Enables or disables underlining; only the flag is stored, the font
// descriptor is not modified.
void wxNativeFontInfo::SetUnderlined(bool underlined)
{
    m_underlined = underlined;
}
// Sets the face name, releasing any cached native resources when the name
// actually changes. Always reports success.
bool wxNativeFontInfo::SetFaceName(const wxString& facename)
{
    if (GetFaceName() == facename)
        return true;

    Free();
    m_familyName = facename;
    return true;
}
// Sets an explicit PostScript name, releasing any cached native resources
// when it actually changes. Always reports success.
bool wxNativeFontInfo::SetPostScriptName(const wxString& postScriptName)
{
    if (m_postScriptName == postScriptName)
        return true;

    Free();
    m_postScriptName = postScriptName;
    return true;
}
// Sets the generic family, clearing any explicit face name and invalidating
// cached native resources via Free().
void wxNativeFontInfo::SetFamily(wxFontFamily family)
{
    Free();
    m_familyName.clear();
    m_family = family;
}

// The encoding is deliberately ignored here; the argument is only consumed to
// silence unused-parameter warnings.
void wxNativeFontInfo::SetEncoding(wxFontEncoding encoding)
{
    wxUnusedVar(encoding);
}

// Enables or disables strike-through; only the flag is stored, the font
// descriptor is not modified.
void wxNativeFontInfo::SetStrikethrough(bool strikethrough)
{
    m_strikethrough = strikethrough;
}
|
#!/usr/bin/env bash
# This script automatically detects the EasyEffects presets directory and installs the presets
GIT_REPOSITORY="https://raw.githubusercontent.com/JackHack96/PulseEffects-Presets/master"
# Locates the EasyEffects presets directory, preferring the Flatpak config
# location and falling back to the native ~/.config one; exits if neither
# exists. Sets the global PRESETS_DIRECTORY.
#
# Fix: the original took the flatpak branch whenever the `flatpak` command
# existed, so if flatpak was installed but EasyEffects was not installed via
# flatpak, PRESETS_DIRECTORY was left unset and the ~/.config fallback was
# never reached.
check_installation() {
    if command -v flatpak &> /dev/null && flatpak list | grep -q "com.github.wwmm.easyeffects"; then
        PRESETS_DIRECTORY="$HOME/.var/app/com.github.wwmm.easyeffects/config/easyeffects"
    elif [ -d "$HOME/.config/easyeffects" ]; then
        PRESETS_DIRECTORY="$HOME/.config/easyeffects"
    else
        echo "Error! Couldn't find EasyEffects presets directory!"
        exit 1
    fi
}
# Ensures the impulse-response subdirectory exists inside the presets directory.
check_impulse_response_directory() {
    [ -d "$PRESETS_DIRECTORY/irs" ] || mkdir "$PRESETS_DIRECTORY/irs"
}
# Reads a single menu choice (1-5) from stdin into the global CHOICE,
# re-prompting until the input is valid.
#
# Fixes: the original pattern ^[1-5]+$ accepted multi-digit input such as
# "12" (printing "Invalid option!" yet still terminating the loop), and the
# arithmetic tests [ "$CHOICE" -lt 1 ] crashed on empty/non-numeric input.
read_choice() {
    CHOICE=""
    while [[ ! $CHOICE =~ ^[1-5]$ ]]; do
        read -r CHOICE
        if [[ ! $CHOICE =~ ^[1-5]$ ]]; then
            echo "Invalid option! Please input a value between 1 and 5!"
        fi
    done
}
# Prints the installation menu (options 1-5) to stdout.
install_menu(){
    cat <<'MENU'
Please select an option for presets installation (Default=1)
1) Install all presets
2) Install Perfect EQ preset
3) Install all bass boosting presets
4) Install Advanced Auto Gain
5) Install Laptop speaker preset
MENU
}
# Downloads the preset(s) selected in the global $CHOICE — plus any impulse
# response files they need — into $PRESETS_DIRECTORY, then rewrites the
# preset author's hard-coded home directory user ("matteo") inside the JSON
# to the current $USER so the irs file paths resolve locally.
# NOTE(review): files are written into "$PRESETS_DIRECTORY/output", but only
# the irs/ directory is ever created by this script — confirm EasyEffects
# itself guarantees output/ exists.
install_presets(){
    case $CHOICE in
    # Option 1: everything — Bass Enhancing + Perfect EQ (with all irs files),
    # Boosted, Perfect EQ, Bass Boosted and Advanced Auto Gain.
    1) echo "Installing Bass Enhancing + Perfect EQ preset..."
        curl "$GIT_REPOSITORY/Bass%20Enhancing%20%2B%20Perfect%20EQ.json" --output "$PRESETS_DIRECTORY/output/Bass Enhancing + Perfect EQ.json" --silent
        echo "Installing impulse response files..."
        curl "$GIT_REPOSITORY/irs/Dolby%20ATMOS%20((128K%20MP3))%201.Default.irs" --output "$PRESETS_DIRECTORY/irs/Dolby ATMOS ((128K MP3)) 1.Default.irs" --silent
        curl "$GIT_REPOSITORY/irs/MaxxAudio%20Pro%20((128K%20MP3))%204.Music%20w%20MaxxSpace.irs" --output "$PRESETS_DIRECTORY/irs/MaxxAudio Pro ((128K MP3)) 4.Music w MaxxSpace.irs" --silent
        curl "$GIT_REPOSITORY/irs/Razor%20Surround%20((48k%20Z-Edition))%202.Stereo%20+20%20bass.irs" --output "$PRESETS_DIRECTORY/irs/Razor Surround ((48k Z-Edition)) 2.Stereo +20 bass.irs" --silent
        curl "$GIT_REPOSITORY/irs/Accudio%20((48kHz%20Z.E.))%20Earpods%20HIFI.irs" --output "$PRESETS_DIRECTORY/irs/Accudio ((48kHz Z.E.)) Earpods HIFI.irs" --silent
        curl "$GIT_REPOSITORY/irs/Accudio%20((48kHz%20Z.E.))%20MDR-E9LP%20HIFI.irs" --output "$PRESETS_DIRECTORY/irs/Accudio ((48kHz Z.E.)) MDR-E9LP HIFI.irs" --silent
        curl "$GIT_REPOSITORY/irs/Accudio%20((48kHz%20Z.E.))%20MDR-E9LP%20SM%20SRH940.irs" --output "$PRESETS_DIRECTORY/irs/Accudio ((48kHz Z.E.)) MDR-E9LP SM SRH940.irs" --silent
        curl "$GIT_REPOSITORY/irs/Accudio%20((48kHz%20Z.E.))%20MDR-E9LP%20SM%20XBA3.irs" --output "$PRESETS_DIRECTORY/irs/Accudio ((48kHz Z.E.)) MDR-E9LP SM XBA3.irs" --silent
        curl "$GIT_REPOSITORY/irs/Accudio%20((48kHz%20Z.E.))%20MDR-E9LP%20SM%20beyerT1.irs" --output "$PRESETS_DIRECTORY/irs/Accudio ((48kHz Z.E.)) MDR-E9LP SM beyerT1.irs" --silent
        curl "$GIT_REPOSITORY/irs/Accudio%20((48kHz%20Z.E.))%20MDR-XB500%20HIFI.irs" --output "$PRESETS_DIRECTORY/irs/Accudio ((48kHz Z.E.)) MDR-XB500 HIFI.irs" --silent
        curl "$GIT_REPOSITORY/irs/Accudio%20((48kHz%20Z.E.))%20XBA-H3%20HIFI.irs" --output "$PRESETS_DIRECTORY/irs/Accudio ((48kHz Z.E.)) XBA-H3 HIFI.irs" --silent
        curl "$GIT_REPOSITORY/irs/Accudio%20((48kHz%20Z.E.))%20XBA-H3%20SM%20SRH940.irs" --output "$PRESETS_DIRECTORY/irs/Accudio ((48kHz Z.E.)) XBA-H3 SM SRH940.irs" --silent
        curl "$GIT_REPOSITORY/irs/Accudio%20((48kHz%20Z.E.))%20XBA-H3%20SM%20XBA4.irs" --output "$PRESETS_DIRECTORY/irs/Accudio ((48kHz Z.E.)) XBA-H3 SM XBA4.irs" --silent
        curl "$GIT_REPOSITORY/irs/Accudio%20((48kHz%20Z.E.))%20XBA-H3%20SM%20beyerT1.irs" --output "$PRESETS_DIRECTORY/irs/Accudio ((48kHz Z.E.)) XBA-H3 SM beyerT1.irs" --silent
        curl "$GIT_REPOSITORY/irs/Creative%20X-Fi%20((Z-Edition))%20Crystalizer%2010%20%2B%20Expand%2010.irs" --output "$PRESETS_DIRECTORY/irs/Creative X-Fi ((Z-Edition)) Crystalizer 10 + Expand 10.irs" --silent
        curl "$GIT_REPOSITORY/irs/HTC%20Beats%20Audio%20((Z-Edition)).irs" --output "$PRESETS_DIRECTORY/irs/HTC Beats Audio ((Z-Edition)).irs" --silent
        curl "$GIT_REPOSITORY/irs/Waves%20MaxxAudio%20((Z-Edition))%20AudioWizard%201.Music.irs" --output "$PRESETS_DIRECTORY/irs/Waves MaxxAudio ((Z-Edition)) AudioWizard 1.Music.irs" --silent
        echo "Selecting default impulse response file..."
        # The preset JSON embeds absolute irs paths under /home/matteo; point
        # them at the current user's home instead.
        sed -i 's/matteo/'"$USER"'/g' "$PRESETS_DIRECTORY/output/Bass Enhancing + Perfect EQ.json"
        echo "Installing Boosted preset..."
        curl "$GIT_REPOSITORY/Boosted.json" --output "$PRESETS_DIRECTORY/output/Boosted.json" --silent
        echo "Installing Perfect EQ preset..."
        curl "$GIT_REPOSITORY/Perfect%20EQ.json" --output "$PRESETS_DIRECTORY/output/Perfect EQ.json" --silent
        sed -i 's/matteo/'"$USER"'/g' "$PRESETS_DIRECTORY/output/Perfect EQ.json"
        curl "$GIT_REPOSITORY/Bass%20Boosted.json" --output "$PRESETS_DIRECTORY/output/Bass Boosted.json" --silent
        sed -i 's/matteo/'"$USER"'/g' "$PRESETS_DIRECTORY/output/Bass Boosted.json"
        echo "Installing Advanced Auto Gain..."
        curl "$GIT_REPOSITORY/Advanced%20Auto%20Gain.json" --output "$PRESETS_DIRECTORY/output/Advanced Auto Gain.json" --silent
        sed -i 's/matteo/'"$USER"'/g' "$PRESETS_DIRECTORY/output/Advanced Auto Gain.json"
        ;;
    # Option 2: Perfect EQ only.
    2) echo "Installing Perfect EQ preset..."
        curl "$GIT_REPOSITORY/Perfect%20EQ.json" --output "$PRESETS_DIRECTORY/output/Perfect EQ.json" --silent
        sed -i 's/matteo/'"$USER"'/g' "$PRESETS_DIRECTORY/output/Perfect EQ.json"
        ;;
    # Option 3: the bass-boosting presets (without the irs downloads).
    3) echo "Installing Bass Enhancing + Perfect EQ preset..."
        curl "$GIT_REPOSITORY/Bass%20Enhancing%20%2B%20Perfect%20EQ.json" --output "$PRESETS_DIRECTORY/output/Bass Enhancing + Perfect EQ.json" --silent
        sed -i 's/matteo/'"$USER"'/g' "$PRESETS_DIRECTORY/output/Bass Enhancing + Perfect EQ.json"
        echo "Installing Boosted preset..."
        curl "$GIT_REPOSITORY/Boosted.json" --output "$PRESETS_DIRECTORY/output/Boosted.json" --silent
        curl "$GIT_REPOSITORY/Bass%20Boosted.json" --output "$PRESETS_DIRECTORY/output/Bass Boosted.json" --silent
        sed -i 's/matteo/'"$USER"'/g' "$PRESETS_DIRECTORY/output/Bass Boosted.json"
        ;;
    # Option 4: Advanced Auto Gain only.
    4) echo "Installing Advanced Auto Gain..."
        curl "$GIT_REPOSITORY/Advanced%20Auto%20Gain.json" --output "$PRESETS_DIRECTORY/output/Advanced Auto Gain.json" --silent
        sed -i 's/matteo/'"$USER"'/g' "$PRESETS_DIRECTORY/output/Advanced Auto Gain.json"
        ;;
    # Option 5: laptop-speaker preset, fetched from a different repository.
    5) echo "Installing Laptop preset..."
        curl "https://raw.githubusercontent.com/Digitalone1/EasyEffects-Presets/master/LoudnessEqualizerPE.json" --output "$PRESETS_DIRECTORY/output/Laptop.json" --silent
        sed -i 's/matteo/'"$USER"'/g' "$PRESETS_DIRECTORY/output/Laptop.json"
        ;;
    esac
}
# Script entry point: locate the presets directory, ensure the irs/
# subdirectory exists, show the menu, read the user's selection and install.
check_installation
check_impulse_response_directory
install_menu
read_choice
install_presets
|
#!/usr/bin/env bash
#
# Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Warns (but does not abort) when the `host` or `inlets` commands are missing.
checkPrereqs() {
    if ! command -v host > /dev/null; then
        echo "please install host command for lookup"
    fi
    if ! command -v inlets > /dev/null; then
        echo "please install the inlets command. For mac, simply use \`brew install inlets\`, for linux \`curl -sLS https://get.inlets.dev | sudo sh\`"
    fi
}
# Replaces the extension's webhook Service so that it selects the local
# inlets-server tunnel pod instead of the real extension deployment.
# $1: provider name (aws, gcp, azure, ...); $2: extension namespace.
# Side effect: saves the current Service manifest to the global $tmpService
# so that cleanUP() can restore it later.
createOrUpdateWebhookSVC(){
    providerName=${1:-}
    [[ -z $providerName ]] && echo "Please specify the provider name (aws,gcp,azure,..etc.)!" && exit 1

    namespace=${2:-}
    [[ -z $namespace ]] && echo "Please specify extension namespace!" && exit 1

    # NOTE(review): `kubectl get --export` was deprecated and later removed
    # (kubectl 1.18+) — confirm the targeted kubectl version still supports it.
    tmpService=$(mktemp)
    kubectl get svc gardener-extension-provider-$providerName -o yaml --export > $tmpService

    cat <<EOF | kubectl apply -f -
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: gardener-extension-provider-$providerName
    app.kubernetes.io/instance: provider-$providerName
    app.kubernetes.io/name: gardener-extension-provider-$providerName
  name: gardener-extension-provider-$providerName
  namespace: $namespace
spec:
  ports:
  - port: 443
    protocol: TCP
    targetPort: 9443
  selector:
    app: inlets-server
    app.kubernetes.io/instance: provider-$providerName
    app.kubernetes.io/name: gardener-extension-provider-$providerName
  type: ClusterIP
EOF
}
# Creates the internet-facing LoadBalancer Service that exposes the
# inlets-server pod's websocket port (8080) on external port 8000.
# $1: extension namespace.
createInletsLB(){
    namespace=${1:-}
    [[ -z $namespace ]] && echo "Please specify extension namespace!" && exit 1

    cat <<EOF | kubectl apply -f -
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: inlets-lb
  name: inlets-lb
  namespace: $namespace
spec:
  externalTrafficPolicy: Cluster
  ports:
  - name: 8000-8080
    port: 8000
    protocol: TCP
    targetPort: 8080
  selector:
    app: inlets-server
  type: LoadBalancer
EOF
}
# Blocks until the inlets-lb LoadBalancer has a resolvable external address,
# then prints that address. AWS load balancers expose a hostname, all other
# providers an IP.
# $1: extension namespace; $2: provider name.
#
# Fixes: the original used `2>&1 > /dev/null`, which sends stderr to the tty
# and only stdout to /dev/null (`> /dev/null 2>&1` silences both), and it
# duplicated the whole kubectl invocation per provider branch.
waitForInletsLBToBeReady(){
    namespace=${1:-}
    [[ -z $namespace ]] && echo "Please specify extension namespace!" && exit 1

    providerName=${2:-}
    [[ -z $providerName ]] && echo "Please specify the provider name (aws,gcp,azure,..etc.)!" && exit 1

    case $providerName in
    aws*)
        field="hostname" ;;
    *)
        field="ip" ;;
    esac

    until host "$(kubectl -n $namespace get svc inlets-lb -o go-template="{{ index (index .status.loadBalancer.ingress 0).$field }}")" > /dev/null 2>&1
    do
        sleep 2s
    done
    echo "$(kubectl -n $namespace get svc inlets-lb -o go-template="{{ index (index .status.loadBalancer.ingress 0).$field }}")"
}
# Creates the tunnel pod: an inlets server (receives the local webhook via the
# websocket tunnel on 8080), a ghostunnel sidecar terminating TLS on 9443 with
# the extension's webhook certificate, and a busybox container for debugging.
# $1: extension namespace; $2: provider name.
# NOTE(review): the inlets auth token is hard-coded here and in the client
# invocation below — fine for a throwaway dev tunnel only; confirm this is
# never used against shared/production seeds.
createServerPod(){
    namespace=${1:-}
    [[ -z $namespace ]] && echo "Please specify extension namespace!" && exit 1

    providerName=${2:-}
    [[ -z $providerName ]] && echo "Please specify the provider name (aws,gcp,azure,..etc.)!" && exit 1

    cat <<EOF | kubectl apply -f -
---
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: inlets-server
    app.kubernetes.io/instance: provider-$providerName
    app.kubernetes.io/name: gardener-extension-provider-$providerName
    networking.gardener.cloud/to-dns: allowed
    networking.gardener.cloud/to-public-networks: allowed
  name: inlets-server
  namespace: $namespace
spec:
  containers:
  - args:
    - "server"
    - "-p"
    - "8080"
    - "-t"
    - "21d809ed61915c9177fbceeaa87e307e766be5f2"
    image: inlets/inlets:2.6.3
    imagePullPolicy: IfNotPresent
    name: inlets-server
    resources:
      limits:
        cpu: 50m
        memory: 128Mi
      requests:
        cpu: 20m
        memory: 64Mi
  - args:
    - "server"
    - "--target"
    - "127.0.0.1:8080"
    - "--listen"
    - "0.0.0.0:9443"
    - "--cacert"
    - "/etc/tls/ca.crt"
    - "--cert"
    - "/etc/tls/tls.crt"
    - "--key"
    - "/etc/tls/tls.key"
    - "--disable-authentication"
    image: "squareup/ghostunnel:v1.5.2"
    imagePullPolicy: IfNotPresent
    name: ghost-server
    volumeMounts:
    - name: inlets-tls
      mountPath: "/etc/tls"
      readOnly: true
    resources:
      limits:
        cpu: 50m
        memory: 128Mi
      requests:
        cpu: 20m
        memory: 64Mi
  - args:
    - "sleep"
    - "8000s"
    image: busybox
    imagePullPolicy: IfNotPresent
    name: debug
    resources:
      limits:
        cpu: 50m
        memory: 128Mi
      requests:
        cpu: 20m
        memory: 64Mi
  volumes:
  - name: inlets-tls
    secret:
      secretName: gardener-extension-webhook-cert
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  restartPolicy: Always
EOF
}
# Blocks until all three containers of the inlets-server pod report ready.
# $1: extension namespace.
waitForInletsPodToBeReady(){
    namespace=${1:-}
    [[ -z $namespace ]] && echo "Please specify extension namespace!" && exit 1

    while [ "$(kubectl -n $namespace get pods inlets-server --no-headers | awk '{print $2}')" != "3/3" ]
    do
        sleep 2s
    done
}
# Tears down the tunnel setup: deletes the inlets LB Service and pod, restores
# the original webhook Service, and kills any local inlets client. Registered
# as the SIGINT/SIGTERM trap handler.
# $1: extension namespace.
#
# Fix: $tmpService is only set inside createOrUpdateWebhookSVC; if the trap
# fires before that step, the original ran `kubectl apply -f ""` and failed.
cleanUP() {
    namespace=${1:-}
    [[ -z $namespace ]] && echo "Please specify the extension namespace!" && exit 1

    echo "cleaning up local-dev setup.."

    echo "Deleting inlets service..."
    kubectl -n $namespace delete svc/inlets-lb

    echo "Deleting the inlets pod..."
    kubectl -n $namespace delete pod/inlets-server

    echo "Re-applying old service values..."
    if [[ -n ${tmpService:-} && -f $tmpService ]]; then
        kubectl apply -f $tmpService
    fi

    kill -9 $(pgrep inlets) 2>/dev/null

    exit 0
}
# Prints usage, prerequisites and clean-up instructions, then exits.
# NOTE(review): $namespace is referenced in the clean-up section but usage is
# reached (via -h) before the positional args are parsed, so it prints empty —
# confirm whether a placeholder like <namespace> was intended.
usage(){
    echo "==================================================================DISCLAIMER============================================================================"
    echo "This scripts needs to be run against the KUBECONFIG of a seed cluster, please set your KUBECONFIG accordingly"
    echo "You also need to set the \`ignoreResources\` variable in your extension chart to \`true\`, generate and apply the corresponding controller-installation"
    echo "========================================================================================================================================================"
    echo ""
    echo "===================================PRE-REQs========================================="
    echo "\`host\` commands for DNS"
    echo "\`inlets\` command. For mac, simply use \`brew install inlets\`, for linux \`curl -sLS https://get.inlets.dev | sudo sh\`"
    echo "===================================================================================="
    echo ""
    echo "========================================================USAGE======================================================================"
    echo "> ./hack/hook-me.sh <extension namespace e.g. extension-provider-aws-fpr6w> <provider e.g., aws> <webhookserver port e.g., 8443>"
    echo "> \`make EXTENSION_NAMESPACE=<extension namespace e.g. extension-provider-aws-fpr6w> start-provider-<provider-name e.g.,aws>-local\`"
    echo "=================================================================================================================================="
    echo ""
    echo "===================================CLEAN UP COMMANDS========================================="
    echo "> kubectl -n $namespace delete svc/inlets-lb"
    echo "> kubectl -n $namespace delete pod/inlets-server"
    echo "============================================================================================="
    exit 0
}
# Entry point (only when executed, not sourced): parses <provider> <namespace>
# <webhook port>, then walks through the tunnel setup steps and finally runs
# the local inlets client connecting the seed to the local webhook server.
#
# Fix: createOrUpdateWebhookSVC expects (providerName, namespace) but was
# called with the arguments swapped, so the webhook Service was created with
# the wrong name in the wrong namespace.
if [[ "${BASH_SOURCE[0]}" = "$0" ]]; then
    if [ "$1" == "-h" ] ; then
        usage
    fi

    providerName=${1:-}
    [[ -z $providerName ]] && echo "Please specify the provider name (aws,gcp,azure,..etc.)!" && exit 1

    namespace=${2:-}
    [[ -z $namespace ]] && echo "Please specify the extension namespace!" && exit 1

    webhookServerPort=${3:-}
    [[ -z $webhookServerPort ]] && echo "Please specify webhook server port" && exit 1

    # Restore the cluster state on Ctrl-C / termination.
    trap 'cleanUP $namespace' SIGINT SIGTERM

    while true; do
        read -p "[STEP 0] Have you already set the \`ignoreResources\` chart value to \`true\` for your extension controller-registration?" yn
        case $yn in
        [Yy]* )
            echo "[STEP 1] Checking Pre-reqs!"
            checkPrereqs

            echo "[STEP 2] Creating Inlets LB Service..!"
            createInletsLB $namespace && sleep 2s

            echo "[STEP 3] Waiting for Inlets LB Service to be created..!";
            loadbalancerIPOrHostName=$(waitForInletsLBToBeReady $namespace $providerName)
            echo "[Info] LB IP is $loadbalancerIPOrHostName"

            echo "[STEP 4] Creating the server Pod for TLS Termination and Tunneling connection..!";
            createServerPod $namespace $providerName

            echo "[STEP 5] Waiting for Inlets Pod to be ready..!";
            waitForInletsPodToBeReady $namespace

            echo "[STEP 6] Creating WebhookSVC LB..!"
            # Argument order is (providerName, namespace) — see function definition.
            createOrUpdateWebhookSVC $providerName $namespace

            echo "[STEP 7] Initializing the inlets client";
            echo "[Info] Inlets initialized, you are ready to go ahead and run \"make EXTENSION_NAMESPACE=$namespace start-provider-$providerName-local\""
            echo "[Info] It will take about 5 seconds for the connection to succeeed!"
            inlets client --remote ws://$loadbalancerIPOrHostName:8000 --upstream https://localhost:$webhookServerPort --token=21d809ed61915c9177fbceeaa87e307e766be5f2
            ;;
        [Nn]* ) echo "You need to set \`ignoreResources\` to true and generate the controller installlation first in your extension chart before proceeding!"; exit;;
        * ) echo "Please answer yes or no.";;
        esac
    done
fi
|
<reponame>DLSZY/flwoable
import { parse } from 'querystring';
import pathRegexp from 'path-to-regexp';
/* eslint no-useless-escape:0 import/prefer-default-export:0 */
// Matches http(s) URLs as well as bare www./user@host forms, with optional
// port, path, query and fragment. NOTE(review): hand-rolled pattern, not a
// full RFC 3986 validator — confirm edge cases against callers' needs.
const reg = /(((^https?:(?:\/\/)?)(?:[-;:&=\+\$,\w]+@)?[A-Za-z0-9.-]+(?::\d+)?|(?:www.|[-;:&=\+\$,\w]+@)[A-Za-z0-9.-]+)((?:\/[\+~%\/.\w-_]*)?\??(?:[-\+=&;%@.\w_]*)#?(?:[\w]*))?)$/;

// Returns true when `path` looks like an absolute URL rather than an
// in-app route path.
export const isUrl = (path: string): boolean => reg.test(path);
export const getPageQuery = () => parse(window.location.href.split('?')[1]);
/**
 * Global API return-code definitions.
 */
export const ReturnCode = {
  SUCCESS: '100',
  FAIL: '101',
};
/**
 * Finds the first route whose `path` pattern matches the given pathname
 * (used to resolve the authority config from props.route.routes).
 * @param router route definitions to search
 * @param pathname current location pathname
 * @returns the matching route, or undefined when none matches
 */
export const getAuthorityFromRouter = <T extends { path: string }>(
  router: T[] = [],
  pathname: string,
): T | undefined =>
  router.find(({ path }) => path && pathRegexp(path).exec(pathname));
|
///
/// [SIMINOV FRAMEWORK]
/// Copyright [2014-2016] [Siminov Software Solution LLP|<EMAIL>]
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
#import "SICDatabase.h"
#import "Book.h"
#import "Shop.h"
// Name of the many-to-many join table between books and shops.
static NSString * const BOOK_SHOP_MAPPING_TABLE_NAME = @"BOOK_SHOP_MAPPING";

// Model class for one BOOK_SHOP_MAPPING row, pairing a Book with a Shop.
// Extends SICDatabase so Siminov can persist it.
@interface BookShopMapping : SICDatabase
{
    // The two sides of the mapping.
    Book *book;
    Shop *shop;
}

// Accessors for the mapped book and shop.
-(Book *)getBook;
-(void)setBook:(Book *)bk;

-(Shop *)getShop;
-(void)setShop:(Shop *)shop;

@end
|
#
# From:
#
# https://lobste.rs/s/xhtim1/problems_with_shells_test_builtin_what
# http://alangrow.com/blog/shell-quirk-assign-from-heredoc
#### Blog Post Example
# NOTE(review): the here doc is attached to the assignment, not to the
# backquoted pipeline, yet (per the expected stdout) the substitution reads it.
paths=`tr '\n' ':' | sed -e 's/:$//'`<<EOPATHS
/foo
/bar
/baz
EOPATHS
echo "$paths"
## stdout: /foo:/bar:/baz

#### Blog Post Example Fix
# Redirection moved inside the backquotes; expected output shows the pipeline
# result is NOT transformed here — the delimiter line ends with the backquote.
paths=`tr '\n' ':' | sed -e 's/:$//'<<EOPATHS
/foo
/bar
/baz
EOPATHS`
echo "$paths"
## stdout-json: "/foo\n/bar\n/baz\n"

#### Rewrite of Blog Post Example
# Same as above using $() with the terminator on its own line.
paths=$(tr '\n' ':' | sed -e 's/:$//' <<EOPATHS
/foo
/bar
/baz
EOPATHS
)
echo "$paths"
## stdout-json: "/foo\n/bar\n/baz\n"

#### Simpler example
# Minimal reproduction of the quirk with a single-line here doc.
foo=`cat`<<EOM
hello world
EOM
echo "$foo"
## stdout: hello world

#### ` after here doc delimiter
foo=`cat <<EOM
hello world
EOM`
echo "$foo"
## stdout: hello world

#### ` on its own line
foo=`cat <<EOM
hello world
EOM
`
echo "$foo"
## stdout: hello world
|
<filename>src/styleParser.js<gh_stars>0
// axios is provided globally by the host page, not imported.
const axios = window.axios

// Normalize every response from the style-compile API into either
// { success: true, style } or { success: false, error } so callers never
// inspect the raw payload. NOTE(review): the payload shape
// (payload.css.errors / payload.css.textOutput) is assumed from usage here —
// confirm against the API contract.
axios.interceptors.response.use(function (response) {
  const data = response.data
  if (data.success) {
    // Compile-level errors arrive inside an HTTP-successful response.
    if (data.payload.css.errors && data.payload.css.errors.length) {
      return {
        success: false,
        error: data.payload.css.errors.map(item => item.message).join('\n')
      }
    } else {
      return {
        success: true,
        style: data.payload.css.textOutput
      }
    }
  } else {
    return {
      success: false,
      error: data.errors.map(item => item.message).join('\n')
    }
  }
}, function (error) {
  // Transport failures resolve (not reject) so aggregation over many
  // stylesheets never short-circuits on a single failure.
  return Promise.resolve({
    success: false,
    error: error.message
  })
})
// POSTs one stylesheet to the remote compile service.
// `type`: source syntax (scss/sass/less/stylus); `code`: stylesheet text.
// The timestamp+random path segment makes every request URL unique —
// presumably to defeat intermediate caching (TODO confirm).
export const getOne = (type, code) => axios({
  method: 'post',
  baseURL: 'https://wfwf9k3tn7.execute-api.us-west-2.amazonaws.com',
  url: '/production/process/' + new Date().getTime() + parseInt(Math.random() * 1000),
  data: {
    'css': {
      'contentType': 'css',
      'id': 'css',
      'options': {},
      'syntax': type,
      'version': 'default',
      'textInput': code
    }
  }
})
/**
 * Compiles a list of style descriptors ({ lang, content }) concurrently.
 * Plain CSS (no lang) passes through unchanged; scss/sass/less/stylus are
 * sent to the remote compiler via getOne; anything else yields a failure
 * entry. Resolves to an array of { success, style? , error? } objects in
 * the same order as the input.
 *
 * Improvements: avoids reassigning the parameter, and uses Promise.all
 * directly — axios.all is just a deprecated alias for it.
 */
export const getStyles = (styles) => {
  const tasks = styles.map((item) => {
    if (!item.lang) {
      // Plain CSS needs no compilation.
      return Promise.resolve({
        success: true,
        style: item.content
      })
    }
    if (['scss', 'sass', 'less', 'stylus'].includes(item.lang)) {
      return getOne(item.lang, item.content)
    }
    // Unknown preprocessor: report failure without a message (matches the
    // original behavior).
    return Promise.resolve({
      success: false,
      error: ''
    })
  })
  return Promise.all(tasks)
}
|
#!/bin/bash
# tools: sudo apt-get install valgrind (tee ships with coreutils)
# Runs the hydrological model executable under valgrind's memcheck.

# Executable names.
# Fix: this was originally single-quoted, so "$RUN" was stored literally and
# never expanded, making the cp below look for a file named
# 'HMC_Model_V2_$RUN.x'. NOTE(review): assumes RUN is provided by the
# environment — confirm.
name_comp="HMC_Model_V2_${RUN}.x"
name_exec='HMC_Model_V2_MemoryTest.x'

# Settings
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/netcdf-4.1.2/lib/
ulimit -s unlimited

# Remove a stale test executable from a previous run, if any.
if [ -f "$name_exec" ];
then
    echo "File $name_exec exists. Removing it ... "
    rm "$name_exec"
    echo "File $name_exec exists. Removing it ... OK "
fi

# Memory test (for file output append: 2>&1 | tee memory_check.txt)
echo " MemoryTest ... "
cp "$name_comp" "$name_exec"
valgrind -v --track-origins=yes --tool=memcheck --leak-check=full ./"$name_exec" 30 3 0.6 0.015 marche 0.3 500 1 70
echo " MemoryTest ... OK"
|
<filename>android/guava/src/com/google/common/collect/RegularImmutableMap.java
/*
* Copyright (C) 2008 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect;
import static com.google.common.base.Preconditions.checkElementIndex;
import static com.google.common.base.Preconditions.checkPositionIndex;
import static com.google.common.collect.CollectPreconditions.checkEntryNotNull;
import static java.util.Objects.requireNonNull;
import com.google.common.annotations.GwtCompatible;
import com.google.common.annotations.VisibleForTesting;
import java.util.AbstractMap;
import java.util.Arrays;
import java.util.Map.Entry;
import javax.annotation.CheckForNull;
import org.checkerframework.checker.nullness.qual.Nullable;
/**
* A hash-based implementation of {@link ImmutableMap}.
*
* @author <NAME>
*/
@GwtCompatible(serializable = true, emulated = true)
@ElementTypesAreNonnullByDefault
final class RegularImmutableMap<K, V> extends ImmutableMap<K, V> {
  // Sentinel stored in the hash table to mark an empty slot.
  private static final byte ABSENT = -1;

  // Max size is halved due to indexing into double-sized alternatingKeysAndValues
  private static final int BYTE_MAX_SIZE = 1 << (Byte.SIZE - 1); // 2^7 = 128
  private static final int SHORT_MAX_SIZE = 1 << (Short.SIZE - 1); // 2^15 = 32_768

  // Masks used to read byte/short table entries as unsigned values.
  private static final int BYTE_MASK = (1 << Byte.SIZE) - 1; // 2^8 - 1 = 255
  private static final int SHORT_MASK = (1 << Short.SIZE) - 1; // 2^16 - 1 = 65_535

  // Shared canonical empty instance; safe to share because the map is immutable.
  @SuppressWarnings("unchecked")
  static final ImmutableMap<Object, Object> EMPTY =
      new RegularImmutableMap<>(null, new Object[0], 0);
/*
* This is an implementation of ImmutableMap optimized especially for Android, which does not like
* objects per entry. Instead we use an open-addressed hash table. This design is basically
* equivalent to RegularImmutableSet, save that instead of having a hash table containing the
* elements directly and null for empty positions, we store indices of the keys in the hash table,
* and ABSENT for empty positions. We then look up the keys in alternatingKeysAndValues.
*
* (The index actually stored is the index of the key in alternatingKeysAndValues, which is
* double the index of the entry in entrySet.asList.)
*
* The basic data structure is described in https://en.wikipedia.org/wiki/Open_addressing.
* The pointer to a key is stored in hashTable[Hashing.smear(key.hashCode()) % table.length],
* save that if that location is already full, we try the next index, and the next, until we
* find an empty table position. Since the table has a power-of-two size, we use
* & (table.length - 1) instead of % table.length, though.
*/
  // The open-addressed hash table of key indices: a byte[], short[] or int[]
  // depending on table size, or null for sizes <= 1 (see createHashTable).
  @CheckForNull private final transient Object hashTable;
  // Keys at even indices, values at the following odd indices.
  @VisibleForTesting final transient @Nullable Object[] alternatingKeysAndValues;
  // Number of entries (half the number of valid slots in the array above).
  private final transient int size;
/*
* We have some considerable complexity in these create methods because of
* Builder.buildKeepingLast(). The same Builder might be called with buildKeepingLast() and then
* buildOrThrow(), or vice versa. So in particular, if we modify alternatingKeysAndValues to
* eliminate duplicate keys (for buildKeepingLast()) then we have to ensure that a later call to
* buildOrThrow() will still throw as if the duplicates had not been eliminated. And the exception
* message must mention two values that were associated with the duplicate key in two different
* calls to Builder.put (though we don't really care *which* two values if there were more than
* two). These considerations lead us to have a field of type DuplicateKey in the Builder, which
* will remember the first duplicate key we encountered. All later calls to buildOrThrow() can
* mention that key with its values. Further duplicates might be added in the meantime but since
* builders only ever accumulate entries it will always be valid to throw from buildOrThrow() with
* the first duplicate.
*/
  // This entry point is for callers other than ImmutableMap.Builder.
  static <K, V> RegularImmutableMap<K, V> create(
      int n, @Nullable Object[] alternatingKeysAndValues) {
    // With no builder, a duplicate key makes the three-arg overload throw
    // immediately instead of deferring the exception.
    return create(n, alternatingKeysAndValues, /* builder= */ null);
  }
  // This entry point is used by the other create method but also directly by
  // ImmutableMap.Builder, so that it can remember any DuplicateKey encountered and produce an
  // exception for a later buildOrThrow(). If builder is null that means that a duplicate
  // key will lead to an immediate exception. If it is not null then a duplicate key will instead be
  // stored in the builder, which may use it to throw an exception later.
  static <K, V> RegularImmutableMap<K, V> create(
      int n, @Nullable Object[] alternatingKeysAndValues, @Nullable Builder<K, V> builder) {
    if (n == 0) {
      // Reuse the shared empty instance.
      @SuppressWarnings("unchecked")
      RegularImmutableMap<K, V> empty = (RegularImmutableMap<K, V>) EMPTY;
      return empty;
    } else if (n == 1) {
      // Single entry: no hash table is needed at all (hashTable == null).
      // requireNonNull is safe because the first `2*n` elements have been filled in.
      checkEntryNotNull(
          requireNonNull(alternatingKeysAndValues[0]), requireNonNull(alternatingKeysAndValues[1]));
      return new RegularImmutableMap<K, V>(null, alternatingKeysAndValues, 1);
    }
    checkPositionIndex(n, alternatingKeysAndValues.length >> 1);
    int tableSize = ImmutableSet.chooseTableSize(n);
    // If there are no duplicate keys, hashTablePlus is the final hashTable value. If there *are*
    // duplicate keys, hashTablePlus consists of 3 elements: [0] the hashTable; [1] the number of
    // entries in alternatingKeysAndValues that are still valid after rewriting to remove
    // duplicates; [2] a Builder.DuplicateKey that records the first duplicate key we encountered
    // for possible later use in exceptions, perhaps straight away.
    Object hashTablePlus = createHashTable(alternatingKeysAndValues, n, tableSize, 0);
    Object hashTable;
    if (hashTablePlus instanceof Object[]) {
      // Duplicates were found: unpack the [table, newSize, duplicateKey] triple.
      Object[] hashTableAndSizeAndDuplicate = (Object[]) hashTablePlus;
      Builder.DuplicateKey duplicateKey = (Builder.DuplicateKey) hashTableAndSizeAndDuplicate[2];
      if (builder == null) {
        throw duplicateKey.exception();
      }
      builder.duplicateKey = duplicateKey;
      hashTable = hashTableAndSizeAndDuplicate[0];
      n = (Integer) hashTableAndSizeAndDuplicate[1];
      // Trim the array to drop slots vacated by removed duplicates.
      alternatingKeysAndValues = Arrays.copyOf(alternatingKeysAndValues, n * 2);
    } else {
      hashTable = hashTablePlus;
    }
    return new RegularImmutableMap<K, V>(hashTable, alternatingKeysAndValues, n);
  }
  /**
   * Returns a hash table for the specified keys and values, and ensures that neither keys nor
   * values are null. This method may update {@code alternatingKeysAndValues} if there are duplicate
   * keys. If so, the return value will indicate how many entries are still valid, and will also
   * include a {@link Builder.DuplicateKey} in case duplicate keys are not allowed now or will not
   * be allowed on a later {@link Builder#buildOrThrow()} call.
   *
   * @param keyOffset 1 if this is the reverse direction of a BiMap, 0 otherwise.
   * @return an {@code Object} that is a {@code byte[]}, {@code short[]}, or {@code int[]}, the
   *     smallest possible to fit {@code tableSize}; or an {@code Object[]} where [0] is one of
   *     these; [1] indicates how many element pairs in {@code alternatingKeysAndValues} are valid;
   *     and [2] is a {@link Builder.DuplicateKey} for the first duplicate key encountered.
   */
  @CheckForNull
  private static Object createHashTable(
      @Nullable Object[] alternatingKeysAndValues, int n, int tableSize, int keyOffset) {
    if (n == 1) {
      // for n=1 we don't create a hash table, but we need to do the checkEntryNotNull check!
      // requireNonNull is safe because the first `2*n` elements have been filled in.
      checkEntryNotNull(
          requireNonNull(alternatingKeysAndValues[keyOffset]),
          requireNonNull(alternatingKeysAndValues[keyOffset ^ 1]));
      return null;
    }
    int mask = tableSize - 1;
    Builder.DuplicateKey duplicateKey = null;
    // The three branches below are identical except for the element type of the
    // table (byte/short/int), kept separate to avoid boxing and keep the table
    // as small as possible for the given tableSize.
    if (tableSize <= BYTE_MAX_SIZE) {
      /*
       * Use 8 bits per entry. The value is unsigned to allow use up to a size of 2^8.
       *
       * The absent indicator of -1 signed becomes 2^8 - 1 unsigned, which reduces the actual max
       * size to 2^8 - 1. However, due to a load factor < 1 the limit is never approached.
       */
      byte[] hashTable = new byte[tableSize];
      Arrays.fill(hashTable, ABSENT);

      // outI tracks the write position after duplicates are squeezed out.
      int outI = 0;
      entries:
      for (int i = 0; i < n; i++) {
        int keyIndex = 2 * i + keyOffset;
        int outKeyIndex = 2 * outI + keyOffset;
        // requireNonNull is safe because the first `2*n` elements have been filled in.
        Object key = requireNonNull(alternatingKeysAndValues[keyIndex]);
        Object value = requireNonNull(alternatingKeysAndValues[keyIndex ^ 1]);
        checkEntryNotNull(key, value);
        // Linear probing from the smeared hash until a free slot or the same key.
        for (int h = Hashing.smear(key.hashCode()); ; h++) {
          h &= mask;
          int previousKeyIndex = hashTable[h] & BYTE_MASK; // unsigned read
          if (previousKeyIndex == BYTE_MASK) { // -1 signed becomes 255 unsigned
            hashTable[h] = (byte) outKeyIndex;
            break;
          } else if (key.equals(alternatingKeysAndValues[previousKeyIndex])) {
            // Duplicate key: remember the first conflict and keep the last value.
            duplicateKey =
                new Builder.DuplicateKey(
                    key, value, requireNonNull(alternatingKeysAndValues[previousKeyIndex ^ 1]));
            alternatingKeysAndValues[previousKeyIndex ^ 1] = value;
            continue entries;
          }
        }
        if (outI < i) { // if outI == i don't bother writing the values back where they came from
          alternatingKeysAndValues[outKeyIndex] = key;
          alternatingKeysAndValues[outKeyIndex ^ 1] = value;
        }
        outI++;
      }
      return outI == n ? hashTable : new Object[] {hashTable, outI, duplicateKey};
    } else if (tableSize <= SHORT_MAX_SIZE) {
      /*
       * Use 16 bits per entry. The value is unsigned to allow use up to a size of 2^16.
       *
       * The absent indicator of -1 signed becomes 2^16 - 1 unsigned, which reduces the actual max
       * size to 2^16 - 1. However, due to a load factor < 1 the limit is never approached.
       */
      short[] hashTable = new short[tableSize];
      Arrays.fill(hashTable, ABSENT);

      int outI = 0;
      entries:
      for (int i = 0; i < n; i++) {
        int keyIndex = 2 * i + keyOffset;
        int outKeyIndex = 2 * outI + keyOffset;
        // requireNonNull is safe because the first `2*n` elements have been filled in.
        Object key = requireNonNull(alternatingKeysAndValues[keyIndex]);
        Object value = requireNonNull(alternatingKeysAndValues[keyIndex ^ 1]);
        checkEntryNotNull(key, value);
        for (int h = Hashing.smear(key.hashCode()); ; h++) {
          h &= mask;
          int previousKeyIndex = hashTable[h] & SHORT_MASK; // unsigned read
          if (previousKeyIndex == SHORT_MASK) { // -1 signed becomes 65_535 unsigned
            hashTable[h] = (short) outKeyIndex;
            break;
          } else if (key.equals(alternatingKeysAndValues[previousKeyIndex])) {
            duplicateKey =
                new Builder.DuplicateKey(
                    key, value, requireNonNull(alternatingKeysAndValues[previousKeyIndex ^ 1]));
            alternatingKeysAndValues[previousKeyIndex ^ 1] = value;
            continue entries;
          }
        }
        if (outI < i) { // if outI == i don't bother writing the values back where they came from
          alternatingKeysAndValues[outKeyIndex] = key;
          alternatingKeysAndValues[outKeyIndex ^ 1] = value;
        }
        outI++;
      }
      return outI == n ? hashTable : new Object[] {hashTable, outI, duplicateKey};
    } else {
      /*
       * Use 32 bits per entry.
       */
      int[] hashTable = new int[tableSize];
      Arrays.fill(hashTable, ABSENT);

      int outI = 0;
      entries:
      for (int i = 0; i < n; i++) {
        int keyIndex = 2 * i + keyOffset;
        int outKeyIndex = 2 * outI + keyOffset;
        // requireNonNull is safe because the first `2*n` elements have been filled in.
        Object key = requireNonNull(alternatingKeysAndValues[keyIndex]);
        Object value = requireNonNull(alternatingKeysAndValues[keyIndex ^ 1]);
        checkEntryNotNull(key, value);
        for (int h = Hashing.smear(key.hashCode()); ; h++) {
          h &= mask;
          int previousKeyIndex = hashTable[h];
          if (previousKeyIndex == ABSENT) {
            hashTable[h] = outKeyIndex;
            break;
          } else if (key.equals(alternatingKeysAndValues[previousKeyIndex])) {
            duplicateKey =
                new Builder.DuplicateKey(
                    key, value, requireNonNull(alternatingKeysAndValues[previousKeyIndex ^ 1]));
            alternatingKeysAndValues[previousKeyIndex ^ 1] = value;
            continue entries;
          }
        }
        if (outI < i) { // if outI == i don't bother writing the values back where they came from
          alternatingKeysAndValues[outKeyIndex] = key;
          alternatingKeysAndValues[outKeyIndex ^ 1] = value;
        }
        outI++;
      }
      return outI == n ? hashTable : new Object[] {hashTable, outI, duplicateKey};
    }
  }
@CheckForNull
static Object createHashTableOrThrow(
    @Nullable Object[] alternatingKeysAndValues, int n, int tableSize, int keyOffset) {
  // Delegate to createHashTable; a plain table (byte[]/short[]/int[]) or null passes through.
  Object hashTablePlus = createHashTable(alternatingKeysAndValues, n, tableSize, keyOffset);
  if (!(hashTablePlus instanceof Object[])) {
    return hashTablePlus;
  }
  // An Object[] result signals a duplicate key was detected; surface it as an exception.
  Object[] hashTableAndSizeAndDuplicate = (Object[]) hashTablePlus;
  Builder.DuplicateKey duplicateKey = (Builder.DuplicateKey) hashTableAndSizeAndDuplicate[2];
  throw duplicateKey.exception();
}
/**
 * Creates the map from an already-built hash table (may be null for size-1 maps) and the
 * flat array of alternating keys and values.
 */
private RegularImmutableMap(
    @CheckForNull Object hashTable, @Nullable Object[] alternatingKeysAndValues, int size) {
  this.hashTable = hashTable;
  this.alternatingKeysAndValues = alternatingKeysAndValues;
  this.size = size;
}

@Override
public int size() {
  // Number of entries; the backing array holds 2*size slots (key, value pairs).
  return size;
}
@SuppressWarnings("unchecked")
@Override
@CheckForNull
public V get(@CheckForNull Object key) {
  // Delegate the lookup to the static helper that understands all table representations.
  Object result = get(hashTable, alternatingKeysAndValues, size, 0, key);
  /*
   * We can't simply cast the result of `RegularImmutableMap.get` to V because of a bug in our
   * nullness checker (resulting from https://github.com/jspecify/checker-framework/issues/8).
   * The explicit null branch below preserves that workaround.
   */
  return (result == null) ? null : (V) result;
}
/**
 * Looks up {@code key} in the flat alternating key/value array via the given hash table.
 *
 * <p>The hash table uses the narrowest index type that fits: {@code byte[]}, {@code short[]}
 * or {@code int[]}; each slot stores the array index of a key, or ABSENT. A size-1 map has no
 * table at all and is checked by direct comparison. Returns the mapped value, or null if
 * absent or if {@code key} is null.
 */
@CheckForNull
static Object get(
    @CheckForNull Object hashTableObject,
    @Nullable Object[] alternatingKeysAndValues,
    int size,
    int keyOffset,
    @CheckForNull Object key) {
  if (key == null) {
    return null;
  } else if (size == 1) {
    // requireNonNull is safe because the first 2 elements have been filled in.
    return requireNonNull(alternatingKeysAndValues[keyOffset]).equals(key)
        ? requireNonNull(alternatingKeysAndValues[keyOffset ^ 1])
        : null;
  } else if (hashTableObject == null) {
    return null;
  }
  // Open addressing with linear probing; table length is a power of two, so `& mask` wraps.
  if (hashTableObject instanceof byte[]) {
    byte[] hashTable = (byte[]) hashTableObject;
    int mask = hashTable.length - 1;
    for (int h = Hashing.smear(key.hashCode()); ; h++) {
      h &= mask;
      int keyIndex = hashTable[h] & BYTE_MASK; // unsigned read
      if (keyIndex == BYTE_MASK) { // -1 signed becomes 255 unsigned
        return null;
      } else if (key.equals(alternatingKeysAndValues[keyIndex])) {
        // The value sits next to its key; `^ 1` flips between the pair's two slots.
        return alternatingKeysAndValues[keyIndex ^ 1];
      }
    }
  } else if (hashTableObject instanceof short[]) {
    short[] hashTable = (short[]) hashTableObject;
    int mask = hashTable.length - 1;
    for (int h = Hashing.smear(key.hashCode()); ; h++) {
      h &= mask;
      int keyIndex = hashTable[h] & SHORT_MASK; // unsigned read
      if (keyIndex == SHORT_MASK) { // -1 signed becomes 65_535 unsigned
        return null;
      } else if (key.equals(alternatingKeysAndValues[keyIndex])) {
        return alternatingKeysAndValues[keyIndex ^ 1];
      }
    }
  } else {
    int[] hashTable = (int[]) hashTableObject;
    int mask = hashTable.length - 1;
    for (int h = Hashing.smear(key.hashCode()); ; h++) {
      h &= mask;
      int keyIndex = hashTable[h];
      if (keyIndex == ABSENT) {
        return null;
      } else if (key.equals(alternatingKeysAndValues[keyIndex])) {
        return alternatingKeysAndValues[keyIndex ^ 1];
      }
    }
  }
}
@Override
ImmutableSet<Entry<K, V>> createEntrySet() {
  // Keys start at offset 0 in the alternating array (values at odd indices).
  return new EntrySet<>(this, alternatingKeysAndValues, 0, size);
}
/**
 * Entry-set view backed directly by the map's alternating key/value array.
 *
 * <p>Entries are materialized lazily per index in {@link #createAsList()}; nothing is copied.
 * {@code keyOffset} selects whether even slots hold keys (0) or values (1), allowing the same
 * class to serve inverse views.
 */
static class EntrySet<K, V> extends ImmutableSet<Entry<K, V>> {
  private final transient ImmutableMap<K, V> map;
  private final transient @Nullable Object[] alternatingKeysAndValues;
  private final transient int keyOffset;
  private final transient int size;

  EntrySet(
      ImmutableMap<K, V> map,
      @Nullable Object[] alternatingKeysAndValues,
      int keyOffset,
      int size) {
    this.map = map;
    this.alternatingKeysAndValues = alternatingKeysAndValues;
    this.keyOffset = keyOffset;
    this.size = size;
  }

  @Override
  public UnmodifiableIterator<Entry<K, V>> iterator() {
    return asList().iterator();
  }

  @Override
  int copyIntoArray(Object[] dst, int offset) {
    return asList().copyIntoArray(dst, offset);
  }

  @Override
  ImmutableList<Entry<K, V>> createAsList() {
    // Random-access adapter over the flat array; builds an immutable entry per get(i).
    return new ImmutableList<Entry<K, V>>() {
      @Override
      public Entry<K, V> get(int index) {
        checkElementIndex(index, size);
        /*
         * requireNonNull is safe because the first `2*(size+keyOffset)` elements have been filled
         * in.
         */
        @SuppressWarnings("unchecked")
        K key = (K) requireNonNull(alternatingKeysAndValues[2 * index + keyOffset]);
        @SuppressWarnings("unchecked")
        V value = (V) requireNonNull(alternatingKeysAndValues[2 * index + (keyOffset ^ 1)]);
        return new AbstractMap.SimpleImmutableEntry<K, V>(key, value);
      }

      @Override
      public int size() {
        return size;
      }

      @Override
      public boolean isPartialView() {
        return true;
      }
    };
  }

  @Override
  public boolean contains(@CheckForNull Object object) {
    if (object instanceof Entry) {
      Entry<?, ?> entry = (Entry<?, ?>) object;
      Object k = entry.getKey();
      Object v = entry.getValue();
      // Delegates to the map's hash lookup; null values never occur in the map.
      return v != null && v.equals(map.get(k));
    }
    return false;
  }

  @Override
  boolean isPartialView() {
    return true;
  }

  @Override
  public int size() {
    return size;
  }
}
@Override
ImmutableSet<K> createKeySet() {
  // Offset 0 selects the even (key) slots of the alternating array.
  @SuppressWarnings("unchecked")
  ImmutableList<K> keyList =
      (ImmutableList<K>) new KeysOrValuesAsList(alternatingKeysAndValues, 0, size);
  return new KeySet<K>(this, keyList);
}
/**
 * A zero-copy list view over either the keys (offset 0) or the values (offset 1) of the
 * alternating key/value array.
 */
static final class KeysOrValuesAsList extends ImmutableList<Object> {
  private final transient @Nullable Object[] alternatingKeysAndValues;
  private final transient int offset;
  private final transient int size;

  KeysOrValuesAsList(@Nullable Object[] alternatingKeysAndValues, int offset, int size) {
    this.alternatingKeysAndValues = alternatingKeysAndValues;
    this.offset = offset;
    this.size = size;
  }

  @Override
  public Object get(int index) {
    checkElementIndex(index, size);
    // requireNonNull is safe because the first `2*(size+offset)` elements have been filled in.
    return requireNonNull(alternatingKeysAndValues[2 * index + offset]);
  }

  @Override
  boolean isPartialView() {
    return true;
  }

  @Override
  public int size() {
    return size;
  }
}
/**
 * Key-set view: iteration order comes from the pre-built key list, membership checks delegate
 * to the owning map's hash lookup.
 */
static final class KeySet<K> extends ImmutableSet<K> {
  private final transient ImmutableMap<K, ?> map;
  private final transient ImmutableList<K> list;

  KeySet(ImmutableMap<K, ?> map, ImmutableList<K> list) {
    this.map = map;
    this.list = list;
  }

  @Override
  public UnmodifiableIterator<K> iterator() {
    return asList().iterator();
  }

  @Override
  int copyIntoArray(Object[] dst, int offset) {
    return asList().copyIntoArray(dst, offset);
  }

  @Override
  public ImmutableList<K> asList() {
    return list;
  }

  @Override
  public boolean contains(@CheckForNull Object object) {
    // Non-null values are guaranteed, so a null result means "absent".
    return map.get(object) != null;
  }

  @Override
  boolean isPartialView() {
    return true;
  }

  @Override
  public int size() {
    return map.size();
  }
}
@SuppressWarnings("unchecked")
@Override
ImmutableCollection<V> createValues() {
  // Offset 1 selects the odd (value) slots of the alternating array.
  return (ImmutableList<V>) new KeysOrValuesAsList(alternatingKeysAndValues, 1, size);
}

@Override
boolean isPartialView() {
  return false;
}

// This class is never actually serialized directly, but we have to make the
// warning go away (and suppressing would suppress for all nested classes too)
private static final long serialVersionUID = 0;
}
|
package refinedstorage.integration.tesla;
import net.darkhax.tesla.api.ITeslaConsumer;
import net.darkhax.tesla.api.ITeslaHolder;
import net.minecraft.item.ItemStack;
import refinedstorage.item.ItemWirelessGrid;
/**
 * Adapts a wireless grid item's internal energy storage to the Tesla power API.
 *
 * <p>Holder/consumer calls are delegated straight to the {@link ItemWirelessGrid} Forge-energy
 * style methods for the wrapped {@link ItemStack}.
 */
public class WirelessGridEnergyTesla implements ITeslaHolder, ITeslaConsumer {
    // Both references are set once and never reassigned; final documents that invariant.
    private final ItemWirelessGrid wirelessGrid;
    private final ItemStack stack;

    public WirelessGridEnergyTesla(ItemWirelessGrid wirelessGrid, ItemStack stack) {
        this.wirelessGrid = wirelessGrid;
        this.stack = stack;
    }

    @Override
    public long getStoredPower() {
        return wirelessGrid.getEnergyStored(stack);
    }

    @Override
    public long getCapacity() {
        return wirelessGrid.getMaxEnergyStored(stack);
    }

    @Override
    public long givePower(long power, boolean simulated) {
        // Tesla reports long amounts; the underlying item API works in int units.
        // NOTE(review): values above Integer.MAX_VALUE would truncate here — presumed
        // never to occur in practice; confirm against callers.
        return wirelessGrid.receiveEnergy(stack, (int) power, simulated);
    }
}
|
#!/usr/bin/env zsh
# Entry point: runs every setup stage in order. Each stage aborts the whole
# script via error() on failure, so order here is also a dependency order
# (e.g. Homebrew must exist before the Brewfile is processed).
main() {
    ask_for_sudo
    clone_dotfiles_repo
    install_homebrew
    install_packages_with_brewfile
    setup_macOS_defaults
    configure_zsh
    configure_git
    configure_ssh
    configure_vscode
    finish
}
# Where the dotfiles repository is cloned to / expected to live.
DOTFILES_REPO=$HOME/.dotfiles

# Prompt for the sudo password once, then keep the timestamp fresh in a
# background loop so later stages never block on a password prompt.
function ask_for_sudo() {
    step "Prompting for sudo password"
    if sudo --validate; then
        # Keep-alive: refresh sudo every 10s until this script's PID goes away.
        while true; do sudo --non-interactive true; \
            sleep 10; kill -0 "$$" || exit; done 2>/dev/null &
        success "Sudo password updated"
    else
        error "Sudo password update failed"
    fi
}
# Append line $1 (described as $2) to file $3 unless the exact line is already
# present. The file is created first if missing.
function addToFileIfNeeded() {
    createFileIfNeeded $3
    step "Setting up ${2} in ${3}"
    # -F fixed string, -x whole-line match, -q quiet: exit status only.
    if grep -Fxq $1 $3; then
        info "${2} already set up in ${3}"
    else
        # zsh echo interprets \n, so this appends a blank line then the content.
        if echo "\n${1}" >> $3; then
            success "${2} successfully set up in ${3}"
        else
            error "Failed to set up ${2} in ${3}"
        fi
    fi
}
# Append the contents of template file $1 (described as $2) to file $3 unless
# every line of the template is already present in the file.
function addTemplateToFileIfNeeded() {
    createFileIfNeeded $3
    step "Setting up ${2} in ${3}"
    # comm -13 prints lines only in $1; empty output means the template is covered.
    # NOTE(review): comm assumes both inputs are sorted — TODO confirm the template
    # and target are maintained in sorted order, otherwise this check can misfire.
    if [[ -z $(comm -13 $3 $1) ]]; then
        info "${2} already set up in ${3}"
    else
        if echo "$(cat ${1})" >> $3; then
            success "${2} successfully set up in ${3}"
        else
            error "Failed to set up ${2} in ${3}"
        fi
    fi
}
# Ensure the file given as $1 exists, creating an empty one when missing.
function createFileIfNeeded() {
    step "creating ${1} if needed"
    if [[ -e $1 ]]; then
        info "${1} already exists"
        return
    fi
    if touch $1; then
        success "${1} created successfully"
    else
        error "${1} could not be created"
    fi
}
# Final message shown after all stages complete successfully.
function finish() {
    echo ""
    success "Finished successfully!"
    info "Please restart your Terminal for the applied changes to take effect."
}

# Install Homebrew if the `brew` command is not already available.
function install_homebrew() {
    step "Installing Homebrew"
    if hash brew 2>/dev/null; then
        info "Homebrew already exists"
    else
        # NOTE(review): `ruby -e` expects a code string, but a file path is passed
        # here — this looks like it would evaluate the path itself as Ruby code.
        # Confirm whether this should be `ruby ${DOTFILES_REPO}/installers/homebrew`
        # or `ruby -e "$(cat ...)"` instead.
        if /usr/bin/ruby -e ${DOTFILES_REPO}/installers/homebrew; then
            success "Homebrew installation succeeded"
        else
            error "Homebrew installation failed"
        fi
    fi
}
# Set up the zsh prompt (spaceship), syntax highlighting, and .zshrc sourcing.
function configure_zsh() {
    ZSH_PLUGIN_DIR="$DOTFILES_REPO/checkout"
    SPACESHIP_DIR="$ZSH_PLUGIN_DIR/spaceship-prompt"
    clone_or_update "Spaceship prompt" $SPACESHIP_DIR "https://github.com/denysdovhan/spaceship-prompt.git"
    step "Linking spaceship prompt"
    ZSH_FUNCTIONS_DIR="$HOME/.zfunctions"
    if test -L "$ZSH_FUNCTIONS_DIR/prompt_spaceship_setup"; then
        info "spaceship prompt already linked"
    else
        step "creating $ZSH_FUNCTIONS_DIR dir"
        if test -e $ZSH_FUNCTIONS_DIR; then
            info "$ZSH_FUNCTIONS_DIR already exists"
        else
            if mkdir $ZSH_FUNCTIONS_DIR; then
                success "$ZSH_FUNCTIONS_DIR dir created"
            else
                error "failed to create $ZSH_FUNCTIONS_DIR dir"
            fi
        fi
        # BUGFIX: the link must be created whether or not the directory already
        # existed. Previously this block was nested inside the mkdir branch, so
        # the prompt was never linked when ~/.zfunctions pre-existed.
        if ln -sf "$SPACESHIP_DIR/spaceship.zsh" "$ZSH_FUNCTIONS_DIR/prompt_spaceship_setup"; then
            success "spaceship prompt linked"
        else
            error "spaceship prompt linking failed"
        fi
    fi
    SYNTAX_HIGHLIGHTING_DIR="$ZSH_PLUGIN_DIR/zsh-syntax-highlighting"
    clone_or_update "zsh-syntax-highlighting" $SYNTAX_HIGHLIGHTING_DIR "https://github.com/zsh-users/zsh-syntax-highlighting.git"
    addToFileIfNeeded "source $DOTFILES_REPO/zsh/.zshrc" "link to .zshrc" $HOME/.zshrc
}
# Append the repo's gitconfig template to ~/.gitconfig (idempotent).
function configure_git() {
    addTemplateToFileIfNeeded $DOTFILES_REPO/git/.gitconfig_template ".gitconfig include" $HOME/.gitconfig
}

# Add an Include directive for the repo's ssh config to ~/.ssh/config (idempotent).
function configure_ssh() {
    addToFileIfNeeded "Include $DOTFILES_REPO/ssh/config" "ssh config include" $HOME/.ssh/config
}
# Copy VSCode settings into place and install every extension listed in
# vscode/extensions.txt that is not already installed.
function configure_vscode() {
    copy_file "VSCode settings" $DOTFILES_REPO/vscode/settings.json $HOME/Library/Application\ Support/Code/User/settings.json
    # Snapshot the installed-extension list once, rather than per extension.
    EXTENSIONS_INSTALLED=$(code --list-extensions)
    for extension in `cat $DOTFILES_REPO/vscode/extensions.txt`
    do
        step "Installing VSCode extension $extension"
        # NOTE(review): this is a substring match — an extension id that is a
        # prefix of another installed id would be wrongly reported as installed.
        if echo $EXTENSIONS_INSTALLED | grep -c $extension &> /dev/null; then
            info "VSCode extension $extension already installed"
        else
            if code --install-extension $extension &> /dev/null; then
                success "VSCode extension $extension installed successfully"
            else
                error "Failed to install VSCode extension $extension"
            fi
        fi
    done
}
# Install everything listed in the macOS Brewfile, skipping the (slow) install
# step when `brew bundle check` reports all dependencies already satisfied.
function install_packages_with_brewfile() {
    BREW_FILE_PATH="${DOTFILES_REPO}/brew/macOS.Brewfile"
    step "Installing packages within ${BREW_FILE_PATH}"
    if brew bundle check --file="$BREW_FILE_PATH" &> /dev/null; then
        info "Brewfile's dependencies are already satisfied"
    else
        if brew bundle --file="$BREW_FILE_PATH"; then
            success "Brewfile installation succeeded"
        else
            error "Brewfile installation failed"
        fi
    fi
}
# Clone the dotfiles repository into $DOTFILES_REPO, or pull if already present.
function clone_dotfiles_repo() {
    clone_or_update "Dotfiles" ${DOTFILES_REPO} "https://github.com/Nef10/dotfiles.git"
}
# Copy $2 to $3 (described as $1), skipping the copy when both files already
# have identical contents.
function copy_file() {
    step "Copying ${1}"
    if diff -q $2 $3 &> /dev/null; then
        info "${1} already the same"
        return
    fi
    if cp $2 $3; then
        success "${1} copied"
    else
        error "Failed to copy ${1}"
    fi
}
# Clone repository $3 (described as $1) into directory $2; if the directory
# already exists, pull the latest changes instead.
function clone_or_update() {
    step "Cloning ${1} repository into ${2}"
    if test -e $2; then
        info "${2} already exists"
        pull_latest $2
    else
        if git clone "$3" $2; then
            success "${1} repository cloned into ${2}"
        else
            error "${1} repository cloning failed"
        fi
    fi
}
# Fetch and fast-forward the repository in directory $1 when its local HEAD is
# behind the upstream branch.
function pull_latest() {
    step "Pulling latest changes in ${1} repository"
    git -C $1 fetch &> /dev/null
    # BUGFIX: compare the repository passed in $1; this previously hardcoded
    # $HOME/.dotfiles, so plugin checkouts (e.g. spaceship-prompt) were compared
    # against the wrong repository's HEAD.
    if [ $(git -C $1 rev-parse HEAD) '==' $(git -C $1 rev-parse @{u}) ]; then
        info "${1} already up to date"
    else
        if git -C $1 pull origin master &> /dev/null; then
            success "Pull in ${1} successful"
        else
            error "Failed, Please pull latest changes in ${1} repository manually"
        fi
    fi
}
# Run the macOS defaults script from inside its own directory (it uses relative
# paths), restoring the caller's working directory on both success and failure.
function setup_macOS_defaults() {
    step "Updating macOS defaults"
    current_dir=$(pwd)
    cd ${DOTFILES_REPO}/macOS
    if bash defaults.sh; then
        cd $current_dir
        success "macOS defaults updated successfully"
    else
        cd $current_dir
        error "macOS defaults update failed"
    fi
}
# Logging helpers: colored, prefixed status lines via zsh prompt expansion (-P).
function step() {
    print -P "%F{blue}=> $1%f"
}
function info() {
    print -P "%F{white}===> $1%f"
}
function success() {
    print -P "%F{green}===> $1%f"
}
# error() aborts the whole script: every stage treats failure as fatal.
function error() {
    print -P "%F{red}===> $1%f"
    print -P "%F{red}Aborting%f"
    exit 1
}

main "$@"
|
<gh_stars>0
// Safari exposes the prefixed constructor instead of the standard one.
declare const webkitAudioContext: typeof AudioContext;

// Callback invoked once the AudioContext is running.
type F = ( ctx: AudioContext ) => void;

/**
 * Lazily creates and starts an AudioContext (must be triggered by a user
 * gesture in most browsers) and queues callbacks until it is running.
 */
export default class Context {
  // Callbacks registered before the context exists; flushed on 'running'.
  private readyQueue: F[] = [];

  // Memoized start() promise so repeated calls share one context.
  private started: Promise<AudioContext>;

  public isSafari = typeof AudioContext === 'undefined' && !!webkitAudioContext;

  public context: AudioContext;

  public initialized = false;

  /**
   * Create the AudioContext on first call and resolve once its state becomes
   * 'running'. Subsequent calls return the same promise.
   */
  public start(): Promise<AudioContext> {
    if ( !this.started ) {
      this.started = new Promise( ( resolve, reject ) => {
        try {
          this.context = new ( this.isSafari ? webkitAudioContext : AudioContext )();
          this.context.onstatechange = (): void => {
            if ( this.context.state === 'running' ) {
              // Flush queued callbacks before resolving so they run first.
              this.readyQueue.forEach( f => f( this.context ) );
              this.readyQueue = [];
              resolve( this.context );
              this.initialized = true;
            }
          };
          // resume() nudges the context out of the 'suspended' state.
          // NOTE(review): a rejected resume() promise is not caught here —
          // the catch below only handles synchronous construction errors.
          this.context.resume();
        } catch {
          reject();
        }
      });
    }
    return this.started;
  }

  /** Run func now if the context exists, otherwise queue it for start(). */
  public ready( func: F ): void {
    if ( this.context ) {
      func( this.context );
    } else {
      this.readyQueue.push( func );
    }
  }

  /** Current audio clock time, or 0 before the context is created. */
  public getTime(): number {
    if ( this.context ) {
      return this.context.currentTime;
    }
    return 0;
  }
}
|
import { StateController } from 'trimop';
// Flat key/value store held inside a trimop StateController.
export type DB = { readonly [key: string]: unknown };

/** Remove every record from the store. */
export function clearKV(db: StateController<DB>): undefined {
  return db.set({});
}
/** Remove the record stored under `key`, leaving all other records intact. */
export function deleteRecordKV(db: StateController<DB>, key: string): undefined {
  const { [key]: _removed, ...remaining } = db.get();
  return db.set(remaining);
}
/** Read the record stored under `key`, or undefined when absent. */
export function getRecordKV<T>(db: StateController<DB>, key: string): T | undefined {
  // Cast is unchecked: the caller asserts the stored value's type.
  return db.get()[key] as T | undefined;
}

/** Store `value` under `key`, preserving all other records. */
export function setRecordKV<T>(db: StateController<DB>, key: string, value: T): undefined {
  return db.set({
    ...db.get(),
    [key]: value,
  });
}
|
#!/bin/sh
# Generate a new 2048-bit RSA key and a SHA-256 PEM certificate signing request
# for the PostgreSQL client, non-interactively (-batch) and with an unencrypted
# key (-nodes), using the subject details from postgres-client.cnf.
openssl req -batch -config postgres-client.cnf -newkey rsa:2048 -sha256 -out postgresclientcert.csr -outform PEM -nodes
|
<reponame>git-lt/weui-react<gh_stars>10-100
import React from 'react'
import { PopupPicker, Group, Cell } from '../../../src'
import Page from '../../component/page'
import chinaAddress from '../../../src/components/picker/china_address.json'
// Demo page for the PopupPicker component (legacy React.createClass API):
// single-column, multi-column with display formatting, and linked (cascading)
// region selection backed by the bundled china_address data.
var Demo = React.createClass({
getInitialState(){
// console.log(chinaAddress)
return {
data1: ['小米1', 'iPhone1', '华为1', '情怀1', '三星1', '其他1', '不告诉你1'],
value1: ['小米1'],
data2: [[2001, 2002, 2003],[1,2,3,4,5,6],[10,11,12,13,14]],
value2: ['2001', '2', '13'],
data3: chinaAddress,
}
},
// Single-column picker: store the chosen value back into state.
change1(v){
this.setState({
value1: [v]
})
},
// Render a [year, month, day] selection as one display string.
format2(v){
return `${v[0]}年${v[1]}月${v[2]}日`
},
// Map the selected [province, city] codes back to their display names.
formatCity(v){
var t1='', t2='';
chinaAddress.forEach(item=>{
item.value == v[0] && (t1 = item.name);
item.value == v[1] && (t2 = item.name);
})
return t1+' '+t2;
},
render() {
const { data1, value1, data2, value2, data3 } = this.state;
return (
<Page title="PopupPicker" subTitle="弹出层数据选择器">
<Group title="基本使用">
<PopupPicker
value={ value1}
data={ data1 }
label="品牌"
placeholder="点击请选择"
onChange={ this.change1 }>
</PopupPicker>
</Group>
<Group title="多列选择及格式化">
<PopupPicker
value={ value2}
data={ data2 }
label="生日"
placeholder="点击请选择"
displayFormat={ this.format2 }>
</PopupPicker>
</Group>
<Group title="联动选择">
<PopupPicker
value={ [] }
data={ data3 }
columns={2}
displayFormat={ this.formatCity }
label="地区"
placeholder="点击请选择">
</PopupPicker>
</Group>
</Page>
);
}
})
export default Demo
|
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-3103-1
#
# Security announcement date: 2016-10-13 00:00:00 UTC
# Script generation date: 2017-01-01 21:05:39 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - libdbd-mysql-perl:4.020-1ubuntu0.1
#
# Last versions recommended by security team:
# - libdbd-mysql-perl:4.020-1ubuntu0.1
#
# CVE List:
# - CVE-2014-9906
# - CVE-2015-8949
# - CVE-2016-1246
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Upgrade (only, never fresh-install) the vulnerable package to the fixed version.
sudo apt-get install --only-upgrade libdbd-mysql-perl=4.020-1ubuntu0.1 -y
|
#ifndef INOUT_H
#define INOUT_H

#include <stdio.h> /* FILE */

#include "sinais.h"

/* Write the output signals to the already-open file arqSaida. */
void salvarSinais(Sinais *sinaisSaida, FILE *arqSaida);

/* Load input signals from the already-open file arquivo. */
Sinais* carregaEntradas(FILE *arquivo);

/*
 * BUGFIX: a bare `extern "C"` is a syntax error when this header is included
 * from a C translation unit; it must only be seen by a C++ compiler.
 */
#ifdef __cplusplus
extern "C"
#endif
Sinais* carregaArquivoSinais(const char* path);

#endif // INOUT_H
|
<gh_stars>0
// Descriptor for a 24x24 "save" (floppy disk) SVG icon, consumed by an
// icon-rendering helper that builds the element tree from `elem`/`attrs`/`content`.
export default {
  elem: 'svg',
  attrs: {
    xmlns: 'http://www.w3.org/2000/svg',
    viewBox: '0 0 24 24',
    width: 24,
    height: 24,
  },
  content: [
    {
      elem: 'path',
      attrs: {
        d:
          'M20.8 7L17 3.2c-.1-.1-.3-.2-.5-.2h-12C3.7 3 3 3.7 3 4.5v15c0 .8.7 1.5 1.5 1.5h15c.8 0 1.5-.7 1.5-1.5v-12c0-.2-.1-.4-.2-.5zM9 4.5h6v3H9v-3zm6 15H9v-6h6v6zm1.5 0v-6c0-.8-.7-1.5-1.5-1.5H9c-.8 0-1.5.7-1.5 1.5v6h-3v-15h3v3C7.5 8.3 8.2 9 9 9h6c.8 0 1.5-.7 1.5-1.5V4.8l3 3v11.7h-3z',
      },
    },
  ],
  name: 'save',
  size: 24,
};
|
import listChannels, { facts } from "../../api/methods/channel/list_channels"
import { TurboServer } from "../../turbo"
import { getRemoteIpAddress } from "../../remoteIpAddress"
// Registers the GET route for listing channels. The route facts (path, auth
// requirements) come from the API method definition itself.
export default (server: TurboServer) => {
  server.get(facts, async (req, res, params) => {
    // Client IP is forwarded to the method (e.g. for rate limiting / auditing).
    const remoteIpAddress = getRemoteIpAddress(req.headers)
    const channels = await listChannels(
      {
        sort_by: req.query.sort_by,
        sort_order: req.query.sort_order,
      },
      remoteIpAddress,
      params["authUser"]
    )
    // Serialize each channel model to its plain JSON representation.
    return {
      ok: true,
      channels: channels.map((channel) => channel.toJsonObject()),
    }
  })
}
|
<gh_stars>1-10
// Hibernation Setup Tool
// Sets up a swap area suitable to hibernate a Linux system.
//
// Copyright (c) 2021 Microsoft Corp.
// Licensed under the terms of the MIT license.
#define _GNU_SOURCE
/* sys/mount.h has to be included before linux/fs.h
* https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=898743
*/
#include <sys/mount.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <ftw.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/magic.h>
#include <linux/suspend_ioctls.h>
#include <mntent.h>
#include <spawn.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/auxv.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/statfs.h>
#include <sys/swap.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vfs.h>
#include <sys/wait.h>
#include <syscall.h>
#include <syslog.h>
#include <unistd.h>
#define MEGA_BYTES (1ul << 20)
#define GIGA_BYTES (1ul << 30)
#ifndef SEEK_HOLE
#define SEEK_HOLE 4
#endif
#ifndef IOPRIO_WHO_PROCESS
#define IOPRIO_WHO_PROCESS 1
#endif
#ifndef IOPRIO_CLASS_IDLE
#define IOPRIO_CLASS_IDLE 3
#endif
#ifndef IOPRIO_PRIO_VALUE
#define IOPRIO_PRIO_VALUE(klass, data) (((klass) << 13) | (data))
#endif
#ifndef XFS_SUPER_MAGIC
#define XFS_SUPER_MAGIC ('X' << 24 | 'F' << 16 | 'S' << 8 | 'B')
#endif
/* Path of the swap file this tool creates and registers for hibernation. */
static const char swap_file_name[] = "/hibfile.sys";
/* Prefix is needed when not running as a service. Output from this the tool is fed to the
 * system log while systemd is processing the request to hibernate. This makes it easier to
 * grep for hibernation-setup-tool there too in case of a failure. */
static bool log_needs_prefix = false;
/* We don't always want to spam syslog: spamming stdout is fine as this is supposed to be
 * executed as a daemon in systemd and this output will be stored in the journal. However,
 * this agent can run as a hook and we want to make sure that the messages there are logged
 * somewhere. */
static bool log_needs_syslog = false;
/* This is a link pointing to a file in a tmpfs filesystem and is mostly used to detect
 * if we got a cold boot or not. */
static const char hibernate_lock_file_name[] = "/etc/hibernation-setup-tool.last_hibernation";
/* Lifecycle events reported to the host. */
enum host_vm_notification {
    HOST_VM_NOTIFY_COLD_BOOT, /* Sent every time system cold boots */
    HOST_VM_NOTIFY_HIBERNATING, /* Sent right before hibernation */
    HOST_VM_NOTIFY_RESUMED_FROM_HIBERNATION, /* Sent right after hibernation */
    HOST_VM_NOTIFY_PRE_HIBERNATION_FAILED, /* Sent on errors when hibernating or resuming */
};
/* A swap file candidate: its size plus the path stored inline (flexible array member). */
struct swap_file {
    size_t capacity;
    char path[];
};
/* Thin wrapper: glibc has no ioprio_set() binding, so call the syscall directly. */
static int ioprio_set(int which, int who, int ioprio) { return (int)syscall(SYS_ioprio_set, which, who, ioprio); }
/*
 * Core logger: optionally mirrors to syslog, then writes a prefixed line to stdout.
 * stdout is locked so concurrent callers can't interleave a single message.
 */
static void log_impl(int log_level, const char *fmt, va_list ap)
{
    if (log_needs_syslog)
        vsyslog(log_level, fmt, ap);

    flockfile(stdout);

    if (log_needs_prefix)
        printf("hibernation-setup-tool: ");

    /* BUGFIX: syslog priorities are small integers (LOG_ERR == 3, LOG_INFO == 6),
     * not bit flags; the previous bitwise test `log_level & LOG_INFO` was true for
     * LOG_ERR as well (3 & 6 == 2), so errors were printed with an "INFO: " prefix. */
    if (log_level == LOG_INFO)
        printf("INFO: ");
    else if (log_level == LOG_ERR)
        printf("ERROR: ");

    vprintf(fmt, ap);
    printf("\n");

    funlockfile(stdout);
}
/* Log an informational message (printf-style). */
__attribute__((format(printf, 1, 2))) static void log_info(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    log_impl(LOG_INFO, fmt, ap);
    va_end(ap);
}

/* Log an error message (printf-style) and terminate the process with status 1. */
__attribute__((format(printf, 1, 2))) __attribute__((noreturn)) static void log_fatal(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    log_impl(LOG_ERR, fmt, ap);
    va_end(ap);
    exit(1);
    __builtin_unreachable();
}
/*
 * Advance to the next whitespace-separated field in a mutable string,
 * NUL-terminating the current field in place. Returns a pointer to the start
 * of the next field, or NULL if `current` is NULL or empty.
 *
 * NOTE(review): if the current field is not followed by whitespace before the
 * terminating NUL, the first loop scans past the end of the field — callers
 * appear to rely on /proc lines always ending in '\n'; confirm.
 */
static char *next_field(char *current)
{
    if (!current)
        return NULL;
    if (*current == '\0')
        return NULL;
    while (!isspace(*current))
        current++;
    *current = '\0'; /* 0-terminate the previous call to next_field() */
    current++; /* skip the NUL terminator */
    while (isspace(*current))
        current++;
    return current;
}
/*
 * Parse a base-10 size_t from `ptr`. If `endptr` is non-NULL, the character at
 * the end of the number must equal `expected_end`. Any parse error is fatal.
 */
static size_t parse_size_or_die(const char *ptr, const char expected_end, char **endptr)
{
    size_t parsed;
    errno = 0;
    /* The sizeof comparisons are compile-time constants; pick the strtoul
     * variant whose return type matches size_t on this platform. */
    if (sizeof(size_t) == sizeof(unsigned long long)) {
        parsed = strtoull(ptr, endptr, 10);
    } else if (sizeof(size_t) == sizeof(unsigned long)) {
        parsed = strtoul(ptr, endptr, 10);
    } else {
        log_fatal("Invalid size of size_t: %zu", sizeof(size_t));
    }
    if (errno || (endptr && **endptr != expected_end))
        log_fatal("Could not parse size: %s", strerror(errno));
    return parsed;
}
/*
 * Search a colon-separated `path_env` (defaulting to common system dirs when
 * NULL) for an executable named `name`. On success the full path is written to
 * `path_buf` and returned; otherwise returns NULL.
 */
static const char *find_executable_in_path(const char *name, const char *path_env, char path_buf[static PATH_MAX])
{
    if (!path_env)
        path_env = "/bin:/sbin:/usr/bin:/usr/sbin";
    while (*path_env) {
        const char *p = strchr(path_env, ':');
        int ret;
        if (!p) {
            /* Last segment of $PATH */
            ret = snprintf(path_buf, PATH_MAX, "%s/%s", path_env, name);
            path_env = "";
        } else if (p - path_env) {
            /* Middle segments */
            ret = snprintf(path_buf, PATH_MAX, "%.*s/%s", (int)(p - path_env), path_env, name);
            path_env = p + 1;
        } else {
            /* Empty segment or non-root directory? Skip. */
            path_env = p + 1;
            continue;
        }
        if (ret < 0 || ret >= PATH_MAX)
            log_fatal("Building path to determine if %s exists would overflow buffer", name);
        /* Relative $PATH entries are ignored for safety. */
        if (path_buf[0] != '/')
            continue;
        if (!access(path_buf, X_OK))
            return path_buf;
    }
    return NULL;
}
/* True if an executable named `name` can be found via $PATH. */
static bool is_exec_in_path(const char *name)
{
    char path_buf[PATH_MAX];
    return find_executable_in_path(name, getenv("PATH"), path_buf) != NULL;
}
/* Allocate a swap_file record with `path` stored inline after the struct. */
static struct swap_file *new_swap_file(const char *path, size_t capacity)
{
    size_t path_bytes = strlen(path) + 1; /* include the NUL terminator */
    struct swap_file *file = malloc(sizeof(*file) + path_bytes);

    if (!file)
        log_fatal("Could not allocate memory for swap file information");

    file->capacity = capacity;
    memcpy(file->path, path, path_bytes);

    return file;
}
/*
 * Scan /proc/swaps for an active file-backed swap area of at least
 * `needed_size`. Falls back to stat()ing the tool's own swap file path when no
 * suitable active entry is found. Returns a malloc'd record or NULL.
 */
static struct swap_file *find_swap_file(size_t needed_size)
{
    char buffer[1024];
    FILE *swaps;
    struct swap_file *out = NULL;
    swaps = fopen("/proc/swaps", "re");
    if (!swaps)
        log_fatal("Could not open /proc/swaps: is /proc mounted?");
    /* Skip first line (header) */
    if (!fgets(buffer, sizeof(buffer), swaps))
        log_fatal("Could not skip first line from /proc/swaps");
    while (fgets(buffer, sizeof(buffer), swaps)) {
        char *filename = buffer;
        char *type = next_field(filename);
        if (!type) {
            log_info("Couldn't get the second field while parsing /proc/swaps");
            break;
        }
        /* Only "file" entries qualify; swap partitions are ignored. */
        if (!strcmp(type, "file")) {
            char *size = next_field(type);
            if (!size)
                log_fatal("Malformed line in /proc/swaps: can't find size column");
            size_t size_as_int = parse_size_or_die(size, ' ', NULL);
            if (size_as_int < needed_size)
                continue;
            out = new_swap_file(filename, size_as_int);
            break;
        }
    }
    fclose(swaps);
    if (!out) {
        /* No active swap file matched; fall back to the tool's own hibfile if present. */
        struct stat st;
        if (stat(swap_file_name, &st) < 0)
            return NULL;
        if (!S_ISREG(st.st_mode))
            return NULL;
        return new_swap_file(swap_file_name, st.st_size);
    }
    return out;
}
/*
 * Return the machine's total physical memory in bytes, parsed from the
 * MemTotal line of /proc/meminfo. Returns 0 if the line is not found.
 */
static size_t physical_memory(void)
{
    FILE *meminfo;
    char buffer[256];
    size_t total = 0;
    meminfo = fopen("/proc/meminfo", "re");
    if (!meminfo)
        log_fatal("Could not determine physical memory size. Is /proc mounted?");
    while (fgets(buffer, sizeof(buffer), meminfo)) {
        static const size_t mem_total_len = sizeof("MemTotal: ") - 1;
        if (!strncmp(buffer, "MemTotal: ", mem_total_len)) {
            char *endptr;
            total = parse_size_or_die(buffer + mem_total_len, ' ', &endptr);
            /* Scale by the unit suffix; kernels report kB, the rest are defensive. */
            if (!strcmp(endptr, " kB\n"))
                total *= 1024;
            else if (!strcmp(endptr, " MB\n"))
                total *= MEGA_BYTES;
            else if (!strcmp(endptr, " GB\n"))
                total *= 1024 * MEGA_BYTES;
            else if (!strcmp(endptr, " TB\n"))
                total *= (size_t)MEGA_BYTES * (size_t)MEGA_BYTES;
            else
                log_fatal("Could not determine unit for physical memory information");
            break;
        }
    }
    fclose(meminfo);
    return total;
}
/*
 * Return the swap size (bytes) needed to hibernate a machine with `phys_mem`
 * bytes of RAM, following the Fedora sizing table (3x up to 2GB, 2x up to 8GB,
 * 1.5x up to 64GB), extended with 1.25x up to 256GB. Fatal above 256GB.
 */
static size_t swap_needed_size(size_t phys_mem)
{
    /* This is using the recommendation from the Fedora project documentation. */
    if (phys_mem <= 2 * GIGA_BYTES)
        return 3 * phys_mem;
    if (phys_mem <= 8 * GIGA_BYTES)
        return 2 * phys_mem;
    if (phys_mem <= 64 * GIGA_BYTES)
        return (3 * phys_mem) / 2;
    /* Note: Fedora documentation doesn't recommend hibernation for machines over 64GB of RAM,
     * but we're extending this for a bit. */
    if (phys_mem <= 256 * GIGA_BYTES)
        return (5 * phys_mem) / 4;
    log_fatal("Hibernation not recommended for a machine with more than 256GB of RAM");
}
/*
 * Resolve the filesystem UUID of the block device at `path` by scanning
 * /dev/disk/by-uuid/ for an entry with the same device number. Returns a
 * strdup'd UUID string (caller frees) or NULL.
 */
static char *get_uuid_for_dev_path(const char *path)
{
    struct stat dev_st;
    struct dirent *ent;
    char *uuid = NULL;
    DIR *uuid_dir;
    if (stat(path, &dev_st) < 0) {
        log_info("Could not stat(%s): %s", path, strerror(errno));
        return NULL;
    }
    uuid_dir = opendir("/dev/disk/by-uuid/");
    if (!uuid_dir) {
        log_info("Could not open directory /dev/disk/by-uuid/: %s", strerror(errno));
        return NULL;
    }
    while ((ent = readdir(uuid_dir))) {
        struct stat ent_st;
        if (fstatat(dirfd(uuid_dir), ent->d_name, &ent_st, 0) < 0)
            continue;
        /* Shouldn't happen, but just in case */
        if ((ent_st.st_mode & S_IFMT) != S_IFBLK)
            continue;
        /* Matching device numbers means this by-uuid symlink names our device. */
        if (ent_st.st_rdev == dev_st.st_rdev) {
            uuid = strdup(ent->d_name);
            break;
        }
    }
    closedir(uuid_dir);
    if (uuid)
        log_info("UUID for device %s is %s", path, uuid);
    return uuid;
}
/*
 * Find the UUID of the filesystem containing `path`: walk /proc/mounts until a
 * mount point sits on the same st_dev, then resolve that device's UUID.
 * Returns a malloc'd string or NULL.
 */
static char *get_disk_uuid_for_file_path(const char *path)
{
    FILE *mounts = setmntent("/proc/mounts", "re");
    struct mntent *ent;
    struct stat st;
    if (!mounts)
        return NULL;
    if (stat(path, &st) < 0)
        log_fatal("Could not stat(%s): %s", path, strerror(errno));
    while ((ent = getmntent(mounts))) {
        struct stat ent_st;
        if (stat(ent->mnt_dir, &ent_st) < 0)
            continue;
        if (ent_st.st_dev == st.st_dev)
            break;
    }
    endmntent(mounts);
    /* ent is NULL when the loop exhausted all mounts without a match. */
    if (!ent) {
        log_info("Could not determine device for file in path %s", path);
        return NULL;
    }
    return get_uuid_for_dev_path(ent->mnt_fsname);
}
/*
 * Determine the I/O block size to use on the root filesystem: the larger of
 * the device's logical sector size (BLKSSZGET) and the filesystem block size
 * (statfs f_bsize). Any failure is fatal.
 */
static long determine_block_size_for_root_fs(void)
{
    FILE *mtab = setmntent("/proc/mounts", "re");
    struct mntent *mntent;
    long sector_size = 0;

    if (!mtab)
        log_fatal("Could not determine mounted partitions. Is /proc mounted?");

    while ((mntent = getmntent(mtab))) {
        if (!strcmp(mntent->mnt_dir, "/")) {
            int fd = open(mntent->mnt_fsname, O_RDONLY | O_CLOEXEC);

            if (fd < 0) {
                log_fatal("Could not open %s to determine block size: %s", mntent->mnt_fsname, strerror(errno));
            }

            /* BUGFIX: the argument was garbled to `§or_size` (a mangled
             * `&sector_size`), which does not compile; BLKSSZGET needs the
             * address of the output variable. */
            if (ioctl(fd, BLKSSZGET, &sector_size) < 0)
                sector_size = 0;

            close(fd);
            break;
        }
    }

    endmntent(mtab);

    if (sector_size) {
        struct statfs sfs;

        if (statfs("/", &sfs) < 0)
            log_fatal("Could not determine optimal block size for root filesystem: %s", strerror(errno));

        return sfs.f_bsize > sector_size ? sfs.f_bsize : sector_size;
    }

    log_fatal("Could not obtain sector size for root partition: %s", strerror(errno));
}
/*
 * Read the first line of `path` into `buffer` (1024 bytes), stripping any
 * trailing newline. Returns `buffer` on success, NULL on any failure.
 */
static char *read_first_line_from_file(const char *path, char buffer[static 1024])
{
    FILE *f = fopen(path, "re");

    if (!f)
        return NULL;

    char *line = fgets(buffer, 1024, f);
    fclose(f);

    if (!line)
        return NULL;

    /* Cut the string at the first '\n' (no-op when there is none). */
    buffer[strcspn(buffer, "\n")] = '\0';

    return buffer;
}
/* A Hyper-V guest exposes the VMBus bus directory in sysfs. */
static bool is_hyperv(void) { return !access("/sys/bus/vmbus", F_OK); }
/*
 * Detect whether we are inside a container by inspecting PID 1's cgroup file:
 * a bare "0::/" entry indicates a containerized init under systemd.
 */
static bool is_running_in_container(void)
{
    FILE *cgroup;
    char buffer[1024];
    bool ret = false;
    cgroup = fopen("/proc/1/cgroup", "re");
    if (!cgroup)
        log_fatal("Could not read /proc/1/cgroup to determine if we're running in a container");
    while (fgets(buffer, sizeof(buffer), cgroup)) {
        const char *first_colon = strchr(buffer, ':');
        if (!first_colon)
            continue;
        /* When running in a container, PID 1 will have a line in
         * /proc/1/cgroup with "0::/" as a content; whereas, when running in
         * the host, it might have something like "0::/init.slice".
         *
         * Things might be different with anything other than systemd, but
         * since we're supporting systemd-only distros at this point, it's
         * safer to use this method rather than rely on things like the
         * presence of /.dockerenv or something like that as the container
         * runtime not be docker. */
        if (!strcmp(first_colon, "::/\n")) {
            ret = true;
            break;
        }
    }
    fclose(cgroup);
    return ret;
}
/*
 * Decide whether this VM can and should hibernate: not in a container,
 * /dev/snapshot present, /sys/power/disk readable, and ideally either the
 * "platform" method or (on Hyper-V) VMBus-reported hibernation support.
 */
static bool is_hibernation_enabled_for_vm(void)
{
    char buffer[1024];
    char *entry;
    if (is_running_in_container()) {
        log_info("We're running in a container; this isn't supported.");
        return false;
    }
    if (access("/dev/snapshot", F_OK) != 0) {
        log_info("Kernel does not support hibernation or /dev/snapshot has not been found.");
        return false;
    }
    /* First, check if the kernel can hibernate. Don't even bother if the
     * interface to hibernate it isn't available. */
    entry = read_first_line_from_file("/sys/power/disk", buffer);
    if (!entry) {
        log_info("Kernel does not support hibernation (/sys/power/disk does not exist or can't be read).");
        return false;
    }
    if (strstr(entry, "platform")) {
        log_info("VM supports hibernation with platform-supported events.");
        return true;
    }
    if (strstr(entry, "shutdown")) {
        log_info("VM supports hibernation only with the shutdown method. This is not ideal.");
    } else if (strstr(entry, "suspend")) {
        log_info("VM supports hibernation only with the suspend method. This is not ideal.");
    } else {
        log_info("Unknown VM hibernation support mode found: %s", entry);
        return false;
    }
    /* Non-ideal method: on Hyper-V, trust the VMBus hibernation flag instead. */
    if (is_hyperv()) {
        log_info("This is a Hyper-V VM, checking if hibernation is enabled through VMBus events.");
        entry = read_first_line_from_file("/sys/bus/vmbus/hibernation", buffer);
        if (entry) {
            if (!strcmp(entry, "1")) {
                log_info("Hibernation is enabled according to VMBus. This is ideal.");
                return true;
            }
            log_info("Hibernation is disabled according to VMBus.");
        }
    }
    log_info("Even though VM is capable of hibernation, it seems to be disabled.");
    return false;
}
/* Returns the filesystem block number of the swap file's first block
 * (suitable for the resume_offset= kernel parameter), or ~0 on failure.
 * Resuming needs at least one page's worth of physically contiguous blocks
 * at the start of the file; this walks them with FIBMAP to verify that. */
static uint32_t get_swap_file_offset(int fd)
{
uint32_t blksize;
/* FIGETBSZ yields the filesystem I/O block size for this file. */
if (ioctl(fd, FIGETBSZ, &blksize) < 0)
log_fatal("Could not get file block size: %s", strerror(errno));
uint32_t last = 0, first = 0, num_contiguous_blocks = 0;
uint32_t blocks_per_page = sysconf(_SC_PAGE_SIZE) / blksize;
uint32_t first_blknum = ~0;
for (uint32_t i = 0; i < blocks_per_page; i++) {
/* FIBMAP maps logical block i (input) to its physical block (output). */
uint32_t blknum = i;
if (ioctl(fd, FIBMAP, &blknum) < 0)
log_fatal("Could not get filesystem block number for block #%d: %s", i, strerror(errno));
if (i == 0)
first_blknum = blknum;
if (last && blknum - last != 1) {
/* If we find a block that's not contiguous, bail out. We
* check below if we have enough contiguous blocks for hibernation
* to work. */
break;
}
/* NOTE(review): `first` is written here but never read after the loop;
 * `first_blknum` (captured at i == 0) is what gets returned.  Looks like
 * dead state — confirm before removing. */
if (!first)
first = blknum;
last = blknum;
num_contiguous_blocks++;
}
log_info("First %d blocks of %d bytes are contiguous", num_contiguous_blocks, blksize);
/* Enough contiguous bytes for one whole page ⇒ offset is usable. */
if (num_contiguous_blocks * blksize >= sysconf(_SC_PAGE_SIZE))
return first_blknum;
return ~0;
}
/* Forces block allocation for `path` by writing a 4-byte marker at the start
 * of every filesystem block up to `needed_size`.  This is the slow fallback
 * used when fallocate() can't be used (e.g. swap files on XFS).
 * Returns true when every block was touched and synced. */
static bool try_zero_out_with_write(const char *path, off_t needed_size)
{
    int fd = open(path, O_WRONLY | O_CLOEXEC);
    bool ret = true; /* was declared `int` but used as a bool throughout */
    if (fd < 0) {
        log_info("Could not open %s: %s", path, strerror(errno));
        return false;
    }
    long block_size = determine_block_size_for_root_fs();
    /* "MSFT" marker, assembled little-endian. */
    const uint32_t pattern = 'T' << 24 | 'F' << 16 | 'S' << 8 | 'M';
    for (off_t offset = 0; offset < needed_size; offset += block_size) {
        ssize_t written = pwrite(fd, &pattern, sizeof(pattern), offset);
        if (written != (ssize_t)sizeof(pattern)) {
            /* A short write on a regular file is unexpected; treat it as an
             * I/O error instead of silently accepting it (original only
             * checked `written < 0`). */
            if (written >= 0)
                errno = EIO;
            log_info("Could not write pattern to %s: %s", path, strerror(errno));
            ret = false;
            goto out;
        }
    }
    fdatasync(fd);
out:
    close(fd);
    return ret;
}
/* Sets `flags_to_set` and clears `flags_to_reset` in the inode flags of the
 * open file `fd` (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS).  Returns false when
 * either ioctl fails; errno is left set by the failing ioctl. */
static bool fs_set_flags(int fd, int flags_to_set, int flags_to_reset)
{
    int current_flags;
    /* BUGFIX: "&current_flags" had been mangled into "¤t_flags" by an
     * HTML-entity encoding pass ("&curren;" → "¤"); restored so the code
     * compiles again. */
    if (ioctl(fd, FS_IOC_GETFLAGS, &current_flags) < 0)
        return false;
    current_flags |= flags_to_set;
    current_flags &= ~flags_to_reset;
    if (ioctl(fd, FS_IOC_SETFLAGS, &current_flags) < 0)
        return false;
    return true;
}
/* Returns true when the filesystem backing `path` has superblock magic
 * `magic`; false on statfs() failure. */
static bool is_file_on_fs(const char *path, __fsword_t magic)
{
    struct statfs stfs;
    if (statfs(path, &stfs) < 0)
        return false;
    return stfs.f_type == magic;
}
/* Creates `path` (mode 0600) and sizes it to `size` bytes, disabling CoW and
 * compression first where the filesystem supports those flags.  On XFS the
 * sizing is deferred to the slow write-based allocation path.  Returns true
 * when the file exists with the requested size. */
static bool create_swap_file_with_size(const char *path, off_t size)
{
    int fd = open(path, O_CLOEXEC | O_WRONLY | O_CREAT, 0600);
    int rc;
    if (fd < 0)
        return false;
    /* Disabling CoW is necessary on btrfs filesystems, but issue the
     * ioctl regardless of the filesystem just in case.
     * More information: https://wiki.archlinux.org/index.php/btrfs#Swap_file
     */
    if (!fs_set_flags(fd, FS_NOCOW_FL, 0)) {
        /* Some filesystems don't support CoW (EXT4 for instance), so don't bother
         * giving an error message in those cases. */
        if (errno != EOPNOTSUPP)
            log_info("Could not disable CoW for %s: %s. Will try setting up swap anyway.", path, strerror(errno));
    }
    /* Disable compression, too. */
    if (!fs_set_flags(fd, FS_NOCOMP_FL, FS_COMPR_FL)) {
        /* Compression is optional, too, so don't bother giving an error message in
         * case the filesystem doesn't support it. */
        if (errno != EOPNOTSUPP)
            log_info("Could not disable compression for %s: %s. Will try setting up swap anyway.", path, strerror(errno));
    }
    if (is_file_on_fs(path, XFS_SUPER_MAGIC)) {
        /* XFS swap files are grown by the write-based path instead. */
        rc = 0;
    } else {
        rc = ftruncate(fd, size);
        if (rc < 0) {
            /* BUGFIX: `size / MEGA_BYTES` is an off_t, but was printed with
             * mismatched specifiers (%zu on one branch, %ld on the other);
             * cast explicitly and use %lld on both. */
            if (errno == EPERM) {
                log_info("Not enough disk space to create %s with %lld MB.", path, (long long)(size / MEGA_BYTES));
            } else {
                log_info("Could not resize %s to %lld MB: %s", path, (long long)(size / MEGA_BYTES), strerror(errno));
            }
        }
    }
    close(fd);
    return rc == 0;
}
/* Fast path for materializing `path` at `size` bytes: fallocate() reserves
 * the blocks without writing them.  Running out of disk space (or any other
 * fallocate failure) is fatal; returns false only when the file can't be
 * opened, so the caller can try the slower write-based method. */
static bool try_zeroing_out_with_fallocate(const char *path, off_t size)
{
    int fd = open(path, O_CLOEXEC | O_WRONLY);
    if (fd < 0) {
        log_info("Could not open %s for writing: %s", path, strerror(errno));
        return false;
    }
    if (fallocate(fd, 0, 0, size) < 0) {
        /* BUGFIX: `size / MEGA_BYTES` is an off_t; %zd expects ssize_t.
         * Print via intmax_t/%jd, which is correct on every ABI. */
        if (errno == ENOSPC) {
            log_fatal("System ran out of disk space while allocating hibernation file. It needs %jd MiB", (intmax_t)(size / MEGA_BYTES));
        } else {
            log_fatal("Could not allocate %s: %s", path, strerror(errno));
        }
    }
    close(fd);
    return true;
}
/* Spawns `program` (looked up in $PATH) with `n_args` arguments taken from
 * `ap`, waits for it to finish, and returns true only when it exited
 * normally with status 0.  All failures are logged, never fatal. */
static bool try_vspawn_and_wait(const char *program, int n_args, va_list ap)
{
    pid_t pid;
    int rc;
    char **argv;
    /* n_args arguments + argv[0] + terminating NULL pointer. */
    argv = calloc(n_args + 2, sizeof(char *));
    if (!argv) {
        log_info("Couldn't allocate memory for argument array");
        return false;
    }
    argv[0] = (char *)program;
    for (int i = 1; i <= n_args; i++)
        argv[i] = va_arg(ap, char *);
    rc = posix_spawnp(&pid, program, NULL, NULL, argv, NULL);
    free(argv);
    if (rc != 0) {
        /* posix_spawnp() returns the error number directly. */
        log_info("Could not spawn %s: %s", program, strerror(rc));
        return false;
    }
    log_info("Waiting for %s (pid %d) to finish.", program, pid);
    int wstatus;
    if (waitpid(pid, &wstatus, 0) != pid) {
        log_info("Couldn't wait for %s: %s", program, strerror(errno));
        return false;
    }
    if (!WIFEXITED(wstatus)) {
        /* BUGFIX: the original printed strerror(errno) here, but waitpid()
         * succeeded so errno was stale and unrelated.  Report how the child
         * actually ended instead. */
        if (WIFSIGNALED(wstatus))
            log_info("%s ended abnormally (killed by signal %d)", program, WTERMSIG(wstatus));
        else
            log_info("%s ended abnormally", program);
        return false;
    }
    if (WEXITSTATUS(wstatus) == 127) {
        /* 127 is the spawn/shell convention for "command not found". */
        log_info("Failed to spawn %s", program);
        return false;
    }
    if (WEXITSTATUS(wstatus) != 0) {
        log_info("%s ended with unexpected exit code %d", program, WEXITSTATUS(wstatus));
        return false;
    }
    log_info("%s finished successfully.", program);
    return true;
}
/* Varargs convenience wrapper around try_vspawn_and_wait(). */
static bool try_spawn_and_wait(const char *program, int n_args, ...)
{
    va_list args;
    va_start(args, n_args);
    bool ok = try_vspawn_and_wait(program, n_args, args);
    va_end(args);
    return ok;
}
/* Like try_spawn_and_wait(), but any failure aborts the program. */
static void spawn_and_wait(const char *program, int n_args, ...)
{
    va_list args;
    va_start(args, n_args);
    bool ok = try_vspawn_and_wait(program, n_args, args);
    va_end(args);
    if (!ok)
        log_fatal("Aborting program due to error condition when spawning %s", program);
}
/* Runs the defragmentation tool matching the filesystem that hosts `path`
 * (ext4, Btrfs, or XFS), so the swap file ends up as contiguous as possible.
 * On Btrfs, also refuses to proceed on kernels older than 5.x, where swap
 * files are not supported.  Missing tools are silently skipped. */
static void perform_fs_specific_checks(const char *path)
{
    if (is_file_on_fs(path, EXT4_SUPER_MAGIC) && is_exec_in_path("e4defrag")) {
        try_spawn_and_wait("e4defrag", 1, path);
        return;
    }
    if (is_file_on_fs(path, BTRFS_SUPER_MAGIC)) {
        struct utsname utsbuf;
        if (uname(&utsbuf) < 0)
            log_fatal("Could not determine Linux kernel version: %s", strerror(errno));
        /* BUGFIX: the original compared release[0]/release[1] as characters,
         * which mis-parses any two-digit major version (e.g. "10.0" would be
         * rejected as unparseable).  Parse the major number numerically. */
        char *end;
        long major = strtol(utsbuf.release, &end, 10);
        if (end == utsbuf.release || *end != '.')
            log_fatal("Could not parse Linux kernel version");
        if (major < 5)
            log_fatal("Swap files are not supported on Btrfs running on kernel %s", utsbuf.release);
        if (is_exec_in_path("btrfs"))
            try_spawn_and_wait("btrfs", 3, "filesystem", "defragment", path);
        return;
    }
    if (is_file_on_fs(path, XFS_SUPER_MAGIC) && is_exec_in_path("xfs_fsr")) {
        try_spawn_and_wait("xfs_fsr", 2, "-v", path);
        return;
    }
}
/* Creates and fully prepares the swap file at the global swap_file_name with
 * `needed_size` bytes: create + size it, force real block allocation
 * (fallocate, or the slow write pass on XFS / fallocate failure), run
 * filesystem-specific defragmentation, and format with mkswap(8).
 * Any failure aborts the program.  Returns a heap-allocated swap_file the
 * caller owns and must free(). */
static struct swap_file *create_swap_file(size_t needed_size)
{
log_info("Creating hibernation file at %s with %zu MB.", swap_file_name, needed_size / MEGA_BYTES);
if (!create_swap_file_with_size(swap_file_name, needed_size))
log_fatal("Could not create swap file, aborting.");
/* Allocate the swap file with the lowest I/O priority possible to not thrash workload */
ioprio_set(IOPRIO_WHO_PROCESS, 0, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 7));
log_info("Ensuring %s has no holes in it.", swap_file_name);
/* On XFS the fallocate path is skipped entirely (see log below); elsewhere
 * the slow write pass is only the fallback. */
bool swap_on_xfs = is_file_on_fs(swap_file_name, XFS_SUPER_MAGIC);
if (swap_on_xfs || !try_zeroing_out_with_fallocate(swap_file_name, needed_size)) {
if (swap_on_xfs)
log_info("Root partition is in a XFS filesystem; need to use slower method to allocate swap file");
else
log_info("Fast method failed; trying a slower method.");
if (!try_zero_out_with_write(swap_file_name, needed_size))
log_fatal("Could not create swap file.");
}
perform_fs_specific_checks(swap_file_name);
spawn_and_wait("mkswap", 1, swap_file_name);
/* NOTE(review): sizeof(swap_file_name) is used as the path buffer length,
 * which assumes swap_file_name is declared as a char array (not a char *)
 * and that struct swap_file ends in a flexible path member — confirm at
 * their definitions. */
struct swap_file *ret = malloc(sizeof(*ret) + sizeof(swap_file_name));
if (!ret)
log_fatal("Could not allocate memory to represent a swap file: %s", strerror(errno));
ret->capacity = needed_size;
memcpy(ret->path, swap_file_name, sizeof(swap_file_name));
return ret;
}
/* Checks whether the *running* kernel was booted with resume=,
 * resume_offset= and no_console_suspend=1 values matching the given swap
 * device UUID and file offset.  Returns false (i.e. "needs updating") when
 * /proc/cmdline can't be read or any parameter is missing or different. */
static bool is_kernel_cmdline_correct(const char *dev_uuid, off_t resume_offset)
{
    char buffer[1024];
    char *line = read_first_line_from_file("/proc/cmdline", buffer);
    if (!line) {
        log_info("Could not read /proc/cmdline; is /proc mounted? Assuming information is incorrect.");
        return false;
    }
    const char *resume_value = NULL;
    const char *offset_value = NULL;
    const char *no_console_value = NULL;
    /* Walk the space-separated fields, remembering the value part of each
     * parameter we care about (last occurrence wins, as with the original). */
    for (char *field = line; field; field = next_field(field)) {
        if (!strncmp(field, "resume=", strlen("resume=")))
            resume_value = field + strlen("resume=");
        else if (!strncmp(field, "resume_offset=", strlen("resume_offset=")))
            offset_value = field + strlen("resume_offset=");
        else if (!strncmp(field, "no_console_suspend=", strlen("no_console_suspend=")))
            no_console_value = field + strlen("no_console_suspend=");
    }
    if (!resume_value || !offset_value || !no_console_value)
        return false;
    char expected_dev[PATH_MAX];
    int r = snprintf(expected_dev, PATH_MAX, "/dev/disk/by-uuid/%s", dev_uuid);
    if (r < 0 || r >= PATH_MAX)
        return false;
    if (strcmp(resume_value, expected_dev) != 0)
        return false;
    /* 3 chars per byte comfortably covers the decimal digits of any size_t. */
    char expected_offset[3 * sizeof(size_t)];
    snprintf(expected_offset, sizeof(expected_offset), "%zd", resume_offset);
    if (strcmp(expected_offset, offset_value) != 0)
        return false;
    return strcmp(no_console_value, "1") == 0;
}
static struct resume_swap_area get_swap_area(const struct swap_file *swap)
{
int fd = open(swap->path, O_RDONLY | O_CLOEXEC);
struct stat st;
if (fd < 0)
log_fatal("Could not open %s: %s", swap->path, strerror(errno));
if (fstat(fd, &st) < 0)
log_fatal("Could not stat %s: %s", swap->path, strerror(errno));
if (!S_ISREG(st.st_mode))
log_fatal("Swap file %s is not a regular file", swap->path);
uint32_t offset = get_swap_file_offset(fd);
if (offset == ~0u)
log_fatal("Could not determine file system block number for %s, or file isn't contiguous", swap->path);
close(fd);
log_info("Swap file %s is at device %ld, offset %d", swap->path, st.st_dev, offset);
return (struct resume_swap_area){
.offset = offset,
.dev = st.st_dev,
};
}
/* Returns the first GRUB configuration file found among the known
 * locations, or NULL (with a log message) when none exists. */
static const char *find_grub_cfg_path(void)
{
    static const char *const candidates[] = {"/boot/grub2/grub.cfg", "/boot/grub/grub.cfg"};
    for (size_t i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
        if (access(candidates[i], F_OK) == 0)
            return candidates[i];
    }
    log_info("Could not find GRUB configuration file. Is /boot mounted?");
    return NULL;
}
/* Returns true when `path` contains no entries besides "." and "..".
 * A directory that can't be opened at all is reported as empty. */
static bool is_directory_empty(const char *path)
{
    DIR *dir = opendir(path);
    if (!dir)
        return true;
    bool empty = true;
    for (struct dirent *ent; (ent = readdir(dir)) != NULL;) {
        if (strcmp(ent->d_name, ".") != 0 && strcmp(ent->d_name, "..") != 0) {
            empty = false;
            break;
        }
    }
    closedir(dir);
    return empty;
}
/* Patches the boot configuration so the *next* boot carries the resume=,
 * resume_offset= and no_console_suspend=1 parameters for `dev_uuid` /
 * `swap_area`.  Uses grubby when available, otherwise rewrites the GRUB
 * defaults file and regenerates grub.cfg with update-grub2/grub2-mkconfig.
 * Also refreshes the initramfs RESUME setting on initramfs-tools systems.
 * Returns false when the GRUB config could not be located. */
static bool update_kernel_cmdline_params_for_grub(
const char *dev_uuid, const struct resume_swap_area swap_area, bool has_grubby, bool has_update_grub2, bool has_grub2_mkconfig)
{
bool ret_value = true;
/* Doc:
* https://docs.fedoraproject.org/en-US/fedora/rawhide/system-administrators-guide/kernel-module-driver-configuration/Working_with_the_GRUB_2_Boot_Loader/#sec-Making_Persistent_Changes_to_a_GRUB_2_Menu_Using_the_grubby_Tool
*/
log_info("Kernel command line is missing parameters to resume from hibernation. Trying to patch grub configuration file.");
/* The argument string appended to the kernel command line. */
char *args;
if (asprintf(&args, "resume=/dev/disk/by-uuid/%s resume_offset=%lld no_console_suspend=1", dev_uuid, swap_area.offset) < 0) {
log_info("Could not allocate memory for kernel argument");
return false;
}
/* initramfs-tools (Debian/Ubuntu) also needs to know the resume device,
 * or the initramfs will ignore the kernel parameter. */
if (is_exec_in_path("update-initramfs")) {
log_info("Updating initramfs to include resume stuff");
FILE *conf = fopen("/etc/initramfs-tools/conf.d/resume", "we");
if (!conf)
log_fatal("Could not open initramfs-tools configuration file: %s", strerror(errno));
fprintf(conf, "# Updated automatically by hibernation-setup-tool. Do not modify.\n");
fprintf(conf, "RESUME=UUID=%s\n", dev_uuid);
fclose(conf);
spawn_and_wait("update-initramfs", 1, "-u");
}
if (has_grubby) {
/* grubby edits every installed kernel entry in place — simplest path. */
log_info("Using grubby to patch GRUB configuration");
spawn_and_wait("grubby", 3, "--update-kernel=ALL", "--args", args);
} else if (has_update_grub2 || has_grub2_mkconfig) {
FILE *resume_cfg;
char *old_contents = NULL;
const char *grub_cfg_path;
size_t old_contents_len = 0;
char buffer[1024];
if (!is_directory_empty("/etc/default/grub.d")) {
/* If we find this directory, it might be possible that some of the configuration
* files there will override GRUB_CMDLINE_LINUX_DEFAULT. This seems to be the case
* in some Azure Marketplace images, with files such as "50-cloudimg-settings.cfg".
* If we find the directory, assume we can create our own with higher priority
* instead of modifying the global configuration file. */
grub_cfg_path = "/etc/default/grub.d/99-hibernate-settings.cfg";
} else if (!access("/etc/default/grub", F_OK)) {
grub_cfg_path = "/etc/default/grub";
} else {
log_fatal("Could not determine where the Grub configuration file is");
}
/* Read the current file, dropping any block this tool wrote on a previous
 * run (delimited by the start/end marker comments) so we don't stack
 * duplicate settings. */
resume_cfg = fopen(grub_cfg_path, "re");
if (resume_cfg) {
bool in_az_hibernate_agent_block = false;
while (fgets(buffer, sizeof(buffer), resume_cfg)) {
if (in_az_hibernate_agent_block) {
if (strstr(buffer, "# hibernation-setup-tool:end"))
in_az_hibernate_agent_block = false;
continue;
}
if (strstr(buffer, "# hibernation-setup-tool:start")) {
in_az_hibernate_agent_block = true;
continue;
}
size_t buflen = strlen(buffer);
char *tmp = realloc(old_contents, old_contents_len + buflen + 1);
if (!tmp)
log_fatal("Could not allocate memory: %s", strerror(errno));
memcpy(tmp + old_contents_len, buffer, buflen + 1);
old_contents_len += buflen;
old_contents = tmp;
}
fclose(resume_cfg);
}
/* Rewrite the file: preserved contents first, then our marked block. */
resume_cfg = fopen(grub_cfg_path, "we");
if (!resume_cfg)
log_fatal("Could not open %s for writing: %s", grub_cfg_path, strerror(errno));
if (old_contents) {
fwrite(old_contents, old_contents_len, 1, resume_cfg);
free(old_contents);
}
fprintf(resume_cfg, "\n# hibernation-setup-tool:start\n");
fprintf(resume_cfg, "GRUB_CMDLINE_LINUX_DEFAULT=\"$GRUB_CMDLINE_LINUX_DEFAULT %s\"\n", args);
fprintf(resume_cfg, "unset GRUB_FORCE_PARTUUID\n");
fprintf(resume_cfg, "# hibernation-setup-tool:end\n");
fclose(resume_cfg);
if (has_update_grub2) {
log_info("Using update-grub2 to patch GRUB configuration in %s", grub_cfg_path);
spawn_and_wait("update-grub2", 0);
} else if (has_grub2_mkconfig) {
/* NOTE: this inner grub_cfg_path intentionally shadows the outer one —
 * here it means the generated /boot/.../grub.cfg, not the defaults file. */
const char *grub_cfg_path = find_grub_cfg_path();
if (!grub_cfg_path) {
ret_value = false;
} else {
log_info("Using grub2-mkconfig to patch GRUB configuration in %s", grub_cfg_path);
spawn_and_wait("grub2-mkconfig", 2, "-o", grub_cfg_path);
}
}
}
free(args);
return ret_value;
}
/* Tells the kernel (via /dev/snapshot) where the swap area lives and makes
 * sure the kernel command line for the next boot carries matching resume
 * parameters.  Returns false when something the user must fix was found. */
static bool update_swap_offset(const struct swap_file *swap)
{
    log_info("Updating swap offset");
    int fd = open("/dev/snapshot", O_RDONLY | O_CLOEXEC);
    if (fd < 0) {
        log_info("Could not open /dev/snapshot: %s", strerror(errno));
        return false;
    }
    struct resume_swap_area swap_area = get_swap_area(swap);
    if (ioctl(fd, SNAPSHOT_SET_SWAP_AREA, &swap_area) < 0) {
        log_info("Could not set resume_swap_area parameters in /dev/snapshot: %s", strerror(errno));
        close(fd);
        return false;
    }
    close(fd);
    char *dev_uuid = get_disk_uuid_for_file_path(swap->path);
    if (!dev_uuid)
        log_fatal("Could not determine device UUID for swap file %s", swap->path);
    log_info("Swap file %s is in device UUID %s", swap->path, dev_uuid);
    bool ret = true;
    if (!is_kernel_cmdline_correct(dev_uuid, swap_area.offset)) {
        log_info("Kernel command-line parameters need updating.");
        bool has_grubby = is_exec_in_path("grubby");
        bool has_update_grub2 = is_exec_in_path("update-grub2");
        bool has_grub2_mkconfig = is_exec_in_path("grub2-mkconfig");
        if (!has_grubby && !has_update_grub2 && !has_grub2_mkconfig) {
            log_info(
                "Could not determine how system was booted to update kernel parameters for next boot. System won't be able to resume until you fix this.");
            ret = false;
        } else {
            ret = update_kernel_cmdline_params_for_grub(dev_uuid, swap_area, has_grubby, has_update_grub2, has_grub2_mkconfig);
        }
    }
    free(dev_uuid);
    return ret;
}
/* Activates the swap file with swapon(2) and rewrites /etc/fstab so it stays
 * enabled across reboots.  Any existing fstab line mentioning the swap file
 * is dropped before the canonical entry is appended.  Failures are fatal. */
static void ensure_swap_is_enabled(const struct swap_file *swap, bool created)
{
    log_info("Ensuring swap file %s is enabled", swap->path);
    if (chmod(swap->path, 0600) < 0)
        log_fatal("Couldn't set correct permissions on %s: %s", swap->path, strerror(errno));
    if (swapon(swap->path, 0) < 0) {
        /* EINVAL on a pre-existing file means it isn't a valid swap file;
         * EBUSY just means it's already enabled, which is fine. */
        if (errno == EINVAL && !created)
            log_fatal("%s exists but kernel isn't accepting it as a swap file. Try removing it and re-running the agent.", swap->path);
        if (errno != EBUSY)
            log_fatal("Could not enable swap file: %s", strerror(errno));
    }
    log_info("Updating /etc/fstab");
    FILE *fstab = fopen("/etc/fstab", "re");
    if (!fstab)
        log_fatal("Could not open fstab: %s", strerror(errno));
    char *kept_lines = NULL;
    size_t kept_len = 0;
    char line[1024];
    while (fgets(line, sizeof(line), fstab)) {
        if (strstr(line, swap->path))
            continue; /* drop stale entries that refer to our swap file */
        size_t len = strlen(line);
        char *grown = realloc(kept_lines, kept_len + len + 1);
        if (!grown)
            log_fatal("Couldn't allocate memory");
        memcpy(grown + kept_len, line, len + 1);
        kept_lines = grown;
        kept_len += len;
    }
    fclose(fstab);
    fstab = fopen("/etc/fstab", "we");
    if (!fstab)
        log_fatal("Could not open fstab for writing: %s", strerror(errno));
    if (kept_lines) {
        fwrite(kept_lines, kept_len, 1, fstab);
        free(kept_lines);
    }
    fprintf(fstab, "\n%s\tnone\tswap\tswap\t0\t0\n", swap->path);
    fclose(fstab);
}
/* Installs a udev rule that runs `systemctl hibernate` when the Hyper-V host
 * requests hibernation over VMBus.  Requires systemctl and udevadm; when
 * either is missing, or no rules directory exists, only logs and returns. */
static void ensure_udev_rules_are_installed(void)
{
    char systemctl_path_buf[PATH_MAX];
    const char *systemctl_path = find_executable_in_path("systemctl", NULL, systemctl_path_buf);
    if (!systemctl_path) {
        log_info("systemctl not found or not executable, udev rule won't work");
        return;
    }
    if (!is_exec_in_path("udevadm")) {
        log_info("udevadm has not been found in $PATH; maybe system doesn't use systemd?");
        return;
    }
    /* Pick the first rules directory that exists, in priority order. */
    static const char *const rule_dirs[] = {"/usr/lib/udev/rules.d", "/etc/udev/rules.d", "/lib/udev/rules.d"};
    static const char *const rule_paths[] = {
        "/usr/lib/udev/rules.d/99-vm-hibernation.rules",
        "/etc/udev/rules.d/99-vm-hibernation.rules",
        "/lib/udev/rules.d/99-vm-hibernation.rules",
    };
    const char *udev_rule_path = NULL;
    for (size_t i = 0; i < sizeof(rule_dirs) / sizeof(rule_dirs[0]); i++) {
        if (access(rule_dirs[i], F_OK) == 0) {
            udev_rule_path = rule_paths[i];
            break;
        }
    }
    if (!udev_rule_path) {
        log_info("Couldn't find where udev stores the rules. VM may not hibernate.");
        return;
    }
    FILE *rule_file = fopen(udev_rule_path, "we");
    if (!rule_file)
        log_fatal("Could not open '%s' for writing: %s", udev_rule_path, strerror(errno));
    fprintf(rule_file,
            "SUBSYSTEM==\"vmbus\", ACTION==\"change\", "
            "DRIVER==\"hv_utils\", ENV{EVENT}==\"hibernate\", "
            "RUN+=\"%s hibernate\"\n",
            systemctl_path);
    fclose(rule_file);
    log_info("udev rule to hibernate with systemd set up in %s. Telling udev about it.", udev_rule_path);
    /* This isn't strictly necessary, but do it anyway just in case. */
    spawn_and_wait("udevadm", 2, "control", "--reload-rules");
    spawn_and_wait("udevadm", 1, "trigger");
}
/* readlink(2) with guaranteed NUL termination.  Returns `buf`, or NULL when
 * the link can't be read. */
static const char *readlink0(const char *path, char buf[static PATH_MAX])
{
    ssize_t len = readlink(path, buf, PATH_MAX - 1);
    if (len >= 0) {
        buf[len] = '\0';
        return buf;
    }
    return NULL;
}
/* Cold-boot detection.  Before hibernating, the pre-hibernate hook leaves a
 * lock symlink pointing at a marker file on a tmpfs.  tmpfs contents vanish
 * on a cold boot but survive a successful resume, so a dangling link means
 * the resume never happened. */
static bool is_cold_boot(void)
{
    char target_buf[PATH_MAX];
    const char *target = readlink0(hibernate_lock_file_name, target_buf);
    if (!target)
        return false; /* no lock link: we never attempted to hibernate */
    unlink(hibernate_lock_file_name);
    if (access(target, F_OK) < 0)
        return true; /* marker gone: tmpfs was wiped, i.e. cold boot */
    unlink(target);
    return false;
}
/* Logs the hibernation state transition that is being reported. */
static void notify_vm_host(enum host_vm_notification notification)
{
    static const char *const state_names[] = {
        [HOST_VM_NOTIFY_COLD_BOOT] = "cold-boot",
        [HOST_VM_NOTIFY_HIBERNATING] = "hibernating",
        [HOST_VM_NOTIFY_RESUMED_FROM_HIBERNATION] = "resumed-from-hibernation",
        [HOST_VM_NOTIFY_PRE_HIBERNATION_FAILED] = "pre-hibernation-failed",
    };
    log_info("Changed hibernation state to: %s", state_names[notification]);
}
/* nftw() callback used by recursive_rmdir(): unlinks files and dangling
 * symlinks, removes directories post-order, and stops the walk on anything
 * suspicious or unremovable. */
static int recursive_rmdir_cb(const char *fpath, const struct stat *st, int typeflag, struct FTW *ftwbuf)
{
    (void)st;
    (void)ftwbuf;
    switch (typeflag) {
    case FTW_SLN: /* symbolic link pointing to a non-existing file */
    case FTW_F: /* regular file */
        return unlink(fpath) == 0 ? FTW_CONTINUE : FTW_STOP;
    case FTW_DP: /* directory whose entries have all been visited */
    case FTW_D: /* directory */
        /* BUGFIX: recursive_rmdir() walks with FTW_DEPTH, and in that mode
         * nftw() reports directories as FTW_DP rather than FTW_D, so the
         * original switch sent every directory to the default branch and
         * aborted the walk.  FTW_D is kept for completeness. */
        return rmdir(fpath) == 0 ? FTW_CONTINUE : FTW_STOP;
    case FTW_SL:
        /* We refuse to follow symbolic links as it might lead us to some directory outside
         * the one we're trying to remove. The only symlinks we care about are those that
         * point to a path that does not exist, but we delete those before doing anything
         * anyway. */
        log_info("%s is a symbolic link. Refusing to follow.", fpath);
        return FTW_STOP;
    case FTW_DNR:
        log_info("Can't read directory %s", fpath);
        return FTW_STOP;
    case FTW_NS:
        log_info("stat() failed on %s", fpath);
        return FTW_STOP;
    default:
        log_info("nftw callback called with unknown typeflag %d, stopping", typeflag);
        return FTW_STOP;
    }
}
static bool recursive_rmdir(const char *path) { return nftw(path, recursive_rmdir_cb, 16, FTW_DEPTH | FTW_PHYS | FTW_ACTIONRETVAL) != FTW_STOP; }
/* systemd-sleep "pre hibernate" hook: records that a hibernation attempt is
 * in flight so the next agent start-up can tell a successful resume from a
 * cold boot (see is_cold_boot()).  It creates a private, tmpfs-backed
 * directory under /tmp, drops a marker file there, and hard-links
 * hibernate_lock_file_name to that marker.  Returns 0 on success; any
 * failure notifies the host and aborts via log_fatal(). */
static int handle_pre_systemd_suspend_notification(const char *action)
{
if (!strcmp(action, "hibernate")) {
log_info("Running pre-hibernate hooks");
/* Creating this directory with the right permissions is racy as
* we're writing to tmp which is world-writable. So do our best
* here to ensure that if this for loop terminates normally, the
* directory is empty and readable/writable only by us. In normal
* scenarios (i.e. nobody else tried creating a directory with that
* name), this will succeed the first try. This directory will be
* removed when we resume. */
for (int try = 0;; try++) {
if (try > 10) {
notify_vm_host(HOST_VM_NOTIFY_PRE_HIBERNATION_FAILED);
log_fatal("Tried too many times to create /tmp/hibernation-setup-tool and failed. Giving up");
}
if (!mkdir("/tmp/hibernation-setup-tool", 0700))
break;
if (errno != EEXIST) {
notify_vm_host(HOST_VM_NOTIFY_PRE_HIBERNATION_FAILED);
log_fatal("Couldn't create location to store hibernation agent state: %s", strerror(errno));
}
/* mkdir() said EEXIST: inspect what's there and clear it out. */
struct stat st;
if (!stat("/tmp/hibernation-setup-tool", &st)) {
if (S_ISDIR(st.st_mode)) {
log_info("/tmp/hibernation-setup-tool exists, removing it");
if (umount2("/tmp/hibernation-setup-tool", UMOUNT_NOFOLLOW | MNT_DETACH) < 0) {
/* See comment related to the umount2() call in handle_post_systemd_suspend_notification(). */
if (errno != EINVAL)
log_fatal("Error while unmounting /tmp/hibernation-setup-tool: %s", strerror(errno));
}
if (recursive_rmdir("/tmp/hibernation-setup-tool"))
continue;
notify_vm_host(HOST_VM_NOTIFY_PRE_HIBERNATION_FAILED);
log_fatal("Couldn't remove /tmp/hibernation-setup-tool directory before proceeding");
}
log_info("/tmp/hibernation-setup-tool exists and isn't a directory! Removing it and trying again (try %d)", try);
if (!unlink("/tmp/hibernation-setup-tool"))
continue;
notify_vm_host(HOST_VM_NOTIFY_PRE_HIBERNATION_FAILED);
log_fatal("Couldn't remove the file: %s, giving up", strerror(errno));
}
log_info("/tmp/hibernation-setup-tool couldn't be found but mkdir() told us it exists, trying again (try %d)", try);
continue;
}
/* The marker must live on a tmpfs so it disappears on a cold boot; if
 * /tmp itself isn't tmpfs, mount one over our private directory. */
if (!is_file_on_fs("/tmp/hibernation-setup-tool", TMPFS_MAGIC)) {
log_info("/tmp isn't a tmpfs filesystem; trying to mount /tmp/hibernation-setup-tool as such");
if (mount("tmpfs", "/tmp/hibernation-setup-tool", "tmpfs", 0, NULL) < 0) {
notify_vm_host(HOST_VM_NOTIFY_PRE_HIBERNATION_FAILED);
log_fatal("Couldn't mount temporary filesystem: %s. We need this to detect cold boots!", strerror(errno));
}
}
char pattern[] = "/tmp/hibernation-setup-tool/hibernatedXXXXXX";
int fd = mkostemp(pattern, O_CLOEXEC);
if (fd < 0) {
notify_vm_host(HOST_VM_NOTIFY_PRE_HIBERNATION_FAILED);
log_fatal("Couldn't create a temporary file: %s", strerror(errno));
}
close(fd);
if (unlink(hibernate_lock_file_name) < 0) {
/* BUGFIX: unlink(2) never fails with EEXIST; the benign "lock file
 * doesn't exist yet" case is ENOENT.  The original tested EEXIST,
 * which made the normal first-run ENOENT failure fatal. */
if (errno != ENOENT) {
notify_vm_host(HOST_VM_NOTIFY_PRE_HIBERNATION_FAILED);
log_fatal("Couldn't remove %s: %s", hibernate_lock_file_name, strerror(errno));
}
}
if (link(pattern, hibernate_lock_file_name) < 0) {
notify_vm_host(HOST_VM_NOTIFY_PRE_HIBERNATION_FAILED);
log_fatal("Couldn't link %s to %s: %s", pattern, hibernate_lock_file_name, strerror(errno));
}
notify_vm_host(HOST_VM_NOTIFY_HIBERNATING);
log_info("Pre-hibernation hooks executed successfully");
return 0;
}
log_fatal("Can't handle `pre %s' notifications", action);
return 1;
}
/* systemd-sleep "post hibernate" hook: undoes everything the pre hook set up
 * (marker file, lock link, private tmpfs) and reports a successful resume.
 * All cleanup failures are logged but non-fatal. */
static int handle_post_systemd_suspend_notification(const char *action)
{
    if (strcmp(action, "hibernate") != 0) {
        log_fatal("Can't handle `post %s' notifications", action);
        return 1;
    }
    log_info("Running post-hibernate hooks");
    char marker_buf[PATH_MAX];
    const char *marker = readlink0(hibernate_lock_file_name, marker_buf);
    if (!marker) {
        /* A missing link most likely means the pre-hibernation hook never
         * created it in the first place; log for debugging but keep going. */
        log_info("This is probably fine, but couldn't readlink(%s): %s", hibernate_lock_file_name, strerror(errno));
    } else if (unlink(marker) < 0) {
        log_info("This is fine, but couldn't remove %s: %s", marker, strerror(errno));
    }
    if (unlink(hibernate_lock_file_name) < 0)
        log_info("This is fine, but couldn't remove %s: %s", hibernate_lock_file_name, strerror(errno));
    if (umount2("/tmp/hibernation-setup-tool", UMOUNT_NOFOLLOW | MNT_DETACH) < 0) {
        /* EINVAL just means this wasn't a mount point, which is normal when
         * /tmp was already tmpfs and the pre hook didn't mount anything.
         * Attempting the umount is cheaper than checking beforehand. */
        if (errno != EINVAL)
            log_info("While unmounting /tmp/hibernation-setup-tool: %s", strerror(errno));
    }
    if (!recursive_rmdir("/tmp/hibernation-setup-tool"))
        log_info("While removing /tmp/hibernation-setup-tool: %s", strerror(errno));
    notify_vm_host(HOST_VM_NOTIFY_RESUMED_FROM_HIBERNATION);
    log_info("Post-hibernation hooks executed successfully");
    return 0;
}
/* Entry point for systemd-sleep invocations ("<agent> pre|post hibernate").
 * Only honored when actually invoked by systemd, which sets
 * SYSTEMD_SLEEP_ACTION in the environment. */
static int handle_systemd_suspend_notification(const char *argv0, const char *when, const char *action)
{
    if (!getenv("SYSTEMD_SLEEP_ACTION")) {
        log_fatal("These arguments can only be used when called from systemd");
        return 1;
    }
    /* These hooks run detached from any terminal: tag and syslog the output. */
    log_needs_prefix = true;
    log_needs_syslog = true;
    if (strcmp(when, "pre") == 0)
        return handle_pre_systemd_suspend_notification(action);
    if (strcmp(when, "post") == 0)
        return handle_post_systemd_suspend_notification(action);
    log_fatal("Invalid usage: %s %s %s", argv0, when, action);
    return 1;
}
/* Hard-links the agent binary into the systemd-sleep hook directory and asks
 * systemd to reload.  An already-existing link is fine; any other link(2)
 * failure is fatal. */
static void link_hook(const char *src, const char *dest)
{
    if (link(src, dest) < 0 && errno != EEXIST)
        return log_fatal("Couldn't link %s to %s: %s", src, dest, strerror(errno));
    log_info("Notifying systemd of new hooks");
    spawn_and_wait("systemctl", 1, "daemon-reload");
}
/* Hard-links this executable into /usr/lib/systemd/systemd-sleep so systemd
 * invokes it with "pre"/"post" around suspend/hibernate.
 *
 * Although the systemd manual cautions against dropping executables or
 * scripts in this directory, the proposed D-Bus interface (Inhibitor) is not
 * sufficient for our use case here: we're not trying to inhibit
 * hibernation/suspend, we're just trying to know when this happened.
 *
 * More info: https://www.freedesktop.org/software/systemd/man/systemd-suspend.service.html
 */
static void ensure_systemd_hooks_are_set_up(void)
{
    const char *location_to_link = "/usr/lib/systemd/systemd-sleep";
    struct stat st;
    if (stat(location_to_link, &st) < 0) {
        if (errno != ENOENT) {
            log_info("Couldn't stat(%s): %s. We need to drop a file there to allow "
                     "the VM host to be notified of hibernation/resumes.",
                     location_to_link, strerror(errno));
            return;
        }
        log_info("Attempting to create hibernate/resume hook directory: %s", location_to_link);
        if (mkdir(location_to_link, 0755) < 0) {
            log_info("Couldn't create %s: %s. VM host won't receive suspend/resume notifications.", location_to_link, strerror(errno));
            return;
        }
    } else if (!S_ISDIR(st.st_mode)) {
        log_info("%s isn't a directory, can't drop a link to the agent there to "
                 " notify host of hibernation/resume",
                 location_to_link);
        return;
    }
    /* Prefer the path the kernel says we were executed from; fall back to
     * /proc/self/exe if the aux vector has nothing. */
    const char *execfn = (const char *)getauxval(AT_EXECFN);
    if (execfn)
        return link_hook(execfn, location_to_link);
    char self_path_buf[PATH_MAX];
    const char *self_path = readlink0("/proc/self/exe", self_path_buf);
    if (self_path)
        return link_hook(self_path, location_to_link);
    return log_fatal("Both getauxval() and readlink(/proc/self/exe) failed. "
                     "Couldn't determine location of this executable to install "
                     "systemd hooks");
}
/* Agent entry point.  Must run as root.  On Hyper-V, a 3-argument
 * invocation is a systemd-sleep callback and is dispatched immediately.
 * Otherwise: size the swap area from installed RAM, (re)create the swap
 * file if missing or undersized, enable + persist it, and program the
 * kernel's resume parameters.  Returns 0 on success. */
int main(int argc, char *argv[])
{
if (geteuid() != 0) {
log_fatal("This program has to be executed with superuser privileges.");
return 1;
}
if (!is_hibernation_enabled_for_vm()) {
log_info("Hibernation not enabled for this VM.");
return 0;
}
if (is_hyperv()) {
/* We only handle these things here on Hyper-V VMs because it's the only
* hypervisor we know that might need these kinds of notifications. */
/* 3 args = "<agent> pre|post hibernate" from systemd-sleep. */
if (argc == 3)
return handle_systemd_suspend_notification(argv[0], argv[1], argv[2]);
if (is_cold_boot())
notify_vm_host(HOST_VM_NOTIFY_COLD_BOOT);
}
/* Size the swap area from the machine's physical memory. */
size_t total_ram = physical_memory();
if (!total_ram)
log_fatal("Could not obtain memory total from this computer");
size_t needed_swap = swap_needed_size(total_ram);
log_info("System has %zu MB of RAM; needs a swap area of %zu MB", total_ram / MEGA_BYTES, needed_swap / MEGA_BYTES);
struct swap_file *swap = find_swap_file(needed_swap);
if (swap) {
log_info("Swap file found with size %zu MB at %s", swap->capacity / MEGA_BYTES, swap->path);
} else {
log_info("Swap file not found");
}
/* An undersized swap file can't hold the RAM image: drop and recreate. */
if (swap && swap->capacity < needed_swap) {
log_info("Swap file %s has capacity of %zu MB but needs %zu MB. Recreating. "
"System will run without a swap file while this is being set up.",
swap->path, swap->capacity / MEGA_BYTES, needed_swap / MEGA_BYTES);
/* EINVAL from swapoff() just means it wasn't active; anything else is fatal. */
if (swapoff(swap->path) < 0) {
if (errno == EINVAL) {
log_info("%s is not currently being used as a swap partition. That's OK.", swap->path);
} else {
log_fatal("Could not disable swap file %s: %s", swap->path, strerror(errno));
}
}
if (unlink(swap->path) < 0) {
/* If we're trying to remove the file but it's not there anymore,
* that's fine... no need to error out. */
if (!access(swap->path, F_OK))
log_fatal("Could not remove swap file %s: %s", swap->path, strerror(errno));
}
free(swap);
swap = NULL;
}
bool created = false;
if (!swap) {
log_info("Creating swap file with %zu MB", needed_swap / MEGA_BYTES);
swap = create_swap_file(needed_swap);
if (!swap)
log_fatal("Could not create swap file");
created = true;
}
/* Activate now and persist the entry in /etc/fstab. */
ensure_swap_is_enabled(swap, created);
/* Program /dev/snapshot and the kernel cmdline for the next resume. */
if (!update_swap_offset(swap))
log_fatal("Could not update swap offset.");
if (is_hyperv()) {
ensure_udev_rules_are_installed();
ensure_systemd_hooks_are_set_up();
}
log_info("Swap file for VM hibernation set up successfully");
free(swap);
return 0;
}
|
import argparse
import os
import shutil
def main():
    """Copy a source file to a destination path, guarded by an --overwrite flag.

    Prints a diagnostic and returns early when the source is missing or when
    the destination exists without --overwrite; copy failures are reported
    rather than raised.
    """
    parser = argparse.ArgumentParser(description='File handling utility')
    parser.add_argument('source', help='Path to the source file')
    parser.add_argument('destination', help='Path to the destination file')
    parser.add_argument('--overwrite', action='store_true', help='Overwrite the destination file if it already exists')
    parsed = parser.parse_args()
    src = parsed.source
    dst = parsed.destination
    # Refuse to proceed when the source is absent.
    if not os.path.exists(src):
        print(f"Error: Source file '{src}' does not exist.")
        return
    # Never clobber an existing destination unless explicitly allowed.
    if os.path.exists(dst) and not parsed.overwrite:
        print(f"Destination file '{dst}' already exists. Use --overwrite option to replace it.")
        return
    try:
        shutil.copyfile(src, dst)
        print(f"File '{src}' successfully copied to '{dst}'.")
    except Exception as e:
        print(f"An error occurred while copying the file: {e}")
if __name__ == "__main__":
    main()
|
# Spec for Symbol#succ, delegating to the shared :symbol_succ examples so the
# same expectations can be reused for equivalent methods.
require File.expand_path('../../../spec_helper', __FILE__)
require File.expand_path('../shared/succ', __FILE__)

describe "Symbol#succ" do
  it_behaves_like :symbol_succ, :succ
end
|
class SqlInterface:
    """Thin convenience wrapper around a SQLite database file.

    Each call to query() opens a fresh connection, runs a single statement,
    and closes the connection again, so instances hold no open resources.
    """

    def __init__(self, database):
        # Path (or ':memory:') handed straight to sqlite3.connect().
        self.database = database

    def query(self, query_string, parameters=()):
        """Execute one SQL statement and return all result rows.

        Args:
            query_string: SQL text to execute. Prefer '?' placeholders with
                `parameters` over string formatting to avoid SQL injection.
            parameters: Optional sequence bound to the placeholders
                (defaults to none, preserving the original call signature).

        Returns:
            A list of result tuples (empty for statements yielding no rows).
        """
        import sqlite3
        conn = sqlite3.connect(self.database)
        try:
            cursor = conn.cursor()
            cursor.execute(query_string, parameters)
            return cursor.fetchall()
        finally:
            # Fix: close even when execute() raises, so connections never
            # leak (the original only closed on the success path).
            conn.close()
|
<gh_stars>0
package com.trackorjargh.javaclass;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import javax.persistence.ElementCollection;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.OneToMany;
import org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder;
import com.fasterxml.jackson.annotation.JsonView;
/**
 * JPA entity for an application user: credentials, profile data, role names
 * and the lists the user owns.
 *
 * <p>Plain-text passwords are BCrypt-encoded on the way in (constructor and
 * {@link #setPassword(String)}); use {@link #setPasswordCodificate(String)}
 * to store an already-encoded hash unchanged.
 */
@Entity
public class User {

    /** Auto-generated primary key. */
    @Id
    @GeneratedValue(strategy = GenerationType.AUTO)
    @JsonView(BasicInformation.class)
    private Long id;

    /** Jackson view marker: basic profile fields. */
    public interface BasicInformation {}

    /** Jackson view marker: name only. */
    public interface NameUserInfo {}

    /** Jackson view marker: activation flag. */
    public interface ActiveInformation {}

    @JsonView({BasicInformation.class, NameUserInfo.class})
    private String name;

    /** BCrypt hash of the user's password; not exposed through any JSON view. */
    private String password;

    @JsonView(BasicInformation.class)
    private String email;

    @JsonView(BasicInformation.class)
    private String image;

    @JsonView({BasicInformation.class, ActiveInformation.class})
    private boolean activatedUser;

    /** Role names; fetched eagerly so they are usable after the session closes. */
    @ElementCollection(fetch = FetchType.EAGER)
    @JsonView(BasicInformation.class)
    private List<String> roles;

    /** Lists owned by this user (inverse side of Lists.user). */
    @OneToMany(mappedBy = "user")
    private List<Lists> lists = new LinkedList<>();

    /** No-arg constructor required by JPA. */
    public User() {
    }

    /**
     * Creates a fully populated user.
     *
     * @param password plain-text password; BCrypt-encoded before being stored
     * @param roles    role names granted to this user
     */
    public User(String name, String password, String email, String image, boolean activatedUser, String... roles) {
        this.name = name;
        // Fix: the original read "new <PASSWORD>PasswordEncoder()", a corrupted
        // class name that does not compile; encode with BCryptPasswordEncoder
        // exactly as setPassword(String) does.
        this.password = new BCryptPasswordEncoder().encode(password);
        this.email = email;
        this.image = image;
        this.activatedUser = activatedUser;
        this.roles = new ArrayList<>(Arrays.asList(roles));
    }

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getPassword() {
        return password;
    }

    /** Encodes and stores a plain-text password. */
    public void setPassword(String password) {
        this.password = new BCryptPasswordEncoder().encode(password);
    }

    /** Stores an already BCrypt-encoded password hash verbatim. */
    public void setPasswordCodificate(String password) {
        this.password = password;
    }

    public String getEmail() {
        return email;
    }

    public void setEmail(String email) {
        this.email = email;
    }

    public String getImage() {
        return image;
    }

    public void setImage(String image) {
        this.image = image;
    }

    public List<Lists> getLists() {
        return lists;
    }

    public void setLists(List<Lists> lists) {
        this.lists = lists;
    }

    public List<String> getRoles() {
        return roles;
    }

    public void setRoles(List<String> roles) {
        this.roles = roles;
    }

    public boolean isActivatedUser() {
        return activatedUser;
    }

    public void setActivatedUser(boolean activatedUser) {
        this.activatedUser = activatedUser;
    }
}
|
import { useCreateQueryConfig, useMutation } from '@sarair/shared/hooks'
import { sarairRequest } from '@sarair/shared/request'
import { useTaskListQueryKey } from '.'
import type { Task } from '../../types'
export const useTaskCreate = () => {
const queryKey = useTaskListQueryKey()
const { isLoading, mutateAsync: create } = useMutation(
(params?: Partial<Task>) => sarairRequest.post('tasks', params),
useCreateQueryConfig(queryKey)
)
return {
isLoading,
methods: { create }
}
}
|
/* eslint-disable-next-line import/prefer-default-export,@typescript-eslint/no-explicit-any */
// Lookup-function type: returns the value of the named environment variable,
// falling back to `defaultValue` when the variable is unset.
export type EnvOrDefault = (variableName: string, defaultValue?: any) => any;
|
#!/bin/bash
# Provision a Kerberos realm plus Hadoop service principals/keytabs, set up
# passwordless SSH, then start HDFS and YARN and idle forever. Intended for a
# single-node test container.

# Exit cleanly on Ctrl-C.
trap "echo exit;exit 0" SIGINT

# expect is used below to script the interactive kadmin.local/ktutil sessions.
apt-get install expect -y

# Grant all */admin principals full admin rights.
echo "*/admin *" >> /etc/krb5kdc/kadm5.acl

# Create the realm; "yes" feeds the master-password prompts.
yes CONTINUUM | krb5_newrealm

# Scripted kadmin.local session: create the admin/admin principal.
echo '
spawn kadmin.local
expect "kadmin.local:"
send "ank admin/admin\r"
expect "Enter password for principal"
send "CONTINUUM\r"
expect "Re-enter password for principal"
send "CONTINUUM\r"
expect "kadmin.local:"
exit 1' > cmd
expect cmd

# Obtain an admin ticket, then create the Hadoop service and user principals.
yes CONTINUUM | kinit admin/admin
yes CONTINUUM | kadmin addprinc -pw CONTINUUM hdfs/kerb
yes CONTINUUM | kadmin addprinc -pw CONTINUUM yarn/kerb
yes CONTINUUM | kadmin addprinc -pw CONTINUUM http/kerb
yes CONTINUUM | kadmin addprinc -pw CONTINUUM user

# Export per-service keytabs; the http entries are merged in below.
yes CONTINUUM | kadmin xst -k hdfs-unmerged.keytab hdfs/kerb
yes CONTINUUM | kadmin xst -k yarn-unmerged.keytab yarn/kerb
yes CONTINUUM | kadmin xst -k http.keytab http/kerb

# Scripted ktutil session: merge http.keytab into the hdfs and yarn keytabs,
# producing hdfs.keytab and yarn.keytab.
echo '
spawn ktutil
expect "ktutil"
send "rkt hdfs-unmerged.keytab\r"
expect "ktutil"
send "rkt http.keytab\r"
expect "ktutil"
send "wkt hdfs.keytab\r"
expect "ktutil"
send "clear\r"
expect "ktutil"
send "rkt yarn-unmerged.keytab\r"
expect "ktutil"
send "rkt http.keytab\r"
expect "ktutil"
send "wkt yarn.keytab\r"
exit 1' > cmd2
expect cmd2

# Install the merged keytabs where Hadoop expects them.
mv *keytab /opt/hadoop/etc/hadoop

# Passwordless SSH for the Hadoop start scripts.
service ssh restart
ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa
cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
ssh-keyscan -H localhost >> ~/.ssh/known_hosts
ssh-keyscan -H 0.0.0.0 >> ~/.ssh/known_hosts

# Prepend JAVA_HOME to hadoop-env.sh so the daemons can find Java.
echo 'export JAVA_HOME='$JAVA_HOME | cat - /opt/hadoop/etc/hadoop/hadoop-env.sh > temp
rm /opt/hadoop/etc/hadoop/hadoop-env.sh
mv temp /opt/hadoop/etc/hadoop/hadoop-env.sh
chmod 0600 ~/.ssh/authorized_keys

# Bring up HDFS and YARN and create a world-writable /tmp.
start-dfs.sh
start-yarn.sh
hdfs dfs -mkdir /tmp
hdfs dfs -chmod 777 /tmp

# Keep the container alive.
while :
do
    sleep 1
done
|
import {useEffect, useState} from "react";
/**
 * Returns true when the viewport is narrower than 700px or shorter than
 * 600px. Subscribes once to window "resize" and forces a re-render on every
 * resize so the returned value stays current.
 */
export const useIsMobileView = (): boolean => {
    // State is used purely as a re-render trigger; its value is irrelevant.
    const [, update] = useState<object>();
    useEffect(() => {
        const listener = () => update({});
        window.addEventListener("resize", listener);
        return () => window.removeEventListener("resize", listener);
        // Fix: empty dependency array so we subscribe once on mount instead
        // of tearing down and re-adding the listener after every render.
    }, []);
    return window.innerWidth < 700 || window.innerHeight < 600; //TODO: boundaries to be decided
};
|
#!/usr/bin/env bash
set -e -o pipefail

# Loads ROOT, DEV_FIXTURES_SERVER_ROOT and DEV_FIXTURES_SERVER_PORT.
# shellcheck source=_config.sh
source "$(dirname "${BASH_SOURCE[0]}")/_config.sh"

cd "$ROOT"
cd "$DEV_FIXTURES_SERVER_ROOT"

echo "launching fixtures server for development (silent mode) in '$DEV_FIXTURES_SERVER_ROOT'"

# Serve the fixtures directory over HTTP with all output discarded ("silent
# mode"); runs in the foreground until interrupted.
set -x
python -m http.server "$DEV_FIXTURES_SERVER_PORT" >/dev/null 2>&1
|
<filename>message_bridge/jenkins_message_source.py
#!/usr/bin/python
import subprocess
import json
import base64
import uuid
import os
import redis
class RedisQueue(object):
    """Simple FIFO queue backed by a Redis list."""

    def __init__(self, name, namespace='queue', **redis_kwargs):
        """The default connection parameters are: host='localhost', port=6379, db=0"""
        self.__db = redis.Redis(**redis_kwargs)
        # Redis key of the backing list, e.g. "queue:messages".
        self.key = '%s:%s' % (namespace, name)

    def qsize(self):
        """Return the approximate size of the queue."""
        return self.__db.llen(self.key)

    def empty(self):
        """Return True if the queue is empty, False otherwise."""
        return self.qsize() == 0

    def put(self, item):
        """Put item into the queue."""
        self.__db.rpush(self.key, item)

    def get(self, block=True, timeout=None):
        """Remove and return an item from the queue.

        If optional args block is true and timeout is None (the default),
        block if necessary until an item is available.
        """
        if block:
            item = self.__db.blpop(self.key, timeout=timeout)
            # blpop returns a (key, value) pair (or None on timeout);
            # unwrap to just the value.
            if item:
                item = item[1]
        else:
            # Fix: lpop already returns the bare value (or None). The
            # original applied item[1] here too, silently truncating
            # non-blocking reads to their second byte.
            item = self.__db.lpop(self.key)
        return item

    def get_nowait(self):
        """Equivalent to get(False)."""
        return self.get(False)
# Enqueue a deploy job for the Ansible worker, then stream its log output
# back over a per-request response queue until an 'end' message arrives.
q = RedisQueue('messages', namespace='ansible', host='10.0.1.4', port=6379, db=0)
response_id = str(uuid.uuid4())
q.put(json.dumps({
    'job': 'build',
    'build_id': 123,
    'call_id': 1234,
    'action': 'run_playbook',
    'playbook': 'deploy',
    'response_id': response_id,
    'environment': 'qa',
    'service': 'configuration-ms',
    'image': 'srkay-on.azurecr.io/services/configuration-ms',
    'git_commit': os.environ['GIT_COMMIT'],
    'port': 3031,
    #extra vars
    'alpha': 1,
    'beta': 2
}))
# The worker publishes log messages to a queue named after response_id.
q = RedisQueue(response_id, namespace='ansible', host='10.0.1.4', port=6379, db=0)
while True:
    log = q.get()
    message = json.loads(log)
    if message['type'] == 'end':
        break
    else:
        # Fix: use the print() function form, valid under both Python 2
        # (as a parenthesised expression) and Python 3.
        print(message['payload'])
|
# generate_exceRpt_lsf_sub_v5.sh
# Dan Spakowicz
# 15 Jan 2017
# This script generates the submission scripts for all of the data

# Output directory for the generated LSF submission scripts.
# NOTE(review): assumes $DEF_DIR is exported by the calling environment —
# confirm before running standalone.
OUTPATH=$DEF_DIR

# One submission script per Sample* directory. Lines are appended with >>,
# so NOTE(review): re-running without clearing $OUTPATH duplicates content.
for D in `find Sample* -type d ` ; do
    # LSF directives: 96 GB memory, 20 cores on a single host, gerstein queue.
    echo '#BSUB -M 96000' >> $OUTPATH/${D}_exceRpt.sh
    echo '#BSUB -R "span[hosts=1]"' >> $OUTPATH/${D}_exceRpt.sh
    echo "#BSUB -n 20" >> $OUTPATH/${D}_exceRpt.sh
    echo "#BSUB -q gerstein" >> $OUTPATH/${D}_exceRpt.sh
    echo "#BSUB -J ${D}" >> $OUTPATH/${D}_exceRpt.sh
    echo "" >> $OUTPATH/${D}_exceRpt.sh
    echo "# Produced by generate_exceRpt_lsf_sub_v5.sh" >> $OUTPATH/${D}_exceRpt.sh
    echo "# Dan Spakowicz" >> $OUTPATH/${D}_exceRpt.sh
    echo "# 15 Jan 2017" >> $OUTPATH/${D}_exceRpt.sh
    echo "# Submission file for bulkRNAseq data on lsf with exceRpt. " >> $OUTPATH/${D}_exceRpt.sh
    echo "" >> $OUTPATH/${D}_exceRpt.sh
    # The exceRpt pipeline is driven by make with per-sample variables.
    echo "PIPELINE=/project/fas/gerstein/tg397/exceRpt_longRNA_dev_test_DS/new/exceRpt_longRNA_dev" >> $OUTPATH/${D}_exceRpt.sh
    echo "make -f \$PIPELINE \\" >> $OUTPATH/${D}_exceRpt.sh
    echo " EXE_DIR=/gpfs/scratch/fas/gerstein/rrk24/bin/smallRNAPipeline \\" >> $OUTPATH/${D}_exceRpt.sh
    echo " N_THREADS=20 \\" >> $OUTPATH/${D}_exceRpt.sh
    echo " ADAPTER_SEQ=none \\" >> $OUTPATH/${D}_exceRpt.sh
    echo " OUTPUT_DIR=/project/fas/gerstein/djs88/chupp/bulkCellRNAseq/map_exogenous/Processed \\" >> $OUTPATH/${D}_exceRpt.sh
    echo " MAIN_ORGANISM_GENOME_ID=hg38 \\" >> $OUTPATH/${D}_exceRpt.sh
    echo " MIN_READ_LENGTH=20 \\" >> $OUTPATH/${D}_exceRpt.sh
    echo " MAP_EXOGENOUS=on \\" >> $OUTPATH/${D}_exceRpt.sh
    echo " JAVA_RAM=90G \\" >> $OUTPATH/${D}_exceRpt.sh
    echo " REMOVE_LARGE_INTERMEDIATE_FILES=true \\" >> $OUTPATH/${D}_exceRpt.sh
    echo " INPUT_FILE_PATH_R1=/project/fas/gerstein/djs88/chupp/bulkCellRNAseq/fastq/${D}/${D}.fq.gz" >> $OUTPATH/${D}_exceRpt.sh ;
done

# Make every generated script executable.
cd $OUTPATH
chmod u+x *
|
#!/bin/bash
# Check out the latest kernel into $REPO_PATH and, when KERNEL=LATEST, build
# it with the pinned config using heavy parallelism.
set -eu

# Provides travis_fold plus the VMTEST_ROOT/KERNEL environment contract.
source $(cd $(dirname $0) && pwd)/helpers.sh

# Destination directory for the kernel tree (first positional argument).
REPO_PATH=$1

${VMTEST_ROOT}/checkout_latest_kernel.sh ${REPO_PATH}
cd ${REPO_PATH}

if [[ "${KERNEL}" = 'LATEST' ]]; then
    travis_fold start build_kernel "Kernel build"
    # Use the checked-in config, fill in new symbols with defaults, then
    # build everything with 4x the CPU count as the job limit.
    cp ${VMTEST_ROOT}/configs/latest.config .config
    make -j $((4*$(nproc))) olddefconfig all
    travis_fold end build_kernel
fi
|
def foo(my_list):
    """Return the even numbers from my_list, preserving their order."""
    evens = []
    for value in my_list:
        if value % 2 == 0:
            evens.append(value)
    return evens
|
#!/usr/bin/env bash
#/ --update_script.sh--
#/ Pulls changes from remote main branch and then updates the local python package
#/
#/ Usage: update_script.sh [options]
#/
#/ Options
#/ -s|--skip-deps Skips update of dependencies.
#/ -v|--version Prints script name & version.
#/
# Calls the update_script file at whatever path the py-package-manager repo is in
PPM_PATH=../py-package-manager/update_script.sh

# Fix: the original used [[ ! -z "${PPM_PATH}" ]], which only tests that the
# hard-coded string is non-empty — it is always true, so the error branch was
# unreachable. Test that the script actually exists instead.
if [[ -f "${PPM_PATH}" ]]
then
    ADDL="${@}" # Option for passing additional commands onwards
    sh ${PPM_PATH} --config ./config.py ${ADDL}
else
    echo -e "The py-package-manager repo is not in the expected path: ${PPM_PATH}\nAborting process..." && exit 1
fi
|
<filename>src/components/home/Product.tsx
import * as React from 'react';
import styled from 'styled-components';
import { ProductType } from '../../types';
import Img from 'gatsby-image';
import { StyledBtn } from '../styles/Buttons';
// Props for the Product card: the single product to render.
interface Props {
  product: ProductType;
}
// Card-style wrapper for a single product. On hover it overlays the image
// with a gradient and reveals the centred add-to-cart body; below 520px the
// hover reveal switches to the compact .small-price title/price strip.
// (No comments are added inside the template literal itself, since its text
// is passed verbatim to styled-components at runtime.)
const StyledProduct = styled.section`
  /* padding: 1rem; */
  transition: ${({ theme }) => theme.transition.mainTransition};
  position: relative;
  box-shadow: ${({ theme }) => theme.shadow.lightShadow};
  &:hover {
    box-shadow: ${({ theme }) => theme.shadow.darkShadow};
    &::after {
      content: '';
      background: rgb(2, 0, 36);
      background: linear-gradient(
        90deg,
        rgba(2, 0, 36, 0.6671043417366946) 16%,
        rgba(1, 9, 6, 0.639093137254902) 47%,
        rgba(121, 56, 14, 0.5998774509803921) 100%
      );
      position: absolute;
      top: 0;
      left: 0;
      height: 100%;
      width: 100%;
    }
    .body {
      display: flex;
      flex-direction: column;
      position: absolute;
      top: 50%;
      left: 50%;
      width: 70%;
      transform: translate(-50%, -50%);
      background: ${({ theme }) => theme.colors.blackShadow};
      padding: 1.5rem 2rem;
      color: ${({ theme }) => theme.colors.white};
      font-size: 2.2rem;
      z-index: 3;
      h4 {
        margin-left: auto;
      }
      button {
        width: 100%;
        &:hover {
          border: 2px solid ${({ theme }) => theme.colors.white};
          background: ${({ theme }) => theme.colors.black};
          color: ${({ theme }) => theme.colors.white};
        }
      }
    }
  }
  .small-price {
    display: none;
    transition: ${({ theme }) => theme.transition.mainTransition};
  }
  .body {
    display: none;
  }
  @media (max-width: 975px) {
    .body {
      h3,
      h4 {
        font-size: 1.2rem;
      }
      height: 10rem;
    }
  }
  @media (max-width: 730px) {
    .body {
      width: 100% !important;
      h3,
      h4 {
        font-size: 0.9rem;
      }
      height: 10rem;
      button {
        font-size: 1rem;
      }
    }
  }
  @media (max-width: 520px) {
    position: relative;
    &:hover {
      .small-price {
        display: inline-block;
        display: flex;
        justify-content: space-between;
        h3,
        h4 {
          z-index: 5;
          position: relative;
          color: ${({ theme }) => theme.colors.white};
          font-size: 1em;
          padding: 0.3em;
        }
      }
    }
    .body {
      h3,
      h4 {
        display: none;
      }
      background: none !important;
      button {
        font-size: 0.8rem;
        padding: 0 0.5rem;
        position: absolute;
        top: 40%;
        left: 50%;
        transform: translate(-50%, 0);
        /* display: none; */
      }
    }
  }
`;
// Renders one product card: the image, a hover-revealed body with a Snipcart
// add-to-cart button, and a compact title/price strip for narrow viewports
// (visibility of each part is driven by StyledProduct's CSS).
const Product: React.FC<Props> = ({ product }) => {
  return (
    <StyledProduct>
      {' '}
      <div className="head">
        <Img fluid={product.image.fluid} />
      </div>
      {/* Hidden by default; shown centred over the image on hover. */}
      <div className="body">
        <h3>{product.title}</h3>
        <h4>{product.price}$</h4>
        {/* The data-item-* attributes are read by Snipcart's cart script. */}
        <StyledBtn
          className="snipcart-add-item"
          data-item-id={product.id}
          data-item-price={product.price}
          data-item-image={product.image.fluid.src}
          data-item-name={product.title}
          data-item-url="https://marcell-ciszek-happy-caffe.netlify.com/"
          data-item-description="Happy caffee"
        >
          Add to Cart
        </StyledBtn>
      </div>
      {/* Compact alternative shown on small screens. */}
      <div className="small-price">
        <h3>{product.title}</h3>
        <h4>{product.price}$</h4>
      </div>
    </StyledProduct>
  );
};
export default Product;
|
import { FileReader } from '../../utils';
import { Challenge } from '../../utils/Challenge';
/**
 * Advent of Code 2021 day 14 ("Extended Polymerization").
 *
 * The polymer is never materialised: only the count of each adjacent
 * character pair plus a running per-character tally are tracked, so 40
 * steps stay cheap despite the polymer's exponential growth.
 */
export default class Challenge28 implements Challenge {
    private readonly input: string[] = new FileReader(14).getFileAsArray().filter((line) => line !== '');
    // Characters of the starting template line.
    private template: string[] = [];
    // Pair -> number of occurrences of that pair in the current polymer.
    private occurrences: { [key: string]: number } = {};
    // Pair -> character inserted between its two elements each step.
    private insertionRules: { [key: string]: string } = {};
    // Character -> total occurrences in the current polymer.
    private charCount: { [key: string]: number } = {};

    constructor() {
        for (let line of this.input) {
            if (line.includes(' -> ')) {
                // line is rule
                let rule: [string, string] = line.split(' -> ') as [string, string];
                this.insertionRules[rule[0]] = rule[1];
                this.occurrences[rule[0]] = 0;
            } else {
                // line is template
                this.template = line.split('');
            }
        }
        // Seed per-character counts from the starting template.
        for (let char of this.template) {
            this.charCount[char] = (this.charCount[char] || 0) + 1;
        }
        this.parseTemplateAndAddToOcurrences(this.template);
    }

    /** Adds every adjacent pair of `template` to the pair counts, weighted by `multiplier`. */
    parseTemplateAndAddToOcurrences(template: string[], multiplier: number = 1) {
        for (let index = 0; index < template.length - 1; index += 1) {
            const searchIndex = template[index] + template[index + 1];
            this.occurrences[searchIndex] = (this.occurrences[searchIndex] || 0) + multiplier;
        }
    }

    /** Performs one insertion step over the pair counts. */
    executeRulesOntoPolymers(): void {
        const occurrences = Object.keys(this.occurrences).filter((item: string) => this.occurrences[item] > 0);
        const occurrencesCopy: { [key: string]: number } = { ...this.occurrences };
        this.occurrences = {};
        for (let occurrence of occurrences) {
            const insert = this.insertionRules[occurrence];
            // Pair "AB" with rule "AB -> C" yields the two new pairs AC, CB.
            const newTemplate = [occurrence[0], insert, occurrence[1]];
            const multiplier = occurrencesCopy[occurrence];
            this.charCount[insert] = (this.charCount[insert] || 0) + multiplier;
            this.parseTemplateAndAddToOcurrences(newTemplate, multiplier);
        }
    }

    /** Difference between the most and least common character counts. */
    calculateElements(): number {
        const counts = Object.values(this.charCount).sort((a, b) => a - b);
        // Fix: pop()/shift() return `number | undefined`; default to 0 so the
        // subtraction type-checks under strict null checks (and an empty
        // polymer yields 0 instead of NaN).
        const most = counts.pop() ?? 0;
        const least = counts.shift() ?? 0;
        return most - least;
    }

    solve(): number {
        const numOfSteps = 40;
        for (let step = 0; step < numOfSteps; step += 1) {
            this.executeRulesOntoPolymers();
        }
        return this.calculateElements();
    }
}
|
#!/usr/bin/env bash
# Copyright 2020 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail

export GO111MODULE=on

# Provides REPO_ROOT_DIR among other shared helpers.
source $(dirname $0)/../vendor/knative.dev/test-infra/scripts/library.sh

# Locate the k8s and Knative code generators, preferring the vendored copies
# and falling back to sibling GOPATH checkouts.
CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${REPO_ROOT_DIR}; ls -d -1 $(dirname $0)/../vendor/k8s.io/code-generator 2>/dev/null || echo ../../../k8s.io/code-generator)}
KNATIVE_CODEGEN_PKG=${KNATIVE_CODEGEN_PKG:-$(cd ${REPO_ROOT_DIR}; ls -d -1 $(dirname $0)/../vendor/knative.dev/pkg 2>/dev/null || echo ../pkg)}

# Vendored scripts may have lost their execute bit.
chmod +x ${CODEGEN_PKG}/generate-groups.sh
chmod +x ${KNATIVE_CODEGEN_PKG}/hack/generate-knative.sh

# Just Sources
API_DIRS_SOURCES=(source/pkg )

for DIR in "${API_DIRS_SOURCES[@]}"; do
    # generate the code with:
    # --output-base because this script should also be able to run inside the vendor dir of
    # k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir
    # instead of the $GOPATH directly. For normal projects this can be dropped.
    ${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
        "knative.dev/eventing-rabbitmq/${DIR}/client" "knative.dev/eventing-rabbitmq/${DIR}/apis" \
        "sources:v1alpha1" \
        --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate.go.txt

    # Knative Injection
    ${KNATIVE_CODEGEN_PKG}/hack/generate-knative.sh "injection" \
        "knative.dev/eventing-rabbitmq/${DIR}/client" "knative.dev/eventing-rabbitmq/${DIR}/apis" \
        "sources:v1alpha1" \
        --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate.go.txt
done

# Depends on generate-groups.sh to install bin/deepcopy-gen
${GOPATH}/bin/deepcopy-gen \
    -O zz_generated.deepcopy \
    --go-header-file ${REPO_ROOT_DIR}/hack/boilerplate.go.txt \
    -i knative.dev/eventing-rabbitmq/source/pkg/apis

# Make sure our dependencies are up-to-date
${REPO_ROOT_DIR}/hack/update-deps.sh
|
def lis(input_array):
    """Return the length of the longest strictly increasing subsequence.

    Classic O(n^2) dynamic programme: best[i] holds the length of the
    longest increasing subsequence that ends at index i.
    """
    n = len(input_array)
    best = [1] * n
    for i in range(1, n):
        for j in range(i):
            # Extend the subsequence ending at j when it improves best[i].
            if input_array[j] < input_array[i] and best[i] < best[j] + 1:
                best[i] = best[j] + 1
    # An empty input yields 0, matching the original behaviour.
    maximum = 0
    for length in best:
        maximum = max(maximum, length)
    return maximum
# Demo: the LIS of this array is 10 -> 22 -> 33 -> 50 -> 60, i.e. length 5.
input_array = [10, 22, 9, 33, 21, 50, 41, 60]
longest_lis = lis(input_array)
print("The length of longest increasing subsequence is:", longest_lis)
|
# part of thin-farm.eye config

# Declares an eye-monitored thin worker bound to 127.0.0.1:<port>.
# `proxy` supplies the application context (name, env, working dir).
def thin(proxy, port)
  name = "thin-#{port}"
  # Command-line options handed to `thin start`.
  opts = [
    '-l thins.log',
    "-p #{port}",
    "-P #{name}.pid",
    '-d',
    '-R thin.ru',
    "--tag #{proxy.app.name}.#{proxy.name}",
    '-t 60',
    "-e #{proxy.env['RAILS_ENV']}",
    "-c #{proxy.working_dir}",
    '-a 127.0.0.1'
  ]
  proxy.process(name) do
    pid_file "#{name}.pid"
    start_command "#{BUNDLE} exec thin start #{opts * ' '}"
    # Escalating stop sequence: QUIT, then TERM, then KILL.
    stop_signals [:QUIT, 2.seconds, :TERM, 1.seconds, :KILL]
    stdall 'thin.stdall.log'
    # Liveness probe: /hello must answer with "World" within 1 second.
    check :http, url: "http://127.0.0.1:#{port}/hello", pattern: /World/,
          every: 5.seconds, times: [2, 3], timeout: 1.second
  end
end
|
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-3379-1
#
# Security announcement date: 2015-10-25 00:00:00 UTC
# Script generation date: 2017-01-01 21:07:35 UTC
#
# Operating System: Debian 8 (Jessie)
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - miniupnpc:1.9.20140610-2+deb8u1
# - libminiupnpc10:1.9.20140610-2+deb8u1
# - libminiupnpc-dev:1.9.20140610-2+deb8u1
# - python-miniupnpc:1.9.20140610-2+deb8u1
#
# Last versions recommended by security team:
# - miniupnpc:1.9.20140610-2+deb8u1
# - libminiupnpc10:1.9.20140610-2+deb8u1
# - libminiupnpc-dev:1.9.20140610-2+deb8u1
# - python-miniupnpc:1.9.20140610-2+deb8u1
#
# CVE List:
# - CVE-2015-6031
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE

# Pin each affected package to the patched version; --only-upgrade leaves
# packages that are not already installed untouched.
sudo apt-get install --only-upgrade miniupnpc=1.9.20140610-2+deb8u1 -y
sudo apt-get install --only-upgrade libminiupnpc10=1.9.20140610-2+deb8u1 -y
sudo apt-get install --only-upgrade libminiupnpc-dev=1.9.20140610-2+deb8u1 -y
sudo apt-get install --only-upgrade python-miniupnpc=1.9.20140610-2+deb8u1 -y
|
<filename>db/migrate/20151203001139_make_organisation_content_ids_not_null.rb<gh_stars>1-10
class MakeOrganisationContentIdsNotNull < ActiveRecord::Migration[5.0]
  # Tighten organisations.content_id: keep it a string(255) but disallow NULL.
  def change
    change_column :organisations, :content_id, :string, limit: 255, null: false
  end
end
|
//#####################################################################
// Copyright 2004. <NAME>, <NAME>.
// This file is part of PhysBAM whose distribution is governed by the license contained in the accompanying file PHYSBAM_COPYRIGHT.txt.
//#####################################################################
#ifndef COMPILE_WITHOUT_READ_WRITE_SUPPORT
#include <PhysBAM_Tools/Parsing/PARAMETER_LIST_CONTAINER.h>
#include <PhysBAM_Tools/Parsing/STRING_UTILITIES.h>
#include <PhysBAM_Tools/Read_Write/Utilities/FILE_UTILITIES.h>
#include <fstream>
using namespace PhysBAM;
//#####################################################################
// Get_Next_Parameter_List
//#####################################################################
bool PARAMETER_LIST_CONTAINER::
Get_Next_Parameter_List(std::string& identifier,PARAMETER_LIST& list)
{
    // Scans the preprocessed `lines` for the next "identifier { ... }" block,
    // filling `identifier` and parsing the braced body into `list`.
    // Returns true on success, false at end of input or on a parse error.
    list.Clear();
    identifier="";std::string rest;
    while(true){ // Find opening brace
        std::string::size_type start=partial_line.find("{");
        if(start==std::string::npos){
            // No brace yet: the whole line belongs to the identifier.
            identifier+=partial_line;++line;
            // End of input: clean EOF when only whitespace accumulated so far,
            // otherwise a dangling identifier without a body is a parse error.
            if(line==lines.end()){STRING_UTILITIES::Strip_Whitespace(identifier);if(identifier=="")goto parse_done;else goto parse_error;}
            partial_line=*line;}
        else {identifier+=partial_line.substr(0,start);rest=partial_line.substr(start+1);break;}}
    {std::string data;std::string::size_type end;
    while((end=rest.find("}"))==std::string::npos){ //accumulate data until we find closing brace to end block
        data+=rest+"\n";++line;if(line==lines.end())goto parse_error;else rest=*line;}
    // Keep any text after the closing brace for the next call.
    data+=rest.substr(0,end);partial_line=rest.substr(end+1);
    std::istringstream string_stream(data.c_str());
    list.Read(string_stream);
    STRING_UTILITIES::Strip_Whitespace(identifier);
    return true;}
parse_done:
    return false;
parse_error:
    LOG::cerr<<"PARAMETER_LIST_CONTAINER: Parse Error"<<std::endl;
    return false;
}
//#####################################################################
// Pre_Process_File
//#####################################################################
// strip comments and do include files
void PARAMETER_LIST_CONTAINER::
Pre_Process_File(const std::string& filename)
{
    // Reads `filename`, strips //-comments and surrounding whitespace, and
    // appends each resulting line to `lines`, recursively expanding
    // #include "relative/path" directives relative to the including file's
    // directory.
    std::ifstream input_stream(filename.c_str());
    std::string directory=FILE_UTILITIES::Get_Base_Directory_Name(filename);
    if(!input_stream)LOG::cerr<<"Could not open parameter list container file "<<filename<<std::endl;
    else{
        std::string line,statement;
        while(std::getline(input_stream,line)){
            // Drop everything after a // comment marker.
            std::string::size_type comment=line.find("//");
            statement=(comment==std::string::npos)?line:line.substr(0,comment);
            STRING_UTILITIES::Strip_Whitespace(statement);
            if(statement.substr(0,9)=="#include "){
                std::string::size_type start=statement.find("\""),end=statement.rfind("\"");
                if(start==std::string::npos||end==std::string::npos)
                    // NOTE(review): this message prints the including file twice
                    // and streams the offending line's text (not a line number)
                    // — confirm whether the second `filename` should be
                    // `statement`.
                    LOG::cerr<<filename<<" line "<<line<<": Could not open included file "<<filename<<std::endl;
                else Pre_Process_File(directory+"/"+statement.substr(start+1,end-start-1));}
            else lines.Append(statement);}
        input_stream.close();}
}
//#####################################################################
#endif
|
import React from 'react'
import Layout from '../components/layout'
import { Typography, Box } from '@material-ui/core'
import Spacer from '../components/basic/Spacer'
import SiteContainer from '../components/SiteContainer'
import gif from '../images/Garage Lights/screenshot.gif'
import ViewSourceCodeButton from '../components/basic/viewSourceCodeButton'
import screenshot from '../images/Garage Lights/screenshot.png'
import {links} from '../data/links'
import FullwidthVid from '../components/basic/fullwidthVid'
export default function contact() {
const imageWrapper = {
maxWidth: '500px',
}
return (
<Layout seoTitle="Garage Lights" usesHeader usesFooter>
<SiteContainer>
<Spacer />
<Typography variant="h2" component="h1">Garage Lights</Typography>
<Spacer rem={2} />
<Typography variant="h4" component="h3">A VST plugin that allows users to control a DMX lighting universe from their DAW (digital audio workstation).</Typography>
<Spacer rem={2} />
<ViewSourceCodeButton url={links.garageLights.github} />
<Spacer rem={2}/>
<Box width="100%" display="flex" justifyContent="center">
<div style={imageWrapper}>
<img width="100%" src={screenshot} alt="Garage Lights Screenshot"/>
</div>
</Box>
<Spacer />
<Typography variant="h3">Key Features</Typography>
<ul>
<li>Garage lights is programmed to communicate with any Open DMX compatible USB interface.</li>
<li>Users get a full 512 parameters that can be controlled/automated from within a DAW to control a DMX universe.</li>
</ul>
<Spacer rem={3}/>
<Typography variant="h3">What I learned</Typography>
<ul>
<li>C++</li>
<li>Object Oriented Programming</li>
<li>VST plugin development using the Juce application framework</li>
<li>Working with Serial Port devices</li>
</ul>
<Spacer />
<Box width="100%" display="flex" justifyContent="center">
<div style={imageWrapper}>
<img width="100%" src={gif} alt="Garage Lights Moving Screenshot GIF"/>
</div>
</Box>
<Spacer />
<Typography variant="h4">Garage Lights demo in Ableton Live</Typography>
<Typography variant="p">This software allows for live control of stage-lights with a midi controller</Typography>
<Spacer rem={1}/>
<FullwidthVid src="https://www.youtube.com/embed/se_ilqRNLNU" />
<Spacer />
</SiteContainer>
</Layout>
)
}
|
#!/bin/sh
# Applies the patch series stored beside this script onto the matching
# directories of the source tree with "git am -3".
MYABSPATH=$(readlink -f "$0")
PATCHBASE=$(dirname "$MYABSPATH")
CMBASE=$(readlink -f "$PATCHBASE/../../../../")

for i in $(find "$PATCHBASE"/* -type d); do
    PATCHNAME=$(basename "$i")
    PATCHTARGET=$PATCHNAME
    # Patch directory names encode their target path with '_' in place of
    # '/'; replace one underscore at a time (up to 4) until the directory
    # exists. Fix: use a distinct loop variable instead of reusing (and
    # clobbering) the outer loop's $i.
    for n in $(seq 4); do
        PATCHTARGET=$(echo $PATCHTARGET | sed 's/_/\//')
        if [ -d "$CMBASE/$PATCHTARGET" ]; then break; fi
    done
    echo "### Patches in $PATCHTARGET"
    cd "$CMBASE/$PATCHTARGET" || exit 1
    git am -3 "$PATCHBASE/$PATCHNAME"/* || exit 1
done
|
// Copyright (c) 2018-2019 WING All Rights Reserved.
//
// Author : yangping
// Email : <EMAIL>
//
// Prismy.No | Date | Modified by. | Description
// -------------------------------------------------------------------
// 00001 2019/05/22 yangping New version
// 00002 2019/06/30 zhaixing Add function from godfs
// -------------------------------------------------------------------
package invar

// Bool is a tri-state boolean type: false, uninitialized, or true.
type Bool int

const (
	// BFalse mean false or FALSE
	BFalse Bool = iota - 1
	// BNone means default status, not inited
	BNone
	// BTrue means true or TRUE
	BTrue
)

// Status status type
type Status int

const (
	// StatePanic [-5], panic state
	StatePanic Status = iota - 5
	// StateException [-4], exception state
	StateException
	// StateTimeout [-3], failed state
	StateTimeout
	// StateFailed [-2], failed state
	StateFailed
	// StateError [-1], error state
	StateError
	// StateSuccess [ 0], success state
	StateSuccess
	// StateRecover [ 1], recover state
	StateRecover
)

// The StateActive value must be 0 as set in the database initialize script;
// if you want to change it, please modify the script as well.
const (
	// StateActive [0], activate state
	StateActive Status = iota
	// StateFrozen [1], frozen state
	StateFrozen
	// StateDisabled [2], disable state
	StateDisabled
)

// The StateUnpaid value must be 0 as set in the database initialize script;
// if you want to change it, please modify the script as well.
const (
	// StateUnpaid [0], initialization state when trade just created
	StateUnpaid Status = iota
	// StatePaid [1], the transaction completed
	StatePaid
	// StateExpired [2], the transaction expired
	StateExpired
	// StateFailure [3], the transaction failed
	StateFailure
)

// Box box type
type Box int

// The BoxDraft value must be 0 as set in the database initialize script;
// if you want to change it, please modify the script as well.
const (
	// BoxDraft [0], draft box
	BoxDraft Box = iota
	// BoxActive [1], active box
	BoxActive
	// BoxOffShelve [2], offshelve box
	BoxOffShelve
	// BoxSend [3], send box
	BoxSend
	// BoxReceive [4], receive box
	BoxReceive
	// BoxSending [5], sending box
	BoxSending
	// BoxDustbin [6], dustbin box
	BoxDustbin
)

// Role role type
type Role int

const (
	// RoleUser [ 0], user role
	RoleUser Role = iota
	// RoleAdmin [ 1], admin role
	RoleAdmin
	// RoleManager [ 2], manager role
	RoleManager
	// RoleSuper [ 3], super role
	RoleSuper
	// RoleConsumer [ 4], consumer role
	RoleConsumer
	// RoleSeller [ 5], seller role
	RoleSeller
	// RoleAgent [ 6], agent role
	RoleAgent
	// RoleVendor [ 7], vendor role
	RoleVendor
	// RoleOwner [ 8], owner role
	RoleOwner
	// RoleTarget [ 9], target role
	RoleTarget
	// RoleGuest [10], guest role
	RoleGuest
	// RoleMaster [11], master role
	RoleMaster
	// RoleCaller [12], caller role
	RoleCaller
	// RoleCallee [13], callee role
	RoleCallee
	// RoleReception [14], company reception role
	RoleReception
	// RoleControl [15], control room role
	RoleControl
	// RoleRoom [16], guest room role
	RoleRoom
)

// Limit limit permission type
type Limit int

const (
	// LimitAddible [0], addible permission
	LimitAddible Limit = iota
	// LimitMutable [1], mutable permission
	LimitMutable
	// LimitPayable [2], payable permission
	LimitPayable
	// LimitSettable [3], settable permission
	LimitSettable
	// LimitDeletable [4], deletable permission
	LimitDeletable
)

// Kind category type
type Kind int

const (
	// KindHome [0], home kind
	KindHome Kind = iota
	// KindCompany [1], company kind
	KindCompany
	// KindOthers [2], others kind
	KindOthers
)

// Frame's status type
const (
	// StatusSuccess [0], status success
	StatusSuccess byte = iota
	// StatusInternalErr [1], status internal error
	StatusInternalErr
	// StatusBadSecret [2], status bad secret
	StatusBadSecret
	// StatusFullConnectionPool [3], status full connection pool
	StatusFullConnectionPool
	// StatusInstanceIdExist [4], status instance id exist
	StatusInstanceIdExist
)
|
import mongoose from 'mongoose';
import { ENV } from '@/utils/validateEnv';
/**
 * Opens the shared Mongoose connection to the cluster configured in
 * ENV.DB_URL and attaches lifecycle logging for the connected, error and
 * disconnected events.
 */
const initDB = () => {
    const options = {
        autoIndex: false, // Don't build indexes
        maxPoolSize: 10, // Maintain up to 10 socket connections
        serverSelectionTimeoutMS: 5000, // Keep trying to send operations for 5 seconds
        socketTimeoutMS: 45000, // Close sockets after 45 seconds of inactivity
        family: 4, // Use IPv4, skip trying IPv6,
        ssl: true,
    };
    const mongoDB = ENV.DB_URL;
    mongoose.connect(mongoDB, options);
    const db = mongoose.connection;
    db.on('connected', () => console.log('Mongoose connected to DB cluster'));
    // Fix: the original handler only *created* a bound logger with
    // console.error.bind(...) and discarded it, so connection errors were
    // never printed. Log the error directly instead.
    db.on('error', (err) => console.error('MongoDB connection error:', err));
    db.on('disconnected', () => console.log('Mongoose disconnected'));
}

export default initDB
|
<reponame>akokhanovskyi/kaa
/*
* Copyright 2014-2016 CyberVision, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kaaproject.kaa.client.bootstrap;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.kaaproject.kaa.client.FailureListener;
import org.kaaproject.kaa.client.channel.BootstrapTransport;
import org.kaaproject.kaa.client.channel.failover.FailoverDecision;
import org.kaaproject.kaa.client.channel.failover.FailoverManager;
import org.kaaproject.kaa.client.channel.failover.FailoverStatus;
import org.kaaproject.kaa.client.channel.GenericTransportInfo;
import org.kaaproject.kaa.client.channel.KaaInternalChannelManager;
import org.kaaproject.kaa.client.channel.ServerType;
import org.kaaproject.kaa.client.channel.TransportConnectionInfo;
import org.kaaproject.kaa.client.channel.TransportProtocolId;
import org.kaaproject.kaa.client.context.ExecutorContext;
import org.kaaproject.kaa.client.transport.TransportException;
import org.kaaproject.kaa.common.TransportType;
import org.kaaproject.kaa.common.endpoint.gen.ProtocolMetaData;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Default {@link BootstrapManager} implementation.
 *
 * <p>Keeps the operations-server list received from the bootstrap service,
 * grouped per transport protocol, and hands the next candidate server to the
 * channel manager whenever the current one fails. Failover decisions are
 * delegated to the configured {@link FailoverManager}.
 *
 * @author <NAME>
 */
public class DefaultBootstrapManager implements BootstrapManager {

    /** The Constant LOG. */
    private static final Logger LOG = LoggerFactory.getLogger(DefaultBootstrapManager.class);

    /** Listener invoked when the failover strategy declares an unrecoverable failure. */
    private FailureListener failureListener;

    private BootstrapTransport transport;

    /** Flat list of operations servers most recently received from the bootstrap service. */
    private List<ProtocolMetaData> operationsServerList;

    private KaaInternalChannelManager channelManager;

    private FailoverManager failoverManager;

    /** Access point id to apply once a server list arrives; null when nothing is pending. */
    private Integer serverToApply;

    private ExecutorContext executorContext;

    /** Operations servers grouped by transport protocol id. */
    private final Map<TransportProtocolId, List<ProtocolMetaData>> mappedOperationServerList = new HashMap<>();

    /** Round-robin iterators over the shuffled per-protocol server lists. */
    private final Map<TransportProtocolId, Iterator<ProtocolMetaData>> mappedIterators = new HashMap<>();

    public DefaultBootstrapManager(BootstrapTransport transport, ExecutorContext executorContext, FailureListener failureListener) {
        this.transport = transport;
        this.executorContext = executorContext;
        this.failureListener = failureListener;
    }

    @Override
    public void receiveOperationsServerList() throws TransportException {
        LOG.debug("Going to invoke sync method of assigned transport");
        transport.sync();
    }

    @Override
    public synchronized void useNextOperationsServer(TransportProtocolId transportId, FailoverStatus status) {
        // CONSISTENCY FIX: synchronized like the sibling mutators, because
        // onProtocolListUpdated() rewrites mappedOperationServerList/mappedIterators
        // from another thread while this method reads them.
        if (!mappedOperationServerList.isEmpty()) {
            Iterator<ProtocolMetaData> serverIterator = mappedIterators.get(transportId);
            // BUG FIX: the original called hasNext() on the map lookup directly and
            // threw NullPointerException for an unknown transport id; treat that
            // case the same as an exhausted iterator.
            if (serverIterator != null && serverIterator.hasNext()) {
                ProtocolMetaData nextOperationsServer = serverIterator.next();
                // Typo fix in log message: "used", not "user".
                LOG.debug("New server [{}] will be used for [{}]",
                        nextOperationsServer.getAccessPointId(), transportId);
                if (channelManager != null) {
                    channelManager.onTransportConnectionInfoUpdated(
                            new GenericTransportInfo(ServerType.OPERATIONS, nextOperationsServer));
                } else {
                    LOG.error("Can not process server change. Channel manager was not specified");
                }
            } else {
                LOG.warn("Failed to find server for channel [{}]", transportId);
                resolveFailoverStatus(status);
            }
        } else {
            throw new BootstrapRuntimeException("Operations Server list is empty");
        }
    }

    @Override
    public synchronized void setTransport(BootstrapTransport transport) {
        this.transport = transport;
    }

    @Override
    public synchronized void useNextOperationsServerByAccessPointId(int accessPointId) {
        List<ProtocolMetaData> servers = getTransportsByAccessPointId(accessPointId);
        if (servers != null && !servers.isEmpty()) {
            notifyChannelManagerAboutServer(servers);
        } else {
            // Remember the requested access point and trigger a fresh list sync;
            // it is applied in onProtocolListUpdated() once servers arrive.
            serverToApply = accessPointId;
            transport.sync();
        }
    }

    /** Pushes every matching transport to the channel manager as an OPERATIONS server. */
    private void notifyChannelManagerAboutServer(List<ProtocolMetaData> transports) {
        for (ProtocolMetaData transport : transports) {
            LOG.debug("Applying new transport {}", transports);
            channelManager.onTransportConnectionInfoUpdated(new GenericTransportInfo(ServerType.OPERATIONS, transport));
        }
    }

    /**
     * Returns every known operations server whose access point id matches.
     *
     * @throws BootstrapRuntimeException if no server list has been received yet
     */
    private List<ProtocolMetaData> getTransportsByAccessPointId(int accessPointId) {
        if (operationsServerList == null || operationsServerList.isEmpty()) {
            throw new BootstrapRuntimeException("Operations Server list is empty");
        }
        List<ProtocolMetaData> result = new ArrayList<>();
        for (ProtocolMetaData transport : operationsServerList) {
            if (transport.getAccessPointId().intValue() == accessPointId) {
                result.add(transport);
            }
        }
        return result;
    }

    @Override
    public synchronized void setChannelManager(KaaInternalChannelManager manager) {
        this.channelManager = manager;
    }

    @Override
    public synchronized void setFailoverManager(FailoverManager failoverManager) {
        this.failoverManager = failoverManager;
    }

    @Override
    public synchronized void onProtocolListUpdated(List<ProtocolMetaData> list) {
        LOG.trace("Protocol list was updated");
        operationsServerList = list;
        mappedOperationServerList.clear();
        mappedIterators.clear();
        if (operationsServerList == null || operationsServerList.isEmpty()) {
            LOG.trace("Received empty operations service list");
            resolveFailoverStatus(FailoverStatus.NO_OPERATION_SERVERS_RECEIVED);
            return;
        }
        // Group servers per transport protocol id.
        for (ProtocolMetaData server : operationsServerList) {
            TransportProtocolId transportId = new TransportProtocolId(server.getProtocolVersionInfo().getId(), server.getProtocolVersionInfo().getVersion());
            List<ProtocolMetaData> servers = mappedOperationServerList.get(transportId);
            if (servers == null) {
                servers = new LinkedList<>();
                mappedOperationServerList.put(transportId, servers);
            }
            servers.add(server);
        }
        // Shuffle for simple load distribution, then expose round-robin iterators.
        for (Map.Entry<TransportProtocolId, List<ProtocolMetaData>> entry : mappedOperationServerList.entrySet()) {
            Collections.shuffle(entry.getValue());
            mappedIterators.put(entry.getKey(), entry.getValue().iterator());
        }
        if (serverToApply != null) {
            // A specific access point was requested before this list arrived.
            List<ProtocolMetaData> servers = getTransportsByAccessPointId(serverToApply);
            if (servers != null && !servers.isEmpty()) {
                notifyChannelManagerAboutServer(servers);
                serverToApply = null;
            }
        } else {
            for (Map.Entry<TransportProtocolId, Iterator<ProtocolMetaData>> entry : mappedIterators.entrySet()) {
                TransportConnectionInfo info = new GenericTransportInfo(ServerType.OPERATIONS, entry.getValue().next());
                channelManager.onTransportConnectionInfoUpdated(info);
            }
        }
    }

    /** Applies the failover manager's decision for the given status. */
    private void resolveFailoverStatus(FailoverStatus status) {
        FailoverDecision decision = failoverManager.onFailover(status);
        switch (decision.getAction()) {
            case NOOP:
                LOG.warn("No operation is performed according to failover strategy decision");
                break;
            case RETRY:
                long retryPeriod = decision.getRetryPeriod();
                LOG.warn("Attempt to receive operations service list will be made in {} ms, " +
                        "according to failover strategy decision", retryPeriod);
                scheduleServerListRetrieval(retryPeriod);
                break;
            case USE_NEXT_BOOTSTRAP:
                LOG.warn("Trying to switch to the next bootstrap service according to failover strategy decision");
                retryPeriod = decision.getRetryPeriod();
                failoverManager.onServerFailed(channelManager.getActiveServer(TransportType.BOOTSTRAP), status);
                scheduleServerListRetrieval(retryPeriod);
                break;
            case FAILURE:
                LOG.warn("Calling failure listener according to failover strategy decision!");
                failureListener.onFailure();
                break;
            default:
                break;
        }
    }

    /**
     * Schedules a single retry of {@link #receiveOperationsServerList()} after the
     * given delay. Extracted: RETRY and USE_NEXT_BOOTSTRAP previously duplicated
     * this Runnable verbatim.
     */
    private void scheduleServerListRetrieval(long retryPeriodMillis) {
        executorContext.getScheduledExecutor().schedule(new Runnable() {
            @Override
            public void run() {
                try {
                    receiveOperationsServerList();
                } catch (TransportException e) {
                    LOG.error("Error while receiving operations service list", e);
                }
            }
        }, retryPeriodMillis, TimeUnit.MILLISECONDS);
    }
}
|
<reponame>tiancihe/X6
import type { Morpher } from '../morpher/morpher'
/** Presumably invoked once before an animation starts — TODO confirm against the animator implementation. */
export type PrepareMethod<TAnimator> = (
  this: TAnimator,
  animator: TAnimator,
) => any

/** Invoked per tick with the current position (or delta) of the animation. */
export type RunMethod<TAnimator> = (
  this: TAnimator,
  animator: TAnimator,
  positionOrDelta: number,
) => any

/** Re-points a running animation at a new target value with optional extra data. */
export type RetargetMethod<TAnimator, TTarget = any, TExtra = any> = (
  this: TAnimator,
  animator: TAnimator,
  target: TTarget,
  extra: TExtra,
) => any

/** One queued unit of animation work plus its lifecycle flags. */
interface Executor<TAnimator> {
  prepare: PrepareMethod<TAnimator>
  run: RunMethod<TAnimator>
  retarget?: RetargetMethod<TAnimator> | null
  // True once prepare() has run; true once the animation has completed.
  ready: boolean
  finished: boolean
  // Marks executors that drive transform attributes — verify against callers.
  isTransform?: boolean
}

export type Executors<TAnimator> = Executor<TAnimator>[]

/** Per-method record of the morpher and the executor currently driving it. */
export interface History<TAnimator> {
  [method: string]: {
    morpher: Morpher<any, any, any>
    executor: Executor<TAnimator>
  }
}
|
<filename>second-example-application/src/test/java/com/github/robindevilliers/welcometohell/steps/BalletQuestion.java
package com.github.robindevilliers.welcometohell.steps;
import com.github.robindevilliers.cascade.annotations.*;
import com.github.robindevilliers.welcometohell.Utilities;
import org.openqa.selenium.By;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import static org.junit.Assert.assertEquals;
@Step(OperaQuestion.HaveNotBeenToTheOpera.class)
@Narrative("Ballet question.")
public interface BalletQuestion {

    @Narrative("Enter likes ballet and go to the dirty dog page.")
    class LikesBallet implements BalletQuestion {

        @Demands
        private WebDriver webDriver;

        @When
        public void when() {
            // Tick the radio input whose value is "true" (likes ballet), then submit.
            for (WebElement input : webDriver.findElements(By.cssSelector("input"))) {
                if (input.getAttribute("value").equals("true")) {
                    input.click();
                    break;
                }
            }
            webDriver.findElement(By.cssSelector("button[type=submit]")).click();
            Utilities.waitForPage(webDriver);
        }

        @Then
        public void then() {
            // Liking ballet ends the flow on the farewell page.
            assertEquals("Welcome to Hell | Good Bye", webDriver.getTitle());
        }
    }

    @Narrative("Enter hates ballet and go to the jurisdiction page.")
    class HatesBallet implements BalletQuestion {

        @Demands
        private WebDriver webDriver;

        @When
        public void when() {
            // Tick the radio input whose value is "false" (hates ballet), then submit.
            for (WebElement input : webDriver.findElements(By.cssSelector("input"))) {
                if (input.getAttribute("value").equals("false")) {
                    input.click();
                    break;
                }
            }
            webDriver.findElement(By.cssSelector("button[type=submit]")).click();
            Utilities.waitForPage(webDriver);
        }

        @Then
        public void then() {
            // Hating ballet routes the user to the jurisdiction page.
            assertEquals("Welcome to Hell | Jurisdiction", webDriver.getTitle());
        }
    }
}
|
/********************************************************************************************************
* @file adc.h
*
* @brief for TLSR chips
*
* @author telink
*
* @par Copyright (c) Telink Semiconductor (Shanghai) Co., Ltd.
* All rights reserved.
*
* The information contained herein is confidential and proprietary property of Telink
* Semiconductor (Shanghai) Co., Ltd. and is available under the terms
* of Commercial License Agreement between Telink Semiconductor (Shanghai)
* Co., Ltd. and the licensee in separate contract or the terms described here-in.
* This heading MUST NOT be removed from this file.
*
* Licensees are granted free, non-transferable use of the information in this
* file under Mutual Non-Disclosure Agreement. NO WARRENTY of ANY KIND is provided.
*
*******************************************************************************************************/
/*
* adc.h
*
* Created on: 2015-12-10
* Author: Telink
*/
#ifndef ADC_H_
#define ADC_H_

/* Dispatch to the chip-specific ADC driver header. Selection is driven either
 * by the legacy __TL_LIB_82xx__ build flags or by the MCU_CORE_TYPE define. */
#if(__TL_LIB_8266__ || MCU_CORE_TYPE == MCU_CORE_8266)
#include "adc_8266.h"
#elif(__TL_LIB_8263__ || MCU_CORE_TYPE == MCU_CORE_8263)
#include "adc_8263.h"
#elif(__TL_LIB_8267__ || MCU_CORE_TYPE == MCU_CORE_8267)
#include "adc_8267.h"
#elif(__TL_LIB_8269__ || MCU_CORE_TYPE == MCU_CORE_8269)
#include "adc_8269.h"
#elif(__TL_LIB_8258__ || MCU_CORE_TYPE == MCU_CORE_8258)
#include "adc_8258.h"
/* 8263 is already handled above; this duplicate branch was left disabled. */
//#elif(__TL_LIB_8263__ || MCU_CORE_TYPE == MCU_CORE_8263)
//#include "adc_8263.h"
#endif
#endif /* ADC_H_ */
|
<filename>trash/_client/src/components/Board/List.js
import React from 'react';
import { Table, Row, Col, Button, Container, Badge } from 'react-bootstrap';
import { useHistory, useLocation } from "react-router-dom";
/**
 * Q&A board list page.
 *
 * Currently renders a static placeholder table (two hard-coded rows) with a
 * badge showing whether each question has been answered, plus a button that
 * navigates to the "create post" route relative to the current path.
 */
export default function List() {
    const history = useHistory();
    const location = useLocation();
    return(
        <>
        <Container className="mt-3 mb-5" >
            <Table bordered hover>
                <thead>
                    <tr>
                        <th> </th>
                        <th>제목</th>
                        <th>작성자</th>
                        <th>등록일</th>
                        <th>조회수</th>
                    </tr>
                </thead>
                <tbody>
                    {/* Placeholder rows — replace with data fetched from the API. */}
                    <tr>
                        <td>
                            <Badge variant="danger">
                                답변 미완료
                            </Badge>
                        </td>
                        <td>동아리방 지도는 어디에서 확인할 수 있나여????????????????????????????????????????????????????????????????????</td>
                        <td>박지원지원</td>
                        <td>2021-08-01</td>
                        <td>10</td>
                    </tr>
                    <tr>
                        <td>
                            <Badge variant="success">
                                답변 완료
                            </Badge>
                        </td>
                        <td>Mark</td>
                        <td>Otto</td>
                        <td>Otto</td>
                        <td>@mdo</td>
                    </tr>
                </tbody>
            </Table>
        </Container>
        <Row>
            <Col sm={10}/>
            <Col sm={2}>
                {/* Navigate to the create-post form under the current board path. */}
                <Button onClick={() => {history.push(location.pathname+'/create')}}>
                    글 작성하기
                </Button>
            </Col>
        </Row>
        </>
    );
}
|
import _ from 'lodash';
import { Validator } from 'vee-validate';
import BigNumber from 'bignumber.js';
import Numeral from './numeral';
import Localize from '../localize';
import LocalStorageUtils from '../LocalStorageUtils';
export default {
  /**
   * Switches the app locale: persists the choice, updates the runtime i18n
   * instance, and mirrors it onto the <html lang> attribute.
   */
  setI18nLocale (locale) {
    LocalStorageUtils.saveItem('user.locale', {lang: locale});
    window.i18n.locale = locale;
    document.documentElement.setAttribute('lang', locale);
  },

  /**
   * Minutes between UTC and local time as reported by the JS runtime.
   * (Name keeps the historical "Timzone" spelling — callers depend on it.)
   */
  getTimzoneOffset () {
    return new Date().getTimezoneOffset();
  },
}
|
#!/bin/bash
# Downloads the quantized COCO SSD MobileNet v1 TF Lite model and a labels
# file with corrected indices.
set -e

# Get TF Lite model and labels.
# -f: fail on HTTP errors instead of saving an error page; -L: follow redirects.
curl -fLO http://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip
unzip coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip
rm coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip

# Get a labels file with corrected indices, delete the one shipped in the zip
curl -fLO https://dl.google.com/coral/canned_models/coco_labels.txt
rm labelmap.txt
|
#!/usr/bin/env bash
# Installs PlantUML under $HOME/dev/plantuml: downloads the jar, writes a
# start script, and registers a freedesktop.org launcher entry.

# Exit immediately if a pipeline, which may consist of a single simple command,
# a list, or a compound command returns a non-zero status
set -e

readonly MONIKER=plantuml
readonly VERSION=1.2021.5
readonly STUFF=plantuml.$VERSION.jar
readonly INSTALLER_DIR=$(dirname "$(realpath "$0")")
readonly TARGET_DIR="$HOME/dev/$MONIKER"
readonly START_SCRIPT="$TARGET_DIR/start-$MONIKER.sh"

# Writes a tiny wrapper that forwards all arguments to the jar.
create_start_script() {
    # BUG FIX: quote the jar path in the generated script (and the redirect
    # target) so the installation survives paths containing spaces.
    echo "java -jar \"$TARGET_DIR/$STUFF\"" '"$@"' > "$START_SCRIPT"
    chmod +x "$START_SCRIPT"
}

create_desktop_entry() { # https://specifications.freedesktop.org/desktop-entry-spec/desktop-entry-spec-latest.html
    echo "[Desktop Entry]
Type=Application
Categories=Development;
Name=PlantUML
Comment=
Icon=$TARGET_DIR/plantuml.png
Exec=$START_SCRIPT %u
Terminal=false" > "$HOME/.local/share/applications/$MONIKER.desktop"
}

if [ -d "$TARGET_DIR" ]; then
    echo "Directory exists: $TARGET_DIR" >&2
    exit 1
fi
mkdir --parents "$TARGET_DIR"

readonly TEMP_DIR=$(mktemp --directory -t delete-me-XXXXXXXXXX)
(
    cd "$TEMP_DIR"
    echo -n Downloading...
    wget --quiet "https://downloads.sourceforge.net/project/plantuml/$VERSION/$STUFF"
    echo done
    echo -n Installing...
    mv --force "$STUFF" "$TARGET_DIR"
    cp --force "$INSTALLER_DIR/plantuml.png" "$TARGET_DIR"
    create_start_script
    create_desktop_entry
    echo done
)
rm --recursive --force "$TEMP_DIR"
|
package com.li.excel.annotation;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import org.apache.poi.ss.usermodel.HorizontalAlignment;
/**
 *
 * @Title: ExcelField.java
 * @Package com.li.excel.annotation
 * @Description: Field annotation for Excel import/export
 * @author leevan
 * @date 2018-11-14 15:14:43
 * @version 1.0.0
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
public @interface ExcelField {

    /**
     * Column header name
     */
    String name() default "";

    /**
     * Format pattern (applies to dates and decimal values)
     */
    String format() default "";

    /**
     * Column width
     */
    int width() default 0;

    /**
     * Horizontal alignment
     *
     * @return
     */
    HorizontalAlignment align() default HorizontalAlignment.CENTER;

    /**
     * Column order
     */
    int order() default 0;

    /**
     * Default value used when the field is null
     */
    String defaultValue() default "";

    /**
     * Marks which export group(s) this field belongs to
     * <p>
     * e.g. different business cases export different subsets of columns
     */
    int[] group() default 0;

    /**
     * Template wrapping (can be combined with format — e.g. a Double may first
     * be formatted to two decimals, then spliced into the template)
     * <p>
     * Example: 第{{value}}月
     */
    String string() default "";

    /**
     * Separator used when merging fields
     */
    String separator() default "";
}
|
/**
 * Simple immutable-style value object describing a person: a name, an age,
 * and a gender, all fixed at construction time (no setters).
 */
public class Person {

    private final String name;
    private final int age;
    private final String gender;

    /**
     * Creates a person.
     *
     * @param name   the person's name
     * @param age    the person's age in years
     * @param gender the person's gender
     */
    public Person(String name, int age, String gender) {
        this.name = name;
        this.age = age;
        this.gender = gender;
    }

    /** @return the person's name */
    public String getName() {
        return this.name;
    }

    /** @return the person's age in years */
    public int getAge() {
        return this.age;
    }

    /** @return the person's gender */
    public String getGender() {
        return this.gender;
    }
}
|
import matplotlib.pyplot as plt
def generate_seismic_visualization(prop: str, tag: str, depth: float, lon, lat) -> None:
    """Render a one-panel CVM depth-slice figure and save it as a PNG.

    The output file is named ``CVM-Slice-<prop>-<tag>.png`` in the current
    working directory.

    Args:
        prop: Material property identifier used in the output file name.
        tag: CVM model tag shown in the title and file name.
        depth: Slice depth in meters (shown in the title).
        lon: Either a scalar longitude or a 2-element ``(min, max)`` extent.
        lat: Either a scalar latitude or a 2-element ``(min, max)`` extent.
    """
    fig, ax = plt.subplots()
    ax.set_title('CVM%s %.0f m depth' % (tag, depth))
    # BUG FIX: Axes.axis() requires a 4-element [xmin, xmax, ymin, ymax]
    # extent; the original passed the 2-element [lon, lat], which always
    # raises ValueError. Accept (min, max) pairs; scalar lon/lat carry no
    # extent information, so autoscaling is left enabled for them.
    try:
        lon_min, lon_max = lon
        lat_min, lat_max = lat
    except TypeError:
        pass  # scalar inputs: keep matplotlib's automatic axis limits
    else:
        ax.axis([lon_min, lon_max, lat_min, lat_max])
    file_name = 'CVM-Slice-%s-%s.png' % (prop, tag)
    fig.savefig(file_name)
    # Release the figure so repeated calls do not accumulate open figures.
    plt.close(fig)
|
#!/usr/bin/env bash
# Builds the web app and publishes the bundle (plus ROM assets) into docs/.
set -euo pipefail

cd www/
npm run build
rm -rf ../docs
cp -r dist ../docs
cp -r roms ../docs
|
/*
* Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
*
* This file is part of Orfeo Toolbox
*
* https://www.orfeo-toolbox.org/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "otbCommandLineArgumentParser.h"
#include "otbVectorImage.h"
#include "otbImageFileReader.h"
#include "otbGCPsToRPCSensorModelImageFilter.h"
#include "otbGenericRSTransform.h"
#include "otbGeographicalDistance.h"
// Test driver: estimates an RPC sensor model from user-supplied ground control
// points (GCPs), then reprojects each GCP through the estimated model and
// fails if any result is NaN or farther from the reference than --ErrorAllowed.
int otbGCPsToRPCSensorModelImageFilterCheckRpcModel(int argc, char * argv[])
{
  // Set command line arguments
  typedef otb::CommandLineArgumentParser ParserType;
  ParserType::Pointer parser = ParserType::New();
  parser->AddInputImage();
  parser->AddOption("--DEMDirectory", "Path to the DEM directory ", "-dem", 1, false);
  // Each GCP contributes 5 floats: sensor (x, y) + geographic (lon, lat, height).
  parser->AddOptionNParams("--GroudControlPoints",
                           "Ground Control Points to estimate sensor model a1x a1y b1x b1y b1z ... aNx aNy aNz bNx bNy bNz",
                           "-gcp", true);
  parser->AddOption("--ErrorAllowed", "Error allowed to declare a point not good ", "-err", 1, false);

  // Parse the command line
  typedef otb::CommandLineArgumentParseResult ParserResultType;
  ParserResultType::Pointer parseResult = ParserResultType::New();
  try
  {
    parser->ParseCommandLine(argc, argv, parseResult);
  }
  catch ( itk::ExceptionObject & err )
  {
    // --help and --version are surfaced as parser exceptions; both are success.
    std::string descriptionException = err.GetDescription();
    if (descriptionException.find("ParseCommandLine(): Help Parser") != std::string::npos)
    {
      return EXIT_SUCCESS;
    }
    if (descriptionException.find("ParseCommandLine(): Version Parser") != std::string::npos)
    {
      return EXIT_SUCCESS;
    }
    return EXIT_FAILURE;
  }

  // Check if the number of gcp pairs point is consistent (5 values per GCP)
  unsigned int nbPoints = parseResult->GetNumberOfParameters("--GroudControlPoints");
  if (nbPoints % 5 != 0)
  {
    std::cerr << "Inconsistent GCPs description!" << std::endl;
    return EXIT_FAILURE;
  }

  typedef otb::VectorImage<float, 2> ImageType;
  typedef otb::ImageFileReader<ImageType> ReaderType;
  typedef otb::GCPsToRPCSensorModelImageFilter<ImageType> GCPsToSensorModelFilterType;
  typedef GCPsToSensorModelFilterType::Point2DType Point2DType;
  typedef GCPsToSensorModelFilterType::Point3DType Point3DType;
  typedef otb::GenericRSTransform<double,3, 3> GenericRSTransformType;
  typedef otb::GeographicalDistance<ImageType::PointType> GeoDistanceType;

  ReaderType::Pointer reader = ReaderType::New();
  reader->SetFileName(parseResult->GetInputImage());
  reader->UpdateOutputInformation();

  GCPsToSensorModelFilterType::Pointer rpcEstimator = GCPsToSensorModelFilterType::New();
  rpcEstimator->SetInput(reader->GetOutput());

  unsigned int nbGCPs = nbPoints / 5;
  std::cout << "Receiving " << nbPoints << " from command line." << std::endl;

  // Feed every sensor <-> geographic pair into the RPC estimator.
  for (unsigned int gcpId = 0; gcpId < nbGCPs; ++gcpId)
  {
    Point2DType sensorPoint;
    sensorPoint[0] = parseResult->GetParameterFloat("--GroudControlPoints", gcpId * 5);
    sensorPoint[1] = parseResult->GetParameterFloat("--GroudControlPoints", 1 + gcpId * 5);
    Point3DType geoPoint;
    geoPoint[0] = parseResult->GetParameterFloat("--GroudControlPoints", 2 + gcpId * 5);
    geoPoint[1] = parseResult->GetParameterFloat("--GroudControlPoints", 3 + gcpId * 5);
    geoPoint[2] = parseResult->GetParameterFloat("--GroudControlPoints", 4 + gcpId * 5);
    std::cout << "Adding GCP sensor: " << sensorPoint << " <-> geo: " << geoPoint << std::endl;
    rpcEstimator->AddGCP(sensorPoint, geoPoint);
  }

  // Estimate the rpc model
  rpcEstimator->GetOutput()->UpdateOutputInformation();

  // Instantiate a GenericRSTransform in order to transform the
  // indexes, using the rpcModel estimated, into geographical
  // coordinates.
  // The test will check for nan coordinates, and the distance between
  // geographical coordinates.
  GenericRSTransformType::Pointer grsTrasnform = GenericRSTransformType::New();
  grsTrasnform->SetInputKeywordList(rpcEstimator->GetKeywordlist());
  std::cout<<rpcEstimator->GetKeywordlist()<<std::endl;
  // "4326" = EPSG code for WGS84 geographic lon/lat.
  grsTrasnform->SetOutputProjectionRef("4326");

  // Set the DEM Directory if any; otherwise fall back to a flat ellipsoid.
  if(parseResult->IsOptionPresent("--DEMDirectory"))
  {
    otb::DEMHandler::Instance()->OpenDEMDirectory(parseResult->GetParameterString("--DEMDirectory"));
  }
  else
  {
    otb::DEMHandler::Instance()->SetDefaultHeightAboveEllipsoid(0);
  }

  grsTrasnform->InstantiateTransform();

  // Test
  GeoDistanceType::Pointer geoDistance = GeoDistanceType::New();
  bool isErrorDetected = false;
  for (unsigned int gcpId = 0; gcpId < nbGCPs; ++gcpId)
  {
    // Sensor point (x, y) with the GCP height reused as the third component.
    Point3DType point;
    point[0] = parseResult->GetParameterFloat("--GroudControlPoints", gcpId * 5);
    point[1] = parseResult->GetParameterFloat("--GroudControlPoints", 1 + gcpId * 5);
    point[2] = parseResult->GetParameterFloat("--GroudControlPoints", 4 + gcpId * 5);
    Point3DType transformedPoint;
    transformedPoint = grsTrasnform->TransformPoint(point);
    Point2DType transformedPoint2D;
    transformedPoint2D[0] = transformedPoint[0];
    transformedPoint2D[1] = transformedPoint[1];

    // reference point
    Point2DType geoPoint;
    geoPoint[0] = parseResult->GetParameterFloat("--GroudControlPoints", 2 + gcpId * 5);
    geoPoint[1] = parseResult->GetParameterFloat("--GroudControlPoints", 3 + gcpId * 5);

    // Search for nans
    if ( vnl_math_isnan(transformedPoint2D[0]) || vnl_math_isnan(transformedPoint2D[1]) )
    {
      std::cout << "Reference : "<< geoPoint
                <<" --> Result of the reprojection using the estimated RpcModel "<< transformedPoint2D
                << std::endl;
      std::cout<<"The result of the projection is nan, there is a problem with the estimated RpcModel "
               << std::endl<<std::endl;
      isErrorDetected = true;
    }

    // Search for wrong projection results
    double residual = geoDistance->Evaluate(geoPoint, transformedPoint2D);
    if( residual > parseResult->GetParameterFloat("--ErrorAllowed"))
    {
      std::cout << "Reference : "<< geoPoint
                <<" --> Result of the reprojection using the estimated RpcModel "
                << transformedPoint2D
                << std::endl
                << " Residual ["<< residual << "] is higher than the tolerance ["
                << parseResult->GetParameterFloat("--ErrorAllowed")
                <<"], there is a problem with the estimated RpcModel"
                <<std::endl<<std::endl;
      isErrorDetected = true;
    }
  }

  // Is there an error
  if ( isErrorDetected )
    return EXIT_FAILURE;

  return EXIT_SUCCESS;
}
|
cite about-plugin
about-plugin 'Better bash history behaviors'

## shell history

# Don't record some commands in history
HISTIGNORE='exit:fg:bg:ls:sl:pwd:history:clear'
# don't put duplicate lines in the history. See bash(1) for more options
HISTCONTROL=ignoredups
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=1000
# Timestamp format for `history` output:
# %F equivalent to %Y-%m-%d
# %T equivalent to %H:%M:%S (24-hours format)
HISTTIMEFORMAT='%F %T '

## Refer to https://sanctum.geek.nz/arabesque/bash-history-expansion/
# disable ! style history substitution
set +o histexpand
# Not allow re-edit history substitution.
shopt -u histreedit
# the history list is appended to the file named by the value of the HISTFILE
# variable when the shell exits, rather than overwriting the file.
# shopt -s histappend
# We have used `history -a` in PROMPT_COMMAND. So close histappend.
shopt -u histappend
# shopt -s histappend
# If set, and readline is being used, the results of history substitution are
# not immediately passed to the shell parser. Instead, the resulting line is
# loaded into the readline editing buffer, allowing further modification.
shopt -s histverify
# bash attempts to save all lines of a multiple-line command in the same
# history entry. This allows easy re-editing of multi-line commands.
shopt -s cmdhist
|
import greenfoot.*; // (World, Actor, GreenfootImage, Greenfoot and MouseInfo)
/**
 * A falling zombie actor: each act() cycle it moves one pixel down the
 * screen; reaching y == 600 triggers game over on the enclosing zombie_bg
 * world.
 */
public class zomb_gen extends Actor
{
    /**
     * Act - do whatever the zomb_gen wants to do. This method is called whenever
     * the 'Act' or 'Run' button gets pressed in the environment.
     */
    public void act()
    {
        // Fall one pixel per act cycle.
        setLocation(getX(),getY()+1);
        // 600 presumably marks the bottom edge of the world — TODO confirm
        // against the zombie_bg world height.
        if (getY() == 600) {
            ((zombie_bg) getWorld()).gameOver();
        }
    }

    /** Removes this zombie from the world when it has been killed. */
    public void kill()
    {
        // Kill counting is currently disabled:
        //((zombie_bg) getWorld()).countkill();
        getWorld().removeObject(this);
    }
}
|
<reponame>jumpyapple/gallant-grasshoppers
import sys
from blessed import Terminal
class BasePage:
    """Base class for a page.

    Concrete pages override :meth:`render` and :meth:`handle_input`; both are
    invoked from the main game loop.
    """

    def __init__(self, state: object, term: Terminal, renderstate: object):
        # Shared game state, the blessed terminal, and per-render bookkeeping.
        self.state = state
        self.term = term
        self.renderstate = renderstate

    def render(self) -> None:
        """A render method.

        Get called from the main game loop.
        """
        raise NotImplementedError()

    def handle_input(self, key: str) -> None:
        """An input handler.

        Get called from the main game loop.

        :param key: the key press to handle
        """
        raise NotImplementedError()
# NOTE(review): these imports deliberately live *below* BasePage — presumably
# because the page modules import BasePage themselves and importing them at the
# top would be circular. Confirm before reordering.
sys.path.append("..")
from .game_page import GamePage  # noqa: F401, E402
from .manual_phase import ManualPhasePage  # noqa: F401, E402
from .start_page import StartPage  # noqa: F401, E402
|
# Kaldi recipe environment: point KALDI_ROOT at the Kaldi checkout and put its
# tools (utils/, OpenFst binaries) on PATH.
export KALDI_ROOT=/home/kaldi/
# Source optional per-installation environment overrides, if present.
[ -f $KALDI_ROOT/tools/env.sh ] && . $KALDI_ROOT/tools/env.sh
export PATH=$PWD/utils/:$KALDI_ROOT/tools/openfst/bin:$PWD:$PATH
[ ! -f $KALDI_ROOT/tools/config/common_path.sh ] && echo >&2 "The standard file $KALDI_ROOT/tools/config/common_path.sh is not present -> Exit!" && exit 1
. $KALDI_ROOT/tools/config/common_path.sh
# Byte-wise collation so sort/join-based scripts behave deterministically.
export LC_ALL=C
|
<filename>test/integration/auth/pages/login.tsx
import login from "app/mutations/login"
import logout from "app/mutations/logout"
import getCurrentUser from "app/queries/getCurrentUser"
import {useMutation, useQuery, useRouter} from "blitz"
import {Suspense, useState} from "react"
/**
 * Login/logout demo content.
 *
 * Shows "logged-in"/"logged-out" based on the current-user query and wires the
 * login/logout mutations to buttons. After a successful login, follows an
 * optional URL-encoded `?next=` redirect parameter.
 */
function Content() {
  const router = useRouter()
  const [error, setError] = useState(null)
  const [userId] = useQuery(getCurrentUser, null)
  const [loginMutation] = useMutation(login)
  const [logoutMutation] = useMutation(logout)

  // Surface any failure from a previous mutation instead of the normal UI.
  if (error) return <div id="error">{error}</div>

  return (
    <div>
      <div id="content">{userId ? "logged-in" : "logged-out"}</div>
      {userId ? (
        <button
          id="logout"
          onClick={async () => {
            try {
              await logoutMutation()
            } catch (error) {
              setError(error.toString())
            }
          }}
        >
          logout
        </button>
      ) : (
        <button
          id="login"
          onClick={async () => {
            await loginMutation()
            {/* Honour the encoded `next` query param after a successful login. */}
            const next = router.query.next ? decodeURIComponent(router.query.next as string) : null
            if (next) {
              await router.push(next)
            }
          }}
        >
          login
        </button>
      )}
    </div>
  )
}
/** Page wrapper: suspends with a loading fallback while Content's query resolves. */
function Login() {
  return (
    <div id="page">
      <Suspense fallback="Loading...">
        <Content />
      </Suspense>
    </div>
  )
}

export default Login
|
// Copyright 2021 Security Scorecard Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package githubrepo
import (
"context"
"fmt"
"github.com/shurcooL/githubv4"
"github.com/ossf/scorecard/v2/clients"
sce "github.com/ossf/scorecard/v2/errors"
)
const (
	// Page sizes for the GraphQL query below: only the most recent N of each
	// list field are fetched.
	pullRequestsToAnalyze = 30
	reviewsToAnalyze      = 30
	labelsToAnalyze       = 30
	commitsToAnalyze      = 30
)
// nolint: govet
// graphqlData mirrors the shape of the GitHub GraphQL v4 query: the
// repository's archived flag, default-branch protection rule and commit
// history, and the most recently merged pull requests with their labels and
// latest review states. The backtick tags carry the query parameters.
type graphqlData struct {
	Repository struct {
		IsArchived       githubv4.Boolean
		DefaultBranchRef struct {
			Name                 githubv4.String
			BranchProtectionRule struct {
				RequiredApprovingReviewCount githubv4.Int
			}
			Target struct {
				Commit struct {
					History struct {
						Nodes []struct {
							CommittedDate githubv4.DateTime
							Message       githubv4.String
							Oid           githubv4.GitObjectID
							Committer     struct {
								User struct {
									Login githubv4.String
								}
							}
						}
					} `graphql:"history(first: $commitsToAnalyze)"`
				} `graphql:"... on Commit"`
			}
		}
		PullRequests struct {
			Nodes []struct {
				Number      githubv4.Int
				MergeCommit struct {
					AuthoredByCommitter githubv4.Boolean
				}
				MergedAt githubv4.DateTime
				Labels   struct {
					Nodes []struct {
						Name githubv4.String
					}
				} `graphql:"labels(last: $labelsToAnalyze)"`
				LatestReviews struct {
					Nodes []struct {
						State githubv4.String
					}
				} `graphql:"latestReviews(last: $reviewsToAnalyze)"`
			}
		} `graphql:"pullRequests(last: $pullRequestsToAnalyze, states: MERGED)"`
	} `graphql:"repository(owner: $owner, name: $name)"`
}
// graphqlHandler owns the GraphQL client plus the data derived from a single
// init() query; the getters below serve from this cache.
type graphqlHandler struct {
	client           *githubv4.Client
	data             *graphqlData
	prs              []clients.PullRequest
	commits          []clients.Commit
	defaultBranchRef clients.BranchRef
	archived         bool
}
// init runs the GraphQL query for owner/repo and caches the derived
// pull-request, commit, default-branch, and archived data on the handler.
func (handler *graphqlHandler) init(ctx context.Context, owner, repo string) error {
	// Bind the query parameters referenced by the struct tags on graphqlData.
	vars := map[string]interface{}{
		"owner":                 githubv4.String(owner),
		"name":                  githubv4.String(repo),
		"pullRequestsToAnalyze": githubv4.Int(pullRequestsToAnalyze),
		"reviewsToAnalyze":      githubv4.Int(reviewsToAnalyze),
		"labelsToAnalyze":       githubv4.Int(labelsToAnalyze),
		"commitsToAnalyze":      githubv4.Int(commitsToAnalyze),
	}
	handler.data = new(graphqlData)
	if err := handler.client.Query(ctx, handler.data, vars); err != nil {
		// nolint: wrapcheck
		return sce.Create(sce.ErrScorecardInternal, fmt.Sprintf("githubv4.Query: %v", err))
	}
	handler.archived = bool(handler.data.Repository.IsArchived)
	handler.prs = pullRequestFrom(handler.data)
	handler.defaultBranchRef = defaultBranchRefFrom(handler.data)
	handler.commits = commitsFrom(handler.data)
	return nil
}
// getMergedPRs returns the merged pull requests cached by init().
func (handler *graphqlHandler) getMergedPRs() ([]clients.PullRequest, error) {
	return handler.prs, nil
}
// getDefaultBranch returns the default-branch info cached by init().
func (handler *graphqlHandler) getDefaultBranch() (clients.BranchRef, error) {
	return handler.defaultBranchRef, nil
}
// getCommits returns the default-branch commits cached by init().
func (handler *graphqlHandler) getCommits() ([]clients.Commit, error) {
	return handler.commits, nil
}
// isArchived reports whether the repository was archived, as cached by init().
func (handler *graphqlHandler) isArchived() (bool, error) {
	return handler.archived, nil
}
// pullRequestFrom converts the raw GraphQL pull-request nodes into
// client-facing PullRequest values, carrying over merge-commit authorship,
// labels, and the latest review states.
func pullRequestFrom(data *graphqlData) []clients.PullRequest {
	nodes := data.Repository.PullRequests.Nodes
	ret := make([]clients.PullRequest, 0, len(nodes))
	for _, pr := range nodes {
		converted := clients.PullRequest{
			Number:   int(pr.Number),
			MergedAt: pr.MergedAt.Time,
			MergeCommit: clients.Commit{
				AuthoredByCommitter: bool(pr.MergeCommit.AuthoredByCommitter),
			},
		}
		for _, label := range pr.Labels.Nodes {
			converted.Labels = append(converted.Labels, clients.Label{Name: string(label.Name)})
		}
		for _, review := range pr.LatestReviews.Nodes {
			converted.Reviews = append(converted.Reviews, clients.Review{State: string(review.State)})
		}
		ret = append(ret, converted)
	}
	return ret
}
// defaultBranchRefFrom maps the queried default-branch data into a
// clients.BranchRef, including the required approving review count from
// the branch protection rule.
func defaultBranchRefFrom(data *graphqlData) clients.BranchRef {
	return clients.BranchRef{
		Name: string(data.Repository.DefaultBranchRef.Name),
		BranchProtectionRule: clients.BranchProtectionRule{
			RequiredApprovingReviewCount: int(
				data.Repository.DefaultBranchRef.BranchProtectionRule.RequiredApprovingReviewCount),
		},
	}
}
// commitsFrom flattens the default branch's commit history into
// clients.Commit values (commit date, message, SHA and committer login).
func commitsFrom(data *graphqlData) []clients.Commit {
	nodes := data.Repository.DefaultBranchRef.Target.Commit.History.Nodes
	commits := make([]clients.Commit, len(nodes))
	for i, node := range nodes {
		commits[i] = clients.Commit{
			CommittedDate: node.CommittedDate.Time,
			Message:       string(node.Message),
			SHA:           string(node.Oid),
			Committer: clients.User{
				Login: string(node.Committer.User.Login),
			},
		}
	}
	return commits
}
|
import { BasePage } from "./base";
import { ProductDetailsModel } from "../models/productDetailsModel";
/**
 * Page object for the product-details page. Exposes actions for adding a
 * product to the cart and accessors for the product's displayed attributes
 * (name, price, quantity, size options).
 */
export class ProductDetailsPage extends BasePage {

    /** Relative URLs of the catalog products used by the tests. */
    private productCatalog = {
        regularProduct: '/rubber-ducks-c-1/red-duck-p-3',
        discountedProduct: '/rubber-ducks-c-1/blue-duck-p-4',
        parametrizedProduct: '/rubber-ducks-c-1/premium-ducks-c-2/vip-yellow-duck-p-6'
    };

    /** Clicks the "add to cart" button and waits briefly for the cart to update. */
    public addToCart(): void {
        $('button[name="add_cart_product"]').click();
        // Fixed wait for the cart update; NOTE(review): a browser.waitUntil on
        // the cart counter would be less flaky — confirm what the page exposes.
        browser.pause(1000);
    }

    /** Reads the product price from the #box-product data attribute. */
    public getProductPrice(): number {
        return parseFloat($('#box-product').getAttribute('data-price'));
    }

    /** Reads the product name from the page title. */
    public getProductName(): string {
        return $('h1.title').getText();
    }

    /** Reads the currently selected quantity from the buy-now form. */
    public getProductQuantity(): number {
        // Explicit radix avoids legacy octal interpretation of padded values.
        return parseInt($('.buy_now [name=quantity]').getValue(), 10);
    }

    /** Collects name, price and quantity into a single details model. */
    public getProductDetails(): ProductDetailsModel {
        const productDetails = new ProductDetailsModel();
        productDetails.name = this.getProductName();
        productDetails.price = this.getProductPrice();
        productDetails.quantity = this.getProductQuantity();
        return productDetails;
    }

    /** Opens the discounted demo product. */
    public openDiscountedProduct(): void {
        super.open(this.productCatalog.discountedProduct);
    }

    /** Opens the regular (non-discounted) demo product. */
    public openRegularProduct(): void {
        super.open(this.productCatalog.regularProduct);
    }

    /** Opens the product that has configurable parameters (e.g. size). */
    public openParametrizedProduct(): void {
        super.open(this.productCatalog.parametrizedProduct);
    }

    /** True when both the sale badge and the campaign price are visible. */
    public saleAttributesShown(): boolean {
        return ($('* #box-product .sale').isDisplayed() && $('* #box-product .campaign-price').isDisplayed());
    }

    /** True when the size drop-down is present in the buy-now form. */
    public sizeSelectorShown(): boolean {
        return ($('.buy_now .select-wrapper').isDisplayed());
    }

    /** Selects the given size value in the size drop-down. */
    public selectProductSize(productSize: string): void {
        $('.buy_now .select-wrapper .form-control').selectByAttribute('value', productSize);
    }

    /** Types the given quantity into the buy-now quantity field. */
    public setProductQuantity(productQuantity: number): void {
        $('.buy_now [name=quantity]').setValue(productQuantity);
    }

    /** Reads the price adjustment carried by a size drop-down option. */
    public getPriceAdjust(dropDownItem: WebdriverIO.Element): number {
        return parseFloat(dropDownItem.getAttribute('data-price-adjust'));
    }
}

export const ProductDetails = new ProductDetailsPage();
|
import re
def extract_information(html_content: str) -> "tuple[int, str]":
    """Extract the GitHub star count and the welcome message from HTML text.

    Args:
        html_content: Raw HTML/text to scan.

    Returns:
        A ``(stars, message)`` tuple. ``stars`` is the leading integer of the
        first ``<gh_stars>`` marker (0 if absent); a range such as
        ``<gh_stars>10-100`` yields its lower bound, 10. ``message`` is the
        text of the first ``<span style="color:#fff;">... </span>`` element
        ("" if absent); the pattern requires a space before ``</span>``.
    """
    # \d+ grabs only the leading digits, so "10-100" -> 10 and "1000+" -> 1000.
    stars_match = re.search(r'<gh_stars>(\d+)', html_content)
    stars = int(stars_match.group(1)) if stars_match else 0

    # Non-greedy capture stops at the first " </span>" (trailing space excluded).
    message_match = re.search(r'<span style="color:#fff;">(.*?) </span>', html_content)
    message = message_match.group(1) if message_match else ""

    return stars, message
|
<gh_stars>1000+
//
// CSVConfiguration.h
// Table Tool
//
// Created by <NAME> on 21.07.15.
// Copyright (c) 2015 Egger Apps. All rights reserved.
//
#import <Foundation/Foundation.h>
/// Value object describing how a CSV file is read or written: text encoding,
/// separator/quote/escape characters, number format and header handling.
/// Conforms to NSCopying so a configuration can be duplicated before editing.
@interface CSVConfiguration : NSObject <NSCopying>

/// Text encoding used when reading/writing the file.
@property NSStringEncoding encoding;
/// String placed between columns (e.g. "," or ";").
@property NSString *columnSeparator;
/// String used to quote field values (e.g. "\"").
@property NSString *quoteCharacter;
/// String used to escape quote characters inside quoted fields.
@property NSString *escapeCharacter;
/// Decimal separator used when parsing/formatting numbers ("." or ",").
@property NSString *decimalMark;
/// YES if the first row contains column headers rather than data.
@property BOOL firstRowAsHeader;
/// List of encodings the UI can offer. NOTE(review): each inner array
/// presumably pairs a display name with an encoding value — confirm in the
/// implementation.
+(NSArray<NSArray*>*)supportedEncodings;
@end
|
<reponame>ooooo-youwillsee/leetcode
/**
* @author ooooo
* @date 2021/3/6 13:05
*/
#ifndef CPP_0503__SOLUTION1_H_
#define CPP_0503__SOLUTION1_H_
#include <iostream>
#include <vector>
#include <stack>
using namespace std;
// 单调栈 ,底部元素最大
// LeetCode 503, "next greater element" in a circular array, solved with a
// monotonic stack whose largest values sit at the bottom.
class Solution {
 public:
  std::vector<int> nextGreaterElements(std::vector<int> &nums) {
    const int n = static_cast<int>(nums.size());
    std::vector<int> result(n, -1);
    std::stack<int> mono;
    // Sweep right-to-left over (almost) two copies of the array, taking the
    // index modulo n so the wrap-around elements are visible. Iterations with
    // i >= n only seed the stack; iterations with i < n also record answers.
    for (int i = 2 * n - 2; i >= 0; --i) {
      const int idx = i % n;
      // Discard everything not strictly greater than the current value.
      while (!mono.empty() && nums[idx] >= mono.top()) {
        mono.pop();
      }
      if (i < n && !mono.empty()) {
        result[idx] = mono.top();
      }
      mono.push(nums[idx]);
    }
    return result;
  }
};
#endif //CPP_0503__SOLUTION1_H_
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package brooklyn.entity.nosql.solr;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertTrue;
import java.util.Map;
import org.apache.solr.common.SolrDocument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import brooklyn.entity.proxying.EntitySpec;
import brooklyn.entity.trait.Startable;
import brooklyn.test.EntityTestUtils;
import brooklyn.util.collections.MutableMap;
import brooklyn.util.text.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
/**
* Solr live tests.
*
* Test the operation of the {@link SolrServer} class using the jclouds {@code rackspace-cloudservers-uk}
* and {@code aws-ec2} providers, with different OS images. The tests use the {@link SolrJSupport} class
* to exercise the node, and will need to have {@code brooklyn.jclouds.provider.identity} and {@code .credential}
* set, usually in the {@code .brooklyn/brooklyn.properties} file.
*/
/**
 * Solr live tests.
 *
 * <p>Runs {@link SolrServer} on real cloud VMs via the jclouds
 * {@code rackspace-cloudservers-uk} and {@code aws-ec2} providers with
 * different OS images, then uses {@link SolrJSupport} to index and query a few
 * documents. Requires {@code brooklyn.jclouds.provider.identity} and
 * {@code .credential} to be set, usually in
 * {@code .brooklyn/brooklyn.properties}.
 */
public class SolrServerLiveTest extends AbstractSolrServerTest {

    private static final Logger log = LoggerFactory.getLogger(SolrServerLiveTest.class);

    /** Rows of {imageId, provider, region (may be empty), description for logging}. */
    @DataProvider(name = "virtualMachineData")
    public Object[][] provideVirtualMachineData() {
        return new Object[][] {
                { "eu-west-1/ami-0307d674", "aws-ec2", "eu-west-1", "Ubuntu Server 14.04 LTS (HVM), SSD Volume Type" },
                { "LON/f9b690bf-88eb-43c2-99cf-391f2558732e", "rackspace-cloudservers-uk", "", "Ubuntu 12.04 LTS (Precise Pangolin)" },
                { "LON/a84b1592-6817-42da-a57c-3c13f3cfc1da", "rackspace-cloudservers-uk", "", "CentOS 6.5 (PVHVM)" },
        };
    }

    @Test(groups = "Live", dataProvider = "virtualMachineData")
    protected void testOperatingSystemProvider(String imageId, String provider, String region, String description) throws Exception {
        String regionSuffix = Strings.isNonEmpty(region) ? ":" + region : "";
        log.info("Testing Solr on {}{} using {} ({})", new Object[] { provider, regionSuffix, description, imageId });

        // Resolve the target location, pinning the VM to the requested image.
        Map<String, String> imageFlags = MutableMap.of("imageId", imageId);
        testLocation = app.getManagementContext().getLocationRegistry()
                .resolve(provider + regionSuffix, imageFlags);

        // Start a Solr server with the sample core and wait until it reports up.
        solr = app.createAndManageChild(EntitySpec.create(SolrServer.class)
                .configure(SolrServer.SOLR_CORE_CONFIG, ImmutableMap.of("example", "classpath://solr/example.tgz")));
        app.start(ImmutableList.of(testLocation));
        EntityTestUtils.assertAttributeEqualsEventually(solr, Startable.SERVICE_UP, true);

        // A fresh core must be empty; after adding three documents and
        // committing, all three should be queryable.
        SolrJSupport client = new SolrJSupport(solr, "example");
        Iterable<SolrDocument> documents = client.getDocuments();
        assertTrue(Iterables.isEmpty(documents));

        client.addDocument(MutableMap.<String, Object>of("id", "1", "description", "first"));
        client.addDocument(MutableMap.<String, Object>of("id", "2", "description", "second"));
        client.addDocument(MutableMap.<String, Object>of("id", "3", "description", "third"));
        client.commit();

        documents = client.getDocuments();
        assertEquals(Iterables.size(documents), 3);
    }
}
|
#!/bin/sh
# Extract detached code signatures from a signed macOS app bundle so they can
# be shipped separately and re-attached to an unsigned build later.
set -e

ROOTDIR=dist
BUNDLE="${ROOTDIR}/Safecrex-Qt.app"
CODESIGN=codesign
TEMPDIR=sign.temp
TEMPLIST=${TEMPDIR}/signatures.txt
OUT=signature.tar.gz
OUTROOT=osx

# Require at least one argument to forward to codesign (e.g. "-s MyIdentity").
if [ -z "$1" ]; then
    echo "usage: $0 <codesign args>"
    echo "example: $0 -s MyIdentity"
    exit 1
fi

rm -rf "${TEMPDIR}" "${TEMPLIST}"
mkdir -p "${TEMPDIR}"

# Sign the bundle; --file-list records every file codesign touched.
${CODESIGN} -f --file-list "${TEMPLIST}" "$@" "${BUNDLE}"

# For each signed binary, carve out just the signature blob. Offset and size
# are read from the last page reported by pagestuff for the file.
grep -v CodeResources < "${TEMPLIST}" | while read -r i; do
    TARGETFILE="${BUNDLE}/$(echo "${i}" | sed "s|.*${BUNDLE}/||")"
    SIZE=$(pagestuff "$i" -p | tail -2 | grep size | sed 's/[^0-9]*//g')
    OFFSET=$(pagestuff "$i" -p | tail -2 | grep offset | sed 's/[^0-9]*//g')
    SIGNFILE="${TEMPDIR}/${OUTROOT}/${TARGETFILE}.sign"
    DIRNAME="$(dirname "${SIGNFILE}")"
    mkdir -p "${DIRNAME}"
    echo "Adding detached signature for: ${TARGETFILE}. Size: ${SIZE}. Offset: ${OFFSET}"
    dd if="$i" of="${SIGNFILE}" bs=1 skip="${OFFSET}" count="${SIZE}" 2>/dev/null
done

# CodeResources property lists are copied verbatim.
grep CodeResources < "${TEMPLIST}" | while read -r i; do
    TARGETFILE="${BUNDLE}/$(echo "${i}" | sed "s|.*${BUNDLE}/||")"
    RESOURCE="${TEMPDIR}/${OUTROOT}/${TARGETFILE}"
    DIRNAME="$(dirname "${RESOURCE}")"
    mkdir -p "${DIRNAME}"
    echo "Adding resource for: ${TARGETFILE}"
    cp "${i}" "${RESOURCE}"
done

rm "${TEMPLIST}"
tar -C "${TEMPDIR}" -czf "${OUT}" .
rm -rf "${TEMPDIR}"
echo "Created ${OUT}"
|
/*
* Copyright 2017-2018 IBM Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package newrelic
import (
"context"
"fmt"
)
// Script holds a scripted monitor's source text as returned by the
// New Relic Synthetics API.
type Script struct {
	ScriptText *string `json:"scriptText,omitempty"`
}

// ScriptService handles communication with the script-related endpoints
// of the New Relic Synthetics API.
type ScriptService service
// GetByID fetches the script attached to the monitor with the given id.
// It returns the decoded script, the raw HTTP response, and any error
// encountered while building or executing the request.
//
// Fix: the previous version printed the request-construction error to stdout
// (fmt.Println) before returning it — library code should not write to
// stdout; the caller already receives the error. Commented-out debug code
// was removed as well.
func (s *ScriptService) GetByID(ctx context.Context, id string) (*Script, *Response, error) {
	u := fmt.Sprintf("%v/script/", id)
	req, err := s.client.NewRequest("GET", u, nil)
	if err != nil {
		return nil, nil, err
	}
	script := new(Script)
	resp, err := s.client.Do(ctx, req, script)
	if err != nil {
		return nil, resp, err
	}
	return script, resp, nil
}
// UpdateByID replaces the script attached to the monitor with the given id
// via a PUT request. The server response is decoded back into scriptText;
// the raw HTTP response and any request/transport error are returned.
func (s *ScriptService) UpdateByID(ctx context.Context, scriptText *Script, id string) (*Response, error) {
	u := fmt.Sprintf("%v/script/", id)
	req, err := s.client.NewRequest("PUT", u, scriptText)
	if err != nil {
		return nil, err
	}
	resp, err := s.client.Do(ctx, req, scriptText)
	return resp, err
}
|
<gh_stars>10-100
/*
* The MIT License - http://www.opensource.org/licenses/mit-license
* Copyright (c) 2017 <NAME>.
*/
/*global WorldWind*/
/**
* BookmarkDialog presents a dialog that displays a given url and
* provides a button that copies it to the clipboard.
*
* @param {Knockout} ko
* @param {JQuery} $
* @returns {BookmarkDialog}
*/
define(['knockout', 'jquery', 'jqueryui', 'jquery-growl'],
    function (ko, $) {
        "use strict";
        /**
         * Dialog view model that displays a bookmark URL and copies it to
         * the clipboard. The 'jqueryui' and 'jquery-growl' dependencies are
         * loaded for their jQuery plugin side effects only.
         * @constructor
         * @param {Object} viewFragment HTML fragment with the dialog markup
         * @returns {BookmarkDialog}
         */
        function BookmarkDialog(viewFragment) {
            var self = this;
            // Observable bound to the dialog's URL field.
            this.bookmark = ko.observable("");
            // Load the view fragment into the DOM's body.
            // Wrap the view in a hidden div for use in a JQuery UI dialog.
            var $view = $('<div style="display: none"></div>')
                .append(viewFragment)
                .appendTo($('body'));
            this.view = $view.children().first().get(0);
            /**
             * Opens the dialog used to copy a URL to the clipboard.
             * @param {String} url URL to display/edit
             */
            this.open = function (url) {
                self.bookmark(url);
                // Open the copy-bookmark dialog
                var $view = $(self.view);
                $view.dialog({
                    autoOpen: false,
                    title: "Bookmark"
                });
                $view.dialog("open");
            };
            /**
             * Copies the text from the dialog's bookmark-url input element
             * to the clipboard, reporting the outcome via growl popups.
             * On success the dialog is closed.
             */
            this.copyUrlToClipboard = function () {
                // NOTE(review): assumes the view fragment contains an element
                // with id "bookmark-url" — confirm against the template.
                var $bookmarkUrl = $("#bookmark-url");
                // Select the URL text so it can be copied
                $bookmarkUrl.select();
                try {
                    // Copy the current selection to the clipboard
                    var successful = document.execCommand('copy');
                    if (successful) {
                        $.growl({
                            title: "Bookmark Copied",
                            message: "The link was copied to the clipboard"});
                        $(self.view).dialog("close");
                    } else {
                        $.growl.warning({
                            title: "Bookmark Not Copied!",
                            message: "The link could not be copied"});
                    }
                } catch (err) {
                    // execCommand may throw in browsers that disallow it.
                    console.error('Unable to copy bookmark link.', err.message);
                    $.growl.error({
                        title: "Error",
                        message: "Unable to copy link"});
                }
            };
            // Bind the view to this view model
            ko.applyBindings(this, this.view);
        }
        return BookmarkDialog;
    }
);
|
from __future__ import absolute_import, division, print_function, unicode_literals
from decimal import Decimal
import unittest
from amaascore.assets.synthetic import Synthetic
from amaascore.tools.generate_asset import generate_synthetic
class SyntheticTest(unittest.TestCase):
    """Sanity test for randomly generated Synthetic assets."""

    def setUp(self):
        # Print complete error message on failure
        self.longMessage = True
        # Fresh randomly-generated asset per test.
        self.synthetic = generate_synthetic()
        self.asset_id = self.synthetic.asset_id

    def tearDown(self):
        # No per-test cleanup is required.
        pass

    def test_Synthetic(self):
        # The generator must produce an instance of Synthetic itself
        # (an exact type check, not isinstance).
        self.assertEqual(type(self.synthetic), Synthetic)

if __name__ == '__main__':
    unittest.main()
|
class StateMachine:
    """Small fixed-permutation state machine.

    ``state_action_type`` selects one of three action-to-state mappings;
    ``state(action)`` reports whether the action is recognised and which
    state it maps to. ``column`` is a counter maintained by the caller.
    """

    # action -> resulting state, keyed by state_action_type:
    # type 1 rotates 1->2->3->1, type 2 rotates the other way, type 3 is identity.
    _TRANSITIONS = {
        1: {1: 2, 2: 3, 3: 1},
        2: {1: 3, 2: 1, 3: 2},
        3: {1: 1, 2: 2, 3: 3},
    }

    def __init__(self, state_action_type):
        self.state_action_type = state_action_type
        self.column = 0

    def state(self, action):
        """Return ``(handled, next_state)`` for ``action``.

        An unknown ``state_action_type`` or an unrecognised action yields
        ``(False, state_action_type)``.
        """
        mapping = self._TRANSITIONS.get(self.state_action_type)
        if mapping is not None and action in mapping:
            return (True, mapping[action])
        return (False, self.state_action_type)
# Example usage: feed a fixed action sequence through a type-1 machine and
# advance the column counter each time an action is recognised.
algorithm = [1, 2, 3, 1, 2, 3]
initial_state = 1
sm = StateMachine(initial_state)
for action in algorithm:
    goto_next, new_state = sm.state(action)
    if goto_next:
        sm.column += 1
|
<reponame>nervmaster/minesweeper<gh_stars>0
module MineSweeper
  require_relative "PrettyPrinter"
  require_relative "minesweeper"
  require_relative "States_Constants"

  # Collect the coordinates of every bomb cell in an x-rayed board state.
  # Returns an array of [x, y] pairs.
  def self.parse_bombs(board_state)
    bomb_cells = []
    board_state.each_index do |x|
      board_state[0].each_index do |y|
        if board_state[x][y] == BOMB_CELL
          bomb_cells.push [x, y]
        end
      end
    end
    return bomb_cells
  end

  # Demo 1: win deterministically on a 10x10 board with 10 bombs by flagging
  # every bomb and playing every safe cell.
  game = Minesweeper.new(10, 10, 10)
  puts "Células de Bombas:"
  sheet = parse_bombs(game.board_state(xray: true))
  print sheet
  puts
  PrettyPrinter.new.print(game.board_state(xray: true))
  # A 10x10 board has indices 0..9, so use exclusive ranges; an inclusive
  # 0..10 would also hit the out-of-bounds row/column 10 (off-by-one).
  for i in 0...10
    for j in 0...10
      if not sheet.include?([i, j])
        game.play(i, j)
      else
        game.flag(i, j)
      end
    end
  end
  puts
  if game.victory?
    puts "Você Venceu!"
  else
    puts "Você perdeu!"
  end
  PrettyPrinter.new.print(game.board_state(xray: true))

  # Demo 2: random plays until the game ends one way or the other.
  puts "Novo jogo com jogadas randômicas"
  game = Minesweeper.new(10, 10, 10)
  PrettyPrinter.new.print(game.board_state(xray: true))
  while game.still_playing?
    game.play(rand(10), rand(10))
  end
  if game.victory?
    puts "Você Venceu!"
  else
    puts "Você perdeu!"
  end
  PrettyPrinter.new.print(game.board_state(xray: true))
end # module
|
// Barrel file: re-export the Dropdown component as this module's default export.
export { default } from 'components/Dropdown/Dropdown';
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.