text
stringlengths 27
775k
|
|---|
/// Activity-mode categories (raw Bungie API enum values).
///
/// Kept as `int` constants rather than a Dart enum so the raw API integer
/// values can be used directly when (de)serializing.
class DestinyActivityModeCategory {
  // Private constructor: this class is a constants namespace only.
  DestinyActivityModeCategory._();

  /// No category assigned.
  static const int None = 0;

  /// Player-versus-environment activities.
  static const int PvE = 1;

  /// Player-versus-player activities.
  static const int PvP = 2;

  /// Competitive PvE activities.
  static const int PvECompetitive = 3;
}
|
// SPDX-License-Identifier: GPL-2.0+
/**
* ufs.c - UFS specific U-boot commands
*
* Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
*
*/
#include <common.h>
#include <command.h>
#include <ufs.h>
/*
 * do_ufs() - handler for the "ufs" shell command.
 *
 * Supported syntax:
 *   ufs init        - probe all UFS controllers
 *   ufs init <dev>  - probe only controller <dev> (decimal index)
 *
 * Return: CMD_RET_SUCCESS on success, CMD_RET_FAILURE if probing the
 * requested device fails, CMD_RET_USAGE for unrecognized arguments.
 */
static int do_ufs(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[])
{
	int dev, ret;

	/* Anything other than "ufs init [dev]" falls through to usage. */
	if (argc < 2 || strcmp(argv[1], "init"))
		return CMD_RET_USAGE;

	if (argc != 3) {
		/* No index given: probe everything (result ignored). */
		ufs_probe();
		return CMD_RET_SUCCESS;
	}

	dev = simple_strtoul(argv[2], NULL, 10);
	ret = ufs_probe_dev(dev);
	if (ret)
		return CMD_RET_FAILURE;

	return CMD_RET_SUCCESS;
}
/* Command-table entry registering "ufs" (max 3 args); handled by do_ufs(). */
U_BOOT_CMD(ufs, 3, 1, do_ufs,
"UFS sub system",
"init [dev] - init UFS subsystem\n"
);
|
const { createLeader, STANCES, FACTIONS, SPECIAL_RULES } = require('./../scenarioConstants');
const { GAME_PHASES } = require('./../../gameConstants');
// Leader avatars for the playable nations.
// NOTE(review): 'Theodén' looks mis-accented — the common spelling is
// 'Théoden'; confirm against the displayed label before changing the string.
const THEODEN = createLeader('Theodén', './assets/avatarBackgrounds/lotr/theoden.png');
const DENETHOR = createLeader('Denethor II', './assets/avatarBackgrounds/lotr/denethor.png');
const SAURON = createLeader('Sauron', './assets/avatarBackgrounds/lotr/sauron.png');
const SARUMAN = createLeader('Saruman', './assets/avatarBackgrounds/lotr/saruman.png');

// Scenario definition for the Middle Earth map.
const WAR_OF_THE_RING_SCENARIO = {
  id: 'WAR_OF_THE_RING',
  // presumably hides the scenario from the picker while unfinished — confirm
  disabled: true,
  displayName: 'War of the one ring',
  setupPhase: GAME_PHASES.PLAYER_SETUP,
  map: 'middleEarth.svg',
  background: 'assets/scenarios/warofthering.jpg',
  description: 'Play either as the forces of good or evil in the battle for Middle Earth.',
  specialRules: [
    SPECIAL_RULES.ALLIES,
    SPECIAL_RULES.FORTS,
    SPECIAL_RULES.CAPITALS,
    SPECIAL_RULES.HEROES,
  ],
  factions: [
    FACTIONS.NONE,
    FACTIONS.FORCES_OF_SAURON,
    FACTIONS.FREE_PEOPLES_OF_MIDDLE_EARTH
  ],
  // NOTE(review): the `LEADER` key is uppercase while sibling keys are
  // lowercase — confirm consumers expect this casing before normalizing.
  playableCountries: [
    { name: 'Rohan', stance: STANCES.AT_WAR, faction: FACTIONS.FREE_PEOPLES_OF_MIDDLE_EARTH, LEADER: THEODEN },
    { name: 'Gondor', stance: STANCES.AT_WAR, faction: FACTIONS.FREE_PEOPLES_OF_MIDDLE_EARTH, LEADER: DENETHOR },
    { name: 'Mordor', stance: STANCES.AT_WAR, faction: FACTIONS.FORCES_OF_SAURON, LEADER: SAURON },
    { name: 'Isengard', stance: STANCES.AT_WAR, faction: FACTIONS.FORCES_OF_SAURON, LEADER: SARUMAN }
  ],
  // Countries on the map that no player controls.
  nonPlayableCountries: [
    { name: 'Shire', stance: STANCES.AT_WAR, faction: FACTIONS.FREE_PEOPLES_OF_MIDDLE_EARTH },
    { name: 'Rivendell', stance: STANCES.AT_WAR, faction: FACTIONS.FREE_PEOPLES_OF_MIDDLE_EARTH },
    { name: 'Khand', stance: STANCES.AT_WAR, faction: FACTIONS.FORCES_OF_SAURON },
    { name: 'Harad', stance: STANCES.AT_WAR, faction: FACTIONS.FORCES_OF_SAURON },
    { name: 'Rhun', stance: STANCES.AT_WAR, faction: FACTIONS.FORCES_OF_SAURON }
  ]
};

module.exports = WAR_OF_THE_RING_SCENARIO;
|
package driver
import (
"fmt"
"github.com/jinzhu/gorm"
_ "github.com/lib/pq" // initialize postgres driver
)
// OpenCn opens a GORM connection to the given PostgreSQL database with SSL
// disabled, configures singular table names, and enables SQL logging when
// debug is true. The caller owns the returned *gorm.DB.
func OpenCn(host string, port string, user string, password string, dbName string, debug bool) (*gorm.DB, error) {
	dsn := fmt.Sprintf(
		"host=%s port=%s user=%s password=%s dbname=%s sslmode=disable",
		host, port, user, password, dbName,
	)

	db, err := gorm.Open("postgres", dsn)
	if err != nil {
		return nil, fmt.Errorf("db connection failed: %w", err)
	}

	// Table names match struct names exactly (no pluralization).
	db.SingularTable(true)
	db.LogMode(debug)

	return db, nil
}
|
package com.github.lmdb4s
import bindings.Library
import jnr.ffi.{ Pointer => JnrPointer }
object KeyVal {
  // Shared JNR memory manager used to allocate the off-heap MDB_val structs.
  private val MEM_MGR = Library.RUNTIME.getMemoryManager
}
/**
 * Represents off-heap memory holding a key and value pair.
 *
 * Backed by native MDB_val structs allocated through JNR; the proxy converts
 * between those structs and user-facing buffers of type T.
 *
 * @tparam T buffer type
 */
final class KeyVal[T] private[lmdb4s](val proxy: BufferProxy[T,JnrPointer]) extends IKeyVal[T,JnrPointer] {

  import BufferProxyLike._

  require(proxy != null, "proxy is null")

  // User-facing key and value buffers, allocated (and later freed) by the proxy.
  private var k: T = proxy.allocate()
  private var v: T = proxy.allocate()
  private var closed = false

  // Native MDB_val struct holding the key, plus its cached native address.
  private val ptrKey: JnrPointer = KeyVal.MEM_MGR.allocateTemporary(MDB_VAL_STRUCT_SIZE, false)
  private val ptrKeyAddr = ptrKey.address()

  // Two contiguous MDB_val structs; the pair layout is required by
  // MDB_MULTIPLE puts (see valInMulti below).
  private val ptrArray: JnrPointer = KeyVal.MEM_MGR.allocateTemporary(MDB_VAL_STRUCT_SIZE * 2, false)

  // The value struct is the first element of ptrArray.
  private val ptrVal: JnrPointer = ptrArray.slice(0, MDB_VAL_STRUCT_SIZE)
  private val ptrValAddr = ptrVal.address()

  /** Frees the proxy-allocated buffers; subsequent calls are no-ops. */
  override def close(): Unit = {
    if (closed) return
    closed = true
    proxy.deallocate(k)
    proxy.deallocate(v)
  }

  private[lmdb4s] def key: T = k

  private[lmdb4s] def getKeyAsBytes(): Array[Byte] = proxy.getBytes(key)

  /** Writes the given buffer into the native key struct. */
  private[lmdb4s] def keyIn(key: T): Unit = {
    proxy.in(key, ptrKey, ptrKeyAddr)
  }

  /** Refreshes k from the native key struct and returns it. */
  private[lmdb4s] def keyOut: T = {
    k = proxy.out(k, ptrKey, ptrKeyAddr)
    k
  }

  private[lmdb4s] def pointerKey: JnrPointer = ptrKey

  private[lmdb4s] def pointerVal: JnrPointer = ptrVal

  private[lmdb4s] def `val`: T = v

  /** Writes the given buffer into the native value struct. */
  private[lmdb4s] def valIn(`val`: T): Unit = {
    proxy.in(`val`, ptrVal, ptrValAddr)
  }

  /** Writes v into the native value struct with an explicit size. */
  private[lmdb4s] def valIn(size: Int): Unit = {
    proxy.in(v, size, ptrVal, ptrValAddr)
  }

  /**
   * Prepares an array suitable for presentation as the data argument to a
   * <code>MDB_MULTIPLE</code> put.
   *
   * <p>
   * The returned array is equivalent of two <code>MDB_val</code>s as follows:
   *
   * <ul>
   * <li>ptrVal1.data = pointer to the data address of passed buffer</li>
   * <li>ptrVal1.size = size of each individual data element</li>
   * <li>ptrVal2.data = unused</li>
   * <li>ptrVal2.size = number of data elements (as passed to this method)</li>
   * </ul>
   *
   * @param val a user-provided buffer with data elements (required)
   * @param elements number of data elements the user has provided
   * @return a properly-prepared pointer to an array for the operation
   */
  private[lmdb4s] def valInMulti(`val`: T, elements: Int): JnrPointer = {
    val ptrVal2SizeOff = MDB_VAL_STRUCT_SIZE + STRUCT_FIELD_OFFSET_SIZE
    ptrArray.putLong(ptrVal2SizeOff, elements) // ptrVal2.size
    proxy.in(`val`, ptrVal, ptrValAddr) // ptrVal1.data
    // Element size is derived from the total buffer size written by proxy.in.
    val totalBufferSize = ptrVal.getLong(STRUCT_FIELD_OFFSET_SIZE)
    val elemSize = totalBufferSize / elements
    ptrVal.putLong(STRUCT_FIELD_OFFSET_SIZE, elemSize) // ptrVal1.size
    ptrArray
  }

  /** Refreshes v from the native value struct and returns it. */
  private[lmdb4s] def valOut: T = {
    v = proxy.out(v, ptrVal, ptrValAddr)
    v
  }
}
|
import {Directive, EventEmitter, HostBinding, HostListener, Input, Output} from '@angular/core';
import {isNil} from 'lodash';
@Directive({
  selector: '[ngxDropdown]',
  exportAs: 'ngxDropdown'
})
export class DropdownDirective {
  toggleElement: any;

  // tslint:disable-next-line:no-input-rename
  @Input('open') internalOpen = false;

  @Output() openChange = new EventEmitter<boolean>();

  /** Mirrors the open state onto the host's "show" CSS class. */
  @HostBinding('class.show') get isOpen(): boolean {
    return this.internalOpen;
  }

  /** Escape closes the dropdown. */
  @HostListener('keyup.esc')
  onKeyupEsc() {
    this.close();
  }

  /** Any non-right click outside the toggle element closes the dropdown. */
  @HostListener('document:click', ['$event'])
  onDocumentClick(event: MouseEvent) {
    const rightClick = event.button === 2;
    if (rightClick || this.isEventFromToggle(event)) {
      return;
    }
    this.close();
  }

  /** Opens the dropdown; emits openChange only on an actual transition. */
  open() {
    if (this.internalOpen) {
      return;
    }
    this.internalOpen = true;
    this.openChange.emit(true);
  }

  /** Closes the dropdown; emits openChange only on an actual transition. */
  close() {
    if (!this.internalOpen) {
      return;
    }
    this.internalOpen = false;
    this.openChange.emit(false);
  }

  /** Flips between the open and closed states. */
  toggle() {
    this.isOpen ? this.close() : this.open();
  }

  private isEventFromToggle(event: MouseEvent) {
    return !isNil(this.toggleElement) && this.toggleElement.contains(event.target);
  }
}
|
### 简介
使用的是 [nuka-carousel](https://github.com/react-component/nuka-carousel)
### API
|
using System;
namespace MicaForEveryone.Win32
{
/// <summary>
/// Event data for a window-procedure (WndProc) event.
/// </summary>
public class WndProcEventArgs : EventArgs
{
    /// <summary>Creates event data for the given window.</summary>
    /// <param name="windowHandle">Native handle of the window that received the message.</param>
    public WndProcEventArgs(IntPtr windowHandle)
    {
        WindowHandle = windowHandle;
    }

    /// <summary>Native handle (HWND) of the window.</summary>
    public IntPtr WindowHandle { get; }
}
}
|
using System.Threading.Tasks;
using Newtonsoft.Json;
using Spectacles.NET.Types;
namespace Skyra.Core.Cache.Models
{
/// <summary>
/// Cached representation of a guild. JSON property names are shortened to
/// single letters, presumably to keep cache entries small — confirm against
/// the cache layer before changing them.
/// </summary>
public sealed class CoreGuild : ICoreBaseStructure<CoreGuild>
{
    public CoreGuild(IClient client, ulong id, string name, string region, string? icon, Permission? permissions,
        int? memberCount, string ownerId)
    {
        Client = client;
        Id = id;
        Name = name;
        Region = region;
        Icon = icon;
        Permissions = permissions;
        MemberCount = memberCount;
        OwnerId = ownerId;
    }

    /// <summary>Guild ID.</summary>
    [JsonProperty("id")]
    public ulong Id { get; set; }

    /// <summary>Guild name.</summary>
    [JsonProperty("n")]
    public string Name { get; set; }

    /// <summary>Region identifier.</summary>
    [JsonProperty("r")]
    public string Region { get; set; }

    /// <summary>Icon; null when the guild has none.</summary>
    [JsonProperty("i")]
    public string? Icon { get; set; }

    /// <summary>Permissions, when known.</summary>
    [JsonProperty("p")]
    public Permission? Permissions { get; set; }

    /// <summary>Member count, when known.</summary>
    [JsonProperty("m")]
    public int? MemberCount { get; set; }

    /// <summary>ID of the guild owner.</summary>
    [JsonProperty("o")]
    public string OwnerId { get; set; }

    /// <summary>Owning client; excluded from cache serialization.</summary>
    [JsonIgnore]
    public IClient Client { get; }

    /// <summary>
    /// Copies the mutable fields from <paramref name="value"/> onto this
    /// instance (Id and Client are left untouched) and returns this.
    /// </summary>
    public CoreGuild Patch(CoreGuild value)
    {
        Name = value.Name;
        Region = value.Region;
        Icon = value.Icon;
        Permissions = value.Permissions;
        MemberCount = value.MemberCount;
        OwnerId = value.OwnerId;
        return this;
    }

    /// <summary>Creates a field-for-field copy sharing the same client.</summary>
    public CoreGuild Clone()
    {
        return new CoreGuild(Client,
            Id,
            Name,
            Region,
            Icon,
            Permissions,
            MemberCount,
            OwnerId);
    }

    /// <summary>Fetches all cached channels belonging to this guild.</summary>
    public async Task<CoreGuildChannel[]> GetChannelsAsync()
    {
        return await Client.Cache.GuildChannels.GetAllAsync(Id.ToString());
    }

    /// <summary>Fetches all cached roles belonging to this guild.</summary>
    public async Task<CoreGuildRole[]> GetRolesAsync()
    {
        return await Client.Cache.GuildRoles.GetAllAsync(Id.ToString());
    }

    /// <summary>Fetches all cached members belonging to this guild.</summary>
    public async Task<CoreGuildMember[]> GetMembersAsync()
    {
        return await Client.Cache.GuildMembers.GetAllAsync(Id.ToString());
    }

    /// <summary>Builds a cacheable CoreGuild from a gateway guild payload.</summary>
    public static CoreGuild From(IClient client, Guild guild)
    {
        return new CoreGuild(client, ulong.Parse(guild.Id), guild.Name, guild.Region, guild.Icon, guild.Permissions,
            guild.MemberCount, guild.OwnerId);
    }
}
}
|
#!/usr/bin/env bash
set -e

# Download gradle-6.3-all.zip into the given directory unless already present.
# Usage: <script> <download-path>
DOWNLOAD_PATH=$1

# Fail fast on a missing argument instead of writing to "/gradle-6.3-all.zip".
if [[ -z "$DOWNLOAD_PATH" ]]; then
    echo "usage: $0 <download-path>" >&2
    exit 1
fi

echo "下载 gradle"
if [[ ! -f "$DOWNLOAD_PATH/gradle-6.3-all.zip" ]];then
    # axel: parallel downloader; -n8 uses 8 connections.
    axel -n8 https://mirrors.aliyun.com/macports/distfiles/gradle/gradle-6.3-all.zip \
        --output="${DOWNLOAD_PATH}/gradle-6.3-all.zip"
fi
|
// this is reviewing by following along the below article
// https://codeburst.io/learn-let-var-and-const-in-easiest-way-with-guarantee-e6ecf551018a
// Demonstrates `var` hoisting: `status` is function-scoped, so it is visible
// outside the `if` block. For age > 18 this logs 'adult'; otherwise the
// hoisted declaration is never assigned and it logs `undefined`.
function adult5(age) {
  if (age > 18) {
    var status = 'adult';
  }
  console.log(status);
}
// adult5(20);
// Demonstrates `let` block scoping: `status` exists only inside the `if`
// block, so the console.log below throws a ReferenceError when called —
// which is why the call further down is left commented out.
function adult6(age) {
  if (age > 18) {
    let status = 'adult';
  }
  console.log(status);
}
// adult6(20);
// ES5: `var` is function/global-scoped, so the loop's `var num` is the SAME
// binding as the outer one; after the loop it holds 3.
var num = 10;
for (var num = 0; num < 3; num++) {
  console.log('num inside for loop is: ' + num)
}
console.log('num outside for loop ' + num);

// ES6: `let` is block-scoped. The demo is wrapped in a block because a
// top-level `let num` would be a SyntaxError here — it would redeclare the
// `var num` above in the same scope, preventing the whole file from parsing.
{
  let num = 10;
  for (let num = 0; num < 3; num++) {
    console.log('num inside for loop is: ' + num)
  }
  // The loop's `num` only shadowed the outer `let`, which is still 10.
  console.log('num outside for loop ' + num);
}
|
+++
title = "7.1 Get Vault Token"
chapter = false
weight = 1
+++
[Deep Link to AWS Secrets Manager](https://console.aws.amazon.com/secretsmanager/home?region=us-west-2#/listSecrets)

Select the __Secret name__ (Vault-Workshop-vault-secrets-?????)

Click __Retrieve secret value__

{{% notice tip %}}
Leave this tab open and Switch back to Terraform Cloud
{{% /notice %}}
|
#!/bin/bash -e

docker images

# rr_version must define RR_VERSION (the image tag to push).
# Use an explicit ./ — a bare `source rr_version` searches $PATH first.
source ./rr_version
if [[ -z "${RR_VERSION:-}" ]]; then
    echo "RR_VERSION is not set (check rr_version)" >&2
    exit 1
fi

echo "log into tascape (https://hub.docker.com/u/tascape/)"
docker login -u tascape

echo "push images to tascape"
for IMG in nginx tomee mysql; do
    docker push "tascape/reactor-report-$IMG:${RR_VERSION}"
    docker push "tascape/reactor-report-$IMG:latest"
done
|
//
// MiniMapPointObject.cs
// ProductName Ling
//
// Created by on 2021.09.13
//
using UnityEngine;
using Utility;
using Zenject;
using Utility.ShaderEx;
namespace Ling.Map
{
/// <summary>
/// An object drawn on the mini-map; it follows a target scene object.
/// </summary>
[RequireComponent(typeof(MeshRenderer))]
public class MiniMapPointObject : MonoBehaviour
{
    #region Constants, classes, enums
    #endregion

    #region Public fields
    #endregion

    #region Private fields

    // Shader cache; injected by Zenject.
    [Inject] private Utility.ShaderEx.IShaderContainer _shaderContainer;

    // Tint applied to this point's material (set in the Inspector).
    [SerializeField] private Color _color;

    #endregion

    #region Properties
    #endregion

    #region Public / protected methods

    /// <summary>Makes this mini-map point track the given scene object.</summary>
    public void SetFollowObject(GameObject follow)
    {
        _follower.SetFolow(follow.transform);
    }

    #endregion

    #region Private methods

    private MiniMapObjectFollower _follower;
    private Material _material;

    #endregion

    #region MonoBehaviour

    /// <summary>
    /// Initialization: sets up the follower and a per-instance material.
    /// </summary>
    void Awake()
    {
        _follower = gameObject.GetOrAddComponent<MiniMapObjectFollower>();

        var meshRenderer = GetComponent<MeshRenderer>();
        // A new Material instance is created (not shared), so it must be
        // destroyed in OnDestroy to avoid leaking it.
        _material = new Material(_shaderContainer.GetOrCreateCache(ShaderName.SurfaceLightOff));
        _material.SetColor(ShaderProperty.Color, _color);
        meshRenderer.material = _material;
    }

    void OnDestroy()
    {
        GameObject.Destroy(_material);
    }

    #endregion
}
}
|
<?php
use yii\db\Migration;
/**
* Handles the creation of table `{{%mp_users}}`.
*/
class m200421_134513_create_mp_users_table extends Migration
{
    /**
     * {@inheritdoc}
     *
     * Creates the `mp_users` table holding both locally-registered and
     * OAuth-authenticated users. `username` and `email` are individually
     * unique and additionally covered by a composite unique constraint.
     */
    public function safeUp()
    {
        $this->createTable('{{%mp_users}}', [
            'id' => $this->primaryKey(),
            'username' => $this->string(255)->notNull(),
            // NOTE(review): presumably stores a password hash — confirm.
            'password' => $this->string(255)->notNull(),
            // Emailed code used to activate the account; null once consumed.
            'activation_code' => $this->string(60)->defaultValue(NULL),
            'status_activation_code' => $this->integer(11)->defaultValue(0),
            // OAuth identity: provider name plus provider-side user id.
            'oauth_provider' => $this->string(20)->notNull(),
            'oauth_uid' => $this->string(100),
            'authKey' => $this->string(255)->defaultValue(NULL),
            'accessToken' => $this->string(255)->defaultValue(NULL),
            'first_name' => $this->string(255)->defaultValue(NULL),
            'last_name' => $this->string(255)->defaultValue(NULL),
            'email' => $this->string(255)->defaultValue(NULL),
            'picture' => $this->string(255)->defaultValue(NULL),
        ]);
    }

    /**
     * {@inheritdoc}
     */
    public function safeDown()
    {
        $this->dropTable('{{%mp_users}}');
    }
}
|
# Re-encode every MapBasic (*.mb) source file in the project folder as UTF-8.
Set-Location 'C:\Users\crbk01\OneDrive - Region Gotland\Till Github\Mapinfo\MapInfoTabToCsv'

# Each file is read fully first, then rewritten in place with UTF-8 encoding.
Get-ChildItem *.mb | ForEach-Object {
    Set-Content $_ -Encoding utf8 -Value (Get-Content $_)
}
|
; Pokédex entry text. "@" terminates a string; `next` and `page` appear to be
; text macros for line and page breaks — confirm against the text engine.
db "RENDEZVOUS@" ; species name
db "Its heart-shaped"
next "body makes it"
next "popular. In some"
page "regions, you would"
next "give a LUVDISC to"
next "someone you love.@"
|
/** Type-level Church-encoded natural numbers. */
object Naturals {
  // A natural is encoded by how it folds a successor `s` over a zero `z`.
  trait NAT {
    type a[s[_ <: NAT] <: NAT, z <: NAT] <: NAT
    // Normal form: re-fold using the canonical SUCC/ZERO constructors.
    type v = a[SUCC, ZERO]
  }
  // Zero: folding yields the zero argument unchanged.
  final class ZERO extends NAT {
    type a[s[_ <: NAT] <: NAT, z <: NAT] = z
  }
  // Successor: folding applies `s` once more than the predecessor does.
  final class SUCC[n <: NAT] extends NAT {
    type a[s[_ <: NAT] <: NAT, z <: NAT] = s[n#a[s, z]]
  }
  type _0 = ZERO
  type _1 = SUCC[_0]
  type _2 = SUCC[_1]
  type _3 = SUCC[_2]
  type _4 = SUCC[_3]
  type _5 = SUCC[_4]
  type _6 = SUCC[_5]
  // crashes scala-2.8.0 beta1
  // Multiplication: fold n, using "add m" (m folded with s) as the successor.
  trait MUL[n <: NAT, m <: NAT] extends NAT {
    // Currying adapter: fixes the successor argument of a binary fold.
    trait curry[n[_, _], s[_]] { type f[z <: NAT] = n[s, z] }
    type a[s[_ <: NAT] <: NAT, z <: NAT] = n#a[curry[m#a, s]#f, z]
  }
}
// Appears to be a compiler regression test exercising F-bounded
// higher-kinded type parameters — confirm before repurposing.
object Test {
  trait Bar[X[_]]
  trait Baz[S[_] <: Bar[S]] {
    type Apply[T]
  }
  // Baz[V]#Apply is used as the type constructor argument to Bar.
  trait Foo[V[_] <: Bar[V]] extends Bar[Baz[V]#Apply]
}
|
package stasis.server.api.routes
import akka.actor.typed.scaladsl.LoggerOps
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.Materializer
import stasis.server.model.staging.ServerStagingStore
import stasis.server.security.CurrentUser
import stasis.shared.api.responses.DeletedPendingDestaging
/**
 * API routes for server staging:
 *
 *   GET    /            - list pending destaging operations
 *   DELETE /&lt;crate-id&gt;  - drop the pending destaging operation for a crate
 */
class Staging()(implicit ctx: RoutesContext) extends ApiRoutes {
  import de.heikoseeberger.akkahttpplayjson.PlayJsonSupport._
  import stasis.shared.api.Formats._

  override implicit protected def mat: Materializer = ctx.mat

  def routes(implicit currentUser: CurrentUser): Route =
    concat(
      pathEndOrSingleSlash {
        get {
          // Requires view-level access to the staging store.
          resource[ServerStagingStore.View.Service] { view =>
            view.list().map { pendingDestagingOps =>
              log.debugN(
                "User [{}] successfully retrieved [{}] pending destaging operations",
                currentUser,
                pendingDestagingOps.size
              )
              discardEntity & complete(pendingDestagingOps.values)
            }
          }
        }
      },
      path(JavaUUID) { crateId =>
        concat(
          delete {
            // Requires manage-level access to the staging store.
            resource[ServerStagingStore.Manage.Service] { manage =>
              manage.drop(crateId).map { deleted =>
                if (deleted) {
                  log.debugN("User [{}] successfully deleted destaging operation for crate [{}]", currentUser, crateId)
                } else {
                  log.warnN("User [{}] failed to delete destaging operation for crate [{}]", currentUser, crateId)
                }
                // Response reports whether anything actually existed to delete.
                discardEntity & complete(DeletedPendingDestaging(existing = deleted))
              }
            }
          }
        )
      }
    )
}
object Staging {
  /** Creates the staging routes using the implicit routes context. */
  def apply()(implicit ctx: RoutesContext): Staging = new Staging()
}
|
---
layout: default
title: "K sum Copy"
categories: algos
author: lazydeveloper
permalink: /testt
---
sdfds
|
#!/usr/bin/env bash
# Crash on error
set -e

# Resolve the directory containing this script, following symlinks.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
  DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
  SOURCE="$(readlink "$SOURCE")"
  # A relative symlink target is resolved against the symlink's directory.
  [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

# Create a directory to store the logs
LOGS="$DIR/../logs/$(date +'%Y_%m_%d_%H:%M')"
mkdir -p "$LOGS"

echo "Starting with a clean directory"
# Quote $DIR (paths may contain spaces) while keeping brace expansion outside
# the quotes so it still expands.
rm -rf "$DIR/../"{nsolid,nsolid-console,nsolid-hub}

echo "Generating image file..."
"$DIR/gen-images.sh" > "$DIR/../templates/images.js"

echo "Running npm install..."
cd "$DIR"
npm install

echo "Generating Dockerfiles..."
"$DIR/gen-dockerfiles.js"

echo "Building etcd-ports tests..."
cd "$DIR/../tests/etcd-ports"
make
cd "$DIR"

# Build images and push them to the registry
echo "Running dante..."
cd "$DIR/../"
dante test -r 2 > "$LOGS/output.md"
dante push -r 2 > "$LOGS/push.md"
cd "$DIR"
|
/***************************************************************************
* GroupProgressManager.cs
*
* Copyright (C) 2007 Michael C. Urbanski
* Written by Mike Urbanski <michael.c.urbanski@gmail.com>
****************************************************************************/
/* THIS FILE IS LICENSED UNDER THE MIT LICENSE AS OUTLINED IMMEDIATELY BELOW:
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
using System;
using System.ComponentModel;
using System.Collections.Generic;
namespace Migo.TaskCore
{
/// <summary>
/// Aggregates the progress of a group of tasks into a single 0-100 value.
/// Each tracked task contributes 100 "ticks" to the group total; overall
/// progress is the ratio of accumulated ticks to that total.
/// </summary>
public class GroupProgressManager<T> where T : Task
{
    private int progress;      // last computed aggregate progress (0-100)
    private int oldProgress;   // previous aggregate, used to suppress duplicate events
    private int totalTicks;    // 100 per tracked task
    private int currentTicks;  // sum of per-task progress values

    private Dictionary<T,int> progDict;  // per-task progress (0-100)

    /// <summary>Raised whenever the aggregate progress value changes.</summary>
    public event EventHandler<ProgressChangedEventArgs> ProgressChanged;

    public GroupProgressManager ()
    {
        progDict = new Dictionary<T,int> ();
    }

    /// <summary>Adds a task to the group and recomputes aggregate progress.</summary>
    public virtual void Add (T task)
    {
        Add (task, true);
    }

    /// <summary>Adds several tasks, firing a single progress update at the end.</summary>
    public virtual void Add (IEnumerable<T> tasks)
    {
        foreach (T task in tasks) {
            Add (task, false);
        }

        OnProgressChanged ();
    }

    /// <summary>Adds a single task; the task must be unstarted (Ready, zero progress).</summary>
    /// <exception cref="ArgumentException">The task is already tracked.</exception>
    /// <exception cref="InvalidOperationException">The task has already run or is running.</exception>
    protected virtual void Add (T task, bool update)
    {
        if (progDict.ContainsKey (task)) {
            throw new ArgumentException ("Task was added previously");
        } else if (task.Progress != 0 ||
            task.Status != TaskStatus.Ready) {
            throw new InvalidOperationException (
                "Progress Manager: Task has already been, or is currently being executed"
            );
        }

        totalTicks += 100;
        progDict.Add (task, 0);

        if (update) {
            OnProgressChanged ();
        }
    }

    /// <summary>Removes a task from the group and recomputes aggregate progress.</summary>
    public virtual void Remove (T task)
    {
        Remove (task, true);
    }

    /// <summary>Removes several tasks, firing a single progress update at the end.</summary>
    public virtual void Remove (IEnumerable<T> tasks)
    {
        foreach (T task in tasks) {
            try {
                Remove (task, false);
            } catch { continue; }
        }

        OnProgressChanged ();
    }

    /// <summary>
    /// Removes a single task. A completed task's ticks remain in the totals so
    /// the aggregate does not move backwards; an incomplete task's ticks are
    /// subtracted from both the current and total counts.
    /// </summary>
    protected virtual void Remove (T task, bool update)
    {
        if (task.Progress == 100) {
            if (progDict.ContainsKey (task)) {
                progDict.Remove (task);
            }
        } else {
            int prog = 0;

            if (progDict.TryGetValue (task, out prog)) {
                progDict.Remove (task);
                currentTicks -= prog;
                totalTicks -= 100;
            }

            if (update) {
                OnProgressChanged ();
            }
        }
    }

    /// <summary>Clears all tracked tasks and resets progress to zero.</summary>
    public virtual void Reset ()
    {
        progress = 0;
        oldProgress = 0;
        totalTicks = 0;
        currentTicks = 0;
        progDict.Clear ();
    }

    /// <summary>Records a task's new progress value (0-100) and updates the aggregate.</summary>
    /// <exception cref="ArgumentOutOfRangeException">newProg is negative.</exception>
    public virtual void Update (T task, int newProg)
    {
        if (newProg < 0) {
            // Bug fix: the first constructor argument is the parameter NAME;
            // the human-readable message belongs in the second argument.
            throw new ArgumentOutOfRangeException (
                "newProg", "newProg must be greater than or equal to 0"
            );
        }

        int delta = 0;
        int prog;

        if (progDict.TryGetValue (task, out prog)) {
            if (prog != newProg) {
                progDict[task] = newProg;
                delta = newProg - prog;
            }
        }

        if (delta != 0) {
            currentTicks += delta;
            OnProgressChanged ();
        }
    }

    /// <summary>
    /// Recomputes the aggregate percentage and raises ProgressChanged only
    /// when the rounded value actually changed.
    /// </summary>
    protected virtual void OnProgressChanged ()
    {
        if (totalTicks == 0) {
            progress = 0;
        } else {
            progress = Convert.ToInt32 (
                (currentTicks * 100) / totalTicks
            );
        }

        if (progress != oldProgress) {
            oldProgress = progress;

            EventHandler<ProgressChangedEventArgs> handler = ProgressChanged;

            if (handler != null) {
                handler (
                    this, new ProgressChangedEventArgs (progress, null)
                );
            }
        }
    }
}
}
|
---
layout: docs
title: 'Collection.until()'
---
Stop iterating the collection once given filter returns true.
### Syntax
```javascript
collection.until(filterFunction, bIncludeStopEntry)
```
### Parameters
<table>
<tr><td>filterFunction: Function</td><td>a function (item) {} that, when it returns a truthy value, stops the rest of the iteration</td></tr>
<tr><td>bIncludeStopEntry: Boolean</td><td><i>(Optional)</i> If true, the collection will include the stop entry on which the filter function returns true</td></tr>
</table>
### Remarks
Works similarly to [limit()](/docs/Collection/Collection.limit()) but rather than specifying a number, you specify a filter function to execute on each item and when it returns true, the iteration will stop.
### Sample
```javascript
let cancelled = false;
function getLogs() {
cancelled = false;
return db.logEntries
.where('date').between(yesterday, today)
.until(() => cancelled)
.toArray();
}
// To cancel the iteration, set cancelled = true
function cancel() {
cancelled = true;
}
```
### Return Value
This Collection instance (**this**)
|
import { Component, OnInit } from '@angular/core';
import { Team } from '../../shared/models/team';
import { Competitions } from '../../shared/models/competitions';
import { Area } from '../../shared/models/area';
import { ApiFootballCompetitionsService } from '../../shared/service/api-football-competitions.service';
import { ApiFotballAreasService } from '../../shared/service/api-fotball-areas.service';
import { AppiFootballTeamsService } from '../../shared/service/appi-football-teams.service';
@Component({
  selector: 'fb-aside',
  templateUrl: './aside.component.html',
  styleUrls: ['./aside.component.scss']
})
export class AsideComponent implements OnInit {
  // Template-bound collections, filled asynchronously after init.
  competencias: Array<Competitions> = new Array<Competitions>();
  areas: Array<Area> = new Array<Area>();
  equipos: Array<Team> = new Array<Team>();

  constructor(private afcs: ApiFootballCompetitionsService,
              private afas: ApiFotballAreasService,
              private afts: AppiFootballTeamsService) { }

  ngOnInit() {
    this.getCompetitions();
    this.getAreas();
    this.getEquipos();
  }

  /** Loads all competitions into `competencias`. */
  getCompetitions() {
    this.afcs.getCompetitions().subscribe(
      data => {
        for (const competition of data.competitions) {
          this.competencias.push(competition);
        }
      },
      error => {
        console.log('error al conectarse al servidor');
        console.log(error);
      });
  }

  /** Loads all areas into `areas`. */
  getAreas() {
    this.afas.getAreas().subscribe(
      data => {
        for (const area of data.areas) {
          this.areas.push(area);
        }
      },
      error => {
        console.log('Error al traer las areas');
        console.log(error);
      });
  }

  /** Loads all teams into `equipos` and logs the result. */
  getEquipos() {
    this.afts.getTeams().subscribe(
      data => {
        for (const equipo of data.teams) {
          this.equipos.push(equipo);
        }
        console.log('Equipos');
        console.log(this.equipos);
      },
      error => {
        console.log('Error al traer los equipos');
        console.log(error);
      });
  }
}
|
module Carto
  # Grants organization-wide permissions on a table by delegating to the
  # table's own permission helpers.
  class OrganizationPermission
    # Gives organization members read access to +table+.
    def add_read_permission(table)
      table.add_organization_read_permission
    end

    # Gives organization members read/write access to +table+.
    def add_read_write_permission(table)
      table.add_organization_read_write_permission
    end
  end
end
|
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
//package cc.factorie.optimize
//
//import junit.framework._
//import Assert._
//import org.scalatest.junit.JUnitSuite
//import cc.factorie.maths.ArrayOps
//import cc.factorie.la._
//import util.Random
//import cc.factorie.optimize._
//
///**
// * Created by IntelliJ IDEA.
// * User: gdruck
// * Date: Sep 30, 2010
// * Time: 2:02:12 PM
// * To change this template use File | Settings | File Templates.
// */
//
//class TestOptimize extends TestCase {
//
// def testLineOptimizer = {
// var function = new SimpleFunction()
// var optimizer = new BackTrackLineOptimizer(function)
// // with a step of 1, this should jump over the
// // maximum and need to backtrack
// val gradient = new Array[Double](1)
// function.getOptimizableGradient(gradient)
// optimizer.optimize(gradient,1)
// println(function.optimizableParameter(0))
// assertEquals(function.optimizableParameter(0),2.5,1e-6)
// }
//
// def testLMBFGS = {
//
// val rand = new Random(1)
//
// for (i <- 0 until 10) {
// val a = rand.nextInt(10) + 1.0
// val b = rand.nextInt(10) + 1.0
// val c = rand.nextInt(10) + 1.0
// val d = rand.nextInt(10) + 1.0
//
// var function = new BivariateQuadraticFunction(a,b,c,d)
// // this value is far enough away from the max that
// // optimizing will require at least two iterations
// var optimizer = new LimitedMemoryBFGS(function)
// optimizer.tolerance = 1e-8
// optimizer.gradientTolerance = 1e-8
// optimizer.optimize()
//
// //println(function.optimizableParameter(0) + " " + c/(2*a))
// //println(function.optimizableParameter(1) + " " + d/(2*b))
//
// assertEquals(function.optimizableParameter(0),c/(2*a),1e-4)
// assertEquals(function.optimizableParameter(1),d/(2*b),1e-4)
// }
// }
//
// def testGradientAscent = {
//
// val rand = new Random(1)
//
// for (i <- 0 until 10) {
// val a = rand.nextInt(10) + 1.0
// val b = rand.nextInt(10) + 1.0
// val c = rand.nextInt(10) + 1.0
// val d = rand.nextInt(10) + 1.0
//
// var function = new BivariateQuadraticFunction(a,b,c,d)
// // this value is far enough away from the max that
// // optimizing will require at least two iterations
// var optimizer = new GradientAscent(function)
//
// // reduce all tolerances to make sure
// // we do not stop until actually at the
// // maximum
// optimizer.tolerance = 1e-12
// optimizer.gradientTolerance = 1e-8
// optimizer.lineOptimizer.absTolx = 1e-8
// optimizer.lineOptimizer.relTolx = 1e-8
// optimizer.optimize()
//
// //println(function.optimizableParameter(0) + " " + c/(2*a))
// //println(function.optimizableParameter(1) + " " + d/(2*b))
//
// assertEquals(function.optimizableParameter(0),c/(2*a),1e-4)
// assertEquals(function.optimizableParameter(1),d/(2*b),1e-4)
// }
// }
//
// def testConjugateGradient = {
//
// val rand = new Random(1)
//
// for (i <- 0 until 10) {
// val a = rand.nextInt(10) + 1.0
// val b = rand.nextInt(10) + 1.0
// val c = rand.nextInt(10) + 1.0
// val d = rand.nextInt(10) + 1.0
//
// var function = new BivariateQuadraticFunction(a,b,c,d)
// // this value is far enough away from the max that
// // optimizing will require at least two iterations
// var optimizer = new ConjugateGradient(function)
// optimizer.tolerance = 1e-12
// optimizer.gradientTolerance = 1e-8
// optimizer.lineOptimizer.absTolx = 1e-8
// optimizer.lineOptimizer.relTolx = 1e-8
// try {
// optimizer.optimize()
// }
// catch {
// case e:Exception => e.printStackTrace()
// }
//
// //println(function.optimizableParameter(0) + " " + c/(2*a))
// //println(function.optimizableParameter(1) + " " + d/(2*b))
//
// assertEquals(function.optimizableParameter(0),c/(2*a),1e-4)
// assertEquals(function.optimizableParameter(1),d/(2*b),1e-4)
// }
// }
//}
//
//class BivariateQuadraticFunction(var a : Double, var b : Double, var c : Double, var d : Double)
// extends OptimizableByValueAndGradient {
// var x = new Array[Double](2)
//
// def numOptimizableParameters : Int = 2
//
// def getOptimizableParameters(a:Array[Double]) = {
// assertTrue(a.length == 2)
// Array.copy(x, 0, a, 0, x.length)
// }
//
// def setOptimizableParameters(a:Array[Double]) = {
// assertTrue(a.length == 2)
// Array.copy(a, 0, x, 0, a.length)
// }
//
// def optimizableParameter(index:Int): Double = {
// assertTrue(index < 2)
// x(index)
// }
//
// def optimizableParameter_=(index:Int, d:Double): Unit ={
// assertTrue(index < 2);
// x(index) = d
// }
//
// def optimizableValue : Double = {
// - a * x(0) * x(0) - b * x(1) * x(1) + c * x(0) + d * x(1)
// }
//
// def getOptimizableGradient(gradient:Array[Double]) = {
// assertTrue(gradient.length == 2)
// gradient(0) = -2 * a * x(0) + c
// gradient(1) = - 2 * b * x(1) + d
// }
//
//}
//
//class SimpleFunction extends OptimizableByValueAndGradient {
// var x : Double = 0.0
//
// def numOptimizableParameters : Int = 1
//
// def getOptimizableParameters(a:Array[Double]) = {
// assertTrue(a.length == 1)
// a(0) = x
// a
// }
//
// def setOptimizableParameters(a:Array[Double]) = {
// assertTrue(a.length == 1)
// x = a(0)
// }
//
// def optimizableParameter(index:Int): Double = {
// assertTrue(index == 0)
// x
// }
//
// def optimizableParameter_=(index:Int, d:Double): Unit ={
// assertTrue(index == 0);
// x = d
// }
//
// def optimizableValue : Double = {
// -x*x + 5 * x
// }
//
// def getOptimizableGradient(a:Array[Double]) = {
// a(0) = -2 * x + 5
// a
// }
//}
//
//object TestOptimizeRunner {
// def suite: TestSuite = {
// val suite = new TestSuite
// suite.addTestSuite(classOf[TestOptimize])
// suite
// }
//
// def main(args: Array[String]) {
// junit.textui.TestRunner.run(suite)
// }
//}
|
module Oxidized
# CSV-file node source: reads one node per line from a delimited file
# (optionally GPG-encrypted), mapping columns to node attributes via the
# configured map / vars_map.
class CSV < Source
  def initialize
    @cfg = Oxidized.config.source.csv
    super
  end

  # Seeds a default configuration on first run and aborts so the user can
  # edit it; loads GPG support only when encryption is enabled.
  def setup
    if @cfg.empty?
      Oxidized.asetus.user.source.csv.file = File.join(Config::Root, 'router.db')
      Oxidized.asetus.user.source.csv.delimiter = /:/
      Oxidized.asetus.user.source.csv.map.name = 0
      Oxidized.asetus.user.source.csv.map.model = 1
      Oxidized.asetus.user.source.csv.gpg = false
      Oxidized.asetus.save :user
      raise NoConfig, 'no source csv config, edit ~/.config/oxidized/config'
    end
    require 'gpgme' if @cfg.gpg?
  end

  # Parses the source file into an array of node-attribute hashes.
  def load(_node_want = nil)
    nodes = []
    open_file.each_line do |line|
      next if line =~ /^\s*#/ # skip comment lines
      data = line.chomp.split(@cfg.delimiter, -1)
      next if data.empty?
      # map node parameters
      keys = {}
      @cfg.map.each do |key, position|
        keys[key.to_sym] = node_var_interpolate data[position]
      end
      keys[:model] = map_model keys[:model] if keys.has_key? :model
      keys[:group] = map_group keys[:group] if keys.has_key? :group
      # map node specific vars
      vars = {}
      @cfg.vars_map.each do |key, position|
        vars[key.to_sym] = node_var_interpolate data[position]
      end
      keys[:vars] = vars unless vars.empty?
      nodes << keys
    end
    nodes
  end

  private

  # Returns the file contents as a String. Reading (or decrypting) the whole
  # file up front means no open file descriptor is left behind — the previous
  # File.open without a block leaked the handle until GC finalization.
  def open_file
    file = File.expand_path(@cfg.file)
    if @cfg.gpg?
      crypto = GPGME::Crypto.new password: @cfg.gpg_password
      crypto.decrypt(File.read(file)).to_s
    else
      File.read(file)
    end
  end
end
end
|
package iri
// Unique IRIs for Semantic Web Entailment Regimes.
//
// For further details, see http://www.w3.org/ns/entailment/
const (
	// Semantic Web Entailment Regimes.
	ENTAILMENT_NS = "http://www.w3.org/ns/entailment/" // namespace prefix shared by all regime IRIs

	ENTAILMENT_Simple        = ENTAILMENT_NS + "Simple"        // simple entailment
	ENTAILMENT_RDF           = ENTAILMENT_NS + "RDF"           // RDF entailment
	ENTAILMENT_RDFS          = ENTAILMENT_NS + "RDFS"          // RDFS entailment
	ENTAILMENT_D             = ENTAILMENT_NS + "D"             // D-entailment (datatypes)
	ENTAILMENT_OWL_Direct    = ENTAILMENT_NS + "OWL-Direct"    // OWL Direct Semantics
	ENTAILMENT_OWL_RDF_Based = ENTAILMENT_NS + "OWL-RDF-Based" // OWL RDF-Based Semantics
	ENTAILMENT_RIF           = ENTAILMENT_NS + "RIF"           // RIF entailment
)
|
# SimpleCov must be started *before* the code under test is required;
# in the original, `require 'configcat'` ran first, so none of the
# library's files were tracked and coverage was reported as near-zero.
require 'simplecov'
require 'codecov'
SimpleCov.formatter = SimpleCov::Formatter::Codecov
SimpleCov.start

require 'configcat'
require 'webmock/rspec'

# Tests stub HTTP themselves where needed; allow real connections otherwise.
WebMock.allow_net_connect!
# Keep the suite output quiet.
ConfigCat.logger.level = Logger::WARN
|
drop schema if exists ers cascade;
create schema ers;
set schema 'ers';

-- Allowed reimbursement workflow states.
-- `in (...)` replaces the original `like` comparisons, which carried no
-- wildcards and therefore behaved as plain equality; `in` states the
-- fixed value set directly.
create table reimbursement_status(
	"status_id" serial primary key,
	"reimb_status" text
		check ("reimb_status" in ('Pending', 'Approved', 'Denied')) not null
);

-- Allowed expense categories.
create table reimbursement_type(
	"type_id" serial primary key,
	"reimb_type" text
		check ("reimb_type" in ('Lodging', 'Travel', 'Food', 'Other')) not null
);

-- Application roles.
create table user_roles(
	"role_id" serial primary key,
	"role" text
		check ("role" in ('Employee', 'Finance manager')) not null
);
-- Application accounts (employees and finance managers).
create table users(
	"user_id" serial primary key,
	"username" text unique not null,
	"password" text not null,
	"first_name" text not null,
	"last_name" text not null,
	"email" text unique not null,
	"role_id" int references user_roles("role_id") not null,
	-- NOTE(review): username and email are each individually unique above,
	-- so this composite unique constraint adds no restriction.
	unique("username", "email")
);

-- Reimbursement requests. `resolved` and `resolver_id` stay null until a
-- finance manager acts on the request.
create table reimbursements(
	"reimb_id" serial primary key,
	"amount" numeric(12,2) not null,
	"submitted" timestamp not null,
	"resolved" timestamp,
	"description" text,
	--"receipt" bytea,
	"author_id" int references users("user_id") not null,
	"resolver_id" int references users("user_id"),
	"status_id" int references reimbursement_status("status_id") not null,
	"type_id" int references reimbursement_type("type_id") not null
);
-- ========== default data ==========
-- Seed order fixes the serial ids: status 1=Approved, 2=Pending, 3=Denied;
-- type 1=Lodging, 2=Travel, 3=Food, 4=Other; role 1=Employee,
-- 2=Finance manager. The reimbursement inserts below rely on these ids.
insert into reimbursement_status("reimb_status") values('Approved'), ('Pending'), ('Denied');
insert into reimbursement_type("reimb_type") values('Lodging'), ('Travel'), ('Food'), ('Other');
insert into user_roles("role") values('Employee'), ('Finance manager');
-- =========== Employees ===========
-- Users 1-3 are employees (role_id 1).
insert into users("username", "password","first_name", "last_name", "email", "role_id")
values('anjian', 'anjian','andy','jian','andyjian@gmail.com', 1);
insert into users ("username", "password","first_name", "last_name", "email", "role_id")
values ('johndoe', 'johndoe', 'John', 'Doe', 'johndoe@gmail.com', 1);
insert into users ("username", "password","first_name", "last_name", "email", "role_id")
values ('joesmith', 'joesmith', 'Joe', 'Smith', 'joesmith@gmail.com.com', 1);
-- =========== managers ========
-- Users 4-5 are finance managers (role_id 2) and act as resolvers below.
insert into users ("username", "password","first_name", "last_name", "email", "role_id")
values ('alecbatson', 'alecbatson', 'Alec', 'Batson', 'alecbatson@gmail.com', 2);
insert into users ("username", "password","first_name", "last_name", "email", "role_id")
values ('nicfleury', 'nicfleury', 'Nicolas', 'Fleury', 'nicfleury@gmail.com', 2);
-- =========== reimbursements ===========
-- Pending
insert into reimbursements("amount", "submitted", "description", "author_id", "status_id", "type_id")
values (10.00, '2020-12-25 15:00:00', 'Lorem ipsum dolor sit amet.', 1, 2, 2);
-- approved/denied
insert into reimbursements("amount", "submitted", "resolved", "description", "author_id", "resolver_id",
"status_id", "type_id")
values(10.00, '2020-12-16 12:00:00', '2020-12-16 13:00:00',
'Vestibulum aliquet velit libero, in.', 1, 4, 1, 3);
insert into reimbursements("amount", "submitted", "resolved", "description", "author_id", "resolver_id",
"status_id", "type_id")
values(10.00, '2020-11-30 8:00:00', '2020-11-30 10:00:00',
'Proin elementum urna lectus, condimentum.', 2, 4, 3, 1);
insert into reimbursements("amount", "submitted", "resolved", "description", "author_id", "resolver_id",
"status_id", "type_id")
values(10.00, '2020-11-11 10:00:00', '2020-11-12 12:00:00',
'Interdum et malesuada fames ac.', 3, 5, 1, 3);
-- Sanity-check queries for manual inspection after loading the script.
select * from users;
select * from reimbursements;
select * from user_roles;
select * from reimbursement_type;
select * from reimbursement_status;
|
'use strict';

var glslify = require('glslify');
var Pass = require('../../Pass');

// Shared fullscreen-quad vertex shader used by every generic pass.
var vertex = glslify('../../shaders/vertex/ortho.glsl');

/**
 * A render Pass that pairs the standard orthographic vertex shader with a
 * caller-supplied fragment shader.
 *
 * @param {string} fragment  Fragment shader source.
 */
function GenericPass(fragment) {
  Pass.call(this);
  this.setShader(vertex, fragment);
}

GenericPass.prototype = Object.create(Pass.prototype);
GenericPass.prototype.constructor = GenericPass;

module.exports = GenericPass;
|
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace ElevatorSimulator
{
    // A single floor of the simulated building.
    class Floor
    {
        // Height of this floor above ground level.
        // NOTE(review): never assigned in this class (no constructor or
        // setter call visible), so it is always 0 — confirm it is set
        // elsewhere or add a constructor.
        public double Height { get; private set; }

        // People currently on this floor.
        private List<Person> _occupants = new List<Person>();

        // Call-button state.
        // NOTE(review): both flags are declared but never read or written
        // in this class — presumably reserved for dispatch logic to come.
        private bool _downButtonPressed;
        private bool _upButtonPressed;

        // Read-only view of the people on this floor.
        public IEnumerable<Person> Occupants
        {
            get
            {
                return _occupants;
            }
        }
    }
}
|
require 'rack/test'
require 'net/http'

RSpec.describe SidekiqAlive::Server do
  include Rack::Test::Methods

  # Rack::Test drives the server class directly.
  subject(:app) { described_class }

  let(:token) { SidekiqAlive.config.token }

  describe 'responses' do
    describe "/-/liveness" do
      it "responds with success" do
        get "/-/liveness?token=#{token}"
        expect(last_response).to be_ok
        expect(last_response.body).to eq('OK')
      end

      it "responds with success when the token header is used" do
        header 'TOKEN', token
        get "/-/liveness"
        expect(last_response).to be_ok
        expect(last_response.body).to eq('OK')
      end

      it "responds with 401 if token is invalid" do
        get "/-/liveness?token=foo"
        expect(last_response).not_to be_ok
        expect(last_response.body).to eq("")
      end
    end

    describe "/-/readiness" do
      it "responds with ok if the service is ready" do
        allow(SidekiqAlive).to receive(:ready?) { true }
        get "/-/readiness?token=#{token}"
        expect(last_response).to be_ok
        expect(last_response.body).to eq("OK")
      end

      it "responds with ok if the service is ready and the token header is used" do
        allow(SidekiqAlive).to receive(:ready?) { true }
        header 'TOKEN', token
        get "/-/readiness"
        expect(last_response).to be_ok
        expect(last_response.body).to eq("OK")
      end

      it "responds with an error when the service is not ready" do
        allow(SidekiqAlive).to receive(:ready?) { false }
        get "/-/readiness?token=#{token}"
        expect(last_response).not_to be_ok
        expect(last_response.body).to eq("KO")
      end

      it "responds with 401 if token is invalid" do
        get "/-/readiness?token=foo"
        expect(last_response).not_to be_ok
        expect(last_response.body).to eq("")
      end
    end
  end

  describe 'SidekiqAlive setup' do
    before do
      ENV['SIDEKIQ_ALIVE_PORT'] = '4567'
      SidekiqAlive.config.set_defaults
    end

    after do
      ENV['SIDEKIQ_ALIVE_PORT'] = nil
      # BUG FIX: re-apply defaults after clearing the variable. The original
      # only unset ENV, leaving port 4567 baked into the config object and
      # leaking into every example that runs after this group.
      SidekiqAlive.config.set_defaults
    end

    it 'respects the SIDEKIQ_ALIVE_PORT environment variable' do
      expect( described_class.port ).to eq '4567'
    end
  end
end
|
<?php

namespace app\modules\core\backend\components\CKEditor;

use Yii;
use yii\web\UploadedFile;

/**
 * Handles file uploads coming from CKEditor and reports the result back to
 * the editor via the `CKEDITOR.tools.callFunction` JavaScript callback.
 */
class FileUploadAction extends \yii\base\Action
{
    /**
     * Name of the request param that carries the uploaded file
     * @var string
     */
    public $uploadName = 'upload';

    /**
     * Alias of the directory the file is saved into
     * @var string
     */
    public $uploadPath = '@uploads/';

    /**
     * Relative path to web-accessible folder
     * @var string
     */
    public $relativePath = '/uploads/';

    /**
     * Private variable for getter
     * @var \yii\web\UploadedFile
     */
    private $_file;

    /**
     * @inheritdoc
     */
    public function run()
    {
        $response = '';
        if (null !== $this->file) {
            // basename() strips any path components a malicious client may
            // have smuggled into the file name (path traversal).
            // NOTE(review): the extension is still client-controlled —
            // consider whitelisting allowed extensions (e.g. images only).
            $fileName = basename($this->file->baseName) . '.' . $this->file->extension;
            $filePath = Yii::getAlias($this->uploadPath . $fileName);
            if ($this->file->saveAs($filePath)) {
                // Cast to int: CKEditorFuncNum is reflected into generated
                // JavaScript, so a raw GET value would allow script injection.
                $funcNum = (int) Yii::$app->getRequest()->get('CKEditorFuncNum');
                $imageUrl = Yii::getAlias($this->relativePath . $fileName);
                // json_encode() quotes and escapes the values for safe
                // embedding in the generated JavaScript.
                $response =
                    'window.parent.CKEDITOR.tools.callFunction(' .
                    $funcNum . ', ' . json_encode($imageUrl) . ', ' . json_encode('Изображение загружено!') .
                    ');';
            }
        } else {
            $response = 'alert("Что-то пошло не так.")';
        }

        return '<script>' . $response . '</script>';
    }

    /**
     * Lazily resolves the uploaded file from the request.
     * @return \yii\web\UploadedFile|null Instance of UploadedFile class or null if is not handled
     */
    public function getFile()
    {
        return isset($this->_file) ? $this->_file :
            $this->_file = UploadedFile::getInstanceByName($this->uploadName);
    }
}
|
import express = require('express');
import Boom = require('boom');
import Joi = require('joi');
import { BaseController } from '../base.controller';
import { IUserDocument } from '../../models/user.model';
import { IRequestWithUserId } from '../request.interface';
import { Developer } from '../../models/developer.model';
import { Recruiter } from '../../models/recruiter.model';
import { emailConfirmTokenLength } from '../../helpers/constants';
// Shared logic for confirming a user's email address from a one-time token.
abstract class VerifyEmailController extends BaseController {
  protected readonly req: IRequestWithUserId;

  // Request body must contain exactly one `token` of the fixed length.
  protected readonly schema = Joi.object().keys({
    token: Joi.string().length(emailConfirmTokenLength)
  }).requiredKeys(['token']);

  /**
   * Validates the token for `user` and marks the email as confirmed.
   * Guard order matters:
   *  - missing user        -> 401 Unauthorized
   *  - already confirmed   -> silent no-op (idempotent re-confirmation)
   *  - expired token       -> 400 Bad Request
   *  - mismatched token    -> 400 Bad Request
   * Throws Boom errors which the caller's catch routes to errorHandler.
   */
  protected async checkToken(user: IUserDocument) {
    const token = this.req.body.token;
    if (!user) {
      throw Boom.unauthorized('User not found');
    }
    if (user.emailConfirmed) {
      return;
    }
    if (user.isEmailVerifyTokenExpired()) {
      throw Boom.badRequest('Token expired');
    }
    if (!user.isEmailVerifyTokenEqual(token)) {
      throw Boom.badRequest('Token is wrong');
    }
    user.setEmailConfirmed();
    await user.save();
  }

  // Success payload shared by all concrete controllers.
  protected response() {
    this.res.status(200).send({message: 'Email is confirmed'});
  }
}
// Verifies the email-confirmation token for a developer account.
class DeveloperVerifyEmailController extends VerifyEmailController {
  public handler() {
    const validationError = this.validate(this.req.body);
    if (validationError) {
      this.errorHandler(validationError);
      return;
    }
    Developer.findById(this.req.userId).exec()
      .then((user) => this.checkToken(user))
      .then(() => this.response())
      .catch((err) => this.errorHandler(err));
  }
}
// Verifies the email-confirmation token for a recruiter account.
class RecruiterVerifyEmailController extends VerifyEmailController {
  public handler() {
    const validationError = this.validate(this.req.body);
    if (validationError) {
      this.errorHandler(validationError);
      return;
    }
    Recruiter.findById(this.req.userId).exec()
      .then((user) => this.checkToken(user))
      .then(() => this.response())
      .catch((err) => this.errorHandler(err));
  }
}
/**
* @swagger
* definitions:
* VerifyEmail:
* type: 'object'
* properties:
* token:
* type: 'string'
* required:
* - token
*/
/**
* @swagger
* definitions:
* VerifyEmailResponse:
* type: 'object'
* properties:
* message:
* type: 'string'
* required:
* - message
*/
/**
* @swagger
* /auth/developer/verify-email:
* post:
* summary: 'Verify user email'
* description: ''
* tags: [Auth, Developer]
* consumes:
* - application/json
* produces:
* - application/json
* parameters:
* - in: 'body'
* name: 'body'
* description: ''
* required: true
* schema:
* $ref: '#/definitions/VerifyEmail'
* responses:
* 200:
* description: 'Verify email successful'
* schema:
* type: 'object'
* $ref: '#/definitions/VerifyEmailResponse'
* security:
* - Authorization: []
*/
export function developerVerifyEmailHandler(req: express.Request, res: express.Response, next: express.NextFunction) {
const developerVerifyEmailController = new DeveloperVerifyEmailController(req, res, next);
developerVerifyEmailController.handler();
}
/**
* @swagger
* /auth/recruiter/verify-email:
* post:
* summary: 'Verify user email'
* description: ''
* tags: [Auth, Recruiter]
* consumes:
* - application/json
* produces:
* - application/json
* parameters:
* - in: 'body'
* name: 'body'
* description: ''
* required: true
* schema:
* $ref: '#/definitions/VerifyEmail'
* responses:
* 200:
* description: 'Verify email successful'
* schema:
* type: 'object'
* $ref: '#/definitions/VerifyEmailResponse'
* security:
* - Authorization: []
*/
export function recruiterVerifyEmailHandler(req: express.Request, res: express.Response, next: express.NextFunction) {
const recruiterVerifyEmailController = new RecruiterVerifyEmailController(req, res, next);
recruiterVerifyEmailController.handler();
}
|
<?php
/*
* Esta clase para ahorrar tiempo
* Evitando escribir los combos
*/
namespace frontend\modules\inter\helpers;
/**
 * File-path helpers for the `inter` frontend module.
 */
class FileHelper extends \common\helpers\FileHelper
{
    /**
     * Builds the aliased URL of a country-flag image.
     *
     * @param string $codpais country code used as the image file name
     * @param int    $tamano  flag size in pixels (folder name), default 32
     * @return string aliased path, e.g. "@web/img/flags/32/pe.png"
     */
    public static function urlFlag($codpais, $tamano = 32) {
        return sprintf('@web/img/flags/%s/%s.png', $tamano, $codpais);
    }
}
|
#!/usr/bin/env ruby
# MixServer 2
# Danassis Panayiotis
# Matikas George

load 'rsaLib.rb'
require 'socket'

# Generate an RSA key pair from two 512-bit primes p and q.
p = create_random_prime(512)
q = create_random_prime(512)
# Public modulus
n = p*q
# Public exponent
e = 0x10001
# Private exponent
d = get_d(p,q,e)

server = TCPServer.new 2002
votes = Array.new

loop do
  client = server.accept
  # `command` avoids shadowing Kernel#proc, which the original `proc` did.
  command = client.gets

  if command == "read\n"
    # Publish the public key (e, n) to the client.
    client.puts e
    client.puts n
  end

  if command == "mix\n"
    # Receive k encrypted votes, decrypt each with the private exponent.
    k = client.gets.to_i
    i = 0
    while i < k
      v = client.gets.to_i
      v = mod_pow(v,d,n)
      votes.push(v)
      i = i + 1
    end
    # BUG FIX: Array#shuffle returns a *copy*; the original call discarded
    # the result, so the votes were written to disk unshuffled. shuffle!
    # permutes in place. NOTE(review): the fixed seed makes the permutation
    # deterministic — confirm that is intended for this exercise.
    votes.shuffle!(random: Random.new(1))
    file = File.new("votes", "w")
    i = 0
    while i < k
      file.puts(votes[i])
      i = i + 1
    end
    file.close
  end

  if command == "end\n"
    client.close
    break
  end
  client.close
end
|
package com.apollographql.apollo.cache.normalized.lru

import com.apollographql.apollo.cache.ApolloCacheHeaders
import com.apollographql.apollo.cache.CacheHeaders
import com.apollographql.apollo.cache.normalized.CacheKey
import com.apollographql.apollo.cache.normalized.NormalizedCache
import com.apollographql.apollo.cache.normalized.Record
import com.nytimes.android.external.cache.Cache
import com.nytimes.android.external.cache.CacheBuilder
import com.nytimes.android.external.cache.Weigher
import java.nio.charset.Charset
import java.util.concurrent.Callable
import kotlin.reflect.KClass

/**
 * A [NormalizedCache] backed by an in memory [Cache]. Can be configured with an optional secondaryCache [ ], which will be used as a backup if a [Record] is not present in the primary cache.
 *
 *
 * A common configuration is to have secondary SQL cache.
 */
class LruNormalizedCache internal constructor(evictionPolicy: EvictionPolicy) : NormalizedCache() {

  // Primary in-memory store. Each eviction-policy field is applied only when
  // set; weight-based eviction charges an entry its key's byte length plus
  // the record's estimated size.
  private val lruCache: Cache<String, Record> =
      CacheBuilder.newBuilder().apply {
        if (evictionPolicy.maxSizeBytes != null) {
          maximumWeight(evictionPolicy.maxSizeBytes).weigher(
              Weigher { key: String, value: Record ->
                key.toByteArray(Charset.defaultCharset()).size + value.sizeEstimateBytes()
              }
          )
        }
        if (evictionPolicy.maxEntries != null) {
          maximumSize(evictionPolicy.maxEntries)
        }
        if (evictionPolicy.expireAfterAccess != null) {
          expireAfterAccess(evictionPolicy.expireAfterAccess, evictionPolicy.expireAfterAccessTimeUnit!!)
        }
        if (evictionPolicy.expireAfterWrite != null) {
          expireAfterWrite(evictionPolicy.expireAfterWrite, evictionPolicy.expireAfterWriteTimeUnit!!)
        }
      }.build()

  // Looks up `key`, falling through to the next cache in the chain on a miss.
  // Honors EVICT_AFTER_READ by invalidating the entry after returning it.
  override fun loadRecord(key: String, cacheHeaders: CacheHeaders): Record? {
    return try {
      lruCache.get(key, Callable {
        nextCache?.loadRecord(key, cacheHeaders)
      })
    } catch (ignored: Exception) { // Thrown when the nextCache's value is null
      return null
    }.also {
      if (cacheHeaders.hasHeader(ApolloCacheHeaders.EVICT_AFTER_READ)) {
        lruCache.invalidate(key)
      }
    }
  }

  // Clears the whole chain: the downstream cache first, then this one.
  override fun clearAll() {
    nextCache?.clearAll()
    clearCurrentCache()
  }

  // Removes `cacheKey` from the whole chain; with `cascade`, also removes
  // every record this record references, recursively.
  override fun remove(cacheKey: CacheKey, cascade: Boolean): Boolean {
    var result: Boolean = nextCache?.remove(cacheKey, cascade) ?: false

    val record = lruCache.getIfPresent(cacheKey.key)
    if (record != null) {
      lruCache.invalidate(cacheKey.key)
      result = true
      if (cascade) {
        for (cacheReference in record.referencedFields()) {
          result = result && remove(CacheKey(cacheReference.key), true)
        }
      }
    }
    return result
  }

  // Drops every entry in this cache level only (not the chain).
  internal fun clearCurrentCache() {
    lruCache.invalidateAll()
  }

  // Inserts a new record, or merges into the existing one and returns the
  // set of keys that changed.
  override fun performMerge(apolloRecord: Record, oldRecord: Record?, cacheHeaders: CacheHeaders): Set<String> {
    return if (oldRecord == null) {
      lruCache.put(apolloRecord.key, apolloRecord)
      apolloRecord.keys()
    } else {
      oldRecord.mergeWith(apolloRecord).also {
        //re-insert to trigger new weight calculation
        lruCache.put(apolloRecord.key, oldRecord)
      }
    }
  }

  // Snapshot of this cache level plus all downstream levels, keyed by class.
  @OptIn(ExperimentalStdlibApi::class)
  override fun dump() = buildMap<KClass<*>, Map<String, Record>> {
    put(this@LruNormalizedCache::class, lruCache.asMap())
    putAll(nextCache?.dump().orEmpty())
  }
}
|
class LeetCode1657 {
    /**
     * LeetCode 1657 "Determine if Two Strings Are Close".
     *
     * Two strings are close when they contain the same set of distinct
     * characters AND the same multiset of character frequencies
     * (frequencies may be swapped between characters, but not invented).
     *
     * @param word1 lowercase ASCII string
     * @param word2 lowercase ASCII string
     * @return true when the strings are "close" by the definition above
     */
    fun closeStrings(word1: String, word2: String): Boolean {
        // Different lengths can never have matching frequency multisets.
        if (word1.length != word2.length) {
            return false
        }
        val freq1 = IntArray(26)
        val freq2 = IntArray(26)
        for (i in word1.indices) {
            freq1[word1[i] - 'a']++
            freq2[word2[i] - 'a']++
        }
        // Same distinct characters...
        if (word1.toSet() != word2.toSet()) {
            return false
        }
        // ...and the same sorted non-zero frequency profile. (filter already
        // returns a List, so the original's redundant toList() is dropped.)
        return freq1.filter { it != 0 }.sorted() == freq2.filter { it != 0 }.sorted()
    }
}
|
Simple command-line tool for AES encryption written in Python.
Author of the algorithm: Bo Zhu http://about.bozhu.me
Bundled into an application by: Jan Gabriel
|
var db = require('../../db');
module.exports = function(cb) {
db.select().table('projects').orderBy('id')
.then(function(rows) {
rows.forEach(function(row) {
if (row._date_created) {
row.date_created = row._date_created.toISOString();
delete row._date_created;
}
if (row._date_updated) {
row.date_updated = row._date_updated.toISOString();
delete row._date_updated;
}
});
cb(null, {
valid: rows,
invalid: {
id: 'thisisastring',
title: 12345,
username: 23241,
isPublic: null
}
});
})
.catch(cb);
};
|
import React from 'react';
import {
  render, fireEvent, BoundFunction, GetByRole,
} from 'test-utils';
import { spy, SinonSpy } from 'sinon';
import { strictEqual } from 'assert';
import { FontAwesomeIcon } from '@fortawesome/react-fontawesome';
import { faEdit } from '@fortawesome/free-solid-svg-icons';
import IconLink from '../IconLink';

describe('Icon Link', function () {
  let getByRole: BoundFunction<GetByRole>;
  let clickSpy: SinonSpy;

  // Render a fresh IconLink with a spy click handler before every example.
  beforeEach(function () {
    clickSpy = spy();
    ({ getByRole } = render(
      <IconLink title="Edit account information" alt="Edit account information" clickHandler={clickSpy}>
        <FontAwesomeIcon icon={faEdit} />
      </IconLink>
    ));
  });

  afterEach(function () {
    clickSpy.resetHistory();
  });

  it('renders', function () {
    // getByRole throws when no matching element exists, so the lookup
    // itself is the assertion.
    getByRole('link');
  });

  it('calls the click handler when clicked', function () {
    fireEvent.click(getByRole('link'));
    strictEqual(clickSpy.callCount, 1);
  });

  it('renders the correct icon', function () {
    // The icon is queried with { hidden: true } because it is rendered
    // aria-hidden; the FontAwesome class name identifies which icon it is.
    const iconName = getByRole('img', { hidden: true });
    const classes = Array.from(iconName.classList);
    strictEqual(classes.includes('fa-edit'), true);
  });
});
|
#!/bin/bash
cd "$Test_BUILD_DIR"
make test
ctestlog=Testing/Temporary/LastTest.log
cat $ctestlog | grep -i fail -B 25 -A 3
cat $ctestlog | grep -i fail
# last exit status 0 means, grep found failures!
if [ $? == 0 ]; then
exit 1;
else
echo 0;
fi
|
using NUnit.Framework;
using System;
using System.Globalization;
namespace Phnx.Tests.Extensions.Time.DateTimeExtensionsTests
{
public class AsDateStringTests
{
[Test]
public void AsDateString_WithoutFormatProviderAndShortFormat_FormatsAsShortLocalDate()
{
DateTime sampleNow = new DateTime(2012, 9, 4, 12, 15, 36);
var shortDate = sampleNow.AsDateString();
var shortDateShouldBe = sampleNow.ToShortDateString();
Assert.AreEqual(shortDateShouldBe, shortDate);
}
[Test]
public void AsDateString_WithoutFormatProviderAndLongFormat_FormatsAsLongLocalDate()
{
DateTime sampleNow = new DateTime(2012, 9, 4, 12, 15, 36);
var shortDate = sampleNow.AsDateString(false);
var shortDateShouldBe = sampleNow.ToLongDateString();
Assert.AreEqual(shortDateShouldBe, shortDate);
}
[Test]
public void AsDateString_WithFormatProviderUSAndShortFormat_FormatsAsShortUSDate()
{
var formatProviderUS = CultureInfo.GetCultureInfo("en-US").DateTimeFormat;
DateTime sampleNow = new DateTime(2012, 9, 4, 12, 15, 36);
var shortDate = sampleNow.AsDateString(formatProviderUS);
var shortDateShouldBe = sampleNow.ToString("d", formatProviderUS);
Assert.AreEqual(shortDateShouldBe, shortDate);
}
[Test]
public void AsDateString_WithFormatProviderGBAndShortFormat_FormatsAsShortGBDate()
{
var formatProviderUK = CultureInfo.GetCultureInfo("en-GB").DateTimeFormat;
DateTime sampleNow = new DateTime(2012, 9, 4, 12, 15, 36);
var shortDate = sampleNow.AsDateString(formatProviderUK);
var shortDateShouldBe = sampleNow.ToString("d", formatProviderUK);
Assert.AreEqual(shortDateShouldBe, shortDate);
}
[Test]
public void AsDateString_WithFormatProviderUSAndLongFormat_FormatsAsLongUSDate()
{
var formatProviderUS = CultureInfo.GetCultureInfo("en-US").DateTimeFormat;
DateTime sampleNow = new DateTime(2012, 9, 4, 12, 15, 36);
var longDate = sampleNow.AsDateString(formatProviderUS, false);
var longDateShouldBe = sampleNow.ToString("D", formatProviderUS);
Assert.AreEqual(longDateShouldBe, longDate);
}
[Test]
public void AsDateString_WithFormatProviderGBAndLongFormat_FormatsAsLongUKDate()
{
var formatProviderUK = CultureInfo.GetCultureInfo("en-GB").DateTimeFormat;
DateTime sampleNow = new DateTime(2012, 9, 4, 12, 15, 36);
var longDate = sampleNow.AsDateString(formatProviderUK, false);
var longDateShouldBe = sampleNow.ToString("D", formatProviderUK);
Assert.AreEqual(longDateShouldBe, longDate);
}
[Test]
public void AsDateString_WithNullFormatProvider_ThrowsArgumentNullException()
{
IFormatProvider formatProvider = null;
DateTime sampleNow = new DateTime(2012, 9, 4, 12, 15, 36);
Assert.Throws<ArgumentNullException>(() => sampleNow.AsDateString(formatProvider));
}
}
}
|
<?php
/** @var \Illuminate\Database\Eloquent\Factory $factory */
use App\Models\Admin\ProductCategory;
use Faker\Generator as Faker;
use Illuminate\Support\Str;
// Model factory producing fake ProductCategory rows for tests/seeding.
$factory->define( ProductCategory::class, function ( Faker $faker ) {
	return [
		// Reuse the generated name so the slug stays consistent with it.
		'name'        => $name = $faker->name,
		'slug'        => Str::slug( $name ),
		'description' => $faker->paragraph( rand( 5, 15 ) ),
		// NOTE(review): Faker's image() fetches from a remote placeholder
		// service and writes into public/uploads — it needs network access
		// and can fail offline; consider a static placeholder in tests.
		'thumbnail'   => $faker->image( 'public/uploads/images/product-categories', 300, 300, 'transport', false ),
	];
} );
|
/**
* Vrapi utility functions
*/
@file:JvmName("OvrplUtilities")
package org.godotengine.plugin.vr.oculus.platform.api
import org.godotengine.plugin.vr.oculus.platform.OvrPlatformPlugin
// removed the utility functions not needed in platform sdk
|
# 编译 OpenArray CXX
请提前准备编译需要的环境,目前支持并测试的环境有:
- [openSUSE](./setup-opensuse-builder.md)
- [CentOS](./setup-builder-centos.md)
- [Ubuntu](./setup-builder-ubuntu.md)
## 准备
下载最新源码:
```shell
cd
git clone https://github.com/hxmhuang/OpenArray_CXX.git
cd OpenArray_CXX/
```
如果当前目录没有 `configure` 文件, 请执行下面命令创建:
```shell
aclocal
autoconf
automake --add-missing
```
## 编译
```shell
./configure --prefix=${HOME}/install
time make -j$(nproc)
```
说明:
1. 如果需要指定 MPI 目录,请定义 MPI_DIR 变量,如:`./configure MPI_DIR=/usr/lib/x86_64-linux-gnu/openmpi/`
## 安装
```shell
make install
```
## 测试
```shell
# 编译 manual_main
make manual_main
# 执行测试
time mpirun -n 2 ./manual_main
```
|
package com.harry0000.kancolle.ac

import com.typesafe.config.ConfigFactory

/** Typed access to application configuration (loaded lazily on first use). */
object Config {
  private lazy val config = ConfigFactory.load()

  /** Output directory path, read from the `dist.path` configuration key. */
  def distPath: String = config.getString("dist.path")
}
|
import { Photo } from '../../models';
// One row of the photo list: either a titled section header or a photo row.
export interface ListItemData {
  title?: string;
  photos?: Photo[];
  key: string;
}

/**
 * Height (in px) to reserve for a list row: items with a title are section
 * headers (50px), everything else is a photo row (100px).
 */
export function getItemSize(item: ListItemData): number {
  if (item.title) {
    return 50;
  }
  return 100;
}
|
// Copyright (c) 2012-2022 Supercolony
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the"Software"),
// to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#![feature(min_specialization)]
#[cfg(feature = "timelock_controller")]
#[brush::contract]
mod timelock_controller {
use ::ink_env::DefaultEnvironment;
use brush::test_utils::{
accounts,
change_caller,
};
use contracts::timelock_controller::*;
use ink_env::test::DefaultAccounts;
use ink_lang as ink;
use ink::codegen::{
EmitEvent,
Env,
};
    /// Emitted when a call is scheduled as part of operation `id`.
    #[ink(event)]
    pub struct CallScheduled {
        /// Operation the call belongs to.
        #[ink(topic)]
        pub id: OperationId,
        /// Position of this call within the (batch) operation.
        #[ink(topic)]
        pub index: u8,
        pub transaction: Transaction,
        pub predecessor: Option<OperationId>,
        pub delay: Timestamp,
    }

    /// Emitted when a call is performed as part of operation `id`.
    #[ink(event)]
    pub struct CallExecuted {
        #[ink(topic)]
        pub id: OperationId,
        #[ink(topic)]
        pub index: u8,
        pub transaction: Transaction,
    }

    /// Emitted when operation `id` is cancelled.
    #[ink(event)]
    pub struct Cancelled {
        #[ink(topic)]
        pub id: OperationId,
    }

    /// Emitted when the minimum delay for future operations is modified.
    #[ink(event)]
    pub struct MinDelayChange {
        pub old_delay: Timestamp,
        pub new_delay: Timestamp,
    }

    /// Contract-under-test: storage is just the TimelockController data.
    #[ink(storage)]
    #[derive(Default, TimelockControllerStorage)]
    pub struct TimelockControllerStruct {
        #[TimelockControllerStorageField]
        timelock: TimelockControllerData,
    }

    // Decoded event type for inspecting emitted events in the tests below.
    type Event = <TimelockControllerStruct as ::ink_lang::reflect::ContractEventBase>::Type;

    // Pull in the default trait implementations from the library.
    impl AccessControl for TimelockControllerStruct {}
    impl TimelockController for TimelockControllerStruct {}
    // Wire the library's internal event hooks to actual ink! event emission.
    impl TimelockControllerInternal for TimelockControllerStruct {
        fn _emit_min_delay_change_event(&self, old_delay: Timestamp, new_delay: Timestamp) {
            self.env().emit_event(MinDelayChange { old_delay, new_delay })
        }

        fn _emit_call_scheduled_event(
            &self,
            id: OperationId,
            index: u8,
            transaction: Transaction,
            predecessor: Option<OperationId>,
            delay: Timestamp,
        ) {
            self.env().emit_event(CallScheduled {
                id,
                index,
                transaction,
                predecessor,
                delay,
            })
        }

        fn _emit_cancelled_event(&self, id: OperationId) {
            self.env().emit_event(Cancelled { id })
        }

        fn _emit_call_executed_event(&self, id: OperationId, index: u8, transaction: Transaction) {
            self.env().emit_event(CallExecuted { id, index, transaction })
        }
    }
    impl TimelockControllerStruct {
        /// Instantiates the contract, initializing both the access-control
        /// admin and the timelock roles (proposers/executors) plus min delay.
        #[ink(constructor)]
        pub fn new(admin: AccountId, delay: Timestamp, proposers: Vec<AccountId>, executors: Vec<AccountId>) -> Self {
            let mut instance = Self::default();
            AccessControlInternal::_init_with_admin(&mut instance, admin);
            TimelockControllerInternal::_init_with_admin(&mut instance, admin, delay, proposers, executors);
            instance
        }
    }
fn assert_min_delay_change_event(
event: &ink_env::test::EmittedEvent,
expected_old_delay: Timestamp,
expected_new_delay: Timestamp,
) {
if let Event::MinDelayChange(MinDelayChange { old_delay, new_delay }) =
<Event as scale::Decode>::decode(&mut &event.data[..])
.expect("encountered invalid contract event data buffer")
{
assert_eq!(
old_delay, expected_old_delay,
"Old delays were not equal: encountered delay {:?}, expected delay {:?}",
old_delay, expected_old_delay
);
assert_eq!(
new_delay, expected_new_delay,
"New delays were not equal: encountered delay {:?}, expected delay {:?}",
new_delay, expected_new_delay
);
}
}
fn assert_call_scheduled_event(
event: &ink_env::test::EmittedEvent,
expected_id: OperationId,
expected_index: u8,
expected_transaction: Transaction,
expected_predecessor: Option<OperationId>,
expected_delay: Timestamp,
) {
if let Event::CallScheduled(CallScheduled {
id,
index,
transaction,
predecessor,
delay,
}) = <Event as scale::Decode>::decode(&mut &event.data[..])
.expect("encountered invalid contract event data buffer")
{
assert_eq!(
id, expected_id,
"Id were not equal: encountered {:?}, expected {:?}",
id, expected_id
);
assert_eq!(
index, expected_index,
"Index were not equal: encountered {:?}, expected {:?}",
index, expected_index
);
assert_eq!(
transaction, expected_transaction,
"Transaction were not equal: encountered {:?}, expected {:?}",
transaction, expected_transaction
);
assert_eq!(
predecessor, expected_predecessor,
"Predecessor were not equal: encountered {:?}, expected {:?}",
predecessor, expected_predecessor
);
assert_eq!(
delay, expected_delay,
"Delay were not equal: encountered {:?}, expected {:?}",
delay, expected_delay
);
}
}
fn assert_cancelled_event(event: &ink_env::test::EmittedEvent, expected_id: OperationId) {
if let Event::Cancelled(Cancelled { id }) = <Event as scale::Decode>::decode(&mut &event.data[..])
.expect("encountered invalid contract event data buffer")
{
assert_eq!(
id, expected_id,
"Ids were not equal: encountered {:?}, expected {:?}",
id, expected_id
);
}
}
fn setup() -> DefaultAccounts<DefaultEnvironment> {
let accounts = accounts();
accounts
}
    // The constructor must grant the admin role to `admin` only, assign the
    // proposer/executor roles exactly as passed, set the min delay, and emit
    // a MinDelayChange(0 -> delay) event.
    #[ink::test]
    fn should_init_with_default_admin() {
        let accounts = setup();
        let timelock = TimelockControllerStruct::new(
            accounts.alice,
            10,
            vec![accounts.bob, accounts.charlie],
            vec![accounts.eve, accounts.charlie],
        );
        assert!(timelock.has_role(TIMELOCK_ADMIN_ROLE, accounts.alice))
;
        assert!(!timelock.has_role(PROPOSER_ROLE, accounts.alice));
        assert!(!timelock.has_role(EXECUTOR_ROLE, accounts.alice));
        assert_eq!(timelock.get_role_admin(TIMELOCK_ADMIN_ROLE), TIMELOCK_ADMIN_ROLE);
        assert_eq!(timelock.get_role_admin(PROPOSER_ROLE), PROPOSER_ROLE);
        assert_eq!(timelock.get_role_admin(EXECUTOR_ROLE), EXECUTOR_ROLE);
        assert_eq!(timelock.get_min_delay(), 10);
        assert!(timelock.has_role(PROPOSER_ROLE, accounts.bob));
        assert!(timelock.has_role(PROPOSER_ROLE, accounts.charlie));
        assert!(!timelock.has_role(PROPOSER_ROLE, accounts.eve));
        assert!(timelock.has_role(EXECUTOR_ROLE, accounts.eve));
        assert!(timelock.has_role(EXECUTOR_ROLE, accounts.charlie));
        assert!(!timelock.has_role(EXECUTOR_ROLE, accounts.bob));
        let emitted_events = ink_env::test::recorded_events().collect::<Vec<_>>();
        assert_min_delay_change_event(&emitted_events[0], 0, 10);
    }

    // A proposer may schedule an operation with delay >= min delay; the
    // operation becomes pending and a CallScheduled event is emitted
    // (event[0] is the constructor's MinDelayChange).
    #[ink::test]
    fn should_schedule() {
        let accounts = setup();
        let min_delay = 10;
        let mut timelock = TimelockControllerStruct::new(accounts.alice, min_delay, vec![accounts.alice], vec![]);

        let id = timelock.hash_operation(Transaction::default(), None, [0; 32]);
        assert!(!timelock.is_operation(id));

        assert!(timelock
            .schedule(Transaction::default(), None, [0; 32], min_delay + 1)
            .is_ok());
        assert!(timelock.is_operation(id));
        assert!(timelock.is_operation_pending(id));
        assert_eq!(timelock.get_timestamp(id), min_delay + 1);
        let emitted_events = ink_env::test::recorded_events().collect::<Vec<_>>();
        assert_call_scheduled_event(&emitted_events[1], id, 0, Transaction::default(), None, min_delay + 1);
    }
    // Scheduling without the proposer role must fail with MissingRole.
    #[ink::test]
    fn should_schedule_not_proposal() {
        let accounts = setup();
        let min_delay = 10;
        let mut timelock = TimelockControllerStruct::new(accounts.alice, min_delay, vec![], vec![]);
        assert_eq!(
            Err(TimelockControllerError::AccessControlError(
                AccessControlError::MissingRole
            )),
            timelock.schedule(Transaction::default(), None, [0; 32], min_delay + 1)
        );
    }

    // Scheduling the same (transaction, predecessor, salt) twice must fail.
    #[ink::test]
    fn should_schedule_already_scheduled() {
        let accounts = setup();
        let min_delay = 10;
        let mut timelock = TimelockControllerStruct::new(accounts.alice, min_delay, vec![accounts.alice], vec![]);
        assert!(timelock
            .schedule(Transaction::default(), None, [0; 32], min_delay + 1)
            .is_ok());
        assert_eq!(
            Err(TimelockControllerError::OperationAlreadyScheduled),
            timelock.schedule(Transaction::default(), None, [0; 32], min_delay + 1)
        );
    }

    // A delay below the configured minimum must be rejected.
    #[ink::test]
    fn should_schedule_low_delay() {
        let accounts = setup();
        let min_delay = 10;
        let mut timelock = TimelockControllerStruct::new(accounts.alice, min_delay, vec![accounts.alice], vec![]);
        assert_eq!(
            Err(TimelockControllerError::InsufficientDelay),
            timelock.schedule(Transaction::default(), None, [0; 32], min_delay - 1)
        );
    }
    // A batch schedule creates one pending operation and emits one
    // CallScheduled event per transaction (plus the constructor's
    // MinDelayChange event — 3 events total for 2 transactions).
    #[ink::test]
    fn should_schedule_batch() {
        let accounts = setup();
        let min_delay = 10;
        let mut timelock = TimelockControllerStruct::new(accounts.alice, min_delay, vec![accounts.alice], vec![]);
        let transactions = vec![Transaction::default(), Transaction::default()];

        let id = timelock.hash_operation_batch(transactions.clone(), None, [0; 32]);
        assert!(!timelock.is_operation(id));

        assert!(timelock
            .schedule_batch(transactions.clone(), None, [0; 32], min_delay + 1)
            .is_ok());
        assert!(timelock.is_operation(id));
        assert!(timelock.is_operation_pending(id));
        assert_eq!(timelock.get_timestamp(id), min_delay + 1);
        let emitted_events = ink_env::test::recorded_events().collect::<Vec<_>>();
        assert_eq!(emitted_events.len(), 3);
        for (i, transaction) in transactions.into_iter().enumerate() {
            assert_call_scheduled_event(&emitted_events[i + 1], id, i as u8, transaction, None, min_delay + 1);
        }
    }

    // Batch scheduling without the proposer role must fail with MissingRole.
    #[ink::test]
    fn should_schedule_batch_not_proposer() {
        let accounts = setup();
        let min_delay = 10;
        let mut timelock = TimelockControllerStruct::new(accounts.alice, min_delay, vec![], vec![]);
        let transactions = vec![Transaction::default(), Transaction::default()];
        assert_eq!(
            Err(TimelockControllerError::AccessControlError(
                AccessControlError::MissingRole
            )),
            timelock.schedule_batch(transactions.clone(), None, [0; 32], min_delay + 1)
        );
    }
#[ink::test]
fn should_cancel() {
let accounts = setup();
let min_delay = 10;
let mut timelock = TimelockControllerStruct::new(accounts.alice, min_delay, vec![accounts.alice], vec![]);
let id = timelock.hash_operation(Transaction::default(), None, [0; 32]);
assert!(timelock
.schedule(Transaction::default(), None, [0; 32], min_delay + 1)
.is_ok());
// A pending operation can be cancelled by a proposer.
assert!(timelock.cancel(id).is_ok());
// Events: [0] constructor role grant, [1] CallScheduled, [2] Cancelled.
let emitted_events = ink_env::test::recorded_events().collect::<Vec<_>>();
assert_call_scheduled_event(&emitted_events[1], id, 0, Transaction::default(), None, min_delay + 1);
assert_cancelled_event(&emitted_events[2], id);
}
#[ink::test]
fn should_cancel_not_proposer() {
let accounts = setup();
let min_delay = 10;
let mut timelock = TimelockControllerStruct::new(accounts.alice, min_delay, vec![accounts.alice], vec![]);
let id = timelock.hash_operation(Transaction::default(), None, [0; 32]);
assert!(timelock
.schedule(Transaction::default(), None, [0; 32], min_delay + 1)
.is_ok());
let emitted_events = ink_env::test::recorded_events().collect::<Vec<_>>();
assert_call_scheduled_event(&emitted_events[1], id, 0, Transaction::default(), None, min_delay + 1);
// Revoke the proposer role so the subsequent cancel attempt lacks the
// required role and must fail.
assert!(timelock.revoke_role(PROPOSER_ROLE, accounts.alice).is_ok());
assert_eq!(
Err(TimelockControllerError::AccessControlError(
AccessControlError::MissingRole
)),
timelock.cancel(id)
);
}
#[ink::test]
fn should_cancel_not_pending_operation() {
let accounts = setup();
let min_delay = 10;
let mut timelock = TimelockControllerStruct::new(accounts.alice, min_delay, vec![accounts.alice], vec![]);
// The operation was never scheduled, so it is not pending and cannot
// be cancelled.
let id = timelock.hash_operation(Transaction::default(), None, [0; 32]);
assert_eq!(
Err(TimelockControllerError::OperationCannonBeCanceled),
timelock.cancel(id)
);
}
#[ink::test]
fn should_update_delay() {
let accounts = setup();
let min_delay = 10;
let mut timelock = TimelockControllerStruct::new(accounts.alice, min_delay, vec![accounts.alice], vec![]);
// Caller of the method is contract itself
change_caller(timelock.env().account_id());
// `update_delay` may only be called by the timelock contract itself.
assert!(timelock.update_delay(min_delay + 2).is_ok());
}
#[ink::test]
fn should_update_delay_not_timelock_role() {
let accounts = setup();
let min_delay = 10;
let mut timelock = TimelockControllerStruct::new(accounts.alice, min_delay, vec![accounts.alice], vec![]);
// Any caller other than the contract's own account must be rejected.
change_caller([13; 32].into());
assert_eq!(
Err(TimelockControllerError::CallerMustBeTimeLock),
timelock.update_delay(min_delay + 2)
);
}
}
|
import { Component, Input, OnInit } from '@angular/core';
@Component({
  selector: 'app-goal-image',
  templateUrl: './goal-image.component.html',
  styleUrls: ['./goal-image.component.scss']
})
export class GoalImageComponent implements OnInit {
  /** Goal description used to look up the matching icon (e.g. 'Car'). */
  @Input() goalDescription: string;

  /** Maps a goal description to its icon asset path. */
  goalImages: Map<string, string> = new Map<string, string>();

  constructor() {
    // Bug fix: the original used bracket assignment (`this.goalImages['Car'] = …`),
    // which sets plain object properties on the Map instance and bypasses the
    // Map's own storage. Use Map.set()/Map.get() instead.
    this.goalImages.set('Car', './assets/car-ride.png');
    this.goalImages.set('Boat', './assets/boat-icon.png');
    this.goalImages.set('Retirement', './assets/retirement-icon.png');
    this.goalImages.set('House', './assets/house-icon.png');
  }

  ngOnInit() {}

  /** Returns the icon URL for the current goal, or undefined if unknown. */
  getImageUrl() {
    return this.goalImages.get(this.goalDescription);
  }
}
|
"""Algorithm demonstrating the power of dynamic programming: It is simple and elegant and one of
the most important concepts in computer science."""
import unittest
from typing import List
def num_ways_of_making_change(amount: int, coins: List[int]) -> int:
    """Count the number of coin combinations that sum to `amount`.

    `amount` is the desired monetary value and `coins` are the available
    denominations (each usable an unlimited number of times).

    Uses bottom-up dynamic programming: the answer for the smallest amounts
    is computed first and larger amounts are built from them. Iterating coins
    in the outer loop counts combinations rather than ordered permutations.
    """
    # num_ways[v] = number of ways to make the value v; index 0..amount,
    # hence amount + 1 slots.
    num_ways = [0] * (amount + 1)
    # Base case: there is exactly one way to make 0 — choose no coins at all
    # (the empty combination). This seeds every other count.
    num_ways[0] = 1
    for coin in coins:
        # Starting the range at `coin` replaces the original
        # `if coin <= net_amount` guard: smaller amounts cannot use this coin.
        for net_amount in range(coin, amount + 1):
            num_ways[net_amount] += num_ways[net_amount - coin]
    return num_ways[amount]
class TestMakingChange(unittest.TestCase):
    """Unit tests for num_ways_of_making_change."""

    def test_1(self):
        # 4 can be built from {1, 2} as 1+1+1+1, 1+1+2, and 2+2.
        result = num_ways_of_making_change(4, [1, 2])
        self.assertEqual(result, 3)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
|
import { css } from '@emotion/react';
import { decidePalette } from '../lib/decidePalette';
// Circular contributor image that fills its container.
const contributorImage = css`
border-radius: 100%;
object-fit: cover;
height: 100%;
width: 100%;
`;
// Background colour shown behind the (possibly transparent) avatar image,
// taken from the decided palette.
const backgroundStyles = (palette: Palette) =>
css`
background-color: ${palette.background.avatar};
`;
/**
 * Renders a contributor avatar: a round image on a palette-derived
 * background colour decided from the article format.
 */
export const Avatar: React.FC<{
	imageSrc: string;
	imageAlt: string;
	format: ArticleFormat;
	containerPalette?: DCRContainerPalette;
}> = (props) => {
	const { imageSrc, imageAlt, format, containerPalette } = props;
	// Resolve the colour palette for this article format / container.
	const palette = decidePalette(format, containerPalette);
	return (
		<img
			alt={imageAlt}
			src={imageSrc}
			css={[backgroundStyles(palette), contributorImage]}
		/>
	);
};
|
#
# Cookbook Name:: learn_chef_httpd
# Recipe:: default
#
# Copyright (c) 2016 The Authors, All Rights Reserved.
# Install the Apache HTTP server package.
package 'apache2'
# Enable Apache at boot and start it immediately.
service 'apache2' do
action [:enable, :start]
end
# Group and system user that own the served content.
group 'webmaster'
user 'webmaster' do
group 'webmaster'
system true
shell '/bin/bash'
end
# Render the landing page from the cookbook template, owned by webmaster.
template '/var/www/html/index.html' do
source 'index.html.erb'
mode '0644'
owner 'webmaster'
group 'webmaster'
end
|
#!/bin/bash
set -euo pipefail

# Scheduled builds never publish releases.
if [ "schedule" == "${BUILDKITE_SOURCE}" ]; then
  exit 0
fi

echo "--- setup"
apt-get update
# Fixed typo: '-yy' -> '-y' (the duplicated flag was redundant).
apt-get install -y curl jq rubygems

git config --global user.email "sorbet+bot@stripe.com"
git config --global user.name "Sorbet build farm"

# Only pushes from master perform real releases; everything else is a dry run.
dryrun="1"
if [ "$BUILDKITE_BRANCH" == 'master' ]; then
  dryrun=""
fi

# Version scheme: <prefix>.<commit count>, plus a long form that embeds the
# commit date and short hash for tagging.
git_commit_count=$(git rev-list --count HEAD)
prefix="0.4"
# Use $prefix instead of repeating the literal so the two stay in sync.
release_version="${prefix}.${git_commit_count}"
long_release_version="${release_version}.$(git log --format=%cd-%h --date=format:%Y%m%d%H%M%S -1)"

# Fixed typo in the log message: "Dowloading" -> "Downloading".
echo "--- Downloading artifacts"
rm -rf release
rm -rf _out_
buildkite-agent artifact download "_out_/**/*" .
echo "--- releasing sorbet.run"
# Publish the WebAssembly build to the sorbet.run playground repository.
rm -rf sorbet.run
git clone git@github.com:sorbet/sorbet.run.git
tar -xvf ./_out_/webasm/sorbet-wasm.tar ./sorbet-wasm.wasm ./sorbet-wasm.js
mv sorbet-wasm.wasm sorbet.run/docs
mv sorbet-wasm.js sorbet.run/docs
pushd sorbet.run/docs
git add sorbet-wasm.wasm sorbet-wasm.js
# Only commit/push when the wasm artifacts actually changed.
dirty=
git diff-index --quiet HEAD -- || dirty=1
if [ "$dirty" != "" ]; then
echo "$BUILDKITE_COMMIT" > sha.html
git add sha.html
git commit -m "Updated site - $(date -u +%Y-%m-%dT%H:%M:%S%z)"
if [ "$dryrun" = "" ]; then
git push
fi
else
echo "Nothing to update"
fi
popd
rm -rf sorbet.run
echo "--- releasing sorbet.org"
# Publish the website artifact to the gh-pages branch of this repository.
git fetch origin gh-pages
current_rev=$(git rev-parse HEAD)
git checkout gh-pages
# Remove all tracked files, but leave untracked files (like _out_) untouched
git rm -rf '*'
tar -xjf _out_/website/website.tar.bz2 .
git add .
# _out_ is build output, not site content — keep it out of the commit.
git reset HEAD _out_
dirty=
git diff-index --quiet HEAD -- || dirty=1
if [ "$dirty" != "" ]; then
echo "$BUILDKITE_COMMIT" > sha.html
git add sha.html
git commit -m "Updated site - $(date -u +%Y-%m-%dT%H:%M:%S%z)"
if [ "$dryrun" = "" ]; then
git push origin gh-pages
# For some reason, GitHub Pages won't automatically build for us on push
# We have a ticket open with GitHub to investigate why.
# For now, we trigger a build manually.
curl \
-X POST \
--netrc \
-H "Accept: application/vnd.github.mister-fantastic-preview+json" \
"https://api.github.com/repos/sorbet/sorbet/pages/builds"
fi
echo "pushed an update"
else
echo "nothing to update"
fi
# Return the working tree to the revision we started from.
git checkout -f "$current_rev"
echo "--- publishing gems to RubyGems.org"
# Write the RubyGems API key where `gem push` expects it; the file must be
# private (0600) or the gem tool refuses to use it.
mkdir -p "$HOME/.gem"
printf -- $'---\n:rubygems_api_key: %s\n' "$RUBY_GEMS_API_KEY" > "$HOME/.gem/credentials"
chmod 600 "$HOME/.gem/credentials"
# https://stackoverflow.com/a/8351489
# Run a command, retrying with exponential backoff (1s, 2s, 4s, 8s) up to
# 5 attempts. Exits the whole script with status 1 if every attempt fails.
with_backoff() {
  local max_attempts=5
  local delay=1 # doubles each failure
  local tries=0

  while true; do
    tries=$(( tries + 1 ))
    echo "Attempt $tries"

    # Success on any attempt ends the retry loop immediately.
    if "$@"; then
      return 0
    fi

    if (( tries >= max_attempts )); then
      echo "'$1' failed $max_attempts times. Quitting." 1>&2
      exit 1
    fi

    echo "'$1' failed. Retrying in ${delay}s..." 1>&2
    sleep $delay
    delay=$(( delay * 2 ))
  done
}
# Publish the built gems (skipped on dry runs).
if [ "$dryrun" = "" ]; then
# push the sorbet-static gems first, in case they fail. We don't want to end
# up in a weird state where 'sorbet' requires a pinned version of
# sorbet-static, but the sorbet-static gem push failed.
#
# (By failure here, we mean that RubyGems.org 502'd for some reason.)
for gem_archive in "_out_/gems/sorbet-static-$release_version"-*.gem; do
with_backoff gem push "$gem_archive"
done
with_backoff gem push "_out_/gems/sorbet-runtime-$release_version.gem"
with_backoff gem push "_out_/gems/sorbet-$release_version.gem"
fi
echo "--- making a github release"
echo releasing "${long_release_version}"
# Tag the current commit with the long version (force in case it exists).
git tag -f "${long_release_version}"
if [ "$dryrun" = "" ]; then
git push origin "${long_release_version}"
fi
# Assemble the release directory: flatten gems, drop intermediate archives
# (website tarball, wasm tar) that were already published above.
mkdir release
cp -R _out_/* release/
mv release/gems/* release
rmdir release/gems
rm release/website/website.tar.bz2
rmdir release/website
rm release/webasm/sorbet-wasm.tar
rmdir release/webasm
pushd release
# Collect every file (relative paths) to attach to the GitHub release.
files=()
while IFS='' read -r line; do files+=("$line"); done < <(find . -type f | sed 's#^./##')
release_notes="To use Sorbet add this line to your Gemfile:
\`\`\`
gem 'sorbet', '$prefix.$git_commit_count'
\`\`\`"
if [ "$dryrun" = "" ]; then
echo "$release_notes" | ../.buildkite/tools/gh-release.sh sorbet/sorbet "${long_release_version}" -- "${files[@]}"
fi
popd
|
TERMUX_PKG_HOMEPAGE=https://www.gnu.org/software/m4/m4.html
TERMUX_PKG_DESCRIPTION="Traditional Unix macro processor"
TERMUX_PKG_LICENSE="GPL-3.0"
TERMUX_PKG_VERSION=1.4.18
TERMUX_PKG_REVISION=3
TERMUX_PKG_SRCURL=https://mirrors.kernel.org/gnu/m4/m4-${TERMUX_PKG_VERSION}.tar.xz
TERMUX_PKG_SHA256=f2c1e86ca0a404ff281631bdc8377638992744b175afb806e25871a24a934e07
# Override configure's autodetection to report <spawn.h> as unavailable
# (presumably posix_spawn is unusable on the target — TODO confirm).
TERMUX_PKG_EXTRA_CONFIGURE_ARGS="
ac_cv_header_spawn_h=no
"
|
""" test the automechanic.parse.chemkin module
"""
from __future__ import unicode_literals
from builtins import open
import os
from automechanic.parse import chemkin
# Directory containing this test module.
PATH = os.path.dirname(os.path.realpath(__file__))
# Example mechanism directories shipped with the repository.
NATGAS_PATH = os.path.join(PATH, '../../../examples/natgas')
HEPTANE_PATH = os.path.join(PATH, '../../../examples/heptane')
def test__reaction_data():
    """ test chemkin.reaction_data
    """
    mech_txt = os.path.join(HEPTANE_PATH, 'mechanism.txt')
    # Use a context manager so the file handle is closed deterministically
    # (the original left it open until garbage collection).
    with open(mech_txt, encoding='utf8', errors='ignore') as mech_file:
        mech_str = mech_file.read()
    rxn_dat_lst = chemkin.reaction_data(mech_str)
    assert len(rxn_dat_lst) == 5336
def test__thermo_data():
    """ test chemkin.thermo_data
    """
    ther_txt = os.path.join(HEPTANE_PATH, 'thermo_data.txt')
    # Use a context manager so the file handle is closed deterministically.
    with open(ther_txt, encoding='utf8', errors='ignore') as ther_file:
        ther_str = ther_file.read()
    thm_dat_lst = chemkin.thermo_data(ther_str)
    assert len(thm_dat_lst) == 1268
def test__reaction_unit_names():
    """ test chemkin.reaction_unit_names()
    """
    # Heptane mechanism declares no units.
    mech_txt = os.path.join(HEPTANE_PATH, 'mechanism.txt')
    with open(mech_txt, encoding='utf8', errors='ignore') as mech_file:
        mech_str = mech_file.read()
    unts = chemkin.reaction_unit_names(mech_str)
    assert unts == (None, None)
    # Natural gas mechanism declares explicit units.
    mech_txt = os.path.join(NATGAS_PATH, 'mechanism.txt')
    with open(mech_txt, encoding='utf8', errors='ignore') as mech_file:
        mech_str = mech_file.read()
    unts = chemkin.reaction_unit_names(mech_str)
    assert unts == ('MOLES', 'KCAL/MOLE')
def test__thermo_t_common_default():
    """ test chemkin.thermo_t_common_default()
    """
    ther_txt = os.path.join(HEPTANE_PATH, 'thermo_data.txt')
    # Use context managers so file handles are closed deterministically.
    with open(ther_txt, encoding='utf8', errors='ignore') as ther_file:
        ther_str = ther_file.read()
    tmp_com_def = chemkin.thermo_t_common_default(ther_str)
    assert tmp_com_def == 1000.
    mech_txt = os.path.join(NATGAS_PATH, 'mechanism.txt')
    with open(mech_txt, encoding='utf8', errors='ignore') as mech_file:
        mech_str = mech_file.read()
    tmp_com_def = chemkin.thermo_t_common_default(mech_str)
    assert tmp_com_def == 1000.
# Allow running this module directly as a quick smoke test.
if __name__ == '__main__':
    test__thermo_t_common_default()
    test__thermo_data()
    test__reaction_unit_names()
    test__reaction_data()
|
#!/bin/bash
# Download the Silesia compression corpus, repack it as a tar archive,
# and run the generator script against it.
# Fail fast: the original continued to tar/generate even when the
# download or unzip failed, producing a bogus archive.
set -euo pipefail

mkdir silesia
cd silesia
wget http://sun.aei.polsl.pl/\~sdeor/corpus/silesia.zip
unzip silesia.zip
rm silesia.zip
cd ..
tar -cvf silesia.tar silesia
rm -r silesia
python3 silesia_gen.py
|
package com.rostegg.android.hotspot_scanner.services
import android.content.ComponentCallbacks
import android.content.res.Configuration
import android.util.Log
import com.rostegg.android.hotspot_scanner.services.scanners.WifiScannerManager
import org.koin.android.ext.android.inject
class SessionManager : ComponentCallbacks {
    // fix of inline type mismatch
    override fun onConfigurationChanged(newConfig: Configuration) {}
    override fun onLowMemory() {}

    // True while a scanning session is active.
    var state = false

    private val wifiScannerManager: WifiScannerManager by inject()
    private val sessionStorage: SessionStorage by inject()

    // Toggle the session flag, then either start a new session or save the
    // one that just ended.
    fun changeState() {
        Log.i("Session", "Changing session")
        state = !state
        when {
            state -> createSession()
            else -> saveSession()
        }
    }

    // NOTE(review): persisting a finished session is not implemented yet.
    fun saveSession() {
    }

    // Capture a Wi-Fi scan and append it to the session storage.
    fun createSession() {
        Log.i("Session", "Creating session")
        val scanResults = wifiScannerManager.getScanResult()
        sessionStorage.addToStorage(scanResults)
        Log.i("SessionStorage", sessionStorage.currentStorage.toString())
    }
}
|
# rollupdemos
A collection of Rollup demos.
# Features
* build ES6 modules
* ESlint
# License
MIT
|
# Collections
Collections implementation with generators. This thing is just for practice
## How to use
Read the tests
## Scripts
`composer fix`: Run php-cs-fixer
`composer test`: Run test suite
`composer coverage`: Generate coverage report
`composer analyze`: Generate static analysis report
## Roadmap
### Critical
- [ ] Refactor to generators
- [ ] Rust bindings? (@MichielBier)
### The basics & too lazy to think of a category
- [x] each
- [x] reduce
- [ ] tap
- [x] sum
- [ ] average/avg
- [ ] contains
- [x] first
- [x] last
- [ ] merge
- [ ] isEmpty (refactor empty conditionals afterwards)
### Filtering
- [ ] filter
- [ ] except
- [ ] only
### Sorting
- [ ] sort
- [ ] sortBy
- [ ] sortByDesc
### Transforming
- [x] map
- [ ] flatMap
- [ ] mapWithKeys
- [ ] mapSpread
- [ ] count
- [ ] keys
- [ ] values
### Representations
- [ ] all/toArray
- [ ] toJson
### Conditionals
- [x] when
- [x] whenEmpty
- [x] whenNotEmpty
- [x] unless
- [x] unlessEmpty
- [x] unlessNotEmpty
|
use crate::Errno;
use crate::Sysno;
// Raw syscall stubs implemented outside this module; each takes the syscall
// number plus N arguments and returns the kernel's raw result word.
extern "C" {
fn __syscall0(nr: usize) -> usize;
fn __syscall1(nr: usize, arg1: usize) -> usize;
fn __syscall2(nr: usize, arg1: usize, arg2: usize) -> usize;
fn __syscall3(nr: usize, arg1: usize, arg2: usize, arg3: usize) -> usize;
fn __syscall4(
nr: usize,
arg1: usize,
arg2: usize,
arg3: usize,
arg4: usize,
) -> usize;
fn __syscall5(
nr: usize,
arg1: usize,
arg2: usize,
arg3: usize,
arg4: usize,
arg5: usize,
) -> usize;
fn __syscall6(
nr: usize,
arg1: usize,
arg2: usize,
arg3: usize,
arg4: usize,
arg5: usize,
arg6: usize,
) -> usize;
}
/// Issues a system call with 0 arguments.
///
/// The raw kernel result is converted to `Result` via `Errno::from_ret`.
///
/// # Safety
///
/// Running a system call is inherently unsafe. It is the caller's
/// responsibility to ensure safety.
#[inline(always)]
pub unsafe fn syscall0(nr: Sysno) -> Result<usize, Errno> {
Errno::from_ret(__syscall0(nr as usize))
}
/// Issues a system call with 1 argument.
///
/// # Safety
///
/// Running a system call is inherently unsafe. It is the caller's
/// responsibility to ensure safety.
#[inline(always)]
pub unsafe fn syscall1(nr: Sysno, a1: usize) -> Result<usize, Errno> {
Errno::from_ret(__syscall1(nr as usize, a1))
}
/// Issues a system call with 2 arguments.
///
/// # Safety
///
/// Running a system call is inherently unsafe. It is the caller's
/// responsibility to ensure safety.
#[inline(always)]
pub unsafe fn syscall2(
nr: Sysno,
a1: usize,
a2: usize,
) -> Result<usize, Errno> {
Errno::from_ret(__syscall2(nr as usize, a1, a2))
}
/// Issues a system call with 3 arguments.
///
/// # Safety
///
/// Running a system call is inherently unsafe. It is the caller's
/// responsibility to ensure safety.
#[inline(always)]
pub unsafe fn syscall3(
nr: Sysno,
a1: usize,
a2: usize,
a3: usize,
) -> Result<usize, Errno> {
Errno::from_ret(__syscall3(nr as usize, a1, a2, a3))
}
/// Issues a system call with 4 arguments.
///
/// # Safety
///
/// Running a system call is inherently unsafe. It is the caller's
/// responsibility to ensure safety.
#[inline(always)]
pub unsafe fn syscall4(
nr: Sysno,
a1: usize,
a2: usize,
a3: usize,
a4: usize,
) -> Result<usize, Errno> {
Errno::from_ret(__syscall4(nr as usize, a1, a2, a3, a4))
}
/// Issues a system call with 5 arguments.
///
/// # Safety
///
/// Running a system call is inherently unsafe. It is the caller's
/// responsibility to ensure safety.
#[inline(always)]
pub unsafe fn syscall5(
nr: Sysno,
a1: usize,
a2: usize,
a3: usize,
a4: usize,
a5: usize,
) -> Result<usize, Errno> {
Errno::from_ret(__syscall5(nr as usize, a1, a2, a3, a4, a5))
}
/// Issues a system call with 6 arguments.
///
/// # Safety
///
/// Running a system call is inherently unsafe. It is the caller's
/// responsibility to ensure safety.
#[inline(always)]
pub unsafe fn syscall6(
nr: Sysno,
a1: usize,
a2: usize,
a3: usize,
a4: usize,
a5: usize,
a6: usize,
) -> Result<usize, Errno> {
Errno::from_ret(__syscall6(nr as usize, a1, a2, a3, a4, a5, a6))
}
|
namespace FingerTree
{
using System;
using System.Collections.Generic;
using Stact.Data.Internal;
/// <summary>
/// Immutable sequence facade over a finger-tree backed <c>Seq&lt;T&gt;</c>.
/// Every mutating-style operation returns a new <c>FNSeq&lt;T&gt;</c>.
/// </summary>
public class FNSeq<T> //where T : IMeasured<uint>
{
// Underlying finger-tree sequence; never exposed directly.
private Seq<T> theSeq = null;
/// <summary>Creates an empty sequence.</summary>
public FNSeq()
{
theSeq = new Seq<T>(new List<T>());
}
/// <summary>Creates a sequence containing the items of the enumerable, in order.</summary>
public FNSeq(IEnumerable<T> seqIterator)
{
theSeq = new Seq<T>(new List<T>());
foreach (T t in seqIterator)
theSeq = (Seq<T>)(theSeq.PushBack(new SizedElement<T>(t)));
}
// Internal wrapper constructor around an existing Seq<T>.
protected FNSeq(Seq<T> aSeq)
{
theSeq = aSeq;
}
/// <summary>Number of elements in the sequence.</summary>
public uint Length()
{
return this.theSeq.length;
}
/// <summary>Materializes the sequence as a plain list of element values.</summary>
public List<T> ToSequence()
{
List<T> lstResult = new List<T>();
foreach (SizedElement<T> elem in theSeq.ToSequence())
lstResult.Add(elem.Value);
return lstResult;//.ToArray();
}
/// <summary>Returns the element at index <paramref name="ind"/> (0-based).</summary>
/// <exception cref="ArgumentOutOfRangeException">If the index is negative or past the end.</exception>
public T itemAt(int ind)
{
if (ind < 0 || ind >= Length())
throw new ArgumentOutOfRangeException();
//else
return theSeq.ElemAt(((uint)ind));
}
/// <summary>Returns a new sequence with the elements in reverse order.</summary>
public FNSeq<T> reverse()
{
return new FNSeq<T>((Seq<T>)(theSeq.Reverse()));
}
/// <summary>Returns a new sequence that is this sequence followed by <paramref name="seq2"/>.</summary>
public FNSeq<T> Merge(FNSeq<T> seq2)
{
return new FNSeq<T>(new Seq<T>(theSeq.Merge(seq2.theSeq.treeRep)));
}
/// <summary>Returns a new sequence without the first <paramref name="length"/> elements.</summary>
public FNSeq<T> skip(int length)
{
return new FNSeq<T>
(
new Seq<T>
(
this.theSeq.dropUntil(new MPredicate<uint>
(FP.Curry<uint, uint, bool>(theLTMethod, (uint)length))
)
)
);
}
/// <summary>Returns a new sequence containing only the first <paramref name="length"/> elements.</summary>
public FNSeq<T> take(int length)
{
return new FNSeq<T>
(
new Seq<T>
(
this.theSeq.takeUntil(new MPredicate<uint>
(FP.Curry<uint, uint, bool>(theLTMethod, (uint)length))
)
)
);
}
/// <summary>
/// Returns the subsequence of up to <paramref name="subLength"/> elements starting at
/// <paramref name="startInd"/>. Out-of-range arguments are clamped; a non-positive
/// length or empty sequence returns the sequence unchanged.
/// </summary>
public FNSeq<T> subsequence(int startInd, int subLength)
{
uint theLength = theSeq.length;
if (theLength == 0 || subLength <= 0)
return this;
//else
if (startInd < 0)
startInd = 0;
if (startInd + subLength > theLength)
subLength = (int)(theLength - startInd);
// Now ready to do the real work
// Split twice: first drop the prefix before startInd, then keep subLength items.
FNSeq<T> fsResult =
new FNSeq<T>(
(Seq<T>)
(
((Seq<T>)
(theSeq.SeqSplit
(
new MPredicate<uint>
(FP.Curry<uint, uint, bool>(theLTMethod, (uint)startInd))
).Second
)
).SeqSplit
(new MPredicate<uint>
(FP.Curry<uint, uint, bool>(theLTMethod, (uint)subLength))
).First
)
);
return fsResult;
}
/// <summary>Returns a new sequence with the element at <paramref name="ind"/> removed.</summary>
/// <exception cref="ArgumentOutOfRangeException">If the index is negative or past the end.</exception>
public FNSeq<T> remove(int ind)
{
if (ind < 0 || ind >= Length())
throw new ArgumentOutOfRangeException();
//else
return new FNSeq<T>(theSeq.RemoveAt((uint)(ind)));
}
// this inserts a whole sequence, so we cannot just use Seq.InsertAt()
/// <summary>Returns a new sequence with <paramref name="fSeq2"/> inserted before index <paramref name="ind"/>.</summary>
/// <exception cref="ArgumentOutOfRangeException">If the index is negative or past the end.</exception>
public FNSeq<T> insert_before(int ind, FNSeq<T> fSeq2)
{
if (ind < 0 || ind >= this.Length())
throw new ArgumentOutOfRangeException();
//else
// Split at the insertion point, then concatenate prefix + fSeq2 + suffix.
Pair<FTreeM<SizedElement<T>, uint>, FTreeM<SizedElement<T>, uint>> theSplit =
theSeq.SeqSplit
(new MPredicate<uint>
(
FP.Curry<uint, uint, bool>(theLTMethod, (uint)ind - 1)
)
);
FNSeq<T> fs1 = new FNSeq<T>((Seq<T>)(theSplit.First));
FNSeq<T> fs3 = new FNSeq<T>((Seq<T>)(theSplit.Second));
return fs1.Merge(fSeq2).Merge(fs3);
}
// Strict less-than predicate used (curried) for all split/take/drop measures.
bool theLTMethod(uint i1, uint i2)
{
return i1 < i2;
}
}
}
|
import { join } from "../deps.ts";
/**
 * @description
 * Attempts to get the default shell of the user. This is primarily for use in
 * the `install` and `uninstall` commands, where we edit a user's config.
 *
 * Returns the path to the shell's config file under the given home directory.
 * Throws if $SHELL is unset or not one of zsh/fish/bash.
 */
export default function getDefaultShellName($HOME: string): string {
  const $SHELL = Deno.env.get("SHELL") || "";
  if ($SHELL.includes("zsh")) {
    return join($HOME, ".zshrc");
  }
  if ($SHELL.includes("fish")) {
    // Bug fix: this previously joined against the literal string "$HOME"
    // instead of the $HOME parameter, producing a bogus relative path.
    return join($HOME, ".config/fish/config.fish");
  }
  if ($SHELL.includes("bash")) {
    return join($HOME, ".bashrc");
  }
  throw new Error(`Did not recognize default shell: ${$SHELL}`);
}
|
#!/bin/bash
#
# Copyright (c) 2019-2020 P3TERX <https://p3terx.com>
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
# https://github.com/P3TERX/Actions-OpenWrt
# Description: OpenWrt DIY script part 1 (Before Update feeds)
#
# Switch the kernel version (disabled)
# sed -i 's/KERNEL_PATCHVER:=5.4/KERNEL_PATCHVER:=5.10/g' ./target/linux/x86/Makefile
# Add extra packages (the commented clones below are currently disabled)
# git clone https://github.com/vernesong/OpenClash.git package/lean/OpenClash
# git clone https://github.com/riverscn/openwrt-iptvhelper.git package/lean/openwrt-iptvhelper
# git clone https://github.com/kiddin9/luci-app-dnsfilter.git package/lean/luci-app-dnsfilter
git clone https://github.com/esirplayground/luci-app-poweroff.git package/lean/luci-app-poweroff
# Remove default packages
sed -i 's/luci-app-wireguard//g' target/linux/x86/Makefile
|
/*
Control program for the ImDisk Virtual Disk Driver for Windows NT/2000/XP.
Copyright (C) 2004-2015 Olof Lagerkvist.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
*/
#include <windows.h>
#include <winioctl.h>
#include <shellapi.h>
#include <shlobj.h>
#include <dbt.h>
#include <stdio.h>
#include <stdlib.h>
#include "..\inc\ntumapi.h"
#include "..\inc\imdisk.h"
#include "..\inc\imdproxy.h"
#pragma comment(lib, "ntdll.lib")
// Exit codes returned by the imdisk command-line tool.
// IMDISK_CLI_ERROR_FATAL (-1) marks unrecoverable failures.
enum
{
IMDISK_CLI_SUCCESS = 0,
IMDISK_CLI_ERROR_DEVICE_NOT_FOUND = 1,
IMDISK_CLI_ERROR_DEVICE_INACCESSIBLE = 2,
IMDISK_CLI_ERROR_CREATE_DEVICE = 3,
IMDISK_CLI_ERROR_DRIVER_NOT_INSTALLED = 4,
IMDISK_CLI_ERROR_DRIVER_WRONG_VERSION = 5,
IMDISK_CLI_ERROR_DRIVER_INACCESSIBLE = 6,
IMDISK_CLI_ERROR_SERVICE_INACCESSIBLE = 7,
IMDISK_CLI_ERROR_FORMAT = 8,
IMDISK_CLI_ERROR_BAD_MOUNT_POINT = 9,
IMDISK_CLI_ERROR_BAD_SYNTAX = 10,
IMDISK_CLI_ERROR_NOT_ENOUGH_MEMORY = 11,
IMDISK_CLI_ERROR_PARTITION_NOT_FOUND = 12,
IMDISK_CLI_ERROR_WRONG_SYNTAX = 13,
IMDISK_CLI_NO_FREE_DRIVE_LETTERS = 14,
IMDISK_CLI_ERROR_FATAL = -1
};
//#define DbgOemPrintF(x) ImDiskOemPrintF x
// Debug printing is compiled out; the macro expands to nothing.
#define DbgOemPrintF(x)
/// Macros for "human readable" file sizes.
// Powers of two as unsigned 64-bit constants.
#define _1KB (1ui64<<10)
#define _1MB (1ui64<<20)
#define _1GB (1ui64<<30)
#define _1TB (1ui64<<40)
// Convert a byte count to the given unit as a double.
#define _B(n) ((double)(n))
#define _KB(n) ((double)(n)/_1KB)
#define _MB(n) ((double)(n)/_1MB)
#define _GB(n) ((double)(n)/_1GB)
#define _TB(n) ((double)(n)/_1TB)
// _h(n): scale n to the largest unit it fills; _p(n): matching unit suffix.
#define _h(n) ((n)>=_1TB ? _TB(n) : (n)>=_1GB ? _GB(n) : \
(n)>=_1MB ? _MB(n) : (n)>=_1KB ? _KB(n) : (n))
#define _p(n) ((n)>=_1TB ? "TB" : (n)>=_1GB ? "GB" : \
(n)>=_1MB ? "MB" : (n)>=_1KB ? "KB": (n)==1 ? "byte" : "bytes")
// Suppress specific MSVC static-analysis warnings — NOTE(review): verify
// these suppressions are still needed with the current toolset.
#pragma warning(disable: 6255)
#pragma warning(disable: 28719)
#pragma warning(disable: 28159)
void __declspec(noreturn)
ImDiskSyntaxHelp()
{
int rc = fputs
("Control program for the ImDisk Virtual Disk Driver.\r\n"
"For copyrights and credits, type imdisk --version\r\n"
"\n"
"Syntax:\r\n"
"imdisk -a -t type -m mountpoint [-n] [-o opt1[,opt2 ...]] [-f|-F file]\r\n"
" [-s size] [-b offset] [-v partition] [-S sectorsize] [-u unit]\r\n"
" [-x sectors/track] [-y tracks/cylinder] [-p \"format-parameters\"] [-P]\r\n"
"imdisk -d|-D [-u unit | -m mountpoint] [-P]\r\n"
"imdisk -R -u unit\r\n"
"imdisk -l [-u unit | -m mountpoint]\r\n"
"imdisk -e [-s size] [-o opt1[,opt2 ...]] [-u unit | -m mountpoint]\r\n"
"\n"
"-a Attach a virtual disk. This will configure and attach a virtual disk\r\n"
" with the parameters specified and attach it to the system.\r\n"
"\n"
"-d Detach a virtual disk from the system and release all resources.\r\n"
" Use -D to force removal even if the device is in use.\r\n"
"\n"
"-R Emergency removal of hung virtual disks. Should only be used as a last\r\n"
" resort when a virtual disk has some kind of problem that makes it\r\n"
" impossible to detach it in a safe way. This could happen for example\r\n"
" for proxy-type virtual disks sometimes when proxy communication fails.\r\n"
" Note that this does not attempt to dismount filesystem or lock the\r\n"
" volume in any way so there is a potential risk of data loss. Use with\r\n"
" caution!\r\n"
"\n"
"-e Edit an existing virtual disk.\r\n"
"\n"
" Along with the -s parameter extends the size of an existing virtual\r\n"
" disk. Note that even if the disk can be extended successfully, the\r\n"
" existing filesystem on it can only be extended to fill the new size\r\n"
" without re-formatting if you are running Windows 2000 or later and the\r\n"
" current filesystem is NTFS.\r\n"
"\n"
" Along with the -o parameter changes media characteristics for an\r\n"
" existing virtual disk. Options that can be changed on existing virtual\r\n"
" disks are those specifying wether or not the media of the virtual disk\r\n"
" should be writable and/or removable.\r\n"
"\n"
"-t type\r\n"
" Select the backingstore for the virtual disk.\r\n"
"\n"
"vm Storage for this type of virtual disk is allocated from virtual memory\r\n"
" in the system process. If a file is specified with -f that file is\r\n"
" is loaded into the memory allocated for the disk image.\r\n"
"\n"
"file A file specified with -f file becomes the backingstore for this\r\n"
" virtual disk.\r\n"
"\n"
"proxy The actual backingstore for this type of virtual disk is controlled by\r\n"
" an ImDisk storage server accessed by the driver on this machine by\r\n"
" sending storage I/O request through a named pipe specified with -f.\r\n"
"\n"
"-f file or -F file\r\n"
" Filename to use as backingstore for the file type virtual disk, to\r\n"
" initialize a vm type virtual disk or name of a named pipe for I/O\r\n"
" client/server communication for proxy type virtual disks. For proxy\r\n"
" type virtual disks \"file\" may be a COM port or a remote server\r\n"
" address if the -o options includes \"ip\" or \"comm\".\r\n"
"\n"
" Instead of using -f to specify 'DOS-style' paths, such as\r\n"
" C:\\dir\\image.bin or \\\\server\\share\\image.bin, you can use -F to\r\n"
" specify 'NT-style' native paths, such as\r\n"
" \\Device\\Harddisk0\\Partition1\\image.bin. This makes it possible to\r\n"
" specify files on disks or communication devices that currently have no\r\n"
" drive letters assigned.\r\n"
"\n"
"-l List configured devices. If given with -u or -m, display details about\r\n"
" that particular device.\r\n"
"\n"
"-n When printing ImDisk device names, print only the unit number without\r\n"
" the \\Device\\ImDisk prefix.\r\n"
"\n"
"-s size\r\n"
" Size of the virtual disk. Size is number of bytes unless suffixed with\r\n"
" a b, k, m, g, t, K, M, G or T which denotes number of 512-byte blocks,\r\n"
" thousand bytes, million bytes, billion bytes, trillion bytes,\r\n"
" kilobytes, megabytes, gigabytes and terabytes respectively. The suffix\r\n"
" can also be % to indicate percentage of free physical memory which\r\n"
" could be useful when creating vm type virtual disks. It is optional to\r\n"
" specify a size unless the file to use for a file type virtual disk does\r\n"
" not already exist or when a vm type virtual disk is created without\r\n"
" specifying an initialization image file using the -f or -F. If size is\r\n"
" specified when creating a file type virtual disk, the size of the file\r\n"
" used as backingstore for the virtual disk is adjusted to the new size\r\n"
" specified with this size option.\r\n"
"\n"
" The size can be a negative value to indicate the size of free physical\r\n"
" memory minus this size. If you e.g. type -400M the size of the virtual\r\n"
" disk will be the amount of free physical memory minus 400 MB.\r\n"
"\n"
"-b offset\r\n"
" Specifies an offset in an image file where the virtual disk begins. All\r\n"
" offsets of I/O operations on the virtual disk will be relative to this\r\n"
" offset. This parameter is particularily useful when mounting a specific\r\n"
" partition in an image file that contains an image of a complete hard\r\n"
" disk, not just one partition. This parameter has no effect when\r\n"
" creating a blank vm type virtual disk. When creating a vm type virtual\r\n"
" disk with a pre-load image file specified with -f or -F parameters, the\r\n"
" -b parameter specifies an offset in the image file where the image to\r\n"
" be loaded into the vm type virtual disk begins.\r\n"
"\n"
" Specify auto as offset to automatically select offset for a few known\r\n"
" non-raw disk image file formats. Currently auto-selection is supported\r\n"
" for Nero .nrg and Microsoft .sdi image files.\r\n"
"\n"
"-v partition\r\n"
" Specifies which partition to mount when mounting a raw hard disk image\r\n"
" file containing a master boot record and partitions.\r\n"
"\n"
" Specify number 1-4 to mount a partition from the primary partition\r\n"
" table and 5-8 to mount a partition from an extended partition table.\r\n"
"\n"
"-S sectorsize\r\n"
" Sectorsize to use for the virtual disk device. Default value is 512\r\n"
" bytes except for CD-ROM/DVD-ROM style devices where 2048 bytes is used\r\n"
" by default.\r\n"
"\n"
"-x sectors/track\r\n"
" See the description of the -y option below.\r\n"
"\n"
"-y tracks/cylinder\r\n"
" The -x and -y options can be used to specify a synthetic geometry.\r\n"
" This is useful for constructing bootable images for later download to\r\n"
" physical devices. Default values depend on the device-type specified\r\n"
" with the -o option. If the 'fd' option is specified the default values\r\n"
" are based on the virtual disk size, e.g. a 1440K image gets 2\r\n"
" tracks/cylinder and 18 sectors/track.\r\n"
"\n"
"-p \"format-parameters\"\r\n"
" If -p is specified the 'format' command is invoked to create a\r\n"
" filesystem when the new virtual disk has been created.\r\n"
" \"format-parameters\" must be a parameter string enclosed within\r\n"
" double-quotes. The string is added to the command line that starts\r\n"
" 'format'. You usually specify something like \"/fs:ntfs /q /y\", that\r\n"
" is, create an NTFS filesystem with quick formatting and without user\r\n"
" interaction.\r\n"
"\n"
"-o option\r\n"
" Set or reset options.\r\n"
"\n"
"ro Creates a read-only virtual disk. For vm type virtual disks, this\r\n"
" option can only be used if the -f option is also specified.\r\n"
"\n"
"rw Specifies that the virtual disk should be read/writable. This is the\r\n"
" default setting. It can be used with the -e parameter to set an\r\n"
" existing read-only virtual disk writable.\r\n"
"\n"
"sparse Sets NTFS sparse attribute on image file. This has no effect on proxy\r\n"
" or vm type virtual disks.\r\n"
"\n"
"rem Specifies that the device should be created with removable media\r\n"
" characteristics. This changes the device properties returned by the\r\n"
" driver to the system. For example, this changes how some filesystems\r\n"
" cache write operations.\r\n"
"\n"
"fix Specifies that the media characteristics of the virtual disk should be\r\n"
" fixed media, as opposed to removable media specified with the rem\r\n"
" option. Fixed media is the default setting. The fix option can be used\r\n"
" with the -e parameter to set an existing removable virtual disk as\r\n"
" fixed.\r\n"
"\n"
"saved Clears the 'image modified' flag from an existing virtual disk. This\r\n"
" flag is set by the driver when an image is modified and is displayed\r\n"
" in the -l output for a virtual disk. The 'saved' option is only valid\r\n"
" with the -e parameter.\r\n"
"\n"
" Note that virtual floppy or CD/DVD-ROM drives are always read-only and\r\n"
" removable devices and that cannot be changed.\r\n"
"\n"
"cd Creates a virtual CD-ROM/DVD-ROM. This is the default if the file\r\n"
" name specified with the -f option ends with either .iso, .nrg or .bin\r\n"
" extensions.\r\n"
"\n"
"fd Creates a virtual floppy disk. This is the default if the size of the\r\n"
" virtual disk is any of 160K, 180K, 320K, 360K, 640K, 720K, 820K, 1200K,\r\n"
" 1440K, 1680K, 1722K, 2880K, 123264K or 234752K.\r\n"
"\n"
"hd Creates a virtual fixed disk partition. This is the default unless\r\n"
" file extension or size match the criterias for defaulting to the cd or\r\n"
" fd options.\r\n"
"\n"
"raw Creates a device object with \"unknown\" device type. The system will not\n"
" attempt to do anything by its own with such devices, but it could be\n"
" useful in combination with third-party drivers that can provide further\n"
" device objects using this virtual disk device as a backing store.\n"
"\n"
"ip Can only be used with proxy-type virtual disks. With this option, the\r\n"
" user-mode service component is initialized to connect to an ImDisk\r\n"
" storage server using TCP/IP. With this option, the -f switch specifies\r\n"
" the remote host optionally followed by a colon and a port number to\r\n"
" connect to.\r\n"
"\n"
"comm Can only be used with proxy-type virtual disks. With this option, the\r\n"
" user-mode service component is initialized to connect to an ImDisk\r\n"
" storage server through a COM port. With this option, the -f switch\r\n"
" specifies the COM port to connect to, optionally followed by a colon,\r\n"
" a space, and then a device settings string with the same syntax as the\r\n"
" MODE command.\r\n"
"\n"
"shm Can only be used with proxy-type virtual disks. With this option, the\r\n"
" driver communicates with a storage server on the same computer using\r\n"
" shared memory block to transfer I/O data.\r\n"
"\n"
"awe Can only be used with file-type virtual disks. With this option, the\r\n"
" driver copies contents of image file to physical memory. No changes are\r\n"
" written to image file. If this option is used in combination with no\r\n"
" image file name, a physical memory block will be used without loading\r\n"
" an image file onto it. In that case, -s parameter is needed to specify\r\n"
" size of memory block. This option requires awealloc driver, which\r\n"
" requires Windows 2000 or later.\r\n"
"\n"
"bswap Instructs driver to swap each pair of bytes read from or written to\r\n"
" image file. Useful when examining images from some embedded systems\r\n"
" and similar where data is stored in reverse byte order.\r\n"
"\n"
"shared Instructs driver to open image file in shared write mode even when\r\n"
" image is opened for writing. This can be useful to mount each partition\r\n"
" of a multi-partition image as separate virtual disks with different\r\n"
" image file offsets and sizes. It could potentially corrupt filesystems\r\n"
" if used with incorrect offset and size parameters so use with caution!\r\n"
"\n"
"par Parallel I/O. Valid for file-type virtual disks. With this flag set,\r\n"
" driver sends read and write requests for the virtual disk directly down\r\n"
" to the filesystem driver that handles the image file, within the same\r\n"
" thread context as the original request was made. In some scenarios this\r\n"
" flag can increase performance, particularly when you use several layers\r\n"
" of virtual disks backed by image files stored on other virtual disks,\r\n"
" network file shares or similar storage.\r\n"
"\n"
" This flag is not supported in all scenarios depending on other drivers\r\n"
" that need to complete requests to the image file. It could also degrade\r\n"
" performance or cause reads and writes to fail if underlying drivers\r\n"
" cannot handle I/O requests simultaneously.\r\n"
"\n"
"-u unit\r\n"
" Along with -a, request a specific unit number for the ImDisk device\r\n"
" instead of automatic allocation. Along with -d or -l specifies the\r\n"
" unit number of the virtual disk to remove or query.\r\n"
"\n"
"-m mountpoint\r\n"
" Specifies a drive letter or mount point for the new virtual disk, the\r\n"
" virtual disk to query or the virtual disk to remove. When creating a\r\n"
" new virtual disk you can specify #: as mountpoint in which case the\r\n"
" first unused drive letter is automatically used.\r\n"
"\n"
"-P Persistent. Along with -a, saves registry settings for re-creating the\r\n"
" same virtual disk automatically when driver is loaded, which usually\r\n"
" occurs during system startup. Along with -d or -D, existing such\r\n"
" settings for the removed virtual disk are also removed from registry.\r\n"
" There are some limitations to what settings could be saved in this way.\r\n"
" Only features directly implemented in the kernel level driver are\r\n"
" saved, so for example the -p switch to format a virtual disk will not\r\n"
" be saved.\r\n",
stderr);
if (rc > 0)
exit(IMDISK_CLI_ERROR_WRONG_SYNTAX);
else
exit(IMDISK_CLI_ERROR_FATAL);
}
// Prints out a FormatMessage style parameterized message to specified stream.
//
// Stream  - output stream, e.g. stdout or stderr.
// Message - FormatMessage-style template with %1, %2, ... inserts. Inserts
//           may carry type fields such as !ws! for wide strings.
// ...     - values substituted for the template inserts.
//
// Returns TRUE on success, FALSE if FormatMessageA fails (GetLastError gives
// the reason).
BOOL
ImDiskOemPrintF(FILE *Stream, LPCSTR Message, ...)
{
    va_list param_list;
    LPSTR lpBuf = NULL;
    BOOL result = FALSE;

    va_start(param_list, Message);

    // The low byte of the flags argument (78) is interpreted through
    // FORMAT_MESSAGE_MAX_WIDTH_MASK as a maximum output line width of 78
    // characters.
    // Bug fix: the last argument was corrupted to "¶m_list" (mangled
    // "&param_list"), which does not compile.
    if (FormatMessageA(78 |
        FORMAT_MESSAGE_ALLOCATE_BUFFER |
        FORMAT_MESSAGE_FROM_STRING, Message, 0, 0,
        (LPSTR)&lpBuf, 0, &param_list))
    {
        // Convert in-place from ANSI to the console OEM code page so the
        // text displays correctly in console windows.
        CharToOemA(lpBuf, lpBuf);
        fprintf(Stream, "%s\n", lpBuf);
        LocalFree(lpBuf);
        result = TRUE;
    }

    // Bug fix: va_end was missing on both success and failure paths.
    va_end(param_list);
    return result;
}
// Writes to stderr a caller-supplied prefix followed by the system error
// message text for the current Win32 "last error" code.
void
PrintLastError(LPCWSTR Prefix)
{
    LPSTR message = NULL;
    DWORD format_flags = FORMAT_MESSAGE_MAX_WIDTH_MASK |
        FORMAT_MESSAGE_ALLOCATE_BUFFER |
        FORMAT_MESSAGE_FROM_SYSTEM |
        FORMAT_MESSAGE_IGNORE_INSERTS;

    // On failure, fall through with a NULL message; ImDiskOemPrintF then
    // prints only the prefix insert.
    if (!FormatMessageA(format_flags, NULL, GetLastError(), 0,
        (LPSTR)&message, 0, NULL))
    {
        message = NULL;
    }

    ImDiskOemPrintF(stderr, "%1!ws! %2", Prefix, message);

    if (message != NULL)
        LocalFree(message);
}
// Guard for allocation results: returns Ptr unchanged when non-NULL,
// otherwise raises a non-continuable STATUS_NO_MEMORY exception.
LPVOID
ImDiskCliAssertNotNull(LPVOID Ptr)
{
    if (Ptr != NULL)
        return Ptr;

    // Non-continuable: execution does not resume past this point.
    RaiseException(STATUS_NO_MEMORY,
        EXCEPTION_NONCONTINUABLE,
        0,
        NULL);

    return Ptr;
}
// Checks current driver version for compatibility with this library and
// returns TRUE if found compatible, FALSE otherwise. Device parameter is
// either handle to driver control device or an existing ImDisk device.
BOOL
ImDiskCliCheckDriverVersion(HANDLE Device)
{
    DWORD reported_version;
    DWORD returned_size;

    if (!DeviceIoControl(Device,
        IOCTL_IMDISK_QUERY_VERSION,
        NULL, 0,
        &reported_version, sizeof reported_version,
        &returned_size, NULL))
    {
        DWORD error_code = GetLastError();

        // A device that does not understand this IOCTL is not ImDisk at all.
        if ((error_code == ERROR_INVALID_FUNCTION) ||
            (error_code == ERROR_NOT_SUPPORTED))
        {
            fputs("Error: Not an ImDisk device.\r\n", stderr);
        }
        else
        {
            PrintLastError(L"Error opening device:");
        }

        return FALSE;
    }

    // The IOCTL succeeded but returned no version data.
    if (returned_size < sizeof reported_version)
    {
        fprintf(stderr,
            "Wrong version of ImDisk Virtual Disk Driver.\n"
            "No current driver version information, expected: %u.%u.\n"
            "Please reinstall ImDisk and reboot if this issue persists.\n",
            HIBYTE(IMDISK_DRIVER_VERSION), LOBYTE(IMDISK_DRIVER_VERSION));
        return FALSE;
    }

    // Exact version match required between this tool and the driver.
    if (reported_version != IMDISK_DRIVER_VERSION)
    {
        fprintf(stderr,
            "Wrong version of ImDisk Virtual Disk Driver.\n"
            "Expected: %u.%u Installed: %u.%u\n"
            "Please re-install ImDisk and reboot if this issue persists.\n",
            HIBYTE(IMDISK_DRIVER_VERSION), LOBYTE(IMDISK_DRIVER_VERSION),
            HIBYTE(reported_version), LOBYTE(reported_version));
        return FALSE;
    }

    return TRUE;
}
// Verifies that DriveLetter currently resolves to ValidTargetPath.
// Returns TRUE when it does. Returns FALSE when the drive letter is
// undefined, points somewhere else, or the query fails.
BOOL
ImDiskCliValidateDriveLetterTarget(LPCWSTR DriveLetter,
LPCWSTR ValidTargetPath)
{
    WCHAR current_target[MAX_PATH];

    if (!QueryDosDevice(DriveLetter, current_target, _countof(current_target)))
    {
        // ERROR_FILE_NOT_FOUND just means the letter is not defined yet,
        // which is not worth reporting.
        if (GetLastError() != ERROR_FILE_NOT_FOUND)
            PrintLastError(L"Error verifying temporary drive letter:");

        return FALSE;
    }

    if (wcscmp(current_target, ValidTargetPath) == 0)
        return TRUE;

    ImDiskOemPrintF(stderr,
        "Drive letter %1!ws! points to '%2!ws!' instead of expected '%3!ws!'. "
        "Will attempt to redefine drive letter.",
        DriveLetter, current_target, ValidTargetPath);

    return FALSE;
}
// Formats a new virtual disk device by calling system supplied format.com
// command line tool. MountPoint parameter should be a drive letter followed by
// a colon, and FormatOptions parameter is passed to the format.com command
// line.
//
// DevicePath    - native path of the device, e.g. \Device\ImDisk0.
// DriveLetter   - existing drive letter for the device, or 0 to have a free
//                 letter picked and defined temporarily for the format run.
// FormatOptions - appended verbatim to the format.com command line.
//
// Returns IMDISK_CLI_SUCCESS or IMDISK_CLI_ERROR_FORMAT.
int
ImDiskCliFormatDisk(LPCWSTR DevicePath,
    WCHAR DriveLetter,
    LPCWSTR FormatOptions)
{
    // Named mutex serializes format operations across processes so that two
    // instances do not race on the same temporary drive letter.
    static const WCHAR format_mutex[] = L"ImDiskFormat";
    static const WCHAR format_cmd_prefix[] = L"format.com ";

    // First element is a placeholder character, replaced below with the
    // actual drive letter.
    WCHAR temporary_mount_point[] = { 255, L':', 0 };

    // Stack buffer sized for prefix + "X:" + " " + FormatOptions + NUL;
    // sizeof of the two arrays already includes their NUL terminators, which
    // exactly covers the added space character and final terminator.
#pragma warning(suppress: 6305)
    LPWSTR format_cmd = (LPWSTR)
        ImDiskCliAssertNotNull(_alloca(sizeof(format_cmd_prefix) +
            sizeof(temporary_mount_point) + (wcslen(FormatOptions) << 1)));

    STARTUPINFO startup_info = { sizeof(startup_info) };
    PROCESS_INFORMATION process_info;
    BOOL temp_drive_defined = FALSE;  // TRUE when we picked the letter ourselves
    int iReturnCode;

    HANDLE hMutex = CreateMutex(NULL, FALSE, format_mutex);
    if (hMutex == NULL)
    {
        PrintLastError(L"Error creating mutex object:");
        return IMDISK_CLI_ERROR_FORMAT;
    }

    // WAIT_ABANDONED is accepted: a previous holder died, but ownership of
    // the mutex has still transferred to us.
    switch (WaitForSingleObject(hMutex, INFINITE))
    {
    case WAIT_OBJECT_0:
    case WAIT_ABANDONED:
        break;

    default:
        PrintLastError(L"Error, mutex object failed:");
        CloseHandle(hMutex);
        return IMDISK_CLI_ERROR_FORMAT;
    }

    if (DriveLetter != 0)
    {
        temporary_mount_point[0] = DriveLetter;
    }
    else
    {
        temporary_mount_point[0] = ImDiskFindFreeDriveLetter();
        temp_drive_defined = TRUE;
    }

    // ImDiskFindFreeDriveLetter() returning 0 means no letter was available.
    if (temporary_mount_point[0] == 0)
    {
        fprintf
            (stderr,
            "Format failed. No free drive letters available.\r\n");
        ReleaseMutex(hMutex);
        CloseHandle(hMutex);
        return IMDISK_CLI_ERROR_FORMAT;
    }

    // Ensure the drive letter points at our device, (re)defining it if
    // necessary. If the re-check after DefineDosDevice still fails, undo the
    // definition and give up.
    if (!ImDiskCliValidateDriveLetterTarget(temporary_mount_point,
        DevicePath))
    {
        if (!DefineDosDevice(DDD_RAW_TARGET_PATH,
            temporary_mount_point,
            DevicePath))
        {
            PrintLastError(L"Error defining drive letter:");
            ReleaseMutex(hMutex);
            CloseHandle(hMutex);
            return IMDISK_CLI_ERROR_FORMAT;
        }

        if (!ImDiskCliValidateDriveLetterTarget(temporary_mount_point,
            DevicePath))
        {
            if (!DefineDosDevice(DDD_REMOVE_DEFINITION |
                DDD_EXACT_MATCH_ON_REMOVE |
                DDD_RAW_TARGET_PATH,
                temporary_mount_point,
                DevicePath))
                PrintLastError(L"Error undefining temporary drive letter:");

            ReleaseMutex(hMutex);
            CloseHandle(hMutex);
            return IMDISK_CLI_ERROR_FORMAT;
        }
    }

    printf("Formatting disk %ws...\n", temporary_mount_point);

    // Build "format.com X: <options>" and run it, waiting for completion.
    wcscpy(format_cmd, format_cmd_prefix);
    wcscat(format_cmd, temporary_mount_point);
    wcscat(format_cmd, L" ");
    wcscat(format_cmd, FormatOptions);

    if (CreateProcess(NULL, format_cmd, NULL, NULL, TRUE, 0, NULL, NULL,
        &startup_info, &process_info))
    {
        CloseHandle(process_info.hThread);
        WaitForSingleObject(process_info.hProcess, INFINITE);
        CloseHandle(process_info.hProcess);
        // NOTE(review): format.com's exit code is not examined, so a failed
        // format run still reports IMDISK_CLI_SUCCESS here.
        iReturnCode = IMDISK_CLI_SUCCESS;
    }
    else
    {
        PrintLastError(L"Cannot format drive:");
        iReturnCode = IMDISK_CLI_ERROR_FORMAT;
    }

    // Remove the temporary drive letter if we defined one above.
    if (temp_drive_defined)
    {
        if (!DefineDosDevice(DDD_REMOVE_DEFINITION |
            DDD_EXACT_MATCH_ON_REMOVE |
            DDD_RAW_TARGET_PATH,
            temporary_mount_point,
            DevicePath))
            PrintLastError(L"Error undefining temporary drive letter:");
    }

    if (!ReleaseMutex(hMutex))
        PrintLastError(L"Error releasing mutex:");

    if (!CloseHandle(hMutex))
        PrintLastError(L"Error releasing mutex:");

    return iReturnCode;
}
// Creates a new virtual disk device.
//
// DeviceNumber  - in: requested device number (or IMDISK_AUTO_DEVICE_NUMBER
//                 semantics as defined by the driver); out: the number of the
//                 device actually created.
// DiskGeometry  - requested geometry; Cylinders.QuadPart carries total size.
// ImageOffset   - byte offset into the image file where the disk data starts.
// Flags         - IMDISK_TYPE_xxx / IMDISK_DEVICE_TYPE_xxx / option flags.
// FileName      - image file name, proxy connection string, or NULL for a
//                 memory-only disk.
// NativePath    - TRUE if FileName is already an NT native path.
// MountPoint    - drive letter ("X:" or "X:\") or directory mount point, or
//                 NULL for no mount point. May be modified: set to an empty
//                 string if mount point creation fails.
// NumericPrint  - TRUE to print just the device number on success.
// FormatOptions - if non-NULL, format the new disk with these options.
// SaveSettings  - TRUE to save settings in registry for re-creation at boot.
//
// Returns IMDISK_CLI_SUCCESS or one of the IMDISK_CLI_ERROR_xxx codes.
int
ImDiskCliCreateDevice(LPDWORD DeviceNumber,
    PDISK_GEOMETRY DiskGeometry,
    PLARGE_INTEGER ImageOffset,
    DWORD Flags,
    LPCWSTR FileName,
    BOOL NativePath,
    LPWSTR MountPoint,
    BOOL NumericPrint,
    LPWSTR FormatOptions,
    BOOL SaveSettings)
{
    PIMDISK_CREATE_DATA create_data;
    HANDLE driver;
    UNICODE_STRING file_name;
    DWORD dw;
    WCHAR device_path[MAX_PATH];

    RtlInitUnicodeString(&file_name, IMDISK_CTL_DEVICE_NAME);

    // Open the driver control device, starting the driver service on demand
    // and retrying until the open succeeds or a fatal error occurs.
    for (;;)
    {
        driver = ImDiskOpenDeviceByName(&file_name,
            GENERIC_READ | GENERIC_WRITE);

        if (driver != INVALID_HANDLE_VALUE)
            break;

        if (GetLastError() != ERROR_FILE_NOT_FOUND)
        {
            PrintLastError(L"Error controlling the ImDisk Virtual Disk Driver:");
            return IMDISK_CLI_ERROR_DRIVER_INACCESSIBLE;
        }

        // Control device not present: the driver is not loaded. Try to
        // start it and map start failures to user-friendly messages.
        if (!ImDiskStartService(IMDISK_DRIVER_NAME))
            switch (GetLastError())
            {
            case ERROR_SERVICE_DOES_NOT_EXIST:
                fputs("The ImDisk Virtual Disk Driver is not installed. "
                    "Please re-install ImDisk.\r\n", stderr);
                return IMDISK_CLI_ERROR_DRIVER_NOT_INSTALLED;

            case ERROR_PATH_NOT_FOUND:
            case ERROR_FILE_NOT_FOUND:
                fputs("Cannot load imdisk.sys. "
                    "Please re-install ImDisk.\r\n", stderr);
                return IMDISK_CLI_ERROR_DRIVER_NOT_INSTALLED;

            case ERROR_SERVICE_DISABLED:
                fputs("The ImDisk Virtual Disk Driver is disabled.\r\n", stderr);
                return IMDISK_CLI_ERROR_DRIVER_NOT_INSTALLED;

            default:
                PrintLastError(L"Error loading ImDisk Virtual Disk Driver:");
                return IMDISK_CLI_ERROR_DRIVER_NOT_INSTALLED;
            }

        // Yield once before retrying the open now that the driver started.
        Sleep(0);
        puts("The ImDisk Virtual Disk Driver was loaded into the kernel.");
    }

    if (!ImDiskCliCheckDriverVersion(driver))
    {
        CloseHandle(driver);
        return IMDISK_CLI_ERROR_DRIVER_WRONG_VERSION;
    }

    // Physical memory allocation requires the AWEAlloc driver.
    // (Deliberate non-short-circuit | and & here: both operands are cheap
    // comparisons.)
    if (((IMDISK_TYPE(Flags) == IMDISK_TYPE_FILE) |
        (IMDISK_TYPE(Flags) == 0)) &
        (IMDISK_FILE_TYPE(Flags) == IMDISK_FILE_TYPE_AWEALLOC))
    {
        HANDLE awealloc;
        UNICODE_STRING file_name;   // shadows outer file_name intentionally

        RtlInitUnicodeString(&file_name, AWEALLOC_DEVICE_NAME);

        // Same open-or-start-service loop as for the main driver, but for
        // the AWEAlloc helper driver. Only its presence is verified; the
        // handle is closed again immediately.
        for (;;)
        {
            awealloc = ImDiskOpenDeviceByName(&file_name,
                GENERIC_READ | GENERIC_WRITE);

            if (awealloc != INVALID_HANDLE_VALUE)
            {
                NtClose(awealloc);
                break;
            }

            if (GetLastError() != ERROR_FILE_NOT_FOUND)
                break;

            if (ImDiskStartService(AWEALLOC_DRIVER_NAME))
            {
                puts("AWEAlloc driver was loaded into the kernel.");
                continue;
            }

            switch (GetLastError())
            {
            case ERROR_SERVICE_DOES_NOT_EXIST:
                fputs("The AWEAlloc driver is not installed.\r\n"
                    "Please re-install ImDisk.\r\n", stderr);
                break;

            case ERROR_PATH_NOT_FOUND:
            case ERROR_FILE_NOT_FOUND:
                fputs("Cannot load AWEAlloc driver.\r\n"
                    "Please re-install ImDisk.\r\n", stderr);
                break;

            case ERROR_SERVICE_DISABLED:
                fputs("The AWEAlloc driver is disabled.\r\n", stderr);
                break;

            default:
                PrintLastError(L"Error loading AWEAlloc driver:");
            }

            CloseHandle(driver);
            return IMDISK_CLI_ERROR_SERVICE_INACCESSIBLE;
        }
    }
    // Proxy reconnection types requires the user mode service.
    else if ((IMDISK_TYPE(Flags) == IMDISK_TYPE_PROXY) &
        ((IMDISK_PROXY_TYPE(Flags) == IMDISK_PROXY_TYPE_TCP) |
        (IMDISK_PROXY_TYPE(Flags) == IMDISK_PROXY_TYPE_COMM)))
    {
        // Probe the helper service's named pipe; if absent, start the
        // service and poll (200 ms intervals) until the pipe appears or a
        // different error occurs. Note the brace-less nested ifs: the
        // else-branch below belongs to the ImDiskStartService call.
        if (!WaitNamedPipe(IMDPROXY_SVC_PIPE_DOSDEV_NAME, 0))
            if (GetLastError() == ERROR_FILE_NOT_FOUND)
                if (ImDiskStartService(IMDPROXY_SVC))
                {
                    while (!WaitNamedPipe(IMDPROXY_SVC_PIPE_DOSDEV_NAME, 0))
                        if (GetLastError() == ERROR_FILE_NOT_FOUND)
                            Sleep(200);
                        else
                            break;

                    puts
                        ("The ImDisk Virtual Disk Driver Helper Service was started.");
                }
                else
                {
                    switch (GetLastError())
                    {
                    case ERROR_SERVICE_DOES_NOT_EXIST:
                        fputs("The ImDisk Virtual Disk Driver Helper Service is not "
                            "installed.\r\n"
                            "Please re-install ImDisk.\r\n", stderr);
                        break;

                    case ERROR_PATH_NOT_FOUND:
                    case ERROR_FILE_NOT_FOUND:
                        fputs("Cannot start ImDisk Virtual Disk Driver Helper "
                            "Service.\r\n"
                            "Please re-install ImDisk.\r\n", stderr);
                        break;

                    case ERROR_SERVICE_DISABLED:
                        fputs("The ImDisk Virtual Disk Driver Helper Service is "
                            "disabled.\r\n", stderr);
                        break;

                    default:
                        PrintLastError
                            (L"Error starting ImDisk Virtual Disk Driver Helper "
                            L"Service:");
                    }

                    CloseHandle(driver);
                    return IMDISK_CLI_ERROR_SERVICE_INACCESSIBLE;
                }
    }

    // Translate FileName into the NT native path passed to the driver.
    if (FileName == NULL)
        RtlInitUnicodeString(&file_name, NULL);
    else if (NativePath)
    {
        // Caller supplied a native path already; just copy it.
        if (!RtlCreateUnicodeString(&file_name, FileName))
        {
            CloseHandle(driver);
            fputs("Memory allocation error.\r\n", stderr);
            return IMDISK_CLI_ERROR_FATAL;
        }
    }
    else if ((IMDISK_TYPE(Flags) == IMDISK_TYPE_PROXY) &
        (IMDISK_PROXY_TYPE(Flags) == IMDISK_PROXY_TYPE_SHM))
    {
        // Shared-memory proxy: the object name must live under
        // \BaseNamedObjects. Probe for the Global\\ namespace (absent on
        // systems without Terminal Services session namespaces) and prefix
        // accordingly.
        LPWSTR namespace_prefix;
        LPWSTR prefixed_name;
        HANDLE h = CreateFile(L"\\\\?\\Global", 0, FILE_SHARE_READ, NULL,
            OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);

        if ((h == INVALID_HANDLE_VALUE) &
            (GetLastError() == ERROR_FILE_NOT_FOUND))
            namespace_prefix = L"\\BaseNamedObjects\\";
        else
            namespace_prefix = L"\\BaseNamedObjects\\Global\\";

        if (h != INVALID_HANDLE_VALUE)
            CloseHandle(h);

        prefixed_name = (LPWSTR)
            _alloca(((wcslen(namespace_prefix) + wcslen(FileName)) << 1) + 1);

        // NOTE(review): _alloca raises an exception rather than returning
        // NULL, so this check is effectively dead; kept as-is.
        if (prefixed_name == NULL)
        {
            CloseHandle(driver);
            fputs("Memory allocation error.\r\n", stderr);
            return IMDISK_CLI_ERROR_FATAL;
        }

        wcscpy(prefixed_name, namespace_prefix);
        wcscat(prefixed_name, FileName);

        if (!RtlCreateUnicodeString(&file_name, prefixed_name))
        {
            CloseHandle(driver);
            fputs("Memory allocation error.\r\n", stderr);
            return IMDISK_CLI_ERROR_FATAL;
        }
    }
    else
    {
        // Ordinary DOS path: convert to NT native form.
        if (!RtlDosPathNameToNtPathName_U(FileName, &file_name, NULL, NULL))
        {
            CloseHandle(driver);
            fputs("Memory allocation error.\r\n", stderr);
            return IMDISK_CLI_ERROR_FATAL;
        }
    }

    // Variable-length IMDISK_CREATE_DATA: file name characters are appended
    // after the fixed part.
    create_data = ImDiskCliAssertNotNull(_alloca(sizeof(IMDISK_CREATE_DATA) +
        file_name.Length));

    ZeroMemory(create_data, sizeof(IMDISK_CREATE_DATA) + file_name.Length);

    puts("Creating device...");

    // Check if mount point is a drive letter or junction point
    if (MountPoint != NULL)
        if ((wcslen(MountPoint) == 2) ? MountPoint[1] == ':' :
            (wcslen(MountPoint) == 3) ? wcscmp(MountPoint + 1, L":\\") == 0 :
            FALSE)
            create_data->DriveLetter = MountPoint[0];

    create_data->DeviceNumber = *DeviceNumber;
    create_data->DiskGeometry = *DiskGeometry;
    create_data->ImageOffset = *ImageOffset;
    create_data->Flags = Flags;
    create_data->FileNameLength = file_name.Length;

    if (file_name.Length != 0)
    {
        memcpy(&create_data->FileName, file_name.Buffer, file_name.Length);
        RtlFreeUnicodeString(&file_name);
    }

    if (!DeviceIoControl(driver,
        IOCTL_IMDISK_CREATE_DEVICE,
        create_data,
        sizeof(IMDISK_CREATE_DATA) +
        create_data->FileNameLength,
        create_data,
        sizeof(IMDISK_CREATE_DATA) +
        create_data->FileNameLength,
        &dw,
        NULL))
    {
        PrintLastError(L"Error creating virtual disk:");
        CloseHandle(driver);
        return IMDISK_CLI_ERROR_CREATE_DEVICE;
    }

    CloseHandle(driver);

    // The driver writes the actually allocated number back into create_data.
    *DeviceNumber = create_data->DeviceNumber;

    // Build device path, e.g. \Device\ImDisk2
    _snwprintf(device_path, sizeof(device_path) / sizeof(*device_path) - 1,
        IMDISK_DEVICE_BASE_NAME L"%u", create_data->DeviceNumber);
    device_path[sizeof(device_path) / sizeof(*device_path) - 1] = 0;

    if (MountPoint != NULL)
    {
        // DriveLetter == 0 here means MountPoint was not recognized as a
        // drive letter above, so treat it as a directory mount point.
        if (create_data->DriveLetter == 0)
        {
            if (!ImDiskCreateMountPoint(MountPoint, device_path))
            {
                switch (GetLastError())
                {
                case ERROR_INVALID_REPARSE_DATA:
                    ImDiskOemPrintF(stderr,
                        "Invalid mount point path: '%1!ws!'\n",
                        MountPoint);
                    break;

                case ERROR_INVALID_PARAMETER:
                    fputs("This version of Windows only supports drive letters "
                        "as mount points.\r\n"
                        "Windows 2000 or higher is required to support "
                        "subdirectory mount points.\r\n",
                        stderr);
                    break;

                case ERROR_INVALID_FUNCTION:
                case ERROR_NOT_A_REPARSE_POINT:
                    fputs("Mount points are only supported on NTFS volumes.\r\n",
                        stderr);
                    break;

                case ERROR_DIRECTORY:
                case ERROR_DIR_NOT_EMPTY:
                    fputs("Mount points can only be created on empty "
                        "directories.\r\n", stderr);
                    break;

                default:
                    PrintLastError(L"Error creating mount point:");
                }

                fputs
                    ("Warning: The device is created without a mount point.\r\n",
                    stderr);

                // Signal to the caller that no mount point exists.
                MountPoint[0] = 0;
            }
        }
#ifndef _WIN64
        // Pre-XP 32-bit systems: the driver cannot define the drive letter
        // itself, so define it here from user mode.
        else if (!IMDISK_GTE_WINXP())
            if (!DefineDosDevice(DDD_RAW_TARGET_PATH, MountPoint, device_path))
                PrintLastError(L"Error creating mount point:");
#endif
    }

    if (NumericPrint)
        printf("%u\n", *DeviceNumber);
    else
        ImDiskOemPrintF(stdout,
            "Created device %1!u!: %2!ws! -> %3!ws!",
            *DeviceNumber,
            MountPoint == NULL ? L"No mountpoint" : MountPoint,
            FileName == NULL ? L"Image in memory" : FileName);

    if (SaveSettings)
    {
        puts("Saving registry settings...");

        if (!ImDiskSaveRegistrySettings(create_data))
            PrintLastError(L"Registry edit failed");
    }

    if (FormatOptions != NULL)
        return ImDiskCliFormatDisk(device_path,
            create_data->DriveLetter,
            FormatOptions);

    return IMDISK_CLI_SUCCESS;
}
// Removes an existing virtual disk device. ForeDismount can be set to TRUE to
// continue with dismount even if there are open handles to files or similar on
// the virtual disk. EmergencyRemove can be set to TRUE to have the device
// immediately removed, regardless of whether device handler loop in driver is
// responsive or hung, or whether or not there are any handles open to the
// device. Use this as a last restort to remove for example proxy backed
// devices with hung proxy connections and similar.
//
// DeviceNumber   - number of the device to remove (used when MountPoint is
//                  NULL).
// MountPoint     - drive letter ("X:"/"X:\") or directory mount point, or
//                  NULL to identify the device by number.
// RemoveSettings - TRUE to also delete saved registry settings.
//
// Returns 0 on success or an IMDISK_CLI_ERROR_xxx code.
int
ImDiskCliRemoveDevice(DWORD DeviceNumber,
    LPCWSTR MountPoint,
    BOOL ForceDismount,
    BOOL EmergencyRemove,
    BOOL RemoveSettings)
{
    // Scratch "X:" string used when the mount point is derived from the
    // device's own drive letter below.
    WCHAR drive_letter_mount_point[] = L" :";
    DWORD dw;

    if (EmergencyRemove)
    {
        // Bypass all dismount/lock protocol and tell the driver to tear the
        // device down immediately.
        puts("Emergency removal...");

        if (!ImDiskForceRemoveDevice(NULL, DeviceNumber))
        {
            PrintLastError(MountPoint == NULL ? L"Error" : MountPoint);
            return IMDISK_CLI_ERROR_DEVICE_INACCESSIBLE;
        }
    }
    else
    {
        PIMDISK_CREATE_DATA create_data;
        HANDLE device;

        // Open the device, trying progressively weaker access rights so
        // that read-only and attribute-only opens can still succeed.
        if (MountPoint == NULL)
        {
            device = ImDiskOpenDeviceByNumber(DeviceNumber,
                GENERIC_READ | GENERIC_WRITE);

            if (device == INVALID_HANDLE_VALUE)
                device = ImDiskOpenDeviceByNumber(DeviceNumber,
                    GENERIC_READ);

            if (device == INVALID_HANDLE_VALUE)
                device = ImDiskOpenDeviceByNumber(DeviceNumber,
                    FILE_READ_ATTRIBUTES);
        }
        else if ((MountPoint[0] != 0) &&
            ((wcscmp(MountPoint + 1, L":") == 0) ||
            (wcscmp(MountPoint + 1, L":\\") == 0)))
        {
            // Drive letter form: open through \\.\X: directly.
            WCHAR drive_letter_path[] = L"\\\\.\\ :";
            drive_letter_path[4] = MountPoint[0];

            // Notify processes that this device is about to be removed.
            if ((MountPoint[0] >= L'A') & (MountPoint[0] <= L'Z'))
            {
                puts("Notifying applications...");

                ImDiskNotifyRemovePending(NULL, MountPoint[0]);
            }

            DbgOemPrintF((stdout, "Opening %1!ws!...\n", MountPoint));

            device = CreateFile(drive_letter_path,
                GENERIC_READ | GENERIC_WRITE,
                FILE_SHARE_READ | FILE_SHARE_WRITE,
                NULL, OPEN_EXISTING, FILE_FLAG_NO_BUFFERING,
                NULL);

            if (device == INVALID_HANDLE_VALUE)
                device = CreateFile(drive_letter_path,
                    GENERIC_READ,
                    FILE_SHARE_READ | FILE_SHARE_WRITE,
                    NULL, OPEN_EXISTING, FILE_FLAG_NO_BUFFERING,
                    NULL);

            if (device == INVALID_HANDLE_VALUE)
                device = CreateFile(drive_letter_path,
                    FILE_READ_ATTRIBUTES,
                    FILE_SHARE_READ | FILE_SHARE_WRITE,
                    NULL, OPEN_EXISTING, FILE_FLAG_NO_BUFFERING,
                    NULL);
        }
        else
        {
            // Directory mount point form.
            device = ImDiskOpenDeviceByMountPoint(MountPoint,
                GENERIC_READ | GENERIC_WRITE);

            if (device == INVALID_HANDLE_VALUE)
                device = ImDiskOpenDeviceByMountPoint(MountPoint,
                    GENERIC_READ);

            if (device == INVALID_HANDLE_VALUE)
                device = ImDiskOpenDeviceByMountPoint(MountPoint,
                    FILE_READ_ATTRIBUTES);

            if (device == INVALID_HANDLE_VALUE)
            {
                switch (GetLastError())
                {
                case ERROR_INVALID_PARAMETER:
                    fputs("This version of Windows only supports drive letters as "
                        "mount points.\r\n"
                        "Windows 2000 or higher is required to support "
                        "subdirectory mount points.\r\n",
                        stderr);
                    return IMDISK_CLI_ERROR_BAD_MOUNT_POINT;

                case ERROR_INVALID_FUNCTION:
                    fputs("Mount points are only supported on NTFS volumes.\r\n",
                        stderr);
                    return IMDISK_CLI_ERROR_BAD_MOUNT_POINT;

                case ERROR_NOT_A_REPARSE_POINT:
                case ERROR_DIRECTORY:
                case ERROR_DIR_NOT_EMPTY:
                    ImDiskOemPrintF(stderr, "Not a mount point: '%1!ws!'\n",
                        MountPoint);
                    return IMDISK_CLI_ERROR_BAD_MOUNT_POINT;

                default:
                    PrintLastError(MountPoint);
                    return IMDISK_CLI_ERROR_BAD_MOUNT_POINT;
                }
            }
        }

        if (device == INVALID_HANDLE_VALUE)
        {
            if (GetLastError() == ERROR_FILE_NOT_FOUND)
            {
                fputs("No such device.\r\n", stderr);
                return IMDISK_CLI_ERROR_DEVICE_NOT_FOUND;
            }
            else
            {
                PrintLastError(L"Error opening device:");
                return IMDISK_CLI_ERROR_DEVICE_INACCESSIBLE;
            }
        }

        if (!ImDiskCliCheckDriverVersion(device))
        {
            CloseHandle(device);
            return IMDISK_CLI_ERROR_DRIVER_WRONG_VERSION;
        }

        // Query device data; MAX_PATH << 2 bytes of extra room for the
        // variable-length file name portion.
        create_data = (PIMDISK_CREATE_DATA)
            ImDiskCliAssertNotNull(malloc(sizeof(IMDISK_CREATE_DATA) +
            (MAX_PATH << 2)));

        if (!DeviceIoControl(device,
            IOCTL_IMDISK_QUERY_DEVICE,
            NULL,
            0,
            create_data,
            sizeof(IMDISK_CREATE_DATA) + (MAX_PATH << 2),
            &dw, NULL))
        {
            // NOTE(review): device handle is not closed on this return path
            // (nor on the lock/dismount failure paths below) — relies on
            // process exit for cleanup.
            PrintLastError(MountPoint);
            ImDiskOemPrintF(stderr,
                "%1!ws!: Is that drive really an ImDisk drive?",
                MountPoint);
            free(create_data);
            return IMDISK_CLI_ERROR_DEVICE_INACCESSIBLE;
        }

        // Sanity check: returned size must at least cover the fixed part.
        if (dw < sizeof(IMDISK_CREATE_DATA) - sizeof(*create_data->FileName))
        {
            ImDiskOemPrintF(stderr,
                "%1!ws!: Is that drive really an ImDisk drive?",
                MountPoint);
            free(create_data);
            return IMDISK_CLI_ERROR_DEVICE_INACCESSIBLE;
        }

        // If the caller gave no mount point but the device has a drive
        // letter, use that letter for messages and mount point removal.
        if ((MountPoint == NULL) & (create_data->DriveLetter != 0))
        {
            drive_letter_mount_point[0] = create_data->DriveLetter;
            MountPoint = drive_letter_mount_point;
        }

        if (RemoveSettings)
        {
            printf("Removing registry settings for device %u...\n",
                create_data->DeviceNumber);

            if (!ImDiskRemoveRegistrySettings(create_data->DeviceNumber))
                PrintLastError(L"Registry edit failed");
        }

        free(create_data);
        create_data = NULL;

        puts("Flushing file buffers...");

        FlushFileBuffers(device);

        puts("Locking volume...");

        // Try to lock the volume. On failure: with ForceDismount, dismount
        // first and retry the lock once (results ignored); without it, give
        // up. The final else below belongs to the DeviceIoControl if.
        if (!DeviceIoControl(device,
            FSCTL_LOCK_VOLUME,
            NULL,
            0,
            NULL,
            0,
            &dw,
            NULL))
            if (ForceDismount)
            {
                puts("Failed, forcing dismount...");

                DeviceIoControl(device,
                    FSCTL_DISMOUNT_VOLUME,
                    NULL,
                    0,
                    NULL,
                    0,
                    &dw,
                    NULL);

                DeviceIoControl(device,
                    FSCTL_LOCK_VOLUME,
                    NULL,
                    0,
                    NULL,
                    0,
                    &dw,
                    NULL);
            }
            else
            {
                PrintLastError(MountPoint == NULL ? L"Error" : MountPoint);
                return IMDISK_CLI_ERROR_DEVICE_INACCESSIBLE;
            }
        else
        {
            // Lock succeeded; dismount the filesystem cleanly.
            puts("Dismounting filesystem...");

            if (!DeviceIoControl(device,
                FSCTL_DISMOUNT_VOLUME,
                NULL,
                0,
                NULL,
                0,
                &dw,
                NULL))
            {
                PrintLastError(MountPoint == NULL ? L"Error" : MountPoint);
                return IMDISK_CLI_ERROR_DEVICE_INACCESSIBLE;
            }
        }

        puts("Removing device...");

        // Eject the media. If that fails: with ForceDismount, fall back to
        // forced removal and only then treat failure as fatal; without
        // ForceDismount the eject failure is ignored.
        if (!DeviceIoControl(device,
            IOCTL_STORAGE_EJECT_MEDIA,
            NULL,
            0,
            NULL,
            0,
            &dw,
            NULL))
            if (ForceDismount ? !ImDiskForceRemoveDevice(device, 0) : FALSE)
            {
                PrintLastError(MountPoint == NULL ? L"Error" : MountPoint);
                return IMDISK_CLI_ERROR_DEVICE_INACCESSIBLE;
            }

        DeviceIoControl(device,
            FSCTL_UNLOCK_VOLUME,
            NULL,
            0,
            NULL,
            0,
            &dw,
            NULL);

        CloseHandle(device);
    }

    // Remove the drive letter or directory mount point, if any.
    if (MountPoint != NULL)
    {
        puts("Removing mountpoint...");

        if (!ImDiskRemoveMountPoint(MountPoint))
        {
            switch (GetLastError())
            {
            case ERROR_INVALID_PARAMETER:
                fputs("This version of Windows only supports drive letters as "
                    "mount points.\r\n"
                    "Windows 2000 or higher is required to support "
                    "subdirectory mount points.\r\n",
                    stderr);
                break;

            case ERROR_INVALID_FUNCTION:
                fputs("Mount points are only supported on empty directories "
                    "on NTFS volumes.\r\n",
                    stderr);
                break;

            case ERROR_NOT_A_REPARSE_POINT:
            case ERROR_DIRECTORY:
            case ERROR_DIR_NOT_EMPTY:
                ImDiskOemPrintF(stderr,
                    "Not a mount point: '%1!ws!'\n", MountPoint);
                break;

            default:
                PrintLastError(MountPoint);
            }
        }
    }

    puts("Done.");

    return 0;
}
// Prints a list of current virtual disk devices. If NumericPrint is TRUE a
// simple number list is printed, otherwise each device object name with path
// is printed.
//
// Returns 0 on success (including when the driver is not loaded or no
// devices exist), -1 when the driver cannot be controlled.
int
ImDiskCliQueryStatusDriver(BOOL NumericPrint)
{
    ULONG current_size = 3;
    int i;
    DWORD counter;
    PULONG device_list = NULL;

    // At most two attempts: first with a small initial guess, then once
    // more with the exact size reported through ERROR_MORE_DATA.
    for (i = 0; i < 2; i++)
    {
        device_list = (PULONG)HeapAlloc(GetProcessHeap(),
            0, sizeof(ULONG) * current_size);

        if (device_list == NULL)
        {
            // Bug fix: message previously read "Memory alloation error".
            fprintf(stderr, "Memory allocation error\n");
            return 0;
        }

        if (ImDiskGetDeviceListEx(current_size, device_list))
            break;

        switch (GetLastError())
        {
        case ERROR_FILE_NOT_FOUND:
            fputs("The ImDisk Virtual Disk Driver is not loaded.\r\n", stderr);
            HeapFree(GetProcessHeap(), 0, device_list);
            return 0;

        case ERROR_MORE_DATA:
            // First element holds the device count; allocate room for the
            // count element plus that many entries and retry.
            current_size = *device_list + 1;
            HeapFree(GetProcessHeap(), 0, device_list);
            device_list = NULL;
            continue;

        default:
            PrintLastError(L"Cannot control the ImDisk Virtual Disk Driver:");
            HeapFree(GetProcessHeap(), 0, device_list);
            return -1;
        }
    }

    // NULL here means the second attempt also returned ERROR_MORE_DATA
    // (device count grew between calls); give up quietly.
    if (device_list == NULL)
    {
        return 0;
    }

    if (*device_list < 1)
    {
        if (!NumericPrint)
            puts("No virtual disks.");

        HeapFree(GetProcessHeap(), 0, device_list);
        return 0;
    }

    // Entries 1..count are device numbers; element 0 is the count itself.
    for (counter = 1; counter <= *device_list; counter++)
        printf("%s%u\n",
            NumericPrint ? "" : "\\Device\\ImDisk",
            device_list[counter]);

    HeapFree(GetProcessHeap(), 0, device_list);
    return 0;
}
/* int */
/* ImDiskCliQueryStatusDriver(BOOL NumericPrint) */
/* { */
/* DWORDLONG device_list = ImDiskGetDeviceList(); */
/* DWORD counter; */
/* if (device_list == 0) */
/* switch (GetLastError()) */
/* { */
/* case NO_ERROR: */
/* puts("No virtual disks."); */
/* return 0; */
/* case ERROR_FILE_NOT_FOUND: */
/* puts("The ImDisk Virtual Disk Driver is not loaded."); */
/* return 0; */
/* default: */
/* PrintLastError(L"Cannot control the ImDisk Virtual Disk Driver:"); */
/* return -1; */
/* } */
/* for (counter = 0; device_list != 0; device_list >>= 1, counter++) */
/* if (device_list & 1) */
/* printf("%s%u\n", NumericPrint ? "" : "\\Device\\ImDisk", counter); */
/* return 0; */
/* } */
// Prints information about an existing virtual disk device, identified by
// either a device number or mount point.
//
// DeviceNumber - device number, used when MountPoint is NULL.
// MountPoint   - drive letter or directory mount point, or NULL.
//
// Returns 0 on success or an IMDISK_CLI_ERROR_xxx code.
int
ImDiskCliQueryStatusDevice(DWORD DeviceNumber, LPWSTR MountPoint)
{
    HANDLE device;
    DWORD dw;
    PIMDISK_CREATE_DATA create_data;

    if (MountPoint == NULL)
    {
        device = ImDiskOpenDeviceByNumber(DeviceNumber, FILE_READ_ATTRIBUTES);
    }
    else
    {
        device = ImDiskOpenDeviceByMountPoint(MountPoint,
            FILE_READ_ATTRIBUTES);

        if (device == INVALID_HANDLE_VALUE)
        {
            switch (GetLastError())
            {
            case ERROR_INVALID_PARAMETER:
                fputs("This version of Windows only supports drive letters as "
                    "mount points.\r\n"
                    "Windows 2000 or higher is required to support "
                    "subdirectory mount points.\r\n",
                    stderr);
                break;

            case ERROR_INVALID_FUNCTION:
                fputs("Mount points are only supported on empty directories on "
                    "NTFS volumes.\r\n",
                    stderr);
                break;

            case ERROR_NOT_A_REPARSE_POINT:
            case ERROR_DIRECTORY:
            case ERROR_DIR_NOT_EMPTY:
                ImDiskOemPrintF(stderr,
                    "Not a mount point: '%1!ws!'\n", MountPoint);
                break;

            default:
                PrintLastError(MountPoint);
            }

            return IMDISK_CLI_ERROR_BAD_MOUNT_POINT;
        }
    }

    if (device == INVALID_HANDLE_VALUE)
    {
        if (GetLastError() == ERROR_FILE_NOT_FOUND)
        {
            fputs("No such device.\r\n", stderr);
            return IMDISK_CLI_ERROR_DEVICE_NOT_FOUND;
        }
        else
        {
            PrintLastError(L"Error opening device:");
            return IMDISK_CLI_ERROR_DEVICE_INACCESSIBLE;
        }
    }

    if (!ImDiskCliCheckDriverVersion(device))
    {
        CloseHandle(device);
        return IMDISK_CLI_ERROR_DRIVER_WRONG_VERSION;
    }

    // Variable-length structure: MAX_PATH << 2 extra bytes for the file
    // name portion.
    create_data = (PIMDISK_CREATE_DATA)
        ImDiskCliAssertNotNull(malloc(sizeof(IMDISK_CREATE_DATA) +
        (MAX_PATH << 2)));

    if (!DeviceIoControl(device,
        IOCTL_IMDISK_QUERY_DEVICE,
        NULL,
        0,
        create_data,
        sizeof(IMDISK_CREATE_DATA) + (MAX_PATH << 2),
        &dw, NULL))
    {
        PrintLastError(MountPoint);
        ImDiskOemPrintF(stderr,
            "%1!ws!: Is that drive really an ImDisk drive?",
            MountPoint);
        CloseHandle(device);
        // Bug fix: create_data was leaked on this error path (the size-check
        // path below frees it, this one did not).
        free(create_data);
        return IMDISK_CLI_ERROR_DEVICE_INACCESSIBLE;
    }

    // Sanity check: returned size must at least cover the fixed part.
    if (dw < sizeof(IMDISK_CREATE_DATA) - sizeof(*create_data->FileName))
    {
        ImDiskOemPrintF(stderr,
            "%1!ws!: Is that drive really an ImDisk drive?",
            MountPoint);
        CloseHandle(device);
        free(create_data);
        return IMDISK_CLI_ERROR_DEVICE_INACCESSIBLE;
    }

    CloseHandle(device);

    // Mount point / drive letter line.
    if (MountPoint != NULL)
        ImDiskOemPrintF(stdout,
            "Mount point: %1!ws!",
            MountPoint);
    else if (create_data->DriveLetter != 0)
        ImDiskOemPrintF(stdout,
            "Drive letter: %1!wc!",
            create_data->DriveLetter);
    else
        puts("No drive letter.");

    // Image file line. FileNameLength is in bytes; convert to a character
    // count for the precision field.
    if (create_data->FileNameLength != 0)
        ImDiskOemPrintF(stdout,
            "Image file: %1!.*ws!",
            (int)(create_data->FileNameLength /
            sizeof(*create_data->FileName)),
            create_data->FileName);
    else
        puts("No image file.");

    if (create_data->ImageOffset.QuadPart > 0)
        printf("Image file offset: %I64i bytes\n",
            create_data->ImageOffset.QuadPart);

    // Cylinders.QuadPart holds the total size in bytes here; the flags
    // summary below continues on the same output line (no newline yet).
    printf("Size: %I64i bytes (%.4g %s)",
        create_data->DiskGeometry.Cylinders.QuadPart,
        _h(create_data->DiskGeometry.Cylinders.QuadPart),
        _p(create_data->DiskGeometry.Cylinders.QuadPart));

    printf("%s%s%s%s%s%s.\n",
        IMDISK_SHARED_IMAGE(create_data->Flags) ?
        ", Shared image" : "",
        IMDISK_READONLY(create_data->Flags) ?
        ", ReadOnly" : "",
        IMDISK_REMOVABLE(create_data->Flags) ?
        ", Removable" : "",
        IMDISK_TYPE(create_data->Flags) == IMDISK_TYPE_VM ?
        ", Virtual Memory" :
        IMDISK_TYPE(create_data->Flags) == IMDISK_TYPE_PROXY ?
        ", Proxy" :
        IMDISK_FILE_TYPE(create_data->Flags) == IMDISK_FILE_TYPE_AWEALLOC ?
        ", Physical Memory" :
        IMDISK_FILE_TYPE(create_data->Flags) == IMDISK_FILE_TYPE_PARALLEL_IO ?
        ", Parallel I/O Image File" :
        ", Queued I/O Image File",
        IMDISK_DEVICE_TYPE(create_data->Flags) ==
        IMDISK_DEVICE_TYPE_CD ? ", CD-ROM" :
        IMDISK_DEVICE_TYPE(create_data->Flags) ==
        IMDISK_DEVICE_TYPE_RAW ? ", RAW" :
        IMDISK_DEVICE_TYPE(create_data->Flags) ==
        IMDISK_DEVICE_TYPE_FD ? ", Floppy" : ", HDD",
        create_data->Flags & IMDISK_IMAGE_MODIFIED ? ", Modified" : "");

    free(create_data);

    return 0;
}
// Changes flags for an existing virtual disk, identified by either device
// number or mount point. FlagsToChange specifies which flag bits to change,
// (0=not touch, 1=set to corresponding bit value in Flags parameter).
// Returns 0 on success or an IMDISK_CLI_ERROR_* code.
int
ImDiskCliChangeFlags(DWORD DeviceNumber, LPCWSTR MountPoint,
    DWORD FlagsToChange, DWORD Flags)
{
    HANDLE device;
    DWORD dw;
    IMDISK_SET_DEVICE_FLAGS device_flags;

    // Open the device read/write if possible, falling back to read-only
    // access.
    if (MountPoint == NULL)
    {
        device = ImDiskOpenDeviceByNumber(DeviceNumber,
            GENERIC_READ | GENERIC_WRITE);

        if (device == INVALID_HANDLE_VALUE)
            device = ImDiskOpenDeviceByNumber(DeviceNumber,
                GENERIC_READ);
    }
    else
    {
        device = ImDiskOpenDeviceByMountPoint(MountPoint,
            GENERIC_READ | GENERIC_WRITE);

        if (device == INVALID_HANDLE_VALUE)
            device = ImDiskOpenDeviceByMountPoint(MountPoint,
                GENERIC_READ);

        if (device == INVALID_HANDLE_VALUE)
            switch (GetLastError())
            {
            case ERROR_INVALID_PARAMETER:
                fputs("This version of Windows only supports drive letters as "
                    "mount points.\r\n"
                    "Windows 2000 or higher is required to support "
                    "subdirectory mount points.\r\n",
                    stderr);
                return IMDISK_CLI_ERROR_BAD_MOUNT_POINT;

            case ERROR_INVALID_FUNCTION:
                fputs("Mount points are only supported on NTFS volumes.\r\n",
                    stderr);
                return IMDISK_CLI_ERROR_BAD_MOUNT_POINT;

            case ERROR_NOT_A_REPARSE_POINT:
            case ERROR_DIRECTORY:
            case ERROR_DIR_NOT_EMPTY:
                ImDiskOemPrintF(stderr, "Not a mount point: '%1!ws!'\n",
                    MountPoint);
                return IMDISK_CLI_ERROR_BAD_MOUNT_POINT;

            default:
                PrintLastError(MountPoint);
                return IMDISK_CLI_ERROR_BAD_MOUNT_POINT;
            }
    }

    if (device == INVALID_HANDLE_VALUE)
        if (GetLastError() == ERROR_FILE_NOT_FOUND)
        {
            fputs("No such device.\r\n", stderr);
            return IMDISK_CLI_ERROR_DEVICE_NOT_FOUND;
        }
        else
        {
            PrintLastError(L"Error opening device:");
            return IMDISK_CLI_ERROR_DEVICE_INACCESSIBLE;
        }

    if (!ImDiskCliCheckDriverVersion(device))
    {
        CloseHandle(device);
        return IMDISK_CLI_ERROR_DRIVER_WRONG_VERSION;
    }

    // Changing read-only or removable status requires an exclusive volume
    // lock and a filesystem dismount first.
    if (FlagsToChange & (IMDISK_OPTION_RO | IMDISK_OPTION_REMOVABLE))
    {
        puts("Flushing file buffers...");

        FlushFileBuffers(device);

        puts("Locking volume...");

        if (!DeviceIoControl(device,
            FSCTL_LOCK_VOLUME,
            NULL,
            0,
            NULL,
            0,
            &dw,
            NULL))
        {
            PrintLastError(MountPoint == NULL ? L"Error" : MountPoint);
            CloseHandle(device);
            return IMDISK_CLI_ERROR_DEVICE_INACCESSIBLE;
        }

        puts("Dismounting filesystem...");

        if (!DeviceIoControl(device,
            FSCTL_DISMOUNT_VOLUME,
            NULL,
            0,
            NULL,
            0,
            &dw,
            NULL))
        {
            PrintLastError(MountPoint == NULL ? L"Error" : MountPoint);
            CloseHandle(device);
            return IMDISK_CLI_ERROR_DEVICE_INACCESSIBLE;
        }
    }

    puts("Setting new flags...");

    device_flags.FlagsToChange = FlagsToChange;
    device_flags.FlagValues = Flags;

    if (!DeviceIoControl(device,
        IOCTL_IMDISK_SET_DEVICE_FLAGS,
        &device_flags,
        sizeof(device_flags),
        &device_flags,
        sizeof(device_flags),
        &dw, NULL))
        // Fix: MountPoint is NULL when the device was opened by device
        // number; use the same fallback label as the other error paths in
        // this function instead of passing NULL.
        PrintLastError(MountPoint == NULL ? L"Error" : MountPoint);

    // Any bits still set in FlagsToChange after the ioctl are treated as
    // options that could not be changed (see error message below).
    if (device_flags.FlagsToChange != 0)
    {
        CloseHandle(device);
        ImDiskOemPrintF(stderr,
            "%1!ws!: Not all new options were successfully changed.",
            MountPoint);
        return IMDISK_CLI_ERROR_CREATE_DEVICE;
    }
    else
    {
        CloseHandle(device);
        puts("Done.");
        return 0;
    }
}
// Extends an existing virtual disk, identified by either device number or
// mount point. Grows the partition by ExtendSize bytes and then attempts to
// extend the filesystem to fill the new disk size.
// Returns 0 on success or an IMDISK_CLI_ERROR_* code.
int
ImDiskCliExtendDevice(DWORD DeviceNumber, LPCWSTR MountPoint,
    LARGE_INTEGER ExtendSize)
{
    HANDLE device;
    DWORD dw;
    DISK_GROW_PARTITION grow_partition = { 0 };
    GET_LENGTH_INFORMATION length_information;
    DISK_GEOMETRY disk_geometry;
    LONGLONG new_filesystem_size;

    // Open the device read/write if possible, falling back to read-only
    // access.
    if (MountPoint == NULL)
    {
        device = ImDiskOpenDeviceByNumber(DeviceNumber,
            GENERIC_READ | GENERIC_WRITE);

        if (device == INVALID_HANDLE_VALUE)
            device = ImDiskOpenDeviceByNumber(DeviceNumber,
                GENERIC_READ);
    }
    else
    {
        device = ImDiskOpenDeviceByMountPoint(MountPoint,
            GENERIC_READ | GENERIC_WRITE);

        if (device == INVALID_HANDLE_VALUE)
            device = ImDiskOpenDeviceByMountPoint(MountPoint,
                GENERIC_READ);

        if (device == INVALID_HANDLE_VALUE)
            switch (GetLastError())
            {
            case ERROR_INVALID_PARAMETER:
                fputs("This version of Windows only supports drive letters as "
                    "mount points.\r\n"
                    "Windows 2000 or higher is required to support "
                    "subdirectory mount points.\r\n",
                    stderr);
                return IMDISK_CLI_ERROR_BAD_MOUNT_POINT;

            case ERROR_INVALID_FUNCTION:
                fputs("Mount points are only supported on NTFS volumes.\r\n",
                    stderr);
                return IMDISK_CLI_ERROR_BAD_MOUNT_POINT;

            case ERROR_NOT_A_REPARSE_POINT:
            case ERROR_DIRECTORY:
            case ERROR_DIR_NOT_EMPTY:
                ImDiskOemPrintF(stderr, "Not a mount point: '%1!ws!'\n",
                    MountPoint);
                return IMDISK_CLI_ERROR_BAD_MOUNT_POINT;

            default:
                PrintLastError(MountPoint);
                return IMDISK_CLI_ERROR_BAD_MOUNT_POINT;
            }
    }

    if (device == INVALID_HANDLE_VALUE)
        if (GetLastError() == ERROR_FILE_NOT_FOUND)
        {
            fputs("No such device.\r\n", stderr);
            return IMDISK_CLI_ERROR_DEVICE_NOT_FOUND;
        }
        else
        {
            PrintLastError(L"Error opening device:");
            return IMDISK_CLI_ERROR_DEVICE_INACCESSIBLE;
        }

    puts("Extending disk size...");

    grow_partition.PartitionNumber = 1;
    grow_partition.BytesToGrow = ExtendSize;

    if (!DeviceIoControl(device,
        IOCTL_DISK_GROW_PARTITION,
        &grow_partition,
        sizeof(grow_partition),
        NULL,
        0,
        &dw, NULL))
    {
        // Fix: MountPoint may be NULL when the device was opened by device
        // number; use the same fallback label as the other error paths.
        PrintLastError(MountPoint == NULL ? L"Error" : MountPoint);
        CloseHandle(device);
        return IMDISK_CLI_ERROR_CREATE_DEVICE;
    }

    puts("Extending filesystem size...");

    // Query the new total size and the sector size so the filesystem can be
    // extended to cover the whole disk (FSCTL_EXTEND_VOLUME takes sectors).
    if (!DeviceIoControl(device,
        IOCTL_DISK_GET_LENGTH_INFO,
        NULL,
        0,
        &length_information,
        sizeof(length_information),
        &dw, NULL))
    {
        PrintLastError(MountPoint == NULL ? L"Error" : MountPoint);
        ImDiskOemPrintF(stderr,
            "%1!ws!: Is that drive really an ImDisk drive?",
            MountPoint);
        CloseHandle(device);
        return IMDISK_CLI_ERROR_DEVICE_INACCESSIBLE;
    }

    if (!DeviceIoControl(device,
        IOCTL_DISK_GET_DRIVE_GEOMETRY,
        NULL,
        0,
        &disk_geometry,
        sizeof(disk_geometry),
        &dw, NULL))
    {
        PrintLastError(MountPoint == NULL ? L"Error" : MountPoint);
        ImDiskOemPrintF(stderr,
            "%1!ws!: Is that drive really an ImDisk drive?",
            MountPoint);
        CloseHandle(device);
        return IMDISK_CLI_ERROR_DEVICE_INACCESSIBLE;
    }

    new_filesystem_size =
        length_information.Length.QuadPart /
        disk_geometry.BytesPerSector;

    if (!DeviceIoControl(device,
        FSCTL_EXTEND_VOLUME,
        &new_filesystem_size,
        sizeof(new_filesystem_size),
        NULL,
        0,
        &dw, NULL))
    {
        // Deliberately not a fatal error: the disk itself was extended.
        PrintLastError(MountPoint == NULL ? L"Error" : MountPoint);
        puts("The disk size was extended successfully, but it was not possible to extend the\r\n"
            "current filesystem on it. You will have to reformat the disk to use the full\r\n"
            "disk size.");
    }

    CloseHandle(device);

    printf("New size: %.4g %s\n",
        _h(length_information.Length.QuadPart),
        _p(length_information.Length.QuadPart));

    return 0;
}
// Entry function. Translates command line switches and parameters and calls
// corresponding functions to carry out actual tasks.
int
__cdecl
wmain(int argc, LPWSTR argv[])
{
    // Requested operation, selected by the -a/-d/-D/-R/-l/-e switch.
    enum
    {
        OP_MODE_NONE,
        OP_MODE_CREATE,
        OP_MODE_REMOVE,
        OP_MODE_QUERY,
        OP_MODE_EDIT
    } op_mode = OP_MODE_NONE;
    DWORD flags = 0;
    BOOL native_path = FALSE;
    BOOL numeric_print = FALSE;
    BOOL force_dismount = FALSE;
    BOOL emergency_remove = FALSE;
    LPWSTR file_name = NULL;
    LPWSTR mount_point = NULL;
    LPWSTR format_options = NULL;
    BOOL save_settings = FALSE;
    DWORD device_number = IMDISK_AUTO_DEVICE_NUMBER;
    DISK_GEOMETRY disk_geometry = { 0 };
    LARGE_INTEGER image_offset = { 0 };
    BOOL auto_find_offset = FALSE;
    BYTE auto_find_partition_entry = 0;
    DWORD flags_to_change = 0;
    int ret = 0;

    if (argc == 2)
        if (wcscmp(argv[1], L"--version") == 0)
        {
            printf
                ("Control program for the ImDisk Virtual Disk Driver for Windows NT/2000/XP.\n"
                "Version %i.%i.%i - (Compiled " __DATE__ ")\n"
                "\n"
                "Copyright (C) 2004-2015 Olof Lagerkvist.\r\n"
                "\n"
                "http://www.ltr-data.se olof@ltr-data.se\r\n"
                "\n"
                "Permission is hereby granted, free of charge, to any person\r\n"
                "obtaining a copy of this software and associated documentation\r\n"
                "files (the \"Software\"), to deal in the Software without\r\n"
                "restriction, including without limitation the rights to use,\r\n"
                "copy, modify, merge, publish, distribute, sublicense, and/or\r\n"
                "sell copies of the Software, and to permit persons to whom the\r\n"
                "Software is furnished to do so, subject to the following\r\n"
                "conditions:\r\n"
                "\r\n"
                "The above copyright notice and this permission notice shall be\r\n"
                "included in all copies or substantial portions of the Software.\r\n"
                "\r\n"
                "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\r\n"
                "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\r\n"
                "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r\n"
                "NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\r\n"
                "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\r\n"
                "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\r\n"
                "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\r\n"
                "OTHER DEALINGS IN THE SOFTWARE.\r\n"
                "\r\n"
                "This program contains some GNU GPL licensed code:\r\n"
                "- Parts related to floppy emulation based on VFD by Ken Kato.\r\n"
                " http://chitchat.at.infoseek.co.jp/vmware/vfd.html\r\n"
                "Copyright (C) Free Software Foundation, Inc.\r\n"
                "Read gpl.txt for the full GNU GPL license.\r\n"
                "\r\n"
                "This program may contain BSD licensed code:\r\n"
                "- Some code ported to NT from the FreeBSD md driver by Olof Lagerkvist.\r\n"
                " http://www.ltr-data.se\r\n"
                "Copyright (C) The FreeBSD Project.\r\n"
                "Copyright (C) The Regents of the University of California.\r\n",
                (IMDISK_VERSION & 0xFF00) >> 8,
                (IMDISK_VERSION & 0xF0) >> 4,
                IMDISK_VERSION & 0xF);

            return 0;
        }

    // Argument parse loop
    while (argc-- > 1)
    {
        argv++;

        if (wcslen(argv[0]) == 2 ? argv[0][0] == L'-' : FALSE)
            switch (argv[0][1])
            {
            case L'a':
                // -a: attach (create) a new virtual disk
                if (op_mode != OP_MODE_NONE)
                    ImDiskSyntaxHelp();

                op_mode = OP_MODE_CREATE;
                break;

            case L'd':
            case L'D':
            case L'R':
                // -d/-D/-R: remove device; -D forces dismount and -R also
                // requests emergency removal
                if (op_mode != OP_MODE_NONE)
                    ImDiskSyntaxHelp();

                op_mode = OP_MODE_REMOVE;

                if (argv[0][1] == L'D')
                    force_dismount = TRUE;

                if (argv[0][1] == L'R')
                {
                    force_dismount = TRUE;
                    emergency_remove = TRUE;
                }
                break;

            case L'l':
                // -l: list/query devices
                if (op_mode != OP_MODE_NONE)
                    ImDiskSyntaxHelp();

                op_mode = OP_MODE_QUERY;
                break;

            case L'e':
                // -e: edit an existing device
                if (op_mode != OP_MODE_NONE)
                    ImDiskSyntaxHelp();

                op_mode = OP_MODE_EDIT;
                break;

            case L't':
                // -t: backing storage type (file/vm/proxy), create mode only
                if ((op_mode != OP_MODE_CREATE) |
                    (argc < 2) |
                    (IMDISK_TYPE(flags) != 0))
                    ImDiskSyntaxHelp();

                if (wcscmp(argv[1], L"file") == 0)
                    flags |= IMDISK_TYPE_FILE;
                else if (wcscmp(argv[1], L"vm") == 0)
                    flags |= IMDISK_TYPE_VM;
                else if (wcscmp(argv[1], L"proxy") == 0)
                    flags |= IMDISK_TYPE_PROXY;
                else
                    ImDiskSyntaxHelp();

                argc--;
                argv++;
                break;

            case L'n':
                // -n: numeric output
                numeric_print = TRUE;
                break;

            case L'o':
                // -o: comma-separated option list, valid with -a and -e
                if (((op_mode != OP_MODE_CREATE) & (op_mode != OP_MODE_EDIT)) |
                    (argc < 2))
                    ImDiskSyntaxHelp();

                {
                    LPWSTR opt;
                    for (opt = wcstok(argv[1], L",");
                        opt != NULL;
                        opt = wcstok(NULL, L","))
                        if (wcscmp(opt, L"ro") == 0)
                        {
                            if (IMDISK_READONLY(flags_to_change))
                                ImDiskSyntaxHelp();

                            flags_to_change |= IMDISK_OPTION_RO;
                            flags |= IMDISK_OPTION_RO;
                        }
                        else if (wcscmp(opt, L"rw") == 0)
                        {
                            if (IMDISK_READONLY(flags_to_change))
                                ImDiskSyntaxHelp();

                            flags_to_change |= IMDISK_OPTION_RO;
                            flags &= ~IMDISK_OPTION_RO;
                        }
                        else if (wcscmp(opt, L"sparse") == 0)
                        {
                            flags_to_change |= IMDISK_OPTION_SPARSE_FILE;
                            flags |= IMDISK_OPTION_SPARSE_FILE;
                        }
                        else if (wcscmp(opt, L"rem") == 0)
                        {
                            if (IMDISK_REMOVABLE(flags_to_change))
                                ImDiskSyntaxHelp();

                            flags_to_change |= IMDISK_OPTION_REMOVABLE;
                            flags |= IMDISK_OPTION_REMOVABLE;
                        }
                        else if (wcscmp(opt, L"fix") == 0)
                        {
                            if (IMDISK_REMOVABLE(flags_to_change))
                                ImDiskSyntaxHelp();

                            flags_to_change |= IMDISK_OPTION_REMOVABLE;
                            flags &= ~IMDISK_OPTION_REMOVABLE;
                        }
                        else if (wcscmp(opt, L"saved") == 0)
                        {
                            if (op_mode != OP_MODE_EDIT)
                                ImDiskSyntaxHelp();

                            flags_to_change |= IMDISK_IMAGE_MODIFIED;
                            flags &= ~IMDISK_IMAGE_MODIFIED;
                        }
                        // None of the other options are valid with the -e parameter.
                        else if (op_mode != OP_MODE_CREATE)
                            ImDiskSyntaxHelp();
                        else if (wcscmp(opt, L"ip") == 0)
                        {
                            if ((IMDISK_TYPE(flags) != IMDISK_TYPE_PROXY) |
                                (IMDISK_PROXY_TYPE(flags) != IMDISK_PROXY_TYPE_DIRECT))
                                ImDiskSyntaxHelp();

                            native_path = TRUE;
                            flags |= IMDISK_PROXY_TYPE_TCP;
                        }
                        else if (wcscmp(opt, L"comm") == 0)
                        {
                            if ((IMDISK_TYPE(flags) != IMDISK_TYPE_PROXY) |
                                (IMDISK_PROXY_TYPE(flags) != IMDISK_PROXY_TYPE_DIRECT))
                                ImDiskSyntaxHelp();

                            native_path = TRUE;
                            flags |= IMDISK_PROXY_TYPE_COMM;
                        }
                        else if (wcscmp(opt, L"shm") == 0)
                        {
                            if ((IMDISK_TYPE(flags) != IMDISK_TYPE_PROXY) |
                                (IMDISK_PROXY_TYPE(flags) != IMDISK_PROXY_TYPE_DIRECT))
                                ImDiskSyntaxHelp();

                            flags |= IMDISK_PROXY_TYPE_SHM;
                        }
                        else if (wcscmp(opt, L"awe") == 0)
                        {
                            if (((IMDISK_TYPE(flags) != IMDISK_TYPE_FILE) &
                                (IMDISK_TYPE(flags) != 0)) |
                                (IMDISK_FILE_TYPE(flags) != 0))
                                ImDiskSyntaxHelp();

                            flags |= IMDISK_TYPE_FILE | IMDISK_FILE_TYPE_AWEALLOC;
                        }
                        else if (wcscmp(opt, L"par") == 0)
                        {
                            if (((IMDISK_TYPE(flags) != IMDISK_TYPE_FILE) &
                                (IMDISK_TYPE(flags) != 0)) |
                                (IMDISK_FILE_TYPE(flags) != 0))
                                ImDiskSyntaxHelp();

                            flags |= IMDISK_TYPE_FILE | IMDISK_FILE_TYPE_PARALLEL_IO;
                        }
                        else if (wcscmp(opt, L"bswap") == 0)
                        {
                            flags |= IMDISK_OPTION_BYTE_SWAP;
                        }
                        else if (wcscmp(opt, L"shared") == 0)
                        {
                            flags |= IMDISK_OPTION_SHARED_IMAGE;
                        }
                        else if (IMDISK_DEVICE_TYPE(flags) != 0)
                        {
                            ImDiskSyntaxHelp();
                        }
                        else if (wcscmp(opt, L"hd") == 0)
                        {
                            flags |= IMDISK_DEVICE_TYPE_HD;
                        }
                        else if (wcscmp(opt, L"fd") == 0)
                        {
                            flags |= IMDISK_DEVICE_TYPE_FD;
                        }
                        else if (wcscmp(opt, L"cd") == 0)
                        {
                            flags |= IMDISK_DEVICE_TYPE_CD;
                        }
                        else if (wcscmp(opt, L"raw") == 0)
                        {
                            flags |= IMDISK_DEVICE_TYPE_RAW;
                        }
                        else
                        {
                            ImDiskSyntaxHelp();
                        }
                }

                argc--;
                argv++;
                break;

            case L'f':
            case L'F':
                // -f/-F: image file name; -F means native (NT) path
                if ((op_mode != OP_MODE_CREATE) |
                    (argc < 2) |
                    (file_name != NULL))
                    ImDiskSyntaxHelp();

                if (argv[0][1] == L'F')
                    native_path = TRUE;

                file_name = argv[1];

                argc--;
                argv++;
                break;

            case L's':
                // -s: disk size with optional suffix; negative values and
                // percentages are relative to free physical memory
                if (((op_mode != OP_MODE_CREATE) & (op_mode != OP_MODE_EDIT)) |
                    (argc < 2) |
                    (disk_geometry.Cylinders.QuadPart != 0))
                    ImDiskSyntaxHelp();

                {
                    WCHAR suffix = 0;

                    (void)swscanf(argv[1], L"%I64i%c",
                        &disk_geometry.Cylinders, &suffix);

                    switch (suffix)
                    {
                    case 0:
                        break;
                    case '%':
                        if ((disk_geometry.Cylinders.QuadPart <= 0) |
                            (disk_geometry.Cylinders.QuadPart >= 100))
                            ImDiskSyntaxHelp();

                        {
                            MEMORYSTATUS memstat;
#pragma warning(suppress: 28159)
                            GlobalMemoryStatus(&memstat);
                            disk_geometry.Cylinders.QuadPart =
                                disk_geometry.Cylinders.QuadPart *
                                memstat.dwAvailPhys / 100;
                        }
                        break;
                        // Upper-case suffixes are binary multiples
                        // (intentional fall-through down to 'K').
                    case 'T':
                        disk_geometry.Cylinders.QuadPart <<= 10;
                    case 'G':
                        disk_geometry.Cylinders.QuadPart <<= 10;
                    case 'M':
                        disk_geometry.Cylinders.QuadPart <<= 10;
                    case 'K':
                        disk_geometry.Cylinders.QuadPart <<= 10;
                        break;
                    case 'b':
                        disk_geometry.Cylinders.QuadPart <<= 9;
                        break;
                        // Lower-case suffixes are decimal multiples
                        // (intentional fall-through down to 'k').
                    case 't':
                        disk_geometry.Cylinders.QuadPart *= 1000;
                    case 'g':
                        disk_geometry.Cylinders.QuadPart *= 1000;
                    case 'm':
                        disk_geometry.Cylinders.QuadPart *= 1000;
                    case 'k':
                        disk_geometry.Cylinders.QuadPart *= 1000;
                        break;
                    default:
                        fprintf(stderr, "ImDisk: Unsupported size suffix: '%wc'\n",
                            suffix);
                        return IMDISK_CLI_ERROR_BAD_SYNTAX;
                    }

                    if (disk_geometry.Cylinders.QuadPart < 0)
                    {
                        MEMORYSTATUS memstat;
#pragma warning(suppress: 28159)
                        GlobalMemoryStatus(&memstat);
                        disk_geometry.Cylinders.QuadPart =
                            memstat.dwAvailPhys +
                            disk_geometry.Cylinders.QuadPart;

                        if (disk_geometry.Cylinders.QuadPart < 0)
                        {
                            fprintf(stderr,
                                "ImDisk: Not enough memory, there is currently "
                                "%.4g %s free physical memory.\n",
                                _h(memstat.dwAvailPhys),
                                _p(memstat.dwAvailPhys));

                            return IMDISK_CLI_ERROR_NOT_ENOUGH_MEMORY;
                        }
                    }
                }

                argc--;
                argv++;
                break;

            case L'S':
                // -S: bytes per sector
                if ((op_mode != OP_MODE_CREATE) |
                    (argc < 2) |
                    (disk_geometry.BytesPerSector != 0))
                    ImDiskSyntaxHelp();

                if (!iswdigit(argv[1][0]))
                    ImDiskSyntaxHelp();

                disk_geometry.BytesPerSector = wcstoul(argv[1], NULL, 0);

                argc--;
                argv++;
                break;

            case L'x':
                // -x: sectors per track
                if ((op_mode != OP_MODE_CREATE) |
                    (argc < 2) |
                    (disk_geometry.SectorsPerTrack != 0))
                    ImDiskSyntaxHelp();

                if (!iswdigit(argv[1][0]))
                    ImDiskSyntaxHelp();

                disk_geometry.SectorsPerTrack = wcstoul(argv[1], NULL, 0);

                argc--;
                argv++;
                break;

            case L'y':
                // -y: tracks per cylinder
                if ((op_mode != OP_MODE_CREATE) |
                    (argc < 2) |
                    (disk_geometry.TracksPerCylinder != 0))
                    ImDiskSyntaxHelp();

                if (!iswdigit(argv[1][0]))
                    ImDiskSyntaxHelp();

                disk_geometry.TracksPerCylinder = wcstoul(argv[1], NULL, 0);

                argc--;
                argv++;
                break;

            case L'v':
                // -v: partition entry number (1-8) to mount from image
                if ((op_mode != OP_MODE_CREATE) |
                    (argc < 2) |
                    (auto_find_partition_entry != 0))
                    ImDiskSyntaxHelp();

                if ((argv[1][0] < L'1') | (argv[1][0] > L'8'))
                    ImDiskSyntaxHelp();

                if (argv[1][1] != 0)
                    ImDiskSyntaxHelp();

                auto_find_partition_entry = (BYTE)(argv[1][0] - L'0');

                argc--;
                argv++;
                break;

            case L'b':
                // -b: image file offset, or "auto" to detect it
                if ((op_mode != OP_MODE_CREATE) |
                    (argc < 2) |
                    (image_offset.QuadPart != 0) |
                    (auto_find_offset != FALSE))
                    ImDiskSyntaxHelp();

                if (wcscmp(argv[1], L"auto") == 0)
                    auto_find_offset = TRUE;
                else
                {
                    WCHAR suffix = 0;

                    (void)swscanf(argv[1], L"%I64u%c",
                        &image_offset, &suffix);

                    switch (suffix)
                    {
                    case 0:
                        break;
                        // Upper-case suffixes are binary multiples
                        // (intentional fall-through down to 'K').
                    case 'T':
                        image_offset.QuadPart <<= 10;
                    case 'G':
                        image_offset.QuadPart <<= 10;
                    case 'M':
                        image_offset.QuadPart <<= 10;
                    case 'K':
                        image_offset.QuadPart <<= 10;
                        break;
                    case 'b':
                        image_offset.QuadPart <<= 9;
                        break;
                        // Lower-case suffixes are decimal multiples
                        // (intentional fall-through down to 'k').
                    case 't':
                        image_offset.QuadPart *= 1000;
                    case 'g':
                        image_offset.QuadPart *= 1000;
                    case 'm':
                        image_offset.QuadPart *= 1000;
                    case 'k':
                        image_offset.QuadPart *= 1000;
                        // Bug fix: a break was missing here, so valid
                        // lower-case suffixes fell through into the default
                        // branch and were rejected as unsupported. This now
                        // matches the equivalent -s size parsing above.
                        break;
                    default:
                        fprintf(stderr, "ImDisk: Unsupported size suffix: '%wc'\n",
                            suffix);
                        return IMDISK_CLI_ERROR_BAD_SYNTAX;
                    }
                }

                argc--;
                argv++;
                break;

            case L'p':
                // -p: format options passed to the format command
                if ((op_mode != OP_MODE_CREATE) |
                    (argc < 2) |
                    (format_options != NULL))
                    ImDiskSyntaxHelp();

                format_options = argv[1];

                argc--;
                argv++;
                break;

            case L'P':
                // -P: save (or remove) settings in the registry
                if ((op_mode != OP_MODE_CREATE) &
                    (op_mode != OP_MODE_REMOVE))
                    ImDiskSyntaxHelp();

                save_settings = TRUE;
                break;

            case L'u':
                // -u: explicit device unit number
                if ((argc < 2) |
                    ((mount_point != NULL) & (op_mode != OP_MODE_CREATE)) |
                    (device_number != IMDISK_AUTO_DEVICE_NUMBER))
                    ImDiskSyntaxHelp();

                if (!iswdigit(argv[1][0]))
                    ImDiskSyntaxHelp();

                device_number = wcstoul(argv[1], NULL, 0);

                argc--;
                argv++;
                break;

            case L'm':
                // -m: mount point (drive letter or directory)
                if ((argc < 2) |
                    (mount_point != NULL) |
                    ((device_number != IMDISK_AUTO_DEVICE_NUMBER) &
                    (op_mode != OP_MODE_CREATE)))
                    ImDiskSyntaxHelp();

                mount_point = CharUpper(argv[1]);

                argc--;
                argv++;
                break;

            default:
                ImDiskSyntaxHelp();
            }
        else
            ImDiskSyntaxHelp();
    }

    // Switch block for operation switch found on command line.
    switch (op_mode)
    {
    case OP_MODE_CREATE:
    {
        // "#:" means "pick the first free drive letter".
        if ((mount_point != NULL) &&
            (wcscmp(mount_point, L"#:") == 0))
        {
            mount_point[0] = ImDiskFindFreeDriveLetter();

            if (mount_point[0] == 0)
            {
                fputs("All drive letters are in use.\r\n", stderr);
                return IMDISK_CLI_NO_FREE_DRIVE_LETTERS;
            }
        }

        if (auto_find_offset)
            if (file_name == NULL)
                ImDiskSyntaxHelp();
            else
                ImDiskGetOffsetByFileExt(file_name, &image_offset);

        if (auto_find_partition_entry != 0)
        {
            // Mount a specific partition from the image's partition table.
            PARTITION_INFORMATION partition_information[8];

            PPARTITION_INFORMATION part_rec =
                partition_information +
                auto_find_partition_entry - 1;

            if (!ImDiskGetPartitionInformation(file_name,
                disk_geometry.BytesPerSector,
                &image_offset,
                partition_information))
            {
                fputs("Error: Partition table not found.\r\n", stderr);
                return IMDISK_CLI_ERROR_PARTITION_NOT_FOUND;
            }

            if (part_rec->PartitionLength.QuadPart == 0)
            {
                fprintf(stderr,
                    "Error: Partition %i not defined.\n",
                    (int)auto_find_partition_entry);
                return IMDISK_CLI_ERROR_PARTITION_NOT_FOUND;
            }

            image_offset.QuadPart += part_rec->StartingOffset.QuadPart;
            disk_geometry.Cylinders = part_rec->PartitionLength;
        }
        else if (auto_find_offset)
        {
            // No explicit partition requested: use the first non-container
            // partition found in the image, if any.
            PARTITION_INFORMATION partition_information[8];

            if (ImDiskGetPartitionInformation(file_name,
                disk_geometry.BytesPerSector,
                &image_offset,
                partition_information))
            {
                PPARTITION_INFORMATION part_rec;

                for (part_rec = partition_information;
                    part_rec < partition_information + 8;
                    part_rec++)
                    if ((part_rec->PartitionLength.QuadPart != 0) &
                        !IsContainerPartition(part_rec->PartitionType))
                    {
                        image_offset.QuadPart +=
                            part_rec->StartingOffset.QuadPart;
                        disk_geometry.Cylinders = part_rec->PartitionLength;
                        break;
                    }
            }
        }

        ret = ImDiskCliCreateDevice(&device_number,
            &disk_geometry,
            &image_offset,
            flags,
            file_name,
            native_path,
            mount_point,
            numeric_print,
            format_options,
            save_settings);

        if (ret != 0)
            return ret;

        // Notify processes that new device has arrived.
        if ((mount_point != NULL) &&
            (((wcslen(mount_point) == 2) && mount_point[1] == ':') ||
            ((wcslen(mount_point) == 3) && (wcscmp(mount_point + 1, L":\\") == 0))))
        {
            puts("Notifying applications...");
            ImDiskNotifyShellDriveLetter(NULL, mount_point);
        }

        puts("Done.");

        return 0;
    }

    case OP_MODE_REMOVE:
        if ((device_number == IMDISK_AUTO_DEVICE_NUMBER) &
            ((mount_point == NULL) |
            emergency_remove))
            ImDiskSyntaxHelp();

        return ImDiskCliRemoveDevice(device_number, mount_point, force_dismount,
            emergency_remove, save_settings);

    case OP_MODE_QUERY:
        // Without a device number or mount point, list the whole driver
        // status instead of one device.
        if ((device_number == IMDISK_AUTO_DEVICE_NUMBER) &
            (mount_point == NULL))
            return !ImDiskCliQueryStatusDriver(numeric_print);

        return ImDiskCliQueryStatusDevice(device_number, mount_point);

    case OP_MODE_EDIT:
        if ((device_number == IMDISK_AUTO_DEVICE_NUMBER) &
            (mount_point == NULL))
            ImDiskSyntaxHelp();

        if (flags_to_change != 0)
            ret = ImDiskCliChangeFlags(device_number, mount_point, flags_to_change,
                flags);

        if (disk_geometry.Cylinders.QuadPart > 0)
            ret = ImDiskCliExtendDevice(device_number, mount_point,
                disk_geometry.Cylinders);

        return ret;
    }

    // No operation switch was given on the command line.
    ImDiskSyntaxHelp();
}
#pragma intrinsic(_InterlockedCompareExchange)
#if !defined(_DEBUG) && !defined(DEBUG) && (defined(_WIN64) || _MSC_VER < 1600)
// We have our own EXE entry to be less dependent on
// specific MSVCRT code that may not be available in older Windows versions.
// It also saves some EXE file size.
__declspec(noreturn)
void
__cdecl
wmainCRTStartup()
{
    int argc = 0;
    LPWSTR *argv = CommandLineToArgvW(GetCommandLine(), &argc);

    // Failure to split the command line is treated as running on an
    // unsupported platform (see message text below).
    if (argv == NULL)
    {
        MessageBoxA(NULL,
                    "This program requires Windows NT/2000/XP.",
                    "ImDisk Virtual Disk Driver",
                    MB_ICONSTOP);

        ExitProcess((UINT)-1);
    }

    // exit() rather than ExitProcess() so stdio buffers are flushed.
    exit(wmain(argc, argv));
}
#endif
|
# Modify external links for collections documents and pages:
# - add rel 'noopener' and 'noreferrer'
# - add target _blank
# - add externalLink class
# Links pointing back to the site's own configured 'url' are left untouched.
def external_links(document)
  Jekyll.logger.debug('external link for', document.relative_path)
  doc = Nokogiri::HTML::Document.parse(document.output)
  # Normalize once so the comparison below is case-insensitive and safe when
  # the 'url' config key is missing (nil).
  site_url = document.site.config['url'].to_s.downcase
  doc.css("a").each do |link|
    url = link["href"]
    next if url == nil
    # Only absolute http(s) links are candidates for external treatment.
    next if url[0, 4] != 'http'
    # Fix: compare both sides lowercased, and never match against an empty
    # site URL (an empty string is contained in every URL, which would have
    # skipped all links).
    next if !site_url.empty? && url.downcase.include?(site_url)
    link.add_class('externalLink')
    link['rel'] = 'noopener noreferrer'
    link['target'] = "_blank"
  end
  document.output = doc.to_s
end
# Rewrite external links in collection documents after Jekyll renders them.
Jekyll::Hooks.register :documents, :post_render do |document|
  external_links(document)
end

# Same for regular pages, limited to HTML/markdown sources.
Jekyll::Hooks.register :pages, :post_render do |page|
  # Skip non-HTML assets (e.g. JS files) that also pass through this hook.
  allowed_extensions = ['.md', '.markdown', '.htm', '.html']
  if allowed_extensions.include?( page.ext )
    external_links(page)
  end
end
|
// Entry point for the ScoreBadge component: pulls in its stylesheet as a
// side effect and re-exports the component as the default export.
import './ScoreBadge.css'
import ScoreBadge from './ScoreBadge'

export default ScoreBadge
|
<?php
namespace SlackPHP\Tests\SlackAPI\Models\Methods;
use PHPUnit\Framework\TestCase;
use SlackPHP\SlackAPI\Exceptions\SlackException;
use SlackPHP\SlackAPI\Models\Methods\UsersList;
use SlackPHP\SlackAPI\Enumerators\Method;
/**
 * @author Dzianis Zhaunerchyk <dzhaunerchyk@gmail.com>
 * @author Zxurian
 * @covers UsersList
 */
class UsersListTest extends TestCase
{
    /** @var bool stand-in value for the presence flag */
    private $dummyBool = true;

    /**
     * setPresence() should store the value on the private property and
     * return the model itself for fluent chaining.
     */
    public function testSettingPresence()
    {
        $usersListObject = new UsersList();
        $returnedObject = $usersListObject->setPresence($this->dummyBool);
        $refUsersListObject = new \ReflectionObject($usersListObject);
        $presenceProperty = $refUsersListObject->getProperty('presence');
        $presenceProperty->setAccessible(true);

        $this->assertInstanceOf(UsersList::class, $returnedObject);
        $this->assertEquals($this->dummyBool, $presenceProperty->getValue($usersListObject));
    }

    /**
     * setPresence() should reject non-boolean values.
     */
    public function testSettingInvalidPresence()
    {
        $this->expectException(\InvalidArgumentException::class);
        $usersListObject = new UsersList();
        $usersListObject->setPresence(null);
    }

    /**
     * getPresence() should return the value previously stored on the
     * property. Fix: the original test set the property via reflection but
     * never asserted anything, so it could not fail.
     */
    public function testGetPresence()
    {
        $usersListObject = new UsersList();
        $refUsersListObject = new \ReflectionObject($usersListObject);
        $presenceProperty = $refUsersListObject->getProperty('presence');
        $presenceProperty->setAccessible(true);
        $presenceProperty->setValue($usersListObject, $this->dummyBool);

        $this->assertEquals($this->dummyBool, $usersListObject->getPresence());
    }

    /**
     * The response class name is the request class name plus "Response".
     */
    public function testGetResponseClass()
    {
        $usersListObject = new UsersList();
        $this->assertEquals(get_class($usersListObject).'Response', $usersListObject->getResponseClass());
    }

    /**
     * The model should map to the users.list API method.
     */
    public function testGetMethod()
    {
        $usersListObject = new UsersList();
        $this->assertEquals(Method::USERS_LIST(), $usersListObject->getMethod());
    }
}
|
namespace FooCoin.Core.Validation
{
    /// <summary>
    /// Outcome of a validation check: a success flag plus an optional error
    /// message. Converts implicitly to and from <c>bool</c> so results can be
    /// used directly in conditionals.
    /// </summary>
    public class ValidationResult
    {
        /// <summary>Whether the validated subject passed the check.</summary>
        public bool IsValid { get; set; }

        /// <summary>Error description; null when the result is valid.</summary>
        public string Error { get; set; }

        public ValidationResult(bool isValid, string error = null)
        {
            this.IsValid = isValid;
            this.Error = error;
        }

        /// <summary>Creates a successful result with no error message.</summary>
        public static ValidationResult Valid()
        {
            return new ValidationResult(true);
        }

        /// <summary>Creates a failed result carrying the given error message.</summary>
        public static ValidationResult Invalid(string error)
        {
            return new ValidationResult(false, error);
        }

        public static implicit operator bool(ValidationResult result)
        {
            return result.IsValid;
        }

        public static implicit operator ValidationResult(bool isValid)
        {
            return new ValidationResult(isValid);
        }
    }
}
|
#!/usr/bin/env ruby
# Reads each line of the given file, evaluates it as Ruby code, and prints
# the line followed by its result in "code # => result" form.
#
# The path parameter defaults to the original hard-coded sample file, so
# existing callers keep working unchanged.
#
# WARNING: uses Kernel#eval on the file contents — only run on trusted input.
def code_processor(path = './sample-txt/sample.txt')
  File.readlines(path).each do |line|
    print "#{line.chomp} # => #{eval(line)}", "\n"
  end
end
# Run the processor only when this file is executed directly, not required.
if __FILE__ == $0
  code_processor
end
|
package org.vilvaadn.pubsubexample
import org.scalajs.dom
import japgolly.scalajs.react.extra.router._
import japgolly.scalajs.react.vdom.html_<^._
import SubscriberClient.WebSocketsApp
object SubscriberRouter {
  // Top-level pages addressable by the router.
  sealed trait MenuItems
  case object Home extends MenuItems
  case object PubSubExample extends MenuItems

  // Route table: root shows a welcome header, "#pubsubexample" mounts the
  // websocket demo component, and unknown routes redirect back to Home.
  val routerConfig = RouterConfigDsl[MenuItems].buildConfig { dsl =>
    import dsl._

    (emptyRule
      | staticRoute(root, Home) ~> render(<.h1("Welcome!"))
      | staticRoute("#pubsubexample", PubSubExample) ~> renderR(r => WebSocketsApp())
      ).notFound(redirectToPage(Home)(Redirect.Replace))
  }

  /*
  val baseUrl =
    dom.window.location.hostname match {
      case "localhost" | "127.0.0.1" | "0.0.0.0" =>
        BaseUrl.fromWindowUrl(_.takeWhile(_ != '#'))
      case _ =>
        BaseUrl.fromWindowOrigin / "pubsub-example"
    }
  */
  // Base URL taken from the current window origin (see the commented-out
  // per-host alternative above).
  val baseUrl = BaseUrl.fromWindowOrigin

  // Router component ready to be mounted by the application entry point.
  val router = Router(baseUrl, routerConfig)
}
|
# TodoistAPI_JP
Todoist Developer APIの翻訳をしているところです。
| API | source | before | after | public |
| --- | --- | --- | --- | --- |
| REST API | [本家][original-rest-v1] | [翻訳前][trans-rest-v1-before] | [翻訳後][trans-rest-v1-after] | [公開版](REST_API/) |
| Sync API | [本家][original-sync-v8] | [翻訳前][trans-sync-v8-before] | [翻訳後][trans-sync-v8-after] | [公開版](Sync_API/) |
[original-rest-v1]:https://developer.todoist.com/rest/v1/
[trans-rest-v1-before]:translation_project/source/REST_API/
[trans-rest-v1-after]:translation_project/target/REST_API/
[original-sync-v8]:https://developer.todoist.com/sync/v8/
[trans-sync-v8-before]:translation_project/source/Sync_API/
[trans-sync-v8-after]:translation_project/target/Sync_API/
markdown形式でやろうかと思ってたんですが、
すべて張り付けたり抽出したりするのが面倒になってきたので、
とりあえず元のHTMLを分割してOmegaTで翻訳しています。
gitを使える方はブランチを作ってから翻訳して、プルリクお願いします。
HTMLからmarkdownに変換する方法を知っている方いましたら、
教えていただけるか、変換したものをプルリクしてもらえるとありがたいです。
ご協力いただける方、募集中です。
|
# coding: utf-8
require "skull_and_crossbones"
# Helpers for driving an RSpec formatter under test with fabricated
# examples and notifications.
module FormatterSupport
  # Builds an RSpec example double with a populated execution result.
  # +metadata+ may carry :status (default :passed) and :finished_at
  # (default Time.now); any remaining keys become the example's metadata.
  def new_example(metadata = {})
    # Dup so the deletes below don't mutate the caller's hash.
    metadata = metadata.dup
    result = RSpec::Core::Example::ExecutionResult.new
    result.started_at = Time.now
    finished_at = metadata.delete(:finished_at) { Time.now }
    result.record_finished(metadata.delete(:status) { :passed }, finished_at)
    instance_double(RSpec::Core::Example,
      description: "Example",
      full_description: "Example",
      execution_result: result,
      metadata: metadata)
  end

  # Notification announcing a run of two examples.
  def start_notification
    RSpec::Core::Notifications::StartNotification.new(2)
  end

  # Wraps a fresh example double in an ExampleNotification.
  def example_notification(metadata = {})
    RSpec::Core::Notifications::ExampleNotification.for new_example(metadata)
  end
end
RSpec.describe SkullAndCrossbones do
  include FormatterSupport

  # Formatter output is captured in memory so assertions can inspect it.
  let(:output) { StringIO.new }

  let(:config) do
    config = RSpec::Core::Configuration.new
    config.output_stream = output
    config
  end

  # Registers the formatter under test and returns the registered instance.
  let(:formatter) do
    config.add_formatter(described_class)
    config.formatters.first
  end

  # Referencing +formatter+ first ensures it is registered before the
  # reporter is built from the configuration.
  let(:reporter) do
    formatter
    config.reporter
  end

  before(:each) do
    reporter.notify :start, start_notification
  end

  it "prints . for fast, passing examples" do
    reporter.notify :example_passed, example_notification

    expect(output.string).to eq(".")
  end

  it "prints ☠ for slow but passing examples" do
    # An example that "finishes" 100 seconds after it started.
    reporter.notify :example_passed,
      example_notification(finished_at: Time.now + 100)

    expect(output.string).to eq("☠")
  end

  it "prints an F for failing examples" do
    reporter.notify :example_failed, example_notification(status: :failed)

    expect(output.string).to eq("F")
  end

  it "prints a * for pending examples" do
    reporter.notify :example_pending, example_notification

    expect(output.string).to eq("*")
  end
end
|
! include 'opkda2.f'
! include 'opkda1.f'
! include 'opkdmain.f'
module odepack
implicit none
real(kind=8), dimension(:), allocatable :: rwork
integer, dimension(:), allocatable :: iwork
integer :: jt, itol, iopt, itask, lrw, liw, istate, neq
real(kind=8) :: rtol,atol
real(kind=8) :: tsave
!$omp threadprivate(rwork,iwork,jt,itol,iopt,itask,lrw,liw,istate,neq,rtol,atol,tsave)
interface lsoda_init
module procedure lsoda_init
end interface
interface lsoda_wrapper
module procedure lsoda_wrapper
end interface
interface lsoda
subroutine dlsoda(f,neq,y,t,tout,itol,rtol,atol,itask, &
istate,iopt,rwork,lrw,iwork,liw,jac,jt)
external f, jac
integer, intent(in) :: neq, itol, itask, liw, lrw,jt,iopt
integer, intent(inout) :: istate
real(kind=8), intent(in) :: tout
real(kind=8), intent(in) :: atol,rtol
real(kind=8), intent(inout) :: t
real(kind=8), intent(inout), dimension(neq) :: y
integer, intent(inout), dimension(liw) :: iwork
real(kind=8), intent(inout), dimension(lrw) :: rwork
end subroutine dlsoda
! module procedure lsoda_basic
end interface
contains
subroutine lsoda_basic(f,y,t,oatol,ortol,jac,yout,mt,mxs,si, &
hmin,hmax)
external f, jac
real(kind=8), intent(inout), dimension(:) :: y
real(kind=8), intent(in), dimension(:) :: t
! real(kind=8), intent(out), dimension(size(t)) :: tarr
real(kind=8), intent(out), dimension(size(y),size(t)) :: &
yout
real(kind=8), intent(in) :: oatol, ortol
integer, intent(out), optional, dimension(4) :: si
integer, intent(in), optional :: mt,mxs
integer :: oiopt,oitask,ojt,oneq,npts,i
real(kind=8) :: tout, t0
real(kind=8), intent(in), optional :: hmin, hmax
character(len=500) :: errmsg
oneq=size(y)
npts=size(t)
ojt=1; oitask=1; oiopt=0
t0=t(1)
! write(6,*) 'init: ',y,npts,t
! write(6,*) 't0: ',t0,oatol,ortol,oneq
if(present(mt)) oitask=4
if(present(mxs)) oiopt=1
! write(6,*) 'yout: ',size(yout),y,t,oatol,ortol
call lsoda_init(y,t0,oatol,ortol, &
oiopt,oitask,ojt,oneq)
! iwork=0
! rwork=0d0
if(present(mt)) rwork(1)=maxval(t)
if(present(mxs)) then
iwork(6)=mxs
rwork(5:7)=0d0
iwork(5)=0
iwork(7:9)=0
endif
if(present(hmin)) rwork(7)=hmin
if(present(hmax)) rwork(6)=hmax
yout(:,1)=y
do i=2,npts
! write(6,*) 'sizet: ',size(t)
tout=t(i)
! write(6,*) 'tout: ',tout, size(y), y, jt, oneq
call lsoda_wrapper(f,y,tout,jac,errmsg)
! write(6,*) 'tout: ',y
! tarr(i)=tsave
! write(6,*) 'yout: ',i,size(yout(:,i)),size(y)
yout(:,i)=y
! write(6,*) 'tout: ',tout,size(y),y,(yout(1,i)-yout(1,i-1))
! & /(t(i)-t(i-1)),jt,oneq
! write(6,*) 'made it'
if(istate.lt.0) then
! error: set this pixel to zero and break
! write(6,*) 'istate lt 0 in lsoda: ',i,istate,errmsg
yout(:,:)=0d0
exit
endif
! istate=2 ! Reset istate in case of error
enddo
! Write some statistics:
! write(6,*) 'Last method switch: ',rwork(15)
! write(6,*) 'N_f: ',iwork(12)
! write(6,*) 'N_jac: ',iwork(13)
! write(6,*) 'Method: ',iwork(19)
if(present(si)) then
si(1)=iwork(12); si(2)=iwork(13); si(3)=iwork(19); si(4)=iwork(11)
endif
call lsoda_destroy()
end subroutine lsoda_basic
subroutine lsoda_wrapper(f,y,t,jac,errmsg)
external f, jac
real(kind=8), intent(inout), dimension(:) :: y
real(kind=8), intent(inout) :: t
character(len=500), optional, intent(out) :: errmsg
! write(6,*) 'lsoda: ',neq,y(1:2),tsave,t
! write(6,*) 'args: ',itol,rtol,atol,itask
! write(6,*) 'more: ',istate,iopt,lrw,liw,jt
! write(6,*) 'tsave: ',tsave, t, y
call lsoda(f,neq,y,tsave,t,itol,rtol,atol,itask, &
istate,iopt,rwork,lrw,iwork,liw,jac,jt)
! write(6,*) 'tsave: ',tsave-t
call lsoda_err(errmsg)
! write(6,*) errmsg
! istate=2 ! Reset istate to continue run
tsave=t
end subroutine lsoda_wrapper
subroutine lsoda_init(y,t,oatol,ortol, &
oiopt,oitask,ojt,oneq)
! external f, jac
real(kind=8), intent(inout), dimension(:) :: y
real(kind=8), intent(inout) :: t
integer, optional :: oiopt, oitask, ojt, oneq
! integer :: oiopt, oitask, ojt
! real(kind=8), intent(in), dimension(:) :: oatol, ortol
real(kind=8), intent(in) :: oatol, ortol
if (present(oneq)) then
neq=oneq
else
neq=size(y)
endif
if (present(oiopt)) then
iopt=oiopt
else
iopt=0
endif
if (present(oitask)) then
itask=oitask
else
itask=0
endif
if (present(ojt)) then
jt=ojt
else
jt=1
endif
! jt=ojt; itask=oitask; iopt=oiopt
istate=1
itol=1
liw=20+neq; lrw=max(20+16*neq,22+9*neq+neq*neq);
allocate(iwork(liw)); allocate(rwork(lrw))
rtol=ortol; atol=oatol
! write(6,*) 'init: ',neq,y(1:2),t
! write(6,*) 'args: ',itol,rtol,atol,itask
! write(6,*) 'more: ',istate,iopt,lrw,liw,jt
! call lsoda(f,neq,y,t,tout,itol,rtol,atol,itask, &
! istate,iopt,rwork,lrw,iwork,liw,jac,jt)
! call lsoda_err(errmsg); write(6,*) istate, errmsg
tsave=t
end subroutine lsoda_init
subroutine lsoda_err(errmsg)
character(len=500), optional, intent(out) :: errmsg
! Check for ISTATE errors:
if (present (errmsg)) then
SELECT CASE (istate)
CASE (1)
errmsg='LSODA not initialized before call'
CASE (2)
errmsg='Integration successful'
CASE (-1)
errmsg='Excessive work done this step'
CASE (-2)
errmsg='Too much accuracy requested'
CASE (-3)
errmsg='Illegal input detected'
CASE (-4)
errmsg='Inappropriate input or singularity'
CASE (-5)
errmsg='Repeated convergence test failures'
CASE (-6)
errmsg='Error weight became zero'
CASE (-7)
errmsg='Work arrays too small for new method'
END SELECT
else
select case (istate)
CASE (1)
write(6,*) 'LSODA not initialized before call'
CASE (2)
write(6,*) 'Integration successful'
CASE (-1)
write(6,*) 'Excessive work done this step'
CASE (-2)
write(6,*) 'Too much accuracy requested'
CASE (-3)
write(6,*) 'Illegal input detected'
CASE (-4)
write(6,*) 'Inappropriate input or singularity'
CASE (-5)
write(6,*) 'Repeated convergence test failures'
CASE (-6)
write(6,*) 'Error weight became zero'
CASE (-7)
write(6,*) 'Work arrays too small for new method'
end select
endif
end subroutine lsoda_err
subroutine lsoda_destroy()
deallocate(iwork)
deallocate(rwork)
! deallocate(rtol)
! deallocate(atol)
end subroutine lsoda_destroy
end module
|
(ns gimel.highlight
(:require [clojure.java.io :as io]
[net.cgrand.enlive-html :as enlive]))
(defn- highlight
  "Tags a code node for Prism.js by rewriting its :class attribute to
  \"language-<lang> line-numbers\", where <lang> is the node's existing
  class (empty string when absent).
  Fix: removed the unused `code` binding, which read :content but was
  never referenced."
  [node]
  (let [lang (->> node :attrs :class (apply str))]
    (assoc-in node [:attrs :class]
              (str "language-" lang " line-numbers"))))
(defn highlight-code-blocks
  "Applies `highlight` to every <code> element nested in a <pre> within
  the HTML string `page`, returning the transformed markup."
  [page]
  (enlive/sniptest page
                   [:pre :code] highlight))
|
package statistics;
import java.util.Arrays;
/**
 * Computes the arithmetic mean of the collected values by combining a
 * {@link SumStatisticsStrategy} with a {@link CountStatisticsStrategy}.
 *
 * @author zhangxuepei
 * @since 3.0
 */
public class AverageStatistics extends AbstractContainerStatistics {

    /** Builds an average statistic from externally supplied strategies. */
    public AverageStatistics(SumStatisticsStrategy sumStatisticsStrategy,
                             CountStatisticsStrategy countStatisticsStrategy) {
        super(Arrays.asList(sumStatisticsStrategy, countStatisticsStrategy));
    }

    /** Builds an average statistic backed by fresh sum/count strategies. */
    public AverageStatistics() {
        SumStatisticsStrategy sum = new SumStatisticsStrategy();
        CountStatisticsStrategy count = new CountStatisticsStrategy();
        this.initDependencyStrategy(Arrays.asList(sum, count));
    }

    /**
     * @return sum / count of the collected values.
     *         NOTE(review): with zero recorded values this divides by
     *         zero, which for doubles yields NaN/Infinity rather than
     *         throwing — confirm that is the intended contract.
     */
    public Double getStatisticsResult() {
        Double sum = getStatisticsStrategy(SumStatisticsStrategy.class).getStatisticsResult();
        Double count = getStatisticsStrategy(CountStatisticsStrategy.class).getStatisticsResult();
        return sum / count;
    }
}
|
# Spec for `3scale remote add`: validates the remote name/URL and
# persists new remotes into the toolbox config file.
RSpec.describe ThreeScaleToolbox::Commands::RemoteCommand::RemoteAddSubcommand do
  include_context :resources
  include_context :temp_dir
  context '#run' do
    let(:config_file) { File.join(tmp_dir, '.3scalerc') }
    let(:options) { { 'config-file': config_file } }
    let(:arguments) { {} }
    subject { described_class.new(options, arguments, nil) }
    context 'remote name already exists' do
      # Fixture config already defines remote_1..remote_5.
      let(:config_file) { File.join(resources_path, 'valid_config_file.yaml') }
      let(:arguments) do
        { remote_name: 'remote_1', remote_url: 'https://1@example.com' }
      end
      it 'raises error' do
        expect { subject.run }.to raise_error(ThreeScaleToolbox::Error,
                                              /remote name already exists/)
      end
    end
    context 'remote url not valid' do
      let(:arguments) do
        { remote_name: 'remote_1', remote_url: 'https://1@example.com' }
      end
      before :each do
        # Remote verification probe is rejected by the server.
        stub_request(:get, /example.com/).to_return(status: 403,
                                                    body: 'Forbidden')
      end
      it 'raises error' do
        expect { subject.run }.to raise_error(ThreeScaleToolbox::Error,
                                              /remote not valid/)
      end
    end
    context 'remote url is not http' do
      let(:arguments) do
        { remote_name: 'remote_1', remote_url: 'some_name' }
      end
      it 'raises error' do
        expect { subject.run }.to raise_error(ThreeScaleToolbox::InvalidUrlError)
      end
    end
    context 'remote is valid' do
      let(:config_file) { File.join(tmp_dir, 'valid_config_file.yaml') }
      let(:arguments) do
        { remote_name: 'remote_new', remote_url: 'https://new@example.com' }
      end
      before :each do
        # Config file is going to be updated.
        # Config file will be a fresh copy of the source
        FileUtils.cp(File.join(resources_path, 'valid_config_file.yaml'),
                     tmp_dir)
        stub_request(:get, /example.com/).to_return(status: 200,
                                                    body: '{"accounts": []}')
      end
      let(:configuration) { ThreeScaleToolbox::Configuration.new(config_file) }
      it 'new remote is stored in conf file' do
        subject.run
        # The userinfo part of the URL becomes the stored auth token.
        expected_remote_value = { endpoint: 'https://example.com',
                                  authentication: 'new' }
        expect(configuration.data(:remotes)).to include('remote_new' => expected_remote_value)
      end
      it 'old remotes still in conf file' do
        subject.run
        1.upto(5) do |i|
          expect(configuration.data(:remotes)).to include("remote_#{i}")
        end
      end
    end
  end
end
|
@extends('Admin.Layout.index')
@section('content')
{{-- Admin form: create a new board/category; posts to /admin/cates. --}}
<div class="mws-panel grid_8">
    {{-- Validation errors from the previous submission, if any. --}}
    @if (count($errors) > 0)
        <div class="mws-form-message error">
            <ul>
                @foreach ($errors->all() as $error)
                    <li>{{ $error }}</li>
                @endforeach
            </ul>
        </div>
    @endif
    <div class="mws-panel-header text-center">
        <span >版块添加</span>
    </div>
    <div class="mws-panel-body no-padding">
        {{-- multipart form: required for the icon file upload below. --}}
        <form class="mws-form" action="/admin/cates" method="post" enctype="multipart/form-data">
            {{ csrf_field() }}
            <div class="mws-form-block">
                <div class="mws-form-row">
                    <label class="mws-form-label">所属分类</label>
                    <div class="mws-form-item">
                        {{-- Parent category selector; pid=0 means top level.
                             $id preselects the current parent when given. --}}
                        <select class="small" name="pid">
                            <option value="0">请选择分类</option>
                            @foreach($data as $k=>$v)
                                <option value="{{ $v->id }}" @if($v->id == $id) selected @endif >{{ $v->cname }} </option>
                            @endforeach
                        </select>
                    </div>
                </div>
                <div class="mws-form-row">
                    <label class="mws-form-label ">版块名称</label>
                    <div class="mws-form-item">
                        <input type="text" name="cname" class="small">
                    </div>
                </div>
                <div class="mws-form-row">
                    <label class="mws-form-label">版块图标</label>
                    <div class="mws-form-item" style="width:588px">
                        <input type="file" name="profile">
                    </div>
                </div>
            </div>
            <div class="mws-button-row">
                <input type="submit" value="添加" class="btn btn-success">
                <input type="reset" value="重置" class="btn btn-info">
            </div>
        </form>
    </div>
</div>
@endsection
|
package dk.jamiemagee.swissspoon.module2
import mu.KotlinLogging
import org.springframework.shell.standard.ShellComponent
import org.springframework.shell.standard.ShellMethod
import java.io.File
private val logger = KotlinLogging.logger {}

/**
 * Shell commands exposed by module 2: a logging demo and a file loader.
 */
@ShellComponent
class Module2 {
    /** Writes a demo message through the module logger. */
    @ShellMethod("Log some stuff to STDOUT")
    fun logSomething() {
        // Explicit `: Unit` return type removed — it is redundant in Kotlin.
        logger.info { "Aren't logs fun?" }
    }

    /** Returns the full text content of [file]; I/O errors propagate to the shell. */
    @ShellMethod("Load a file")
    fun loadFile(file: File): String = file.readText()
}
|
use std::io;
use self::reader::LogReader;
use super::*;
/// Iterator over all log messages reachable from a set of segments.
pub struct LogIter {
    /// Database configuration (log file handle, segment size, …).
    pub config: Config,
    /// Yields `(lsn, lid)` pairs for the segments to walk, in order.
    pub segment_iter: Box<dyn Iterator<Item = (Lsn, LogId)>>,
    /// File offset of the segment currently being read, if any.
    pub segment_base: Option<LogId>,
    /// Iteration stops once `cur_lsn` exceeds this logical sequence number.
    pub max_lsn: Lsn,
    /// Logical sequence number of the next message to read.
    pub cur_lsn: Lsn,
    /// Trailer lsn of the current segment; `None` marks a torn segment,
    /// which ends iteration at the segment boundary.
    pub trailer: Option<Lsn>,
}
impl Iterator for LogIter {
    type Item = (Lsn, DiskPtr, Vec<u8>);

    /// Yields the next `(lsn, pointer, payload)` triple, advancing
    /// `cur_lsn` past each message read. Returns `None` at the end of
    /// the log, on a torn segment, or on any unrecoverable read failure.
    fn next(&mut self) -> Option<Self::Item> {
        // If segment is None, get next on segment_iter, panic
        // if we can't read something we expect to be able to,
        // return None if there are no more remaining segments.
        loop {
            // True when cur_lsn is too close to the segment's end for
            // another message (header + trailer space) to fit.
            let remaining_seg_too_small_for_msg = !valid_entry_offset(
                self.cur_lsn as LogId,
                self.config.io_buf_size,
            );

            if self.trailer.is_none()
                && remaining_seg_too_small_for_msg
            {
                // We've read to the end of a torn
                // segment and should stop now.
                trace!(
                    "trailer is none, ending iteration at {}",
                    self.max_lsn
                );
                return None;
            } else if self.segment_base.is_none()
                || remaining_seg_too_small_for_msg
            {
                // Need to move on to the next segment before reading more.
                if let Some((next_lsn, next_lid)) =
                    self.segment_iter.next()
                {
                    assert!(
                        next_lsn + (self.config.io_buf_size as Lsn) >= self.cur_lsn,
                        "caller is responsible for providing segments \
                        that contain the initial cur_lsn value or higher"
                    );

                    #[cfg(target_os = "linux")]
                    self.fadvise_willneed(next_lid);

                    if let Err(e) =
                        self.read_segment(next_lsn, next_lid)
                    {
                        debug!(
                            "hit snap while reading segments in \
                            iterator: {:?}",
                            e
                        );
                        return None;
                    }
                } else {
                    trace!("no segments remaining to iterate over");
                    return None;
                }
            }

            if self.cur_lsn > self.max_lsn {
                // all done
                trace!("hit max_lsn in iterator, stopping");
                return None;
            }

            // Translate the logical sequence number into a file offset
            // within the current segment.
            let lid = self.segment_base.unwrap()
                + (self.cur_lsn % self.config.io_buf_size as Lsn) as LogId;

            let f = &self.config.file;

            match f.read_message(lid, &self.config) {
                // Message payload lives in an external blob file.
                Ok(LogRead::Blob(lsn, buf, blob_ptr)) => {
                    if lsn != self.cur_lsn {
                        debug!("read Flush with bad lsn");
                        return None;
                    }
                    trace!("read blob flush in LogIter::next");
                    self.cur_lsn +=
                        (MSG_HEADER_LEN + BLOB_INLINE_LEN) as Lsn;

                    return Some((
                        lsn,
                        DiskPtr::Blob(lid, blob_ptr),
                        buf,
                    ));
                }
                // Message payload stored inline in the log.
                Ok(LogRead::Inline(lsn, buf, on_disk_len)) => {
                    if lsn != self.cur_lsn {
                        debug!("read Flush with bad lsn");
                        return None;
                    }
                    trace!("read inline flush in LogIter::next");
                    self.cur_lsn +=
                        (MSG_HEADER_LEN + on_disk_len) as Lsn;
                    return Some((lsn, DiskPtr::Inline(lid), buf));
                }
                // A failed/aborted write: skip over it and keep going.
                Ok(LogRead::Failed(lsn, on_disk_len)) => {
                    if lsn != self.cur_lsn {
                        debug!("read Failed with bad lsn");
                        return None;
                    }
                    trace!("read zeroed in LogIter::next");
                    self.cur_lsn +=
                        (MSG_HEADER_LEN + on_disk_len) as Lsn;
                }
                Ok(LogRead::Corrupted(_len)) => {
                    trace!("read corrupted msg in LogIter::next as lid {} lsn {}",
                        lid, self.cur_lsn);
                    return None;
                }
                // Pad marks the end of a segment's useful contents.
                Ok(LogRead::Pad(lsn)) => {
                    if lsn != self.cur_lsn {
                        debug!("read Pad with bad lsn");
                        return None;
                    }

                    if self.trailer.is_none() {
                        // This segment was torn, nothing left to read.
                        trace!("no segment trailer found, ending iteration");
                        return None;
                    }

                    // Clear segment state so the next loop iteration
                    // advances to the following segment.
                    self.segment_base.take();
                    self.trailer.take();

                    continue;
                }
                // A blob message whose external file is gone: skip it.
                Ok(LogRead::DanglingBlob(lsn, blob_ptr)) => {
                    debug!(
                        "encountered dangling blob \
                        pointer at lsn {} ptr {}",
                        lsn, blob_ptr
                    );
                    self.cur_lsn +=
                        (MSG_HEADER_LEN + BLOB_INLINE_LEN) as Lsn;
                    continue;
                }
                Err(e) => {
                    debug!(
                        "failed to read log message at lid {} \
                        with expected lsn {} during iteration: {}",
                        lid, self.cur_lsn, e
                    );
                    return None;
                }
            }
        }
    }
}
impl LogIter {
    /// read a segment of log messages. Only call after
    /// pausing segment rewriting on the segment accountant!
    ///
    /// Validates the segment header at `offset` against the expected
    /// `lsn`, probes the trailer, and positions `cur_lsn`/`segment_base`
    /// at the first message of the segment. A missing/mismatched trailer
    /// leaves `self.trailer = None`, marking the segment as torn.
    fn read_segment(
        &mut self,
        lsn: Lsn,
        offset: LogId,
    ) -> Result<(), ()> {
        trace!(
            "LogIter::read_segment lsn: {:?} cur_lsn: {:?}",
            lsn,
            self.cur_lsn
        );
        // we add segment_len to this check because we may be getting the
        // initial segment that is a bit behind where we left off before.
        assert!(lsn + self.config.io_buf_size as Lsn >= self.cur_lsn);
        let f = &self.config.file;
        let segment_header = f.read_segment_header(offset)?;
        // Segments are aligned to io_buf_size; anything else is corruption.
        if offset % self.config.io_buf_size as LogId != 0 {
            debug!("segment offset not divisible by segment length");
            return Err(Error::Corruption {
                at: DiskPtr::Inline(offset),
            });
        }
        if segment_header.lsn % self.config.io_buf_size as Lsn != 0 {
            debug!(
                "expected a segment header lsn that is divisible \
                 by the io_buf_size ({}) instead it was {}",
                self.config.io_buf_size, segment_header.lsn
            );
            return Err(Error::Corruption {
                at: DiskPtr::Inline(offset),
            });
        }

        if segment_header.lsn != lsn {
            // this page was torn, nothing to read
            debug!(
                "segment header lsn ({}) != expected lsn ({})",
                segment_header.lsn, lsn
            );
            return Err(io::Error::new(
                io::ErrorKind::Other,
                "encountered torn segment",
            )
            .into());
        }

        // The trailer lives in the last SEG_TRAILER_LEN bytes of the segment.
        let trailer_offset = offset + self.config.io_buf_size as LogId
            - SEG_TRAILER_LEN as LogId;
        let trailer_lsn = segment_header.lsn
            + self.config.io_buf_size as Lsn
            - SEG_TRAILER_LEN as Lsn;

        trace!("trying to read trailer from {}", trailer_offset);
        let segment_trailer = f.read_segment_trailer(trailer_offset);

        trace!("read segment header {:?}", segment_header);
        trace!("read segment trailer {:?}", segment_trailer);

        // Only accept a trailer that is intact and carries the lsn we
        // computed; otherwise treat the segment as torn (trailer = None).
        let trailer_lsn = segment_trailer.ok().and_then(|st| {
            if st.ok && st.lsn == trailer_lsn {
                Some(st.lsn)
            } else {
                trace!("segment trailer corrupted, not reading next segment");
                None
            }
        });

        self.trailer = trailer_lsn;
        // First message begins immediately after the segment header.
        self.cur_lsn = segment_header.lsn + SEG_HEADER_LEN as Lsn;
        self.segment_base = Some(offset);

        Ok(())
    }

    /// Hint the OS to prefetch the upcoming segment (Linux only).
    /// Panics if `posix_fadvise` itself reports an error.
    #[cfg(target_os = "linux")]
    fn fadvise_willneed(&self, lid: LogId) {
        use std::os::unix::io::AsRawFd;

        let f = &self.config.file;
        let ret = unsafe {
            libc::posix_fadvise(
                f.as_raw_fd(),
                lid as libc::off_t,
                self.config.io_buf_size as libc::off_t,
                libc::POSIX_FADV_WILLNEED,
            )
        };
        if ret != 0 {
            panic!(
                "failed to call fadvise: {}",
                std::io::Error::from_raw_os_error(ret)
            );
        }
    }
}
/// Returns `true` when `lid` falls inside the payload region of its
/// segment: past the segment header, and early enough that a message
/// header plus the segment trailer still fit before the segment ends.
fn valid_entry_offset(lid: LogId, segment_len: usize) -> bool {
    let len = segment_len as LogId;
    // Round down to the start of the segment containing `lid`.
    let segment_start = (lid / len) * len;
    let first_valid = segment_start + SEG_HEADER_LEN as LogId;
    let last_valid =
        segment_start + len - SEG_TRAILER_LEN as LogId - MSG_HEADER_LEN as LogId;
    (first_valid..=last_valid).contains(&lid)
}
|
<?php
// Demonstrates PHP default parameter values.

// Echoes $a; defaults to "teste" when no argument is given.
function teste($a = "teste") {
    echo "O valor de A é: $a <br>";
}

teste();
teste("asd");

// Echoes both values; $b is required, $a defaults to "x".
// Note: optional parameters conventionally come after required ones, as here.
function testando($b, $a = "x") {
    echo "O valor de a é: $a e de b é: $b <br>";
}

testando("1");
testando("1", "2");
|
use dropshot::endpoint;
use dropshot::HttpError;
use dropshot::HttpResponseOk;
use dropshot::RequestContext;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use tokio::sync::Mutex;
use std::sync::Arc;
use crate::context::ConjurContext;
/** Information about the client making the request */
#[allow(dead_code)]
#[derive(Deserialize, Serialize, JsonSchema)]
// serialized field names are camelCased in the JSON response
#[serde(rename_all = "camelCase")]
pub struct WhoAmI {
    /** The account attribute of the client provided access token. */
    account: String,
    /** The username attribute of the provided access token. */
    username: String,
    // TODO: fill out with other fields
}
/** Provides information about the client making an API request.
 *
 * Responds 200 with the token's account and username; responds 400 when
 * either value is absent from the request context.
 */
#[allow(unused_variables)]
#[endpoint {
    method = GET,
    path = "/whoami",
    tags = ["status"]
}]
pub async fn who_am_i(
    rqctx: Arc<RequestContext<Mutex<ConjurContext>>>,
) -> Result<HttpResponseOk<WhoAmI>, HttpError>
{
    let api_context = rqctx.context().lock().await;

    // Fix: the error value is now built only on the failure path instead
    // of eagerly on every request, and the is_none()/unwrap() pair is
    // replaced with a single match on both optional fields.
    match (api_context.account.clone(), api_context.user.clone()) {
        (Some(account), Some(username)) => {
            Ok(HttpResponseOk(WhoAmI { account, username }))
        }
        _ => {
            // TODO: return correct error code
            Err(HttpError::for_status(None, http::StatusCode::BAD_REQUEST))
        }
    }
}
|
# Prints a file named on the command line, then prompts for another
# filename on stdin and prints that file too.
# filename, = ARGV
filename = ARGV.first
prompt = "> "

txt = File.open(filename)
# Fix: the original literal was "#(...)", which is not Ruby interpolation
# syntax and printed verbatim; "#{filename}" is the intended form.
puts "Here's your file: #{filename}"
puts txt.read()
txt.close()

puts "I'll also ask you to type it again:"
print prompt
file_again = STDIN.gets.chomp()

txt_again = File.open(file_again)
puts txt_again.read()
txt_again.close()
|
from django.contrib import admin
from imager_images.models import Photo, Album
admin.site.register(Photo)
class AlbumInline(admin.TabularInline):
    """Tabular inline for editing an Album's photo memberships."""

    # Edit the through-table rows of the ``photos`` M2M relation directly.
    model = Album.photos.through
@admin.register(Album)
class AlbumAdmin(admin.ModelAdmin):
    """Admin for Album: photos are managed via the inline widget."""

    inlines = (AlbumInline,)
    # Hide the default M2M field; AlbumInline already covers it.
    exclude = ('photos',)
|
/**
 * Highlights noteworthy words inside the error-page output by wrapping
 * each case-insensitive match in <strong> tags.
 */
$(function() {
    var container = $(".error-page-errors");
    var html = container.html();
    // Fix: .html() returns null/undefined when the element is absent,
    // and calling .replace() on it would throw a TypeError.
    if (!html) {
        return;
    }
    var words = ["mathtabolism", "null", "exception"];
    // "$&" re-inserts the matched text with its original casing.
    var regex = RegExp(words.join("|"), "gi");
    container.html(html.replace(regex, "<strong>$&</strong>"));
});
|
import qs from 'qs'
import path from 'path'
import Express from 'express'
import React from 'react'
import { createStore } from 'redux'
import { Provider } from 'react-redux'
import { renderToString } from 'react-dom/server'
import counterApp from './reducers'
import App from './containers/App'
const app = Express()
const port = 3000

// Serve static files.
// Fix: Express mount paths are URL paths, not filesystem paths —
// './static' never matches requests for /static/bundle.js (which the
// rendered page loads), so the mount point must be '/static'.
app.use('/static', Express.static('static'))
/**
 * Express handler that server-renders the React app: seeds the Redux
 * store from the optional ?counter=N query parameter, renders <App/> to
 * an HTML string, and responds with the full page plus the serialized
 * store state for client-side hydration.
 */
const handleRender = (req, res) => {
  // Read the counter from the request, if provided
  const params = qs.parse(req.query)
  const counter = parseInt(params.counter, 10) || 0

  // Compile an initial state
  let preloadedState = { counter }

  // Create a new Redux store instance
  const store = createStore(counterApp, preloadedState)

  // Render the component to a string
  const html = renderToString(
    <Provider store={store}>
      <App />
    </Provider>
  )

  // Grab the initial state from our Redux store
  const finalState = store.getState()

  // Send the rendered page back to the client
  res.send(renderFullPage(html, finalState))
}
// This is fired every time the server side receives a request
app.use(handleRender)
// Inject initial component HTML and state into a template
// to be rendered on the client side.
// Note: the template text (including its inline comments) is part of the
// HTTP response body and must not be altered casually.
const renderFullPage = (html, preloadedState) => {
  return `
    <!DOCTYPE html>
    <html>
      <head>
        <title>Redux Universal Example</title>
      </head>
      <body>
        <div id="root">${html}</div>
        <script>
          // WARNING: See the following for security issues around embedding JSON in HTML:
          // https://redux.js.org/recipes/server-rendering/#security-considerations
          window.__PRELOADED_STATE__ = ${JSON.stringify(preloadedState).replace(
            /</g,
            '\\u003c'
          )}
        </script>
        <script src="/static/bundle.js"></script>
      </body>
    </html>
    `
}
// Start the HTTP server on port 3000 (see `port` above).
app.listen(port)
|
import * as React from "react";
import { createUseStyles } from "react-jss";
import * as colors from "../constants/colorScheme.json";
import * as fonts from "../constants/fontFamily.json";
import PublishedDate from "./publishedDate";
// JSS styles for the blog card: the whole card shifts up-left on hover
// and gains a shadow; the entire card body is a single <a> link.
const useStyles = createUseStyles({
  root: {
    display: "flex",
    flexDirection: "row",
    marginLeft: "400px",
    transition: "100ms",
    "&:hover": {
      transform: "translate(-20px, -20px)",
    },
  },
  card: {
    flex: 1,
    position: "relative",
    background: colors.white,
    "&:hover": {
      background: "white",
      boxShadow: `rgba(206, 143, 143, 0.5) 10px 10px`,
    },
    // The anchor fills the card so the whole surface is clickable.
    "& a": {
      padding: "65px",
      textDecoration: "none",
      color: "inherit",
      display: "block",
    },
    "& h1": {
      fontFamily: fonts.header,
      fontSize: "36px",
      lineHeight: "24px",
      color: colors.background,
      marginBottom: "54px",
    },
    "& p": {
      fontSize: "18px",
      lineHeight: "22px",
      width: "700px",
    },
  },
});
/** Props for the BlogCard component. */
interface IBlogCardProps {
  /** Summary data for a single blog entry. */
  blog: {
    title: string;
    /** Display date string; forwarded to the PublishedDate component. */
    publishedDate: string;
    /** Link target for the whole card. */
    url: string;
    /** Short teaser text shown under the title. */
    blurb: string;
    type: string;
  };
}
/**
 * Card linking to a single blog post: published date on the left and a
 * hoverable card with title and blurb on the right.
 */
function BlogCard(props: IBlogCardProps) {
  const classes = useStyles();
  const { blog } = props;
  return (
    <section className={classes.root}>
      {/* NOTE(review): the prop is spelled "publisedDate" (missing "h").
          It must match PublishedDate's declared prop name — confirm there
          before renaming either side. */}
      <PublishedDate publisedDate={blog.publishedDate} />
      <div className={classes.card}>
        <a href={blog.url}>
          <h1>{blog.title}</h1>
          <p>{blog.blurb}</p>
        </a>
      </div>
    </section>
  );
}

export default BlogCard;
|
require 'openssl'
# AES-128-CBC convenience helpers on String.
#
# NOTE(review): no IV is ever assigned, so both directions rely on the
# cipher's implicit default IV. Round-trips work because encrypt and
# decrypt both omit it, but this is insecure for real data — assign a
# random IV and transmit it with the ciphertext (TODO).
class String
  # Returns the AES-128-CBC ciphertext of self under +key+ (16 raw bytes).
  # Fix: uses OpenSSL::Cipher.new("AES-128-CBC") — the Cipher subclass
  # constructor (OpenSSL::Cipher::AES.new) is deprecated in the openssl gem.
  def encrypt(key)
    cipher = OpenSSL::Cipher.new("AES-128-CBC").encrypt
    cipher.key = key
    cipher.update(self) + cipher.final
  end

  # Inverse of #encrypt: returns the plaintext of self under +key+.
  def decrypt(key)
    cipher = OpenSSL::Cipher.new("AES-128-CBC").decrypt
    cipher.key = key
    cipher.update(self) + cipher.final
  end
end
# Demo: generate a random 16-byte key via a throwaway cipher instance,
# then round-trip a plaintext through the String helpers above.
keygen = OpenSSL::Cipher::AES.new(128, :CBC).encrypt
key = keygen.random_key
encrypted = "geheim".encrypt(key)
puts encrypted.decrypt(key) # => geheim
|
// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import 'package:flutter/material.dart';
/// Background color to be used for the selected conversation in the
/// conversation list.
final Color kSelectedBgColor = Color.lerp(Colors.white, Colors.blue[200], 0.2);

/// New chat conversation form: title
const String kNewChatFormTitle = 'New Chat';

/// New chat conversation form: hint text for the text field
const String kNewChatFormHintText = 'Enter email';

/// New chat conversation form: cancel button text
const String kNewChatFormCancel = 'CANCEL';

/// New chat conversation form: submit button text
const String kNewChatFormSubmit = 'OK';
/**
* Copyright (c) 2017-present, Justin Nguyen.
* All rights reserved.
*
* @author tuan3.nguyen@gmail.com
*
* @flow
* @format
*/
"use strict"
import React, { Component } from "react"
import { PropTypes } from "prop-types"
import { Image } from "react-native"
import { Header, Left, Body, Right, Button, Title, Subtitle, Label } from "native-base"
import Icon from "react-native-vector-icons/FontAwesome"
// Shared inline styles for the header icons and subtitle text.
const styles = {
    icon: {
        color: "#fff",
        fontSize: 20
    },
    headerText: {
        color: "#fff",
        fontSize: 14
    }
}
/**
 * App header: menu button, centered logo (plus a "driver on the way"
 * subtitle when applicable), and a promotions button.
 * Renders nothing while the app is loading or a booking is in progress.
 */
export default class AppHeader extends Component {
    render() {
        // Loading content still visible and we do not want to have header during initiation time.
        if (this.props.appStillLoading || this.props.bookingInProgress) return null
        return (
            <Header style={{ backgroundColor: "#FF5E3A" }}>
                <Left style={{ flex: 1 }}>
                    <Button transparent>
                        <Icon name="bars" style={styles.icon} />
                    </Button>
                </Left>
                <Body style={{ flex: 3 }}>
                    <Title>
                        {/* `logo` is a require()'d image resource id (a number) */}
                        <Image resizeMode="contain" source={this.props.logo} />
                    </Title>
                    {this.props.driverOnTheWay ? (
                        <Subtitle style={styles.headerText}>Your driver on the way</Subtitle>
                    ) : null}
                </Body>
                <Right style={{ flex: 1 }}>
                    <Button transparent>
                        <Icon name="gift" style={styles.icon} />
                    </Button>
                </Right>
            </Header>
        )
    }
}
// Runtime prop validation.
// NOTE(review): the file imports `{ PropTypes }` as a *named* import from
// "prop-types", whose documented usage is a default import — PropTypes may
// be undefined here at runtime. Confirm against the bundler's CJS interop.
AppHeader.propTypes = {
    logo: PropTypes.number.isRequired,
    driverOnTheWay: PropTypes.bool.isRequired,
    appStillLoading: PropTypes.bool.isRequired,
    bookingInProgress: PropTypes.bool.isRequired
}
|
const chalk = require('chalk');
const filesystem = require('./filesystem');
const staticData = require('./staticData');
/**
 * Scaffolds a minimal Python project at `filepath`.
 *
 * NOTE(review): on failure this *returns* the Error object rather than
 * rethrowing/rejecting, so callers cannot distinguish success from
 * failure by resolution alone — confirm callers expect this.
 */
async function buildPythonProject(filepath) {
  // Generating a Python project
  //
  // 1) Create a blank init file
  // 2) Create an opinionated setup.py file
  // 3) TODO: Install packages?
  // 4) Print success/errors, return success message
  try {
    console.log('Generating Python project...')
    await filesystem.createFile(`${filepath}/__init__.py`, '');
    await filesystem.createFile(`${filepath}/setup.py`, staticData.setupPy);
    // TODO: Install packages from setup.py here? Tricky with different invocations of Python per OS
    const message = 'Python project built successfully!'
    console.log(chalk.green(message));
    return message
  } catch (error) {
    console.log(chalk.red(error))
    return error
  }
}

module.exports = {
  buildPythonProject,
}
|
# Model: MutedUser — one row per muted user (Sequel ORM).
class Bot::Models::MutedUser < Sequel::Model
  # Allow the primary key to be set via mass assignment.
  unrestrict_primary_key

  # Seconds until the mute expires (negative once it has passed).
  # Assumes mute_end/mute_start are Time columns — confirm schema.
  def time_left
    mute_end - Time.now
  end

  # Total mute duration in seconds.
  def mute_length
    mute_end - mute_start
  end
end
|
fitstats
========
A web service that exposes Fitbit data to Panic's Status Board.
|
import 'package:flutter/services.dart';
import 'package:alga/constants/import_helper.dart';
import 'package:alga/tools/formatters/formatter_abstract.dart';
import 'package:alga/utils/snackbar_util.dart';
/// Generic two-pane formatter tool: raw text on the left, formatted
/// output on the right.
class FormatterView extends StatefulWidget {
  /// Tool title shown by the enclosing [ToolView].
  final Widget title;

  /// Extra configuration widgets rendered in the toolbar.
  final List<Widget> configs;

  /// Formats the input text; its `result` is shown in the output pane.
  final FormatResult Function(String text) onChanged;

  /// Language id used for syntax highlighting in both panes.
  final String lang;

  const FormatterView(
      {Key? key,
      required this.title,
      required this.configs,
      required this.onChanged,
      required this.lang})
      : super(key: key);

  @override
  State<FormatterView> createState() => FormatterViewState();
}
/// State for [FormatterView]: owns the input controller and re-runs the
/// formatter whenever the input changes, is pasted, or is cleared.
class FormatterViewState extends State<FormatterView> {
  final _inputController = TextEditingController();

  // Latest formatter output; regenerated from the input on every change.
  String outputText = '';

  @override
  void dispose() {
    _inputController.dispose();
    super.dispose();
  }

  @override
  Widget build(BuildContext context) {
    return ToolView(
      title: widget.title,
      content: ToolbarView(
        configs: widget.configs,
        inputWidget: LangTextField(
          lang: widget.lang,
          minLines: 80,
          maxLines: 100,
          controller: _inputController,
          onChanged: (text) {
            outputText = widget.onChanged(text).result;
            setState(() {});
          },
        ),
        outputWidget: AppTextBox(
          lang: widget.lang,
          minLines: 80,
          maxLines: 100,
          data: outputText,
        ),
        inputActions: [
          // Paste from clipboard, then reformat.
          IconButton(
            icon: const Icon(Icons.paste),
            onPressed: () async {
              final rawText = await Clipboard.getData('text/plain');
              _inputController.text = rawText?.text ?? '';
              outputText = widget.onChanged(_inputController.text).result;
              setState(() {});
              // `mounted` guards the context use after the async gap above.
              if (mounted) SnackbarUtil(context).pasted();
            },
          ),
          // Clear both panes.
          IconButton(
            icon: const Icon(Icons.clear),
            onPressed: () {
              _inputController.clear();
              outputText = '';
              setState(() {});
            },
          ),
        ],
        outputActions: [
          // Copy the formatted output to the clipboard.
          IconButton(
            icon: const Icon(Icons.copy),
            onPressed: () async {
              await Clipboard.setData(ClipboardData(text: outputText));
              if (mounted) SnackbarUtil(context).copied();
            },
          ),
        ],
      ),
    );
  }
}
|
{-# LANGUAGE ExistentialQuantification, Rank2Types, FunctionalDependencies, FlexibleInstances, FlexibleContexts, PatternGuards, ScopedTypeVariables #-}
-----------------------------------------------------------------------------
-- |
-- Module : XMonad.Layout.MultiToggle
-- Description : Dynamically apply and unapply transformers to your window layout.
-- Copyright : (c) Lukas Mai
-- License : BSD-style (see LICENSE)
--
-- Maintainer : <l.mai@web.de>
-- Stability : unstable
-- Portability : unportable
--
-- Dynamically apply and unapply transformers to your window layout. This can
-- be used to rotate your window layout by 90 degrees, or to make the
-- currently focused window occupy the whole screen (\"zoom in\") then undo
-- the transformation (\"zoom out\").
module XMonad.Layout.MultiToggle (
-- * Usage
-- $usage
Transformer(..),
Toggle(..),
(??),
EOT(..),
single,
mkToggle,
mkToggle1,
isToggleActive,
HList,
HCons,
MultiToggle,
) where
import XMonad
import XMonad.Prelude hiding (find)
import XMonad.StackSet (Workspace(..))
import Control.Arrow
import Data.IORef
import Data.Typeable
-- $usage
-- The basic idea is to have a base layout and a set of layout transformers,
-- of which at most one is active at any time. Enabling another transformer
-- first disables any currently active transformer; i.e. it works like a
-- group of radio buttons.
--
-- To use this module, you need some data types which represent
-- transformers; for some commonly used transformers (including
-- MIRROR, NOBORDERS, and FULL used in the examples below) you can
-- simply import "XMonad.Layout.MultiToggle.Instances".
--
-- Somewhere else in your file you probably have a definition of @layout@;
-- the default looks like this:
--
-- > layout = tiled ||| Mirror tiled ||| Full
--
-- After changing this to
--
-- > layout = mkToggle (single MIRROR) (tiled ||| Full)
--
-- you can now dynamically apply the 'XMonad.Layout.Mirror' transformation:
--
-- > ...
-- > , ((modm, xK_x ), sendMessage $ Toggle MIRROR)
-- > ...
--
-- (That should be part of your key bindings.) When you press @mod-x@, the
-- active layout is mirrored. Another @mod-x@ and it's back to normal.
--
-- It's also possible to stack @MultiToggle@s. For example:
--
-- @
-- layout = id
-- . 'XMonad.Layout.NoBorders.smartBorders'
-- . mkToggle (NOBORDERS ?? FULL ?? EOT)
-- . mkToggle (single MIRROR)
-- $ tiled ||| 'XMonad.Layout.Grid.Grid' ||| 'XMonad.Layout.Circle.Circle'
-- @
--
-- By binding a key to @(sendMessage $ Toggle FULL)@ you can temporarily
-- maximize windows, in addition to being able to rotate layouts and remove
-- window borders.
--
-- You can also define your own transformers by creating a data type
-- which is an instance of the 'Transformer' class. For example, here
-- is the definition of @MIRROR@:
--
-- > data MIRROR = MIRROR deriving (Read, Show, Eq)
-- > instance Transformer MIRROR Window where
-- > transform _ x k = k (Mirror x) (\(Mirror x') -> x')
--
-- Note, you need to put @{-\# LANGUAGE
-- TypeSynonymInstances, MultiParamTypeClasses \#-}@ at the
-- beginning of your file.
-- | A class to identify custom transformers (and look up transforming
-- functions by type).
class (Eq t, Typeable t) => Transformer t a | t -> a where
    transform :: (LayoutClass l a) => t -> l a ->
        (forall l'. (LayoutClass l' a) => l' a -> (l' a -> l a) -> b) -> b

-- | An existentially wrapped (possibly transformed) layout, paired with
-- a function that undoes the transformation, recovering the original
-- layout type @l@.
data EL l a = forall l'. (LayoutClass l' a) => EL (l' a) (l' a -> l a)

-- | Apply a continuation to the wrapped layout.
unEL :: (LayoutClass l a) => EL l a -> (forall l'. (LayoutClass l' a) => l' a -> b) -> b
unEL (EL x _) k = k x

-- | Unwrap by applying the stored de-transforming function.
deEL :: (LayoutClass l a) => EL l a -> l a
deEL (EL x det) = det x

-- | Apply a transformer inside an 'EL', composing its inverse with the
-- one already stored.
transform' :: (Transformer t a, LayoutClass l a) => t -> EL l a -> EL l a
transform' t (EL l det) = transform t l (\l' det' -> EL l' (det . det'))
-- | Toggle the specified layout transformer.
data Toggle a = forall t. (Transformer t a) => Toggle t
instance (Typeable a) => Message (Toggle a)
data MultiToggleS ts l a = MultiToggleS (l a) (Maybe Int) ts
deriving (Read, Show)
data MultiToggle ts l a = MultiToggle{
currLayout :: EL l a,
currIndex :: Maybe Int,
transformers :: ts
}
-- | Rebuild the runtime state from its serializable form: start from the
-- untransformed base layout and, if an index was recorded, look up that
-- transformer in the table and re-apply it.  The default passed to
-- 'resolve' is 'id', so an absent or out-of-range index leaves the base
-- layout untouched.
expand :: (LayoutClass l a, HList ts a) => MultiToggleS ts l a -> MultiToggle ts l a
expand (MultiToggleS b i ts) =
    resolve ts (fromMaybe (-1) i) id
        (\x mt ->
            let g = transform' x in mt{ currLayout = g $ currLayout mt }
        )
        (MultiToggle (EL b id) i ts)
-- | Project the runtime state back to its serializable form, undoing the
-- transformation on the current layout via 'deEL'.
collapse :: (LayoutClass l a) => MultiToggle ts l a -> MultiToggleS ts l a
collapse (MultiToggle l i ts) = MultiToggleS (deEL l) i ts
-- Reading parses the serializable 'MultiToggleS' form and 'expand's it
-- back into runtime state.
instance (LayoutClass l a, Read (l a), HList ts a, Read ts) => Read (MultiToggle ts l a) where
    readsPrec p s = map (first expand) $ readsPrec p s
-- Showing first 'collapse's the runtime state to its serializable form.
instance (Show ts, Show (l a), LayoutClass l a) => Show (MultiToggle ts l a) where
    showsPrec p = showsPrec p . collapse
-- | Construct a @MultiToggle@ layout from a transformer table and a base
-- layout.  Initially no transformer is active.
mkToggle :: (LayoutClass l a) => ts -> l a -> MultiToggle ts l a
mkToggle table base = MultiToggle{ currLayout = EL base id
                                 , currIndex = Nothing
                                 , transformers = table
                                 }
-- | Construct a @MultiToggle@ layout from a single transformer and a base
-- layout.
mkToggle1 :: (LayoutClass l a) => t -> l a -> MultiToggle (HCons t EOT) l a
mkToggle1 = mkToggle . single
-- | Marks the end of a transformer list.
data EOT = EOT deriving (Read, Show)
-- | A cons cell used to build heterogeneous transformer lists; construct
-- lists with '??' and terminate them with 'EOT'.
data HCons a b = HCons a b deriving (Read, Show)
infixr 0 ??
-- | Prepend an element to a heterogeneous list. Used to build transformer
-- tables for 'mkToggle'.
(??) :: a -> b -> HCons a b
x ?? xs = HCons x xs
-- | Construct a singleton transformer table.
single :: a -> HCons a EOT
single x = x ?? EOT
-- | Operations on a heterogeneous list of transformers.
class HList c a where
    -- Position of the given transformer in the list, if present.
    find :: (Transformer t a) => c -> t -> Maybe Int
    -- CPS lookup by index: pass the element at the given position to the
    -- continuation, or return the default when the index is out of range.
    resolve :: c -> Int -> b -> (forall t. (Transformer t a) => t -> b) -> b
-- The empty list: contains nothing, and every index resolves to the default.
instance HList EOT w where
    find EOT _ = Nothing
    resolve EOT _ d _ = d
instance (Transformer a w, HList b w) => HList (HCons a b) w where
    -- Index of the first element equal (by both type and value, via 'geq')
    -- to the given transformer.
    find (HCons x xs) t
        | t `geq` x = Just 0
        | otherwise = fmap succ (find xs t)
    -- Walk down the list, decrementing the index until it hits zero;
    -- a negative index falls straight through to the default.
    resolve (HCons x xs) n d k =
        case n `compare` 0 of
            LT -> d
            EQ -> k x
            GT -> resolve xs (pred n) d k
-- | Heterogeneous equality: 'True' iff @b@ has the same type as @a@
-- (checked with 'cast') and compares equal to it.
geq :: (Typeable a, Eq a, Typeable b) => a -> b -> Bool
geq x y = cast y == Just x
instance (Typeable a, Show ts, Typeable ts, HList ts a, LayoutClass l a) => LayoutClass (MultiToggle ts l) a where
    -- Delegate the description to the currently active (possibly
    -- transformed) layout.
    description mt = currLayout mt `unEL` \l -> description l
    -- Run the inner layout, re-wrapping any updated layout it returns
    -- with the stored undo function.
    runLayout (Workspace i mt s) r = case currLayout mt of
        EL l det -> (fmap . fmap $ (\x -> mt { currLayout = EL x det })) <$>
            runLayout (Workspace i l s) r
    handleMessage mt m
        -- A 'Toggle' for a transformer in our table: release the current
        -- layout's resources, undo the current transformation via @det@,
        -- then either apply the requested transformer or (if it was
        -- already the active one) leave the layout untransformed.
        | Just (Toggle t) <- fromMessage m
        , i@(Just _) <- find (transformers mt) t
        = case currLayout mt of
            EL l det -> do
                l' <- fromMaybe l <$> handleMessage l (SomeMessage ReleaseResources)
                return . Just $
                    mt {
                        currLayout = (if cur then id else transform' t) (EL (det l') id),
                        currIndex = if cur then Nothing else i
                    }
              where cur = i == currIndex mt
        -- Answer an 'isToggleActive' query by writing the answer into the
        -- IORef carried by the message; the layout itself is unchanged.
        | Just (MultiToggleActiveQueryMessage t ref :: MultiToggleActiveQueryMessage a) <- fromMessage m
        , i@(Just _) <- find (transformers mt) t
        = Nothing <$ io (writeIORef ref (Just (i == currIndex mt)))
        -- Any other message is forwarded to the inner layout.
        | otherwise
        = case currLayout mt of
            EL l det -> fmap (\x -> mt { currLayout = EL x det }) <$>
                handleMessage l m
-- | Internal query message: asks the layout whether the given transformer
-- is currently active, delivering the answer through the carried 'IORef'.
data MultiToggleActiveQueryMessage a = forall t. (Transformer t a) =>
    MultiToggleActiveQueryMessage t (IORef (Maybe Bool))
instance (Typeable a) => Message (MultiToggleActiveQueryMessage a)
-- | Query the state of a 'Transformer' on a given workspace.
--
-- Returns 'Nothing' when the query goes unanswered (the transformer is
-- not in that workspace's transformer table), and @Just b@ otherwise.
--
-- To query the current workspace, use something like this:
--
-- > withWindowSet (isToggleActive t . W.workspace . W.current)
isToggleActive :: Transformer t Window => t -> WindowSpace -> X (Maybe Bool)
isToggleActive t w = do
    ref <- io $ newIORef Nothing
    -- The layout answers by writing into the IORef (see 'handleMessage').
    sendMessageWithNoRefresh (MultiToggleActiveQueryMessage t ref) w
    io $ readIORef ref
|
# gitserver
A quick way to set up a hardened git server in just about 5 simple commands.
The quick start guide (for experienced git and SSH users) can be found at: http://jurrianfahner.github.io/gitserver/
Detailed documentation can be found at: http://gitserver.readthedocs.org/
|
# Development commands and notes
## Setup dev environment
```bash
apt update && \
apt install -y python3 python3-pip python3-apt python3-dev git build-essential bash-completion systemctl; \
source /usr/share/bash-completion/bash_completion && \
git clone git@github.com:shokinn/emby-updater.git && \
cd emby-updater && \
read -p 'git email for pushing: ' gitmail && \
git config user.email "$gitmail" && \
read -p 'git username for pushing: ' gituser && \
git config user.name "$gituser" && \
pip3 install --user -r requirements.txt && \
pip3 install --user setuptools && \
pip3 install --user pyinstaller && \
pip3 install --user wheel && \
pip3 install --user twine && \
pip3 install --user bumpversion && \
export PATH=$HOME/.local/bin:$PATH
```
## Release new version
To publish a release use bumpversion. This will update the `version.py` and tag the commit.
Travis will then push the new release to [PyPi](https://pypi.python.org/pypi/emby-updater).
```bash
./release.sh <patch|minor|major>
```
After building the binary it has to be released manually on github.
## Manual steps
These steps assume that you are in the cloned directory.
```bash
bumpversion --tag --commit <patch|minor|major>
git add .bumpversion.cfg
git add embyupdater/version.py
git commit --amend --no-edit
#git commit -m "v$(cat .bumpversion.cfg|grep current_version|tr -d ' '|cut -f 2 -d '=')"
git push --tag
python3 setup.py sdist bdist_wheel
twine upload dist/*
pyinstaller --clean --onefile --name emby-updater --distpath pyindist --workpath pyinbuild embyupdater/__main__.py
```
After building the binary it has to be released manually on github.
|
//#SECTION meta
/**
 * Metadata describing a single song result.
 * NOTE(review): field semantics inferred from names only — confirm against
 * the API this type mirrors.
 */
export interface SongMeta {
    /** URL of the song's page */
    url: string;
    /** API path of the song resource */
    path: string;
    meta: {
        title: string;
        /** Full title — presumably includes the artist; TODO confirm */
        fullTitle: string;
        /** Artist name(s) as a single string */
        artists: string;
        /** The song's main artist */
        primaryArtist: {
            name: string;
            url: string;
        },
    },
    resources: {
        /** Thumbnail image URL */
        thumbnail: string;
        /** Full-size image URL */
        image: string;
    },
    /** State of the lyrics (opaque string from the upstream source) */
    lyricsState: string;
    /** Numeric ID of the song */
    id: number;
}
//#SECTION server
/** Broad outcome category of a server response: server-side error, client-side error, or success. */
export type ResponseType = "serverError" | "clientError" | "success";
/** Serialization format a response can be delivered in. */
export type ResponseFormat = "json" | "xml";
|
package com.github.songjiang951130.leetcode.backtrack;
import org.junit.Test;
public class ParenthesiTest {
Parenthesi parenthesi = new Parenthesi();
@Test
public void generateParenthesis() {
parenthesi.generateParenthesis(2);
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.