text
stringlengths 27
775k
|
|---|
package com.github.asalimonov.hay.core.metrics
/** A metric whose accumulated state can be wiped back to its initial value. */
trait ResetableMetric {
// NOTE(review): `Reset` breaks Scala's lowerCamelCase method convention, but
// renaming it would break every implementor and caller, so it stays as-is.
def Reset(): Unit
}
|
import React from 'react';
import {shallow} from 'enzyme'
import toJson from 'enzyme-to-json';
import {ConfirmNewFolderContainer} from "./ConfirmNewFolderContainer";
describe('ConfirmNewFolderContainer', () => {
  // Render shallowly and pin the serialized output against the stored snapshot.
  it('match snapshot', () => {
    const wrapper = shallow(<ConfirmNewFolderContainer/>);
    const serialized = toJson(wrapper);
    expect(serialized).toMatchSnapshot();
  });
});
|
<?php
namespace CarbonClean\Sanitizer\Tests;
use CarbonClean\Sanitizer\Tests\Fixtures\Filters\CustomFilter;
use CarbonClean\Sanitizer\Laravel\Factory;
use PHPUnit\Framework\TestCase;
class FactoryTest extends TestCase
{
    /** A closure registered via extend() is applied to the matching field. */
    public function test_custom_closure_filter()
    {
        $factory = new Factory;
        $factory->extend('hash', function ($value) {
            return sha1($value);
        });

        $input = ['name' => 'TEST'];
        $rules = ['name' => 'hash'];

        $sanitized = $factory->make($input, $rules)->sanitize();

        $this->assertEquals(sha1('TEST'), $sanitized['name']);
    }

    /** A filter class registered via extend() is applied to the matching field. */
    public function test_custom_class_filter()
    {
        $factory = new Factory;
        $factory->extend('custom', CustomFilter::class);

        $input = ['name' => 'TEST'];
        $rules = ['name' => 'custom'];

        $sanitized = $factory->make($input, $rules)->sanitize();

        $this->assertEquals('TESTTEST', $sanitized['name']);
    }

    /** extend() may override a built-in filter name such as 'trim'. */
    public function test_replace_filter()
    {
        $factory = new Factory;
        $factory->extend('trim', function ($value) {
            return sha1($value);
        });

        $input = ['name' => 'TEST'];
        $rules = ['name' => 'trim'];

        $sanitized = $factory->make($input, $rules)->sanitize();

        $this->assertEquals(sha1('TEST'), $sanitized['name']);
    }
}
|
# canon
Uses something like the WHERE clause of an SQL query to evaluate whether an array-like structure satisfies a condition (true or false).
# Basic Usage
```php
use EUAutomation\Canon\Processor;
$processor = new Processor();
$expressions = $processor->process('foo = "bar"');
$expressions->evaluate([ "foo" => "bar" ]); // true
```
# Supports
- Basic Operators (=, !=, >, >=, <, <=) `foo > 9000`
- LIKE Operator `foo LIKE "shoes%"`
- LIKE ANY Operator `foo LIKE ANY ("shoes%", "socks%")`
- IN Operator `foo IN (1, 2)`
- Nested Columns Refs `foo.bar = 'baz'`
- Brackets `foo = 2 AND (bar = 3 OR baz < 10)`
# Acknowledgments
- https://github.com/greenlion/PHP-SQL-Parser - lots of the parser code is from here
|
import React, { Component } from 'react';
import { Button } from 'antd';
import bindLifecycle from '../../es/utils/bindLifecycle';
@bindLifecycle
export default class Index extends Component {
  // NOTE: the previous constructor only called super(props) and set no state;
  // React does that implicitly, so the constructor was removed.

  /** Home screen: a heading plus a primary demo button. */
  render() {
    return (
      <div>
        <h2>Home</h2>
        <Button type="primary">
          123123
        </Button>
      </div>
    );
  }
}
|
<?php

return [

    /*
    |--------------------------------------------------------------------------
    | App Language Lines
    |--------------------------------------------------------------------------
    |
    | Hungarian translations. Several user-visible typos were corrected
    | (Mentés, Kitűzés, Felhasználók, alkalmazása, Címke, Alapértelmezések)
    | and two untranslated fragments ("Miscellaneous", "or") were localized.
    */

    'settings.system' => 'Rendszer',
    'settings.appearance' => 'Megjelenés',
    'settings.miscellaneous' => 'Egyéb',
    'settings.support' => 'Támogatás',
    'settings.donate' => 'Adomány',
    'settings.version' => 'Verzió',
    'settings.background_image' => 'Háttérkép',
    'settings.window_target' => 'Link megnyitása',
    'settings.window_target.current' => 'Ezen a lapon',
    'settings.window_target.one' => 'Azonos lapon',
    'settings.window_target.new' => 'Új lapon',
    'settings.homepage_search' => 'Kezdő oldal kereső',
    'settings.search_provider' => 'Alapértelmezett kereső motor',
    'settings.language' => 'Nyelv',
    'settings.reset' => 'Alapértelmezések visszaállítása',
    'settings.remove' => 'Eltávolítás',
    'settings.search' => 'keresés',
    'settings.no_items' => 'Nincs találat',

    'settings.label' => 'Címke',
    'settings.value' => 'Érték',
    'settings.edit' => 'Módosítás',
    'settings.view' => 'Megtekintés',

    'options.none' => '- nincs beállítva -',
    'options.google' => 'Google',
    'options.ddg' => 'DuckDuckGo',
    'options.bing' => 'Bing',
    'options.qwant' => 'Qwant',
    'options.startpage' => 'Kezdő oldal',
    'options.yes' => 'Igen',
    'options.no' => 'Nem',
    'options.nzbhydra' => 'NZBHydra',
    'options.jackett' => 'Jackett',

    'buttons.save' => 'Mentés',
    'buttons.cancel' => 'Mégsem',
    'buttons.add' => 'Hozzáadás',
    'buttons.upload' => 'Ikon feltöltése',
    'buttons.downloadapps' => 'Alkalmazás lista frissítése',

    'dash.pin_item' => 'Kitűzés kezdő képernyőre',
    'dash.no_apps' => 'Jelenleg nincsenek kitűzött alkalmazások, :link1 vagy :link2',
    'dash.link1' => 'Alkalmazás hozzáadása',
    'dash.link2' => 'Elem kitűzése kezdőképernyőre',
    'dash.pinned_items' => 'Kitűzött elemek',

    'apps.app_list' => 'Alkalmazás lista',
    'apps.view_trash' => 'Törölt elemek megtekintése',
    'apps.add_application' => 'Alkalmazás hozzáadása',
    'apps.application_name' => 'Alkalmazás neve',
    'apps.colour' => 'Szín',
    'apps.icon' => 'Ikon',
    'apps.pinned' => 'Kitűzve',
    'apps.title' => 'Cím',
    'apps.hex' => 'Hexa színkód',
    'apps.username' => 'Felhasználói név',
    'apps.password' => 'Jelszó',
    'apps.config' => 'Konfiguráció',
    'apps.apikey' => 'API Kulcs',
    'apps.enable' => 'Engedélyezés',
    'apps.tag_list' => 'Címke lista',
    'apps.add_tag' => 'Címke hozzáadása',
    'apps.tag_name' => 'Címke név',
    'apps.tags' => 'Címke',
    'apps.override' => 'Eltérő URL alkalmazása.',
    'apps.preview' => 'Előnézet',
    'apps.apptype' => 'Alkalmazás Típus',

    'dashboard' => 'Kezdőképernyő',

    'user.user_list' => 'Felhasználók',
    'user.add_user' => 'Felhasználó hozzáadása',
    'user.username' => 'Felhasználói név',
    'user.avatar' => 'Avatar',
    'user.email' => 'Email',
    'user.password_confirm' => 'Jelszó megerősítés',
    'user.secure_front' => 'Nyilvános hozzáférés engedélyezése - Csak jelszó alkalmazása esetén.',
    'user.autologin' => 'Bejelentkezés URL-el, Link birtokában bárki bejelentkezhet.',

    'url' => 'URL',
    'title' => 'Cím',
    'delete' => 'Törlés',
    'optional' => 'Opcionális',
    'restore' => 'Visszaállítás',

    'alert.success.item_created' => 'Elem sikeresen létrehozva',
    'alert.success.item_updated' => 'Elem sikeresen frissítve',
    'alert.success.item_deleted' => 'Elem sikeresen törölve',
    'alert.success.item_restored' => 'Elem sikeresen visszaállítva',
    'alert.success.updating' => 'Alkalmazás lista frissítése',

    'alert.success.tag_created' => 'Címke sikeresen létrehozva',
    'alert.success.tag_updated' => 'Címke sikeresen frissítve',
    'alert.success.tag_deleted' => 'Címke sikeresen törölve',
    'alert.success.tag_restored' => 'Címke sikeresen visszaállítva',

    'alert.success.setting_updated' => 'Sikeres beállítás módosítás.',
    'alert.error.not_exist' => 'Nem létező beállítás',

    'alert.success.user_created' => 'Felhasználó sikeresen létrehozva',
    'alert.success.user_updated' => 'Felhasználó sikeresen frissítve',
    'alert.success.user_deleted' => 'Felhasználó sikeresen törölve',
    'alert.success.user_restored' => 'Felhasználó sikeresen visszaállítva',
];
|
package be.mickverm.widgets.sample.recyclerview.ui.items
import android.view.View
import android.view.ViewGroup
import android.widget.TextView
import androidx.recyclerview.widget.RecyclerView
import be.mickverm.widget.recyclerview.adapter.RxDiffUtilAdapter
import be.mickverm.widgets.sample.R
import be.mickverm.widgets.sample.recyclerview.data.models.Item
/** Adapter rendering [Item]s via [RxDiffUtilAdapter] diffing. */
class ItemsAdapter : RxDiffUtilAdapter<Item, ItemsAdapter.ViewHolder>() {

    override fun onCreateViewHolder(parent: ViewGroup, viewType: Int): ViewHolder {
        val view = inflateLayout(R.layout.item_item, parent)
        return ViewHolder(view)
    }

    override fun onBindViewHolder(holder: ViewHolder, position: Int, item: Item) {
        holder.bind(item)
    }

    /** Holds one row: a colored background plus a single text view. */
    class ViewHolder(itemView: View) : RecyclerView.ViewHolder(itemView) {

        private val textView: TextView = itemView.findViewById(R.id.text)

        fun bind(item: Item) {
            itemView.setBackgroundColor(item.color)
            textView.text = item.text
        }
    }
}
|
package org.aiotrade.lib.trading
import java.util.Date
import java.util.logging.Logger
import org.aiotrade.lib.securities.model.Sec
import org.aiotrade.lib.util.actors.Publisher
/**
* quantity should always be >= 0
*/
/**
 * A mutable order for a security.
 *
 * `quantity` should always be >= 0; the direction is carried by `side.signum`.
 * State transitions (status, filled quantity, average price) are published as
 * [[OrderEvent]]s through the [[Publisher]] mixin.
 */
final case class Order(account: TradableAccount, sec: Sec, price: Double, var quantity: Double, side: OrderSide, tpe: OrderType = OrderType.Market, var funds: Double = Double.NaN) extends Publisher {
  private val log = Logger.getLogger(this.getClass.getName)

  private var _id: Long = _
  private var _time: Long = 0
  private var _expireTime: Long = Long.MinValue
  private var _stopPrice: Double = Double.NaN
  private var _validity: OrderValidity = _
  private var _reference: String = _

  // --- executing related
  private var _filledQuantity: Double = _
  private var _averagePrice: Double = _
  private var _status: OrderStatus = OrderStatus.New
  private var _message: String = _

  def id = _id
  def id_=(id: Long) {
    this._id = id
  }

  def time = _time
  def time_=(time: Long) {
    this._time = time
  }

  def stopPrice = _stopPrice
  def stopPrice_=(stopPrice: Double) {
    this._stopPrice = stopPrice
  }

  def validity = _validity
  def validity_=(validity: OrderValidity) {
    this._validity = validity
  }

  def expireTime = _expireTime
  def expireTime_=(time: Long) {
    this._expireTime = time
  }

  def reference = _reference
  def reference_=(reference: String) {
    this._reference = reference
  }

  // --- executing related

  def status = _status
  /** Publishes StatusChanged on an actual transition, plus Completed when filled. */
  def status_=(status: OrderStatus) {
    if (_status != status) {
      val oldValue = _status
      _status = status
      publish(OrderEvent.StatusChanged(this, oldValue, status))
      if (status == OrderStatus.Filled) {
        publish(OrderEvent.Completed(this))
      }
    }
  }

  def remainQuantity = quantity - _filledQuantity

  def filledQuantity = _filledQuantity
  def filledQuantity_=(filledQuantity: Double) {
    val oldValue = _filledQuantity
    // Long.MinValue acts as a "no value" sentinel here -- TODO confirm callers rely on it.
    if (filledQuantity != Long.MinValue && filledQuantity != _filledQuantity) {
      _filledQuantity = filledQuantity
      publish(OrderEvent.FilledQuantityChanged(this, oldValue, filledQuantity))
    }
  }

  def averagePrice = _averagePrice
  def averagePrice_=(averagePrice: Double) {
    val oldValue = _averagePrice
    // Fixed: `averagePrice != Double.NaN` is ALWAYS true (IEEE 754: NaN != NaN),
    // so the NaN guard never fired; use isNaN instead.
    if (!averagePrice.isNaN && averagePrice != _averagePrice) {
      _averagePrice = averagePrice
      publish(OrderEvent.AveragePriceChanged(this, oldValue, averagePrice))
    }
  }

  def message = _message
  def message_=(message: String) {
    _message = message
  }

  /**
   * Fill order by price and quantity: accumulates filled quantity, updates the
   * volume-weighted average price and transitions status to Filled or Partial.
   * Non-positive fill quantities are rejected with a warning.
   */
  def fill(price: Double, quantity: Double) {
    if (quantity > 0) {
      val oldTotalAmount = _filledQuantity * _averagePrice
      _filledQuantity += quantity
      _averagePrice = (oldTotalAmount + price * quantity) / _filledQuantity
      status = if (remainQuantity <= 0) {
        OrderStatus.Filled
      } else {
        // if quantity is NaN, (remainQuantity <= 0) is always false @Todo how to deal with order in this case.
        OrderStatus.Partial
      }
      log.info("Filling order with price=%1$ 5.2f, quantity=%2$ 5.2f. After filled %3$s".format(price, quantity, this))
    } else {
      log.warning("Filling quantity <= 0: fillingPrice=%s, fillingQuantity=%s, remainQuantity=%s".format(price, quantity, remainQuantity))
    }
  }

  override
  def toString = {
    // Fixed label typo: "refrence" -> "reference".
    "Order(time=%1$tY.%1$tm.%1$td, sec=%2$s, tpe=%3$s, side=%4$s, quantity(filled)=%5$s(%6$s), price=%7$ 5.2f, funds=%8$ 5.2f, status=%9$s, stopPrice=%10$ 5.2f, validity=%11$s, expiration=%12$s, reference=%13$s)".format(
      new Date(time), sec.uniSymbol, tpe, side, quantity, _filledQuantity, price, funds, status, stopPrice, validity, expireTime, reference
    )
  }
}
/**
 * Side of an order.
 *
 * @param name      display name of the side
 * @param signum    sign of the quantity effect: +1 for buying sides, -1 for selling sides
 * @param isOpening whether this side opens (true) or closes (false) a position
 */
sealed abstract class OrderSide(val name: String, val signum: Int, val isOpening: Boolean)
object OrderSide {
final case object Buy extends OrderSide("Buy", 1, true)
final case object BuyCover extends OrderSide("BuyCover", 1, false)
final case object Sell extends OrderSide("Sell", -1, false)
final case object SellShort extends OrderSide("SellShort", -1, true)
}
/** Execution type of an order (market, limit, stop, stop-limit). */
sealed abstract class OrderType(val name: String)
object OrderType {
final case object Market extends OrderType("Market")
final case object Limit extends OrderType("Limit")
final case object Stop extends OrderType("Stop")
final case object StopLimit extends OrderType("StopLimit")
}
/** How long an order remains active before it lapses. */
sealed abstract class OrderValidity(val name: String)
object OrderValidity {
final case object Day extends OrderValidity("Day")
final case object ImmediateOrCancel extends OrderValidity("ImmediateOrCancel")
final case object AtOpening extends OrderValidity("AtOpening")
final case object AtClosing extends OrderValidity("AtClosing")
final case object GoodTillCancel extends OrderValidity("GoodTillCancel")
final case object GoodTillDate extends OrderValidity("GoodTillDate")
}
/** Lifecycle state of an order; transitions are published by Order.status_=. */
sealed abstract class OrderStatus(val name: String)
object OrderStatus {
final case object New extends OrderStatus("New")
final case object PendingNew extends OrderStatus("PendingNew")
final case object Partial extends OrderStatus("Partial")
final case object Filled extends OrderStatus("Filled")
final case object Canceled extends OrderStatus("Canceled")
final case object Rejected extends OrderStatus("Rejected")
final case object PendingCancel extends OrderStatus("PendingCancel")
final case object Expired extends OrderStatus("Expired")
}
/** Events published by an [[Order]] when its mutable state changes. */
sealed trait OrderEvent {
def order: Order
}
object OrderEvent {
final case class Completed(order: Order) extends OrderEvent
// NOTE(review): oldValue/value are typed String although Order.id is a Long,
// and no setter in this file publishes IdChanged -- confirm intended usage.
final case class IdChanged(order: Order, oldValue: String, value: String) extends OrderEvent
final case class StatusChanged(order: Order, oldValue: OrderStatus, value: OrderStatus) extends OrderEvent
final case class FilledQuantityChanged(order: Order, oldValue: Double, value: Double) extends OrderEvent
final case class AveragePriceChanged(order: Order, oldValue: Double, value: Double) extends OrderEvent
}
|
using System.Data;
namespace PeanutButter.FluentMigrator.Fakes
{
internal class FakeDbConnection: IDbConnection
{
private ConnectionState _state;
public void Dispose()
{
/* intentionally left blank */
}
public IDbTransaction BeginTransaction()
{
return BeginTransaction(IsolationLevel.ReadUncommitted);
}
public IDbTransaction BeginTransaction(IsolationLevel il)
{
return new FakeDbTransaction(this, il);
}
public void Close()
{
_state = ConnectionState.Closed;
}
public void ChangeDatabase(string databaseName)
{
/* intentionally left blank */
}
public IDbCommand CreateCommand()
{
return new FakeDbCommand()
{
Connection = this
};
}
public void Open()
{
_state = ConnectionState.Open;
}
public string ConnectionString { get; set; }
public int ConnectionTimeout { get; set; } = 30;
public string Database { get; set; }
public ConnectionState State => _state;
}
}
|
package org.jenkins.ci.plugins.jenkinslint.check;
import hudson.model.Item;
import hudson.model.Project;
import hudson.tasks.Builder;
import jenkins.model.Jenkins;
import org.jenkins.ci.plugins.jenkinslint.model.AbstractCheck;
import java.util.List;
import java.util.logging.Level;
/**
 * Lint check: flags jobs that use the Gradle plugin without the Gradle
 * wrapper (gradlew). Maven and Matrix projects are inspected reflectively so
 * this class does not hard-depend on those plugins.
 *
 * @author Victor Martinez
 */
public class GradleWrapperChecker extends AbstractCheck {

    public GradleWrapperChecker(boolean enabled) {
        super(enabled);
        this.setDescription(Messages.GradleWrapperCheckerDesc());
        this.setSeverity(Messages.GradleWrapperCheckerSeverity());
    }

    /**
     * Returns true when the item has a Gradle build step that does not use the
     * wrapper; always false when the gradle plugin is not installed.
     */
    public boolean executeCheck(Item item) {
        boolean found = false;
        Jenkins jenkins = Jenkins.getInstance();
        // Guard against a null Jenkins instance (e.g. during startup/shutdown).
        if (jenkins != null && jenkins.pluginManager.getPlugin("gradle") != null) {
            if (item.getClass().getSimpleName().equals("MavenModuleSet")) {
                try {
                    // No-arg getMethod avoids the ambiguous (Object[]) null varargs call.
                    Object getPrebuilders = item.getClass().getMethod("getPrebuilders").invoke(item);
                    if (getPrebuilders instanceof List) {
                        found = isGradlew((List) getPrebuilders);
                    }
                } catch (Exception e) {
                    LOG.log(Level.WARNING, "Exception " + e.getMessage(), e.getCause());
                }
            }
            if (item instanceof Project) {
                found = isGradlew(((Project) item).getBuilders());
            }
            if (item.getClass().getSimpleName().equals("MatrixProject")) {
                try {
                    Object getBuilders = item.getClass().getMethod("getBuilders").invoke(item);
                    if (getBuilders instanceof List) {
                        found = isGradlew((List) getBuilders);
                    }
                } catch (Exception e) {
                    LOG.log(Level.WARNING, "Exception " + e.getMessage(), e.getCause());
                }
            }
        }
        return found;
    }

    /**
     * Returns true when any Gradle builder in the list reports
     * isUseWrapper() == false (i.e. it is NOT using the wrapper).
     */
    private boolean isGradlew(List<Builder> builders) {
        boolean status = false;
        if (builders != null && !builders.isEmpty()) {
            for (Builder builder : builders) {
                if (builder.getClass().getName().endsWith("Gradle")) {
                    try {
                        Object isUseWrapper = builder.getClass().getMethod("isUseWrapper").invoke(builder);
                        if (isUseWrapper instanceof Boolean) {
                            status = !((Boolean) isUseWrapper).booleanValue();
                            LOG.log(Level.FINE, "isGradlew " + !status);
                        }
                    } catch (Exception e) {
                        LOG.log(Level.WARNING, "Exception " + e.getMessage(), e.getCause());
                        status = false;
                    }
                }
            }
        }
        return status;
    }
}
|
# Builds and pretty-prints the first +row_count+ rows of Pascal's triangle.
class PascalTriangle
  include Enumerable

  # Frozen array of frozen rows, e.g. [[1], [1, 1], [1, 2, 1], ...].
  attr_reader :rows

  # row_count: number of rows to generate; must be a positive Integer.
  #
  # Fixed: the original raised the nonexistent constant ArgumentException
  # (a NameError at runtime) and compared against Fixnum, which was removed
  # in Ruby 2.4+; Integer covers both old Fixnum and Bignum values.
  def initialize row_count
    raise ArgumentError, "row_count must be an Integer." unless row_count.is_a?(Integer)
    raise "row_count must be greater than 0." unless row_count > 0
    @row_count = row_count
    generate_rows
  end

  # Centers every number so the triangle renders symmetrically.
  def to_s
    max_width = @rows.flatten.max.to_s.size
    max_count = @rows.map { |i| i.size }.max
    line_width = max_count * max_width * 2 - max_width
    separator = " " * max_width
    self.map do |row|
      row.map { |num| num.to_s.center(max_width) }.join(separator).center(line_width)
    end.join("\n")
  end

  # Yields each row in order (Enumerable support).
  def each
    @rows.each { |row| yield row }
  end

  private

  # Builds all rows iteratively, each derived from its predecessor, then
  # freezes the result so readers cannot mutate it.
  def generate_rows
    row = []
    @rows = (0...@row_count).to_a.inject([]) do |m, i|
      m << row = generate_row(row)
    end
    @rows.freeze
    @rows.each { |r| r.freeze }
  end

  # Derives the next row: borders are 1, inner cells sum adjacent parents.
  def generate_row row
    result = [1]
    row.each_with_index do |el, idx|
      result << (idx == row.size - 1 ? 1 : el + row[idx + 1])
    end
    result
  end
end
# CLI entry point: expects a single positive-integer argument.
unless ARGV[0] && ARGV[0] =~ /^\d+$/
  puts "Example: ruby pascal.rb 10"
  exit
end

puts PascalTriangle.new(ARGV[0].to_i).to_s
|
package main
import (
"fmt"
"sync"
)
// scope1 demonstrates the classic loop-variable capture pitfall: each
// goroutine closes over the SAME loop variable m. Before Go 1.22 all three
// goroutines may observe whichever value m holds when they actually run
// (often the last message, printed three times); Go 1.22+ gives each
// iteration its own variable, which makes the output match scope2.
// Left unfixed on purpose — the file contrasts this with scope2.
func scope1() {
	var wg sync.WaitGroup
	for _, m := range []string{"Hello", "Good day", "Greeting"} {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(m)
		}()
	}
	wg.Wait()
}
// scope2 avoids the capture pitfall by handing the loop value to each
// goroutine as an argument, so every goroutine owns its own copy.
func scope2() {
	var wg sync.WaitGroup
	greetings := []string{"Hello", "Good day", "Greeting"}
	for _, g := range greetings {
		wg.Add(1)
		go func(msg string) {
			defer wg.Done()
			fmt.Println(msg)
		}(g)
	}
	wg.Wait()
}
// main runs the buggy and the correct variant back to back so their
// outputs can be compared.
func main() {
	scope1()
	scope2()
}
|
package com.xiyan.controller;
import com.xiyan.dto.*;
import com.xiyan.service.UserService;
import com.xiyan.vo.BaseVO;
import com.xiyan.vo.UserByIdVO;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import org.springframework.web.bind.annotation.*;
import javax.annotation.Resource;
import javax.mail.MessagingException;
import javax.servlet.http.HttpServletRequest;
import javax.validation.Valid;
/**
 * REST endpoints for user account management: email verification codes,
 * registration, third-party account binding, password recovery, lookup and
 * profile updates. All requests delegate directly to {@link UserService}.
 *
 * @author bright
 */
@Api(description = "用户集合")
@RestController
@RequestMapping("/user")
public class UserController {

    @Resource
    private UserService userService;

    // Request-scoped proxy; safe to inject as a field under Spring MVC.
    @Resource
    private HttpServletRequest request;

    /** Sends a verification code to the given email address. */
    @ApiOperation(value = "发送验证码接口")
    @PostMapping("/send")
    public BaseVO sendOutEmail(@RequestBody @Valid EmailDTO emailDTO) throws MessagingException {
        return userService.sendOutEmail(emailDTO);
    }

    /** Registers a new user account. */
    @ApiOperation(value = "注册接口")
    @PostMapping("/register")
    public BaseVO register(@RequestBody @Valid UserDTO userDTO) {
        return userService.register(userDTO);
    }

    /**
     * Binds a third-party login to an existing account.
     * NOTE(review): Integer.valueOf throws NumberFormatException when the
     * User-ID header is missing or non-numeric — confirm an auth filter
     * guarantees the header before this endpoint is reached.
     */
    @ApiOperation(value = "第三方登陆绑定账号接口")
    @PutMapping("/bind")
    public BaseVO bind(@RequestBody @Valid AccountLoginDTO accountLoginDTO) {
        return userService.bind(accountLoginDTO, Integer.valueOf(request.getHeader("User-ID")));
    }

    /** Resets a forgotten password. */
    @ApiOperation(value = "密码找回接口")
    @PutMapping("/retrieve")
    public BaseVO retrievePassword(@RequestBody @Valid UserDTO userDTO) {
        return userService.retrievePassword(userDTO);
    }

    /** Looks a user up by the criteria carried in the request body. */
    @ApiOperation(value = "根据参数查询接口")
    @PostMapping("/by")
    public UserByIdVO getUser(@RequestBody GetUserDTO getUserDTO) {
        return userService.getUser(getUserDTO);
    }

    /** Updates a user's profile. */
    @ApiOperation(value = "修改")
    @PutMapping("/update")
    public BaseVO update(@RequestBody UserUpdateDTO userUpdateDTO) {
        return userService.update(userUpdateDTO);
    }
}
|
Format Relative Options
==============================================================================
`style`
> options for "best fit" ("yesterday") and "numeric" ("1 day ago") output.
`units`
> options for always rendering in a particular unit; e.g. "30 days ago",
> instead of "1 month ago".
|
# Maintainer's Guide
This document explains how to maintain an image-builder repo.
## How it works
The SD card is automatically built with [Circle](https://circleci.com).
You don't need real hardware to build and publish the SD card image.
### Pull requests
Pull requests are also built and checked with Circle.
See the `.circleci/config.yml` file to find out which commands are run for each Git commit.
The tests defined in `builder/test/` check the SD card image for each Git
commit and before releasing a new SD card image.
## Draft a new release
You can create a new release directly on GitHub.
Just click on "releases", then click the "Draft a new release" button.
Please check that you draft a release on "master" branch.
Type in eg. "v1.0.0" into the "Tag version" input field.
Type in eg. "v1.0.0" into the "Release title" input field.
Type in a release description. You may use the text of the previous release
and adjust it. A release description is important for users of
[Sibbell](https://sibbell.com) to receive a good notification email or Slack
message.
You can decide if it's a pre-release with the checkbox at the bottom.
Then press "Publish release".
After that Circle starts a new tagged build with that version tag and also
runs the deploy steps defined in `.circleci/config.yml` and pushes the SD card image
to the GitHub release.
|
use crate::{api_client::{self,
Client},
common::ui::{Status,
UIReader,
UIWriter,
UI},
error::{Error,
Result},
PRODUCT,
VERSION};
use habitat_core::origin::{Origin,
OriginMemberRole};
use reqwest::StatusCode;
use url::Url;
/// Updates `member_account`'s membership role to `role` in `origin`, talking
/// to the Builder API at `bldr_url`.
///
/// Unless `no_prompt` is set, the operator is asked to confirm first;
/// declining returns `Ok(())` without contacting the API. Known API failures
/// (403 Forbidden, 422 Unprocessable Entity, 404 Not Found) are reported with
/// a targeted hint before the error is propagated; anything else is reported
/// generically.
pub async fn start(ui: &mut UI,
                   bldr_url: Url,
                   origin: Origin,
                   token: &str,
                   member_account: &str,
                   role: OriginMemberRole,
                   no_prompt: bool)
                   -> Result<()> {
    let api_client = Client::new(bldr_url, PRODUCT, VERSION, None).map_err(Error::APIClient)?;
    ui.begin(format!("Preparing to update member {}'s role to '{}' in {} origin.",
                     member_account, role, origin))?;
    if !no_prompt && !confirm_update_role(ui)? {
        return Ok(());
    }
    match api_client.update_member_role(origin.clone(), token, member_account, role)
                    .await
    {
        Ok(_) => {
            // Best-effort status message: a UI write failure is deliberately
            // swallowed (.or(Ok(()))) since the update itself succeeded.
            ui.status(Status::Updated, "the member role successfully!".to_string())
              .or(Ok(()))
        }
        // 403: caller lacks sufficient privileges in the origin.
        Err(err @ api_client::Error::APIError(StatusCode::FORBIDDEN, _)) => {
            ui.fatal("Failed to update the role!")?;
            ui.fatal(format!("This situation could arise, if for example, you are not a member \
                              with sufficient privileges in the '{}' origin.",
                             origin))?;
            Err(Error::APIClient(err))
        }
        // 422: the server no longer recognizes the requested role.
        Err(err @ api_client::Error::APIError(StatusCode::UNPROCESSABLE_ENTITY, _)) => {
            ui.fatal("Failed to update the role!")?;
            ui.fatal(format!("This situation could arise, if for example, role: '{}' was no \
                              longer supported by the server API.",
                             role))?;
            Err(Error::APIClient(err))
        }
        // 404: unknown member or origin.
        Err(err @ api_client::Error::APIError(StatusCode::NOT_FOUND, _)) => {
            ui.fatal("Failed to update the role!")?;
            ui.fatal("This situation could arise, if for example, you passed a invalid member \
                      or origin name.")?;
            Err(Error::APIClient(err))
        }
        Err(e) => {
            ui.fatal(format!("Failed to update the role! {:?}", e))?;
            Err(Error::from(e))
        }
    }
}
/// Asks the operator to confirm the role change; the default answer is "yes".
fn confirm_update_role(ui: &mut UI) -> Result<bool> {
    let confirmed = ui.prompt_yes_no("Modify the role as indicated above?", Some(true))?;
    Ok(confirmed)
}
|
// Math rendering configuration: recognize regex-escaped `$$` and literal
// `<math>` as delimiters, and emit MathML ("mml") output.
module.exports = {
  delims: ["\\$\\$", "<math>"],
  output: "mml"
};
|
const path = require('path');
const fs = require('fs');
const m = require('match-file-utility');

// Build settings come from the `gruntBuild` key of package.json.
const config = JSON.parse(fs.readFileSync('package.json')).gruntBuild;

// Excludes Gruntfile.js itself from any matched file list.
function notGrunt(file) {
  return !/Gruntfile.js$/.test(file);
}

// Source file lists, grouped by build layer (shared code, vendor libs,
// application scripts, components, ...).
let src = {
  shared : m('src/shared/', /\.js$/).filter(notGrunt),
  vendor : m('src/application/scripts/vendor/', /\.js$/).filter(notGrunt),
  constants : m('src/application/scripts/constants/', /\.js$/).filter(notGrunt),
  predicates : m('src/application/scripts/predicates/', /\.js$/).filter(notGrunt),
  custom : m('src/application/scripts/custom/', /\.js$/).filter(notGrunt),
  components : m('src/application/components/', /\.js$/).filter(notGrunt),
  containers : m('src/application/containers/', /\.js$/).filter(notGrunt),
  collections : m('src/application/collections/', /\.js$/).filter(notGrunt),
  main : m('src/application/scripts/main/', /\.js$/).filter(notGrunt),
  init : m('src/application/scripts/init/', /\.js$/).filter(notGrunt),
  exports : m('src/application/scripts/exports/', /\.js$/).filter(notGrunt)
};

// Output targets: production always bundles; development bundles only when
// config.isBundle is set, otherwise it emits one file per source group.
let dest = {
  development : {},
  production : {
    // NOTE(review): the guard tests config.scripts but the value used is
    // config.bundle — this looks like it may have meant config.scripts.bundle;
    // confirm against the package.json schema before changing.
    bundle : config.scripts && config.bundle
      ? config.bundle
      : 'bin/bundle.js'
  }
};

if (config.isBundle) {
  dest.development.bundle = dest.production.bundle;
} else {
  // Only emit a development target for groups that actually have files.
  for (var k in src) {
    if (src[k].length) {
      dest.development[k] = 'bin/' + k + '.js';
    }
  }
}

module.exports = {
  src : src,
  dest : dest,
  // Concatenation order matters: shared and constants load before vendor and
  // application code; exports load last.
  list : [].concat(
    src.shared,
    src.constants,
    src.predicates,
    src.vendor,
    src.custom,
    src.components,
    src.containers,
    src.collections,
    src.main,
    src.init,
    src.exports
  )
};
|
import { combineReducers } from 'redux';
import UploadDetailsReducer from '../../views/Upload/upload.reducer';
import HomeReducer from '../../views/Home/home.reducer';
import ProfileReducer from '../../views/Profile/myprofile.reducer';
import AdminReducer from '../../views/Admin/admin.reducer';
import SelectorBarReducer from '../../components/SelectorBar/selectorbar.reducer';
import LoginReducer from '../../common-components/GoogleLoginComponent/GoogleLogin.reducer';
import QuizLeaderBoardReducer from '../../views/Quiz/quiz.reducer'

/** import your reducers */
// Root reducer: each view/component reducer is mounted under a state key
// matching its imported name (ES2015 shorthand properties).
export default combineReducers({
  LoginReducer,
  UploadDetailsReducer,
  HomeReducer,
  ProfileReducer,
  AdminReducer,
  SelectorBarReducer,
  QuizLeaderBoardReducer
})
|
# MessagePack.Experimental
This C# project is the experimental project for the features which are very complex, unstable or unsafe.
- [HardwareIntrinsics](HardwareIntrinsics/HardwareIntrinsics.md)
- [UnsafeUnmanagedStructFormatter](UnsafeUnmanagedStructFormatter/UnsafeUnmanagedStructFormatter.md)
**Caution!**
`MessagePack.Experimental` only targets `.NET Core 3.1` and above.
You cannot use this in Unity or .NET Framework.
|
import '../loadenv';
import '../utils/db';
import moment from 'moment';
import RSS from '../models/rss';
import Podcast from '../models/podcast';
import logger from '../utils/logger';
import { RssQueueAdd, PodcastQueueAdd } from '../asyncTasks';
import { isURL } from '../utils/validation';
// Publication kinds the conductor schedules: each maps to its Mongoose
// schema and the queue function used to enqueue scrape jobs.
const publicationTypes = {
	rss: { schema: RSS, enqueue: RssQueueAdd },
	podcast: { schema: Podcast, enqueue: PodcastQueueAdd },
};

// Seconds between conductor iterations.
const conductorInterval = 60;
// Default minutes between scrapes; mutable — lowered at runtime when the
// database is small (see conduct()).
var scrapeInterval = 25;
// Minutes between scrapes for popular publications.
var popularScrapeInterval = 2;
// conductor runs conduct every conductorInterval seconds.
const conductor = () => {
	logger.info(
		`Starting the conductor... will conduct every ${conductorInterval} seconds`,
	);
	// Self-rescheduling loop: setTimeout is armed immediately after conduct()
	// is *started* (not awaited), so a slow iteration can overlap the next one.
	function forever() {
		conduct()
			.then(() => {
				logger.info('Conductor iteration completed...');
			})
			.catch(err => {
				logger.error('Conductor broke down', { err });
			});
		setTimeout(forever, conductorInterval * 1000);
	}
	forever();
};

// Start scheduling as soon as the module is loaded.
conductor();
// Draws a random power of two from {2, 4, ..., 2**n} with geometrically
// decreasing probability: 2 is twice as likely as 4, 4 twice as likely as 8,
// and so on up to 2**n. Used as an exponential-backoff threshold.
function rand(n = 6) {
	const draw = Math.floor(Math.random() * 2 ** n);
	let bit;
	for (bit of [...Array(n).keys()].reverse()) {
		if (draw >= 2 ** bit - 1) {
			break;
		}
	}
	return 2 ** (n - bit);
}
/**
 * Finds up to `limit` publications of `schema` that are due for a scrape:
 * not currently parsing, marked valid, with at least `followerCount`
 * followers, last scraped more than `scrapeInterval` minutes ago, and with a
 * consecutive-failure count below a random power-of-two threshold (an
 * exponential backoff: repeatedly failing feeds get scheduled ever more
 * rarely). Most-followed publications come first.
 */
async function getPublications(schema, followerCount, scrapeInterval, limit) {
	return await schema
		.find({
			isParsing: {
				$ne: true,
			},
			followerCount: { $gte: followerCount },
			valid: true,
			lastScraped: {
				$lte: moment()
					.subtract(scrapeInterval, 'minutes')
					.toDate(),
			},
			consecutiveScrapeFailures: {
				$lt: rand(),
			},
		})
		.limit(limit)
		.sort('-followerCount');
}
// conduct does the actual work of scheduling the scraping: for each
// publication type it finds due feeds, marks them isParsing, and enqueues
// one scrape job per feed.
async function conduct() {
	for (const [publicationType, publicationConfig] of Object.entries(publicationTypes)) {
		// lookup the total number of rss feeds or podcasts
		// NOTE(review): Model.count() is deprecated in newer Mongoose releases
		// (countDocuments/estimatedDocumentCount) — confirm the pinned version.
		let total = await publicationConfig.schema.count({});
		if (total < 1000) {
			// when running winds locally we can scrape more frequently
			// NOTE(review): this lowers the module-level scrapeInterval and never
			// restores it, so it stays lowered for all later iterations and types.
			scrapeInterval = popularScrapeInterval;
		}
		// never schedule more than 1/15 per minute interval
		let maxToSchedule = Math.ceil(total / 15 + 1);
		logger.info(
			`conductor will schedule at most ${maxToSchedule} to scrape per ${conductorInterval} seconds`,
		);
		// find the publications that we need to update: half the budget for
		// popular feeds (>=100 followers), half for the rest
		const limit = maxToSchedule / 2;
		const schema = publicationConfig.schema;
		let popular = await getPublications(schema, 100, popularScrapeInterval, limit);
		let other = await getPublications(schema, 1, scrapeInterval, limit);
		logger.info(
			`found ${
				popular.length
			} popular publications that we scrape every ${popularScrapeInterval} minutes and ${
				other.length
			} that we scrape every ${scrapeInterval} minutes`,
		);
		let publications = popular.concat(other);
		// make sure we don't schedule these guys again till its finished
		let publicationIDs = [];
		for (let publication of publications) {
			publicationIDs.push(publication._id);
		}
		let updated = await publicationConfig.schema.update(
			{ _id: { $in: publicationIDs } },
			{
				isParsing: true,
			},
			{
				multi: true,
			},
		);
		logger.info(`marked ${updated.nModified} publications as isParsing`);
		// actually schedule the update
		logger.info(
			`conductor found ${publications.length} of type ${publicationType} to scrape`,
		);
		let promises = [];
		for (let publication of publications) {
			// skip feeds whose stored url is not a valid URL
			if (!isURL(publication.feedUrl)) {
				logger.warn(
					`Conductor, url looks invalid for ${publication.feedUrl} with id ${
						publication._id
					}`,
				);
				continue;
			}
			// each job carries the feed url plus the publication id keyed by type
			let job = { url: publication.feedUrl };
			job[publicationType] = publication._id;
			let promise = publicationConfig.enqueue(job, {
				removeOnComplete: true,
				removeOnFail: true,
			});
			promises.push(promise);
		}
		await Promise.all(promises);
		logger.info(
			`Processing complete! Will try again in ${conductorInterval} seconds...`,
		);
	}
}
|
package me.liuhui.mall.repository.dao;
import me.liuhui.mall.repository.dao.mybatis.provider.BaseDao;
import me.liuhui.mall.repository.model.Cart;
import me.liuhui.mall.repository.model.annotation.MapperMapping;
/**
 * Data-access interface for the {@code u_cart} table.
 */
@MapperMapping(table = "u_cart")
public interface CartDao extends BaseDao<Cart, Long> {

    /**
     * Loads the cart belonging to the given user.
     *
     * @param userId id of the owning user (was unhelpfully named {@code v})
     * @return the user's cart — presumably null when none exists; TODO confirm
     *         the null contract with the MyBatis mapping
     */
    Cart selectByUserId(Integer userId);
}
|
# After they all had been wandering away like lost sheep, to whom did they return?
They all returned to the shepherd and guardian of their souls.
|
require 'spec_helper'
# Specs for the EpubBook top-level module: version constant, configuration
# accessors, and the create_book factory.
# Fixed: typo "corfigure config" -> "configure config" in a test description.
describe EpubBook do
  it 'has a version number' do
    expect(EpubBook::VERSION).not_to be nil
  end

  describe ".config" do
    #before do
    #  EpubBook.default_config
    #end
    context 'not configure' do
      it 'return nil' do
        expect(EpubBook.config.mail_from).to eq(nil)
        expect(EpubBook.config.mail_password).to eq(nil)
      end
    end
  end

  describe '.configure' do
    describe 'not init @default_config' do
      context 'have a default_setting.yml' do
        it 'return original config' do
          expect(EpubBook.config.setting_file).to be_nil
        end
        it 'set config if invoke default_config' do
          EpubBook.default_config
          expect(EpubBook.config.setting_file).to be_nil
        end
      end
    end

    describe 'configure config' do
      # Configure once, then assert against the resulting config object.
      let(:config) do
        EpubBook.configure do |config|
          config.mail_from = "test@example.com"
          config.mail_subject = "epub 电子书"
          config.mail_body = '您创建的电子书见附件'
          config.mail_address = "smtp.example.com"
          config.mail_port = 25
          config.mail_user_name = "test@example.com"
          config.mail_password = "test"
        end
        EpubBook.config
      end

      context 'setting config' do
        it 'return mail_from' do
          expect(config).to be_instance_of(EpubBook::Config)
          expect(config.mail_from).to eq("test@example.com")
          expect(config.mail_password).to eq("test")
        end
      end

      context 'have a default_setting.yml' do
        it 'return original config' do
          EpubBook.default_config
          expect(config.mail_from).to eq('test@example.com')
        end
      end
    end
  end

  describe '.create_book' do
    before :each do
      expect_any_instance_of(EpubBook::Book).to receive(:generate_book).with("bookname")
    end

    it 'use the default yml :book' do
      epub_book = EpubBook.create_book("http://www.example.com/bookindex.html","bookname")
      expect(epub_book).to be_instance_of(EpubBook::Book)
      expect(epub_book.instance_variable_get(:@index_url)).to eq("http://www.example.com/bookindex.html")
      expect(epub_book.limit).to eq(10)
      expect(epub_book.cover_css).to eq('.pic_txt_list .pic img')
    end

    it 'use the specify yml :www_piaotiao_net' do
      epub_book = EpubBook.create_book("http://www.piaotian.net/bookindex.html","bookname")
      expect(epub_book.instance_variable_get(:@index_url)).to eq("http://www.piaotian.net/bookindex.html")
      expect(epub_book.cover_css).to eq( '#content td>table:not(.grid) img[src$=jpg]')
    end

    it 'create book with a block' do
      epub_book = EpubBook.create_book("http://www.piaotian.net/bookindex.html","bookname") do |book|
        book.cover_css = "#content .cover"
        book.title_css = "#content .title"
      end
      expect(epub_book.cover_css).to eq( '#content .cover')
      expect(epub_book.title_css).to eq( '#content .title')
    end
  end
end
|
from t2wml.input_processing.region import YamlRegion
from t2wml.settings import t2wml_settings
from t2wml.utils.t2wml_exceptions import ErrorInYAMLFileException
from t2wml.parsing.cleaning_functions import cleaning_functions_dict
from t2wml.utils.bindings import update_bindings
def create_lambda(function_name, *args, **kwargs):
    """Look up a cleaning function by name and bind the given arguments,
    returning a single-argument callable suitable for composition."""
    cleaning_fn = cleaning_functions_dict[function_name]

    def bound(value):
        return cleaning_fn(value, *args, **kwargs)

    return bound
def compose(*fs):
    """Chain the given callables into one function, applied left to right."""
    def composed(value):
        result = value
        for step in fs:
            result = step(result)
        return result
    return composed
class DFCleaner:
    """Applies the yaml 'cleaningMapping' section to a sheet, producing a
    cleaned copy of the sheet's dataframe in ``self.df``."""

    def __init__(self, cleaning_mappings, sheet):
        self.sheet=sheet
        # Fail fast on a malformed cleaningMapping section.
        validate_cleaning_yaml(cleaning_mappings)
        instructions = self.get_instruction_sets(cleaning_mappings, sheet)
        self.df=self.clean_sheet(instructions, sheet)

    def get_instruction_sets(self, cleaning_mappings, sheet):
        """Turn each mapping into {"region": YamlRegion, "parsed_func": callable},
        where parsed_func is the left-to-right composition of that mapping's
        cleaning functions."""
        # Region parsing evaluates against the sheet, so bindings must be set first.
        update_bindings(sheet=sheet)
        parsing_instructions=[]
        for index, mapping in enumerate(cleaning_mappings):
            region=YamlRegion(mapping["region"])
            functions=mapping["functions"]
            parsed_funcs=[]
            for function in functions:
                if isinstance(function, dict):
                    # Dict form: a single {name: kwargs} pair; the loop+break
                    # just unpacks that one item.
                    for function_name, kwargs in function.items():
                        break
                else:
                    # Bare-string form: function name with no arguments.
                    function_name=function
                    kwargs={}
                parsed_func = create_lambda(function_name, **kwargs)
                parsed_funcs.append(parsed_func)
            final_func=compose(*parsed_funcs)
            parsing_instructions.append({"region":region, "parsed_func":final_func})
        return parsing_instructions

    def clean_sheet(self, parsing_instructions, sheet):
        """Apply each instruction's composed function to every cell in its
        region and return the cleaned copy of the sheet's dataframe."""
        # Deep copy so the original sheet data is left untouched.
        df=sheet.data.copy(deep=True)
        for instruction in parsing_instructions:
            region = instruction["region"]
            parsed_func=instruction["parsed_func"]
            for col, row in region:
                # Region coordinates are 1-based; pandas indexing is 0-based.
                col=col-1
                row=row-1
                new_val=parsed_func(df.iloc[row, col])
                df.iat[row, col]=new_val
            #output=df.apply(np.vectorize(my_func)) #a possibly faster way, but needs fiddling
        return df
def get_cleaned_dataframe(sheet, yaml_instructions):
    """Run the cleaning mappings against ``sheet`` and return the cleaned dataframe."""
    # TODO: handle caching somehow?
    return DFCleaner(yaml_instructions, sheet).df
def validate_cleaning_yaml(input):
    """Validate the shape of the cleaningMapping yaml section.

    Raises ErrorInYAMLFileException when the section is not a list of
    dictionaries each having exactly the keys 'region' and 'functions',
    with 'functions' being a list.
    """
    if not isinstance(input, list):
        raise ErrorInYAMLFileException("cleaningMapping must contain a list")
    for entry in input:
        if not isinstance(entry, dict):
            raise ErrorInYAMLFileException("each entry in cleaningMapping must be a dictionary")
        if set(entry.keys()) != {"region", "functions"}:
            raise ErrorInYAMLFileException("each entry must contain 2 keys, 'region' and 'functions'")
        if not isinstance(entry["functions"], list):
            raise ErrorInYAMLFileException("functions entry must contain a list")
|
require 'spec_helper'
module Rsvp
  describe "Family associations" do
    it "should have many invitations" do
      Family.reflect_on_association(:invitations).macro.should eq(:has_many)
    end

    it "should have many members" do
      Family.reflect_on_association(:members).macro.should eq(:has_many)
    end

    it "should have many people" do
      Family.reflect_on_association(:people).macro.should eq(:has_many)
    end

    it "should instantiate people of appropriate types" do
      salutation = Salutation.subclasses.first
      gender = Gender.subclasses.first
      family = Family.create(salutation_type: salutation)
      person = family.people.create({ :gender_type => gender.to_s })
      person.gender_type.should eq(gender)
    end
  end

  describe "Family compositions" do
    it "should constantize a salutation type of appropriate type" do
      salutation = Salutation.subclasses.first
      family = Family.new(salutation_type: salutation)
      family.save
      family.salutation_type.should eq(salutation)
    end
  end

  describe "Family Salutation" do
    it "should be required" do
      # Removed an unused `salutation` local: this example deliberately
      # creates the family WITHOUT a salutation to assert the validation.
      family = Family.create
      expect(family.persisted?).to be_false
      expect(family.errors.get(:salutation_type)).to include("can't be blank")
    end

    # Fixed typo in description: "instaniate" -> "instantiate".
    it "should correctly instantiate a salutation" do
      person = Person.create(gender_type: Gender::AdultMale, first_name: "John", last_name: "Smith")
      family = Family.new(salutation_type: Salutation::SingleMale)
      family.people << person
      family.save(validate: false)
      expect(family.salutation).to eq("Mr. John Smith")
    end
  end
end
|
# chainer-object-detection
Google Colaboratory上で独自データセットを用いた物体検出(バウンディングボックスとクラスラベルを付与)できます。
自分のPCにGPUが無いためcolabで実行しましたが、本ソースコードをコピペしてローカル環境でも実行できます。
# Demo
学習後の推論実行結果

# Requirement
Colab上でライブラリなどをインストールするので特になし。
# Note
読み込む画像名は半角英数字で記述してください。
# Usage
各セルを上から順に実行してください。
各セクションやセルの実行内容はコード内またはテキストセル内で記述しています。
# License
[MIT license](https://en.wikipedia.org/wiki/MIT_License).
|
---
layout: post
title: Week 11
---
# Part 1 Project Reflection
Particularly, Group 6 was very impressive in several ways.
First, I really liked how each member of the group has a respective task.
Since the project is big and active enough, they choose a similar approach of ours.
Also I liked how they started with less complicated tasks,
such as front-end related issues (the navbar overflow).
I also thought that translation is very useful for the
project, since open source projects tend to reach a global audience.
I can speak Korean and two of the other group members can speak
Chinese so translation is a very powerful tool we can use to contribute to our project too.
Group 6 is the most active and successful group of our class
since they are not only taking on lots of issues but also creating many pull requests.
There were a lot of things to learn from their presentation.
<hr>
# Part 2 Group Project
Our group recently made a huge leap.
Thanks to the discord channel that Victoria found,
I was able to figure out how to launch the remote dev tool
which is critical to making a new theme. Thanks to the css mapping,
we are now able to test our design to make a new theme such as moving menus to different places.
The new theme will hide the friend feed tab and switch the playbar as a vertical position.
I wouldn’t say this is practical design but it certainly gives fresh insights to the users.
Also, we are going to make a couple of color schemes (which is the color themes).
I hope we can work together far and good enough to make the PR.
Besides the dev tool, there are a limited number of issues we can currently start work on.
Therefore, we are planning to focus our attention solely on making the new theme by next Thursday.
|
import 'package:glob/glob.dart';
/// Represents converted unused localization config,
/// which contains parsed entities.
class UnusedL10nAnalysisConfig {
  /// Exclusion patterns applied globally to the analysis run.
  final Iterable<Glob> globalExcludes;

  /// Exclusion patterns specific to the unused-l10n analyzer.
  final Iterable<Glob> analyzerExcludedPatterns;

  /// Regex matching localization class names to inspect.
  final RegExp classPattern;

  UnusedL10nAnalysisConfig(
    this.globalExcludes,
    this.analyzerExcludedPatterns,
    String? classPattern,
    // Falls back to matching class names ending in "I18n" when no pattern
    // is supplied.
  ) : classPattern = RegExp(classPattern ?? r'I18n$');
}
|
# Project journal for fp2md4roam
## Saturday 16 October 2021
Very primitive version working!
## Monday 18 October 2021
Beta version is now packaged with entry point.
## Thursday 09 December 2021
Made minor refactorings before merging common code in fp2md
Now uses the same markdown_builder as fp2md
|
// A single leave (time-off) request as exchanged with the API.
export interface LeaveRequestItem {
    datetime_leave_from : string;           // start of the leave period
    datetime_leave_to : string | null;      // end of the leave period; null presumably means open-ended — confirm with API docs
    email : string;                         // requester's email address
    avatar : string;                        // requester's avatar URL
    full_name : string;                     // requester's display name
    email_content : string;                 // body of the notification email
    leave_request_type_id : number;         // id referencing the leave type
    reason : string;                        // requester-provided reason
    user_id : number;                       // id of the requesting user
    id : number;                            // id of this leave request
    isShow : boolean;                       // UI flag controlling row visibility
}
// Pagination metadata accompanying list responses.
export interface Pagination {
    current_page: number;   // 1-based page index — TODO confirm base with API
    total_row : number;     // total number of rows across all pages
    row_per_page: number;   // page size
}
|
<?php
/**
*
* @package phpBB Gallery
* @version $Id$
* @copyright (c) 2007 nickvergessen nickvergessen@gmx.de http://www.flying-bits.org
* @license http://opensource.org/licenses/gpl-license.php GNU Public License
*
*/
namespace phpbbgallery\acpcleanup;
use phpbb\language\language;
use phpbb\user;
use phpbbgallery\core\album\album;
/**
 * ACP cleanup helpers for the gallery: delete orphaned source files,
 * images whose files or authors are gone, comments without authors,
 * personal galleries, and prune images by an admin-supplied pattern.
 */
class cleanup
{
	/** @var \phpbb\db\driver\driver_interface */
	protected $db;

	/** @var \phpbbgallery\core\file\file */
	protected $tool;

	/** @var \phpbb\user */
	protected $user;

	/** @var \phpb\language\language */
	protected $language;

	/** @var \phpbbgallery\core\block */
	protected $block;

	/** @var \phpbbgallery\core\album\album */
	protected $album;

	/** @var \phpbbgallery\core\comment */
	protected $comment;

	/** @var \phpbbgallery\core\config */
	protected $gallery_config;

	/** @var \phpbbgallery\core\log */
	protected $log;

	/** @var \phpbbgallery\core\moderate */
	protected $moderate;

	/** @var string Albums table name */
	protected $albums_table;

	/** @var string Images table name */
	protected $images_table;

	/**
	 * cleanup constructor.
	 *
	 * @param \phpbb\db\driver\driver_interface $db
	 * @param \phpbbgallery\core\file\file $tool
	 * @param \phpbb\user $user
	 * @param \phpbb\language\language $language
	 * @param \phpbbgallery\core\block $block
	 * @param \phpbbgallery\core\album\album $album
	 * @param \phpbbgallery\core\comment $comment
	 * @param \phpbbgallery\core\config $gallery_config
	 * @param \phpbbgallery\core\log $log
	 * @param \phpbbgallery\core\moderate $moderate
	 * @param $albums_table
	 * @param $images_table
	 */
	public function __construct(\phpbb\db\driver\driver_interface $db, \phpbbgallery\core\file\file $tool, \phpbb\user $user, \phpbb\language\language $language,
		\phpbbgallery\core\block $block, \phpbbgallery\core\album\album $album, \phpbbgallery\core\comment $comment,
		\phpbbgallery\core\config $gallery_config, \phpbbgallery\core\log $log, \phpbbgallery\core\moderate $moderate,
		$albums_table, $images_table)
	{
		$this->db = $db;
		$this->tool = $tool;
		$this->user = $user;
		$this->language = $language;
		$this->block = $block;
		$this->album = $album;
		$this->comment = $comment;
		$this->gallery_config = $gallery_config;
		$this->log = $log;
		$this->moderate = $moderate;
		$this->albums_table = $albums_table;
		$this->images_table = $images_table;
	}

	/**
	 * Delete source files without a database entry.
	 *
	 * @param array $filenames An array of filenames
	 * @return string Language key for the success message.
	 */
	public function delete_files($filenames)
	{
		foreach ($filenames as $file)
		{
			// NOTE(review): utf8_decode() is deprecated as of PHP 8.2 —
			// confirm the target PHP version for this extension.
			$this->tool->delete(utf8_decode($file));
			$this->tool->delete_cache(utf8_decode($file));
		}
		$this->log->add_log('admin', 'clean_deletefiles', 0, 0, array('LOG_CLEANUP_DELETE_FILES', count($filenames)));

		return 'CLEAN_ENTRIES_DONE';
	}

	/**
	 * Delete images, where the source file is missing.
	 *
	 * @param mixed $image_ids Either an array of integers or an integer.
	 * @return string Language key for the success message.
	 */
	public function delete_images($image_ids)
	{
		$this->log->add_log('admin', 'clean_deleteentries', 0, 0, array('LOG_CLEANUP_DELETE_ENTRIES', count($image_ids)));
		// Second argument false: skip file deletion — the sources are already gone.
		$this->moderate->delete_images($image_ids, false);

		return 'CLEAN_SOURCES_DONE';
	}

	/**
	 * Delete images, where the author is missing.
	 *
	 * @param mixed $image_ids Either an array of integers or an integer.
	 * @return string Language key for the success message.
	 */
	public function delete_author_images($image_ids)
	{
		$this->log->add_log('admin', 'clean_deletenoauthors', 0, 0, array('LOG_CLEANUP_DELETE_NO_AUTHOR', count($image_ids)));
		$this->moderate->delete_images($image_ids);

		return 'CLEAN_AUTHORS_DONE';
	}

	/**
	 * Delete comments, where the author is missing.
	 *
	 * @param mixed $comment_ids Either an array of integers or an integer.
	 * @return string Language key for the success message.
	 */
	public function delete_author_comments($comment_ids)
	{
		$this->log->add_log('admin', 'clean_deletecna', 0, 0, array('LOG_CLEANUP_COMMENT_DELETE_NO_AUTHOR', count($comment_ids)));
		$this->comment->delete_comments($comment_ids);

		return 'CLEAN_COMMENTS_DONE';
	}

	/**
	 * Delete unwanted and obsolent personal galleries.
	 *
	 * @param array $unwanted_pegas User IDs we want to delete the pegas.
	 * @param array $obsolent_pegas User IDs we want to delete the pegas.
	 * @return array Language keys for the success messages.
	 */
	public function delete_pegas($unwanted_pegas, $obsolent_pegas)
	{
		$delete_pegas = array_merge($unwanted_pegas, $obsolent_pegas);
		$delete_images = $delete_albums = $user_image_count = array();
		$num_pegas = 0;

		// Collect every album belonging to the targeted users; a parent_id of 0
		// marks the root album of a personal gallery (pega).
		$sql = 'SELECT album_id, parent_id
			FROM ' . $this->albums_table . '
			WHERE ' . $this->db->sql_in_set('album_user_id', $delete_pegas);
		$result = $this->db->sql_query($sql);
		while ($row = $this->db->sql_fetchrow($result))
		{
			$delete_albums[] = (int) $row['album_id'];
			if ($row['parent_id'] == 0)
			{
				$num_pegas++;
			}
		}
		$this->db->sql_freeresult($result);

		// Collect the images in those albums and count approved images per
		// uploader (unapproved/orphan images do not count toward user totals).
		$sql = 'SELECT image_id, image_filename, image_status, image_user_id
			FROM ' . $this->images_table . '
			WHERE ' . $this->db->sql_in_set('image_album_id', $delete_albums, false, true);
		$result = $this->db->sql_query($sql);
		$filenames = array();
		while ($row = $this->db->sql_fetchrow($result))
		{
			$delete_images[] = (int) $row['image_id'];
			$filenames[(int) $row['image_id']] = $row['image_filename'];
			if (($row['image_status'] == $this->block->get_image_status_unapproved()) ||
			($row['image_status'] == $this->block->get_image_status_orphan()))
			{
				continue;
			}
			if (isset($user_image_count[(int) $row['image_user_id']]))
			{
				$user_image_count[(int) $row['image_user_id']]++;
			}
			else
			{
				$user_image_count[(int) $row['image_user_id']] = 1;
			}
		}
		$this->db->sql_freeresult($result);

		if (!empty($delete_images))
		{
			$this->moderate->delete_images($delete_images, $filenames);
		}

		$sql = 'DELETE FROM ' . $this->albums_table . '
			WHERE ' . $this->db->sql_in_set('album_id', $delete_albums);
		$this->db->sql_query($sql);

		$this->gallery_config->dec('num_pegas', $num_pegas);
		// If the "newest personal gallery" shown on the index was just deleted,
		// recompute it (or reset the config when none are left).
		if (in_array($this->gallery_config->get('newest_pega_album_id'), $delete_albums))
		{
			// Update the config for the statistic on the index
			if ($this->gallery_config->get('num_pegas') > 0)
			{
				$sql_array = array(
					'SELECT'	=> 'a.album_id, u.user_id, u.username, u.user_colour',
					'FROM'		=> array($this->albums_table => 'a'),
					'LEFT_JOIN'	=> array(
						array(
							'FROM'	=> array(USERS_TABLE => 'u'),
							'ON'	=> 'u.user_id = a.album_user_id',
						),
					),
					'WHERE'		=> 'a.album_user_id <> ' . $this->album->get_public() . ' AND a.parent_id = 0',
					'ORDER_BY'	=> 'a.album_id DESC',
				);
				$sql = $this->db->sql_build_query('SELECT', $sql_array);
				$result = $this->db->sql_query_limit($sql, 1);
				$newest_pega = $this->db->sql_fetchrow($result);
				$this->db->sql_freeresult($result);
			}
			if (($this->gallery_config->get('num_pegas') > 0) && isset($newest_pega))
			{
				$this->gallery_config->set('newest_pega_user_id', $newest_pega['user_id']);
				$this->gallery_config->set('newest_pega_username', $newest_pega['username']);
				$this->gallery_config->set('newest_pega_user_colour', $newest_pega['user_colour']);
				$this->gallery_config->set('newest_pega_album_id', $newest_pega['album_id']);
			}
			else
			{
				$this->gallery_config->set('newest_pega_user_id', 0);
				$this->gallery_config->set('newest_pega_username', '');
				$this->gallery_config->set('newest_pega_user_colour', '');
				$this->gallery_config->set('newest_pega_album_id', 0);
				if (isset($newest_pega))
				{
					$this->gallery_config->set('num_pegas', 0);
				}
			}
		}
		/*
		foreach ($user_image_count as $user_id => $images)
		{
			//phpbb_gallery_hookup::add_image($user_id, (0 - $images));
			$uploader = new \phpbbgallery\core\user($this->db, $user_id, false);
			$uploader->update_images((0 - $images));
		}
		\phpbbgallery\core\user::update_users($delete_pegas, array('personal_album_id' => 0));
		*/
		$return = array();
		if ($obsolent_pegas)
		{
			$return[] = 'CLEAN_PERSONALS_DONE';
		}
		if ($unwanted_pegas)
		{
			$return[] = 'CLEAN_PERSONALS_BAD_DONE';
		}

		return $return;
	}

	/**
	 * Delete all images matching the given prune pattern.
	 *
	 * Array-valued pattern fields (album/user ids, given as comma separated
	 * strings) become IN() clauses; scalar fields become "field < value".
	 *
	 * @param array $pattern field => value pairs describing what to prune
	 * @return string Language key for the success message.
	 */
	public function prune($pattern)
	{
		$sql_where = '';
		if (isset($pattern['image_album_id']))
		{
			$pattern['image_album_id'] = array_map('intval', explode(',', $pattern['image_album_id']));
		}
		if (isset($pattern['image_user_id']))
		{
			$pattern['image_user_id'] = array_map('intval', explode(',', $pattern['image_user_id']));
		}
		foreach ($pattern as $field => $value)
		{
			if (is_array($value))
			{
				$sql_where .= (($sql_where) ? ' AND ' : ' WHERE ') . $this->db->sql_in_set($field, $value);
				continue;
			}
			$sql_where .= (($sql_where) ? ' AND ' : ' WHERE ') . $field . ' < ' . $value;
		}

		$sql = 'SELECT image_id, image_filename
			FROM ' . $this->images_table . '
			' . $sql_where;
		$result = $this->db->sql_query($sql);
		$image_ids = $filenames = $update_albums = array();
		while ($row = $this->db->sql_fetchrow($result))
		{
			$image_ids[] = (int) $row['image_id'];
			$filenames[(int) $row['image_id']] = $row['image_filename'];
		}
		$this->db->sql_freeresult($result);

		if ($image_ids)
		{
			$this->moderate->delete_images($image_ids, $filenames);
		}

		return 'CLEAN_PRUNE_DONE';
	}

	/**
	 * Build a human-readable, localised description of a prune pattern,
	 * resolving album ids to names and user ids to linked usernames.
	 *
	 * @param array $pattern field => value pairs as passed to prune()
	 * @return string HTML snippet, one line per pattern field.
	 */
	public function lang_prune_pattern($pattern)
	{
		if (isset($pattern['image_album_id']))
		{
			$pattern['image_album_id'] = array_map('intval', explode(',', $pattern['image_album_id']));
		}
		if (isset($pattern['image_user_id']))
		{
			$pattern['image_user_id'] = array_map('intval', explode(',', $pattern['image_user_id']));
		}
		$lang_pattern = '';
		foreach ($pattern as $field => $value)
		{
			// Strip the "image_" prefix so fields map to PRUNE_PATTERN_* keys.
			$field = (strpos($field, 'image_') === 0) ? substr($field, 6) : $field;
			switch ($field)
			{
				case 'album_id':
					$sql = 'SELECT album_name
						FROM ' . $this->albums_table . '
						WHERE ' . $this->db->sql_in_set('album_id', $value) . '
						ORDER BY album_id ASC';
					$result = $this->db->sql_query($sql);
					$value = '';
					while ($row = $this->db->sql_fetchrow($result))
					{
						$value .= (($value) ? ', ' : '') . $row['album_name'];
					}
					$this->db->sql_freeresult($result);
				break;

				case 'user_id':
					$sql = 'SELECT user_id, user_colour, username
						FROM ' . USERS_TABLE . '
						WHERE ' . $this->db->sql_in_set('user_id', $value) . '
						ORDER BY user_id ASC';
					$result = $this->db->sql_query($sql);
					$value = '';
					while ($row = $this->db->sql_fetchrow($result))
					{
						$value .= (($value) ? ', ' : '') . get_username_string('full', $row['user_id'], (($row['user_id'] != ANONYMOUS) ? $row['username'] : $this->language->lang('GUEST')), $row['user_colour']);
					}
					$this->db->sql_freeresult($result);
				break;

				case 'time':
					$value = $this->user->format_date($value, false, true);
				break;

				case 'rate_avg':
					// Ratings are stored scaled by 100.
					$value = ($value / 100);
				break;
			}
			$lang_pattern .= (($lang_pattern) ? '<br />' : '') . $this->language->lang('PRUNE_PATTERN_' . strtoupper($field), $value);
		}

		return $lang_pattern;
	}
}
|
#!/bin/sh
# Attach the monitoring
# NOTE(review): "echxute.sh" looks like a typo for "execute.sh" — confirm
# the helper script's actual filename before changing.
./echxute.sh "chaosorca monit start --name hello_world"
#include <iostream>
#include "fsmpp2/fsmpp2.hpp"
#include "fsmpp2/plantuml.hpp"
// Events that drive transitions in the example state machine below.
namespace events
{

struct Ev1 : fsmpp2::event {};
struct Ev2 : fsmpp2::event {};
struct Ev3 : fsmpp2::event {};

} // namespace events
// Example state set. The handle() members are declared but intentionally
// not defined here: generating the PlantUML diagram only inspects the
// declared signatures (the return type lists the possible transitions),
// so no definitions are needed to link this program.
namespace states
{

struct EmptyContext {};

// Forward declarations so earlier states can name later ones in their
// transition lists.
struct A;
struct B;
struct C;
struct D;
struct E;
struct F;

struct A : fsmpp2::state<>
{
    // Ev1: A -> B
    auto handle(events::Ev1 const&) -> fsmpp2::transitions<B>;
};

struct B : fsmpp2::state<>
{
    // Ev1: B -> C;  Ev2: B -> A
    auto handle(events::Ev1 const&) -> fsmpp2::transitions<C>;
    auto handle(events::Ev2 const&) -> fsmpp2::transitions<A>;
};

struct C : fsmpp2::state<>
{
    // Ev1 may go to either A or D; Ev2: C -> F
    auto handle(events::Ev1 const&) -> fsmpp2::transitions<A, D>;
    auto handle(events::Ev2 const&) -> fsmpp2::transitions<F>;
};

struct D : fsmpp2::state<>
{
    auto handle(events::Ev3 const&) -> fsmpp2::transitions<E>;
};

struct E : fsmpp2::state<>
{
    auto handle(events::Ev3 const&) -> fsmpp2::transitions<F>;
};

// Terminal state: no handlers, no outgoing transitions.
struct F : fsmpp2::state<> {};

} // namespace states
// Emits a PlantUML state diagram for the states/events declared above.
int main()
{
    using States = fsmpp2::states<states::A, states::B, states::C, states::D, states::E, states::F>;
    using Events = fsmpp2::events<events::Ev1, events::Ev2, events::Ev3>;

    fsmpp2::plantuml::print_state_diagram<States, Events>(std::cout);
}
|
package ga.rugal.fridge.core.service;
import ga.rugal.fridge.core.dao.HistoryDao;
/**
 * Service-layer contract for history records; inherits its operations
 * from {@code BaseService} parameterised over {@code HistoryDao}.
 */
public interface HistoryService extends BaseService<HistoryDao> {
}
|
<?php
// Database connection settings.
// NOTE(review): credentials are hard-coded in source; move them to
// environment variables or a non-committed config file before deploying.
$servername = "localhost";
$username = "root"; // For localhost it will be root
$password = "mysql"; // If using Ammps "mysql" if other leave empty
$dbname = "200385752Comp1006Assignment2"; // replace with your database name

// Create connection (previous mysqli variant kept for reference)
// $conn = new mysqli($servername, $username, $password, $dbname) ;
$conn = new PDO("mysql:host=$servername;dbname=$dbname",$username,$password);

//enable SQL debugging: make PDO throw exceptions instead of failing silently
$conn->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION);
?>
|
package com.zhongya.havefun.app.util
import android.content.Context
import android.util.TypedValue
/**
 * Static conversion helpers between density-independent units (dp/sp)
 * and raw pixels. Not instantiable.
 */
class DensityUtils private constructor() {

    companion object {

        /** Converts a dp value to pixels for the device's display metrics. */
        @JvmStatic
        fun dp2px(context: Context, dpVal: Float): Float {
            return TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP,
                    dpVal, context.resources.displayMetrics)
        }

        /** Converts an sp value to whole pixels (fraction truncated). */
        // Consistency fix: @JvmStatic was only on dp2px, so Java callers could
        // not invoke the other helpers statically; now applied uniformly.
        @JvmStatic
        fun sp2px(context: Context, spVal: Float): Int {
            return TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_SP,
                    spVal, context.resources.displayMetrics).toInt()
        }

        /** Converts a pixel value back to dp. */
        @JvmStatic
        fun px2dp(context: Context, pxVal: Float): Float {
            val scale = context.resources.displayMetrics.density
            return pxVal / scale
        }

        /** Converts a pixel value back to sp. */
        @JvmStatic
        fun px2sp(context: Context, pxVal: Float): Float {
            return pxVal / context.resources.displayMetrics.scaledDensity
        }
    }

    init {
        // Defensive guard: even reflective instantiation fails loudly.
        throw UnsupportedOperationException("cannot be instantiated")
    }
}
|
package ktx.dev.engine.media
import com.luck.picture.lib.config.PictureMimeType
import com.luck.picture.lib.entity.LocalMedia
import dev.engine.media.IMediaEngine
/**
* detail: Local Media Selector Data
* @author Ttt
*/
class LocalMediaData : IMediaEngine.MediaData {

    // Wrapped PictureSelector media entry; null until assigned.
    var localMedia: LocalMedia? = null

    constructor()

    constructor(localMedia: LocalMedia?) {
        this.localMedia = localMedia
    }

    /**
     * Get the local resource path.
     * @param original whether to use the original (unprocessed) image path
     * @return local resource path, or null when no media is set
     */
    fun getLocalMediaPath(original: Boolean): String? {
        localMedia?.let {
            if (original) return it.path
            // determine the resource's mime type
            val mimeType = it.mimeType
            return if (PictureMimeType.isHasImage(mimeType)) { // image
                if (it.isCompressed) { // prefer the compressed copy if present
                    it.compressPath
                } else if (it.isCut) { // otherwise the cropped copy if present
                    it.cutPath
                } else { // fall back to the original image
                    it.path
                }
            } else {
                // non-image media: always the original path
                it.path
            }
        }
        return null
    }
}
|
from test import TCBase, check_status_code
class SaveUrlTest(TCBase):
    """Tests for the URL-shortening save endpoint."""

    @check_status_code(201)
    def test_success_save_url(self):
        # A fresh save succeeds and returns the shortened URL.
        rv = self.save_url_request()
        self.assertEqual(rv.json['url'], 'http://localhost/b')

        return rv

    @check_status_code(201)
    def test_exist_url(self):
        # Saving the same URL twice must yield the same shortened URL.
        rv = self.save_url_request()
        url = rv.json['url']
        self.assertEqual(rv.status_code, 201)

        rv2 = self.save_url_request()
        # Bug fix: compare the SECOND response to the first URL. The original
        # asserted rv.json['url'] == url, which compared rv to itself and
        # could never fail.
        self.assertEqual(rv2.json['url'], url)
        return rv2
|
import React from 'react';
import { storiesOf } from '@storybook/react';
import { RequestStatusContainer } from './index';
// Storybook stories exercising RequestStatusContainer with each supported
// status type (success / warn / error), three messages per story.
storiesOf('Request status', module)
  .add('Success', () => (
    <RequestStatusContainer
      statuses={[
        { type: 'success', message: 'Cool bananas' },
        { type: 'success', message: 'Everything is looking 👌' },
        { type: 'success', message: 'Nothing broke' },
      ]}
    />
  ))
  .add('Warn', () => (
    <RequestStatusContainer
      statuses={[
        { type: 'warn', message: 'Oh shucks' },
        { type: 'warn', message: 'Something feels wrong 🙈' },
        { type: 'warn', message: 'Wait for it...' },
      ]}
    />
  ))
  .add('Error', () => (
    <RequestStatusContainer
      statuses={[
        { type: 'error', message: 'Dang' },
        { type: 'error', message: 'Not looking good 🥴' },
        { type: 'error', message: 'Houston we have a problem.' },
      ]}
    />
  ));
|
/**
* Copyright [2012] [Datasalt Systems S.L.]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datasalt.pangool.serialization;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;
/**
 * You can use this utility class to serialize / deserialize anything in the
 * Hadoop context. It is thread safe. Instantiate once, reuse many times.
 * Otherwise it is not efficient.
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
public class HadoopSerialization {

	private SerializationFactory serialization;

	public HadoopSerialization(Configuration conf) throws IOException {
		serialization = new SerializationFactory(conf);
	}

	// Thread-local reusable input buffer for byte-array deserialization.
	private ThreadLocal<DataInputBuffer>
	  cachedInputStream = new ThreadLocal<DataInputBuffer>() {
		@Override
		protected DataInputBuffer initialValue() {
			return new DataInputBuffer();
		}
	};

	// Per-thread serializer cache, keyed by the concrete class.
	private ThreadLocal<Map<Class, Serializer>>
	  cachedSerializers = new ThreadLocal<Map<Class, Serializer>>() {
		@Override
		protected Map<Class, Serializer> initialValue() {
			return new HashMap<Class, Serializer>();
		}
	};

	// Per-thread deserializer cache, keyed by the concrete class.
	private ThreadLocal<Map<Class, Deserializer>>
	  cachedDeserializers = new ThreadLocal<Map<Class, Deserializer>>() {
		@Override
		protected Map<Class, Deserializer> initialValue() {
			return new HashMap<Class, Deserializer>();
		}
	};

	/**
	 * Looks up (and caches) the deserializer for the given class.
	 * Consistency fix: previously only ser() reported a missing
	 * serialization with an informative IOException, while the deser()
	 * variants failed later with a NullPointerException.
	 *
	 * @throws IOException if no deserializer is registered for the class.
	 */
	private Deserializer deserializerFor(Class clazz) throws IOException {
		Map<Class, Deserializer> deserializers = cachedDeserializers.get();
		Deserializer deSer = deserializers.get(clazz);
		if(deSer == null) {
			deSer = serialization.getDeserializer(clazz);
			if(deSer == null) {
				throw new IOException("Deserializer for class " + clazz + " not found");
			}
			deserializers.put(clazz, deSer);
		}
		return deSer;
	}

	/**
	 * Serializes the given object using the Hadoop serialization system.
	 */
	public void ser(Object datum, OutputStream output) throws IOException {
		Map<Class, Serializer> serializers = cachedSerializers.get();
		Serializer ser = serializers.get(datum.getClass());
		if(ser == null) {
			ser = serialization.getSerializer(datum.getClass());
			if(ser == null) {
				throw new IOException("Serializer for class " + datum.getClass() + " not found");
			}
			serializers.put(datum.getClass(), ser);
		}
		ser.open(output);
		ser.serialize(datum);
		ser.close();
	}

	/**
	 * Deserializes into the given object using the Hadoop serialization system.
	 * Object cannot be null.
	 */
	public <T> T deser(Object obj, InputStream in) throws IOException {
		Deserializer deSer = deserializerFor(obj.getClass());
		deSer.open(in);
		obj = deSer.deserialize(obj);
		deSer.close();
		return (T) obj;
	}

	/**
	 * Return a new instance of the given class with the deserialized data from
	 * the input stream.
	 */
	public <T> T deser(Class clazz, InputStream in) throws IOException {
		Deserializer deSer = deserializerFor(clazz);
		deSer.open(in);
		Object obj = deSer.deserialize(null);
		deSer.close();
		return (T) obj;
	}

	/**
	 * Deserialize an object using Hadoop serialization from a byte array. The
	 * object cannot be null.
	 */
	public <T> T deser(Object obj, byte[] array, int offset, int length) throws IOException {
		Deserializer deSer = deserializerFor(obj.getClass());
		DataInputBuffer baIs = cachedInputStream.get();
		baIs.reset(array, offset, length);
		deSer.open(baIs);
		obj = deSer.deserialize(obj);
		deSer.close();
		baIs.close();
		return (T) obj;
	}
}
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require "storage_helper"
require "net/http"
require "uri"
require "zlib"
describe Google::Cloud::Storage, :signed_url, :v4, :storage do
  let :bucket do
    storage.bucket(bucket_name) ||
      safe_gcs_execute { storage.create_bucket(bucket_name) }
  end
  let(:bucket_name) { $bucket_names.first }

  let(:files) do
    { logo: { path: "acceptance/data/CloudPlatform_128px_Retina.png" },
      big: { path: "acceptance/data/three-mb-file.tif" } }
  end

  before do
    # always create the bucket
    bucket
  end

  after do
    bucket.files(versions: true).all { |f| f.delete generation: true rescue nil }
  end

  describe Google::Cloud::Storage::Project, :signed_url do
    it "should create a signed read url version v4 with space in file name" do
      local_file = File.new files[:logo][:path]
      file = bucket.create_file local_file, "CloudLogoSignedUrl GetBucket.png"

      five_min_from_now = 5 * 60
      url = storage.signed_url bucket.name, file.name, method: "GET",
                               expires: five_min_from_now, version: :v4

      uri = URI url
      http = Net::HTTP.new uri.host, uri.port
      http.use_ssl = true
      http.ca_file ||= ENV["SSL_CERT_FILE"] if ENV["SSL_CERT_FILE"]

      resp = http.get uri.request_uri
      _(resp.code).must_equal "200"

      Tempfile.open ["google-cloud", ".png"] do |tmpfile|
        tmpfile.binmode
        tmpfile.write resp.body
        _(tmpfile.size).must_equal local_file.size
        _(File.read(local_file.path, mode: "rb")).must_equal File.read(tmpfile.path, mode: "rb")
      end
    end

    it "should create a signed POST url version v4 with space in file name" do
      # Renamed misleading local: the value is one hour, not five minutes.
      one_hour_from_now = 60 * 60
      url = storage.signed_url bucket.name,
                               "CloudLogoProjectSignedUrl Post.png",
                               method: "POST",
                               expires: one_hour_from_now,
                               headers: { "x-goog-resumable" => "start" },
                               version: :v4

      uri = URI url
      https = Net::HTTP.new uri.host,uri.port
      https.use_ssl = true
      req = Net::HTTP::Post.new url, { "X-Goog-Resumable" => "start" }

      resp = https.request(req)
      _(resp.message).must_equal "Created"
      _(resp.code).must_equal "201"
    end
  end

  describe Google::Cloud::Storage::Bucket, :signed_url do
    it "should create a signed read url version v4 with space in file name" do
      local_file = File.new files[:logo][:path]
      file = bucket.create_file local_file, "CloudLogoSignedUrl GetBucket.png"

      five_min_from_now = 5 * 60
      url = bucket.signed_url file.name, method: "GET",
                              expires: five_min_from_now, version: :v4

      uri = URI url
      http = Net::HTTP.new uri.host, uri.port
      http.use_ssl = true
      http.ca_file ||= ENV["SSL_CERT_FILE"] if ENV["SSL_CERT_FILE"]

      resp = http.get uri.request_uri
      _(resp.code).must_equal "200"

      Tempfile.open ["google-cloud", ".png"] do |tmpfile|
        tmpfile.binmode
        tmpfile.write resp.body
        _(tmpfile.size).must_equal local_file.size
        _(File.read(local_file.path, mode: "rb")).must_equal File.read(tmpfile.path, mode: "rb")
      end
    end

    it "should create a signed read url v4 using IAM signBlob API" do
      local_file = File.new files[:logo][:path]
      file = bucket.create_file local_file, "CloudLogoSignedUrlGetBucket.png"

      iam_client = Google::Apis::IamcredentialsV1::IAMCredentialsService.new

      # Get the environment configured authorization
      iam_client.authorization = bucket.service.credentials.client

      # Only defined when using a service account
      issuer = iam_client.authorization.issuer
      signer = lambda do |string_to_sign|
        request = Google::Apis::IamcredentialsV1::SignBlobRequest.new(
          payload: string_to_sign
        )
        resource = "projects/-/serviceAccounts/#{issuer}"
        response = iam_client.sign_service_account_blob resource, request
        response.signed_blob
      end

      five_min_from_now = 5 * 60
      url = bucket.signed_url file.name,
                              method: "GET",
                              expires: five_min_from_now,
                              version: :v4,
                              issuer: issuer,
                              signer: signer

      uri = URI url
      http = Net::HTTP.new uri.host, uri.port
      http.use_ssl = true
      http.ca_file ||= ENV["SSL_CERT_FILE"] if ENV["SSL_CERT_FILE"]

      resp = http.get uri.request_uri
      _(resp.code).must_equal "200"

      Tempfile.open ["google-cloud", ".png"] do |tmpfile|
        tmpfile.binmode
        tmpfile.write resp.body
        _(tmpfile.size).must_equal local_file.size
        _(File.read(local_file.path, mode: "rb")).must_equal File.read(tmpfile.path, mode: "rb")
      end
    end

    it "should create a signed read url to list objects with version v4" do
      local_file = File.new files[:logo][:path]
      file = bucket.create_file local_file, "CloudLogoSignedUrlGetBucket.png"

      five_min_from_now = 5 * 60
      url = bucket.signed_url method: "GET", expires: five_min_from_now, version: :v4

      uri = URI url
      _(uri.path).must_equal "/#{bucket_name}"
      http = Net::HTTP.new uri.host, uri.port
      http.use_ssl = true
      http.ca_file ||= ENV["SSL_CERT_FILE"] if ENV["SSL_CERT_FILE"]

      resp = http.get uri.request_uri
      _(resp.code).must_equal "200"
      _(resp.body).must_match "CloudLogoSignedUrlGetBucket.png" # in XML
    end

    it "should create a signed POST url version v4" do
      # Renamed misleading local: the value is one hour, not five minutes.
      one_hour_from_now = 60 * 60
      url = bucket.signed_url "CloudLogoBucketSignedUrlPost.png",
                              method: "POST",
                              expires: one_hour_from_now,
                              headers: { "x-goog-resumable" => "start"},
                              version: :v4

      uri = URI url
      https = Net::HTTP.new uri.host,uri.port
      https.use_ssl = true
      req = Net::HTTP::Post.new url, { "x-goog-resumable" => "start" }
      req.body = "abc123"

      resp = https.request(req)
      _(resp.message).must_equal "Created"
      _(resp.code).must_equal "201"
    end
  end

  describe Google::Cloud::Storage::File, :signed_url do
    it "should create a signed read url version v4 with space in file name" do
      local_file = File.new files[:logo][:path]
      file = bucket.create_file local_file, "CloudLogoSignedUrl GetFile.png"

      five_min_from_now = 5 * 60
      url = file.signed_url method: "GET",
                            expires: five_min_from_now, version: :v4

      uri = URI url
      http = Net::HTTP.new uri.host, uri.port
      http.use_ssl = true
      http.ca_file ||= ENV["SSL_CERT_FILE"] if ENV["SSL_CERT_FILE"]

      resp = http.get uri.request_uri
      _(resp.code).must_equal "200"

      Tempfile.open ["google-cloud", ".png"] do |tmpfile|
        tmpfile.binmode
        tmpfile.write resp.body
        _(tmpfile.size).must_equal local_file.size
        _(File.read(local_file.path, mode: "rb")).must_equal File.read(tmpfile.path, mode: "rb")
      end
    end

    it "should create a signed POST url version v4 with space in file name" do
      # Renamed misleading local: the value is one hour, not five minutes.
      one_hour_from_now = 60 * 60
      file = bucket.file "CloudLogoFileSignedUrl Post.png", skip_lookup: true
      url = file.signed_url method: "POST",
                            expires: one_hour_from_now,
                            headers: { "x-goog-resumable" => "start"},
                            version: :v4

      uri = URI url
      https = Net::HTTP.new uri.host,uri.port
      https.use_ssl = true
      req = Net::HTTP::Post.new url, { "x-goog-resumable" => "start" }
      req.body = "abc123"

      resp = https.request(req)
      _(resp.message).must_equal "Created"
      _(resp.code).must_equal "201"
    end
  end
end
|
import { InputElement } from "./inputElement";
export class InputTextElement extends InputElement {
    // Single-line text input: forwards every supplied attribute to the base
    // input element while forcing the element type to 'text'.
    constructor(attributes) {
        const merged = Object.assign({}, attributes, { type: 'text' });
        super(merged, 'input');
    }
}
|
<?hh // strict
namespace NS_mixed;
// Demonstrates Hack's `mixed` type: a constant, a property, and accessor
// methods that can hold/accept a value of any type.
class C {
  // Constant typed `mixed` but initialized with a string.
  const mixed THING = 'abc';

  // Backing store; starts as a bool and may hold any type after setProp().
  private mixed $prop = true;

  // Replace the stored value with $val (any type accepted).
  public function setProp(mixed $val): void {
    $this->prop = $val;
  }

  // Return whatever value is currently stored.
  public function getProp(): mixed {
    return $this->prop;
  }
}
|
/* Copyright 2014 Ooyala, Inc. All rights reserved.
*
* This file is licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and limitations under the License.
*/
package common
import (
"crypto/rand"
"io"
)
// randomChars is the alphabet used for generated identifiers.
var randomChars = []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")

// NOTE[jigish]: yes, i know this has modulo bias. i don't care. we don't need a truly random string, just
// one that won't collide often.

// CreateRandomID returns a random identifier of the given length, built
// from ASCII letters and digits.
func CreateRandomID(size int) string {
	buf := make([]byte, size)
	// Error deliberately ignored: if rand.Reader fails the process is
	// unusable anyway (see the note above).
	io.ReadFull(rand.Reader, buf)
	alphabetLen := byte(len(randomChars))
	for i := range buf {
		buf[i] = randomChars[buf[i]%alphabetLen]
	}
	return string(buf)
}
|
/* (C) Copr. 1986-92 Numerical Recipes Software ?421.1-9. */
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "psrgeom.h"
/* Print a fatal error message for the numerical routines and abort. */
void nrerror(char *error_text)
{
	fprintf(stderr,
	        "Numerical recipes run time error...\r\n"
	        "%s\r\n"
	        "... now exiting to system\r\n",
	        error_text);
	exit(1);
}

/* Evaluate the Chebyshev series c[0..m-1] at x (x must lie in [a,b]) using
 * Clenshaw's recurrence. */
double chebev(double a, double b, double c[], int m, double x)
{
	double dj = 0.0, djp1 = 0.0, saved, t, two_t;
	int k;

	if ((x - a) * (x - b) > 0.0)
		nrerror("x not in range in routine CHEBEV");
	/* Map x from [a,b] onto the canonical interval [-1,1]. */
	t = (2.0 * x - a - b) / (b - a);
	two_t = 2.0 * t;
	for (k = m - 1; k >= 1; k--) {
		saved = dj;
		dj = two_t * dj - djp1 + c[k];
		djp1 = saved;
	}
	return t * dj - djp1 + 0.5 * c[0];
}
#define NUSE1 5
#define NUSE2 5
/* Evaluate the auxiliary gamma-related functions used by bessik via
 * Chebyshev expansions (coefficient tables c1, c2): on return,
 * *gampl = 1/Gamma(1+x) and *gammi = 1/Gamma(1-x) are formed from the
 * two expansion values *gam1 and *gam2.  Valid for |x| <= 1/2. */
void beschb(double x, double *gam1, double *gam2, double *gampl, double *gammi)
{
	double xx;
	/* Chebyshev coefficients; only the first NUSE1/NUSE2 terms are used. */
	static double c1[] = {
		-1.142022680371168e0,6.5165112670737e-3,
		3.087090173086e-4,-3.4706269649e-6,6.9437664e-9,
		3.67795e-11,-1.356e-13};
	static double c2[] = {
		1.843740587300905e0,-7.68528408447867e-2,
		1.2719271366546e-3,-4.9717367042e-6,-3.31261198e-8,
		2.423096e-10,-1.702e-13,-1.49e-15};

	/* Map x in [-1/2, 1/2] to xx in [-1, 1] for the expansions. */
	xx=8.0*x*x-1.0;
	*gam1=chebev(-1.0,1.0,c1,NUSE1,xx);
	*gam2=chebev(-1.0,1.0,c2,NUSE2,xx);
	*gampl= *gam2-x*(*gam1);
	*gammi= *gam2+x*(*gam1);
}
#undef NUSE1
#undef NUSE2
#define EPS 1.0e-16
#define FPMIN 1.0e-30
#define MAXIT 10000
#define XMIN 2.0
//#define PI 3.141592653589793 /* Already defined elsewhere in PSRGEOM */
/* Modified Bessel functions: on return *ri = I_nu(x), *rk = K_nu(x) and
 * *rip, *rkp their derivatives, for x > 0 and xnu >= 0 (Numerical Recipes
 * routine bessik). */
void bessik(double x, double xnu, double *ri, double *rk, double *rip, double *rkp)
/* I've made a few minor changes: testing the output pointers, and only
 * writing to them if they are not NULL.
 */
{
	int i,l,nl;
	double a,a1,b,c,d,del,del1,delh,dels,e,f,fact,fact2,ff,gam1,gam2,
		gammi,gampl,h,p,pimu,q,q1,q2,qnew,ril,ril1,rimu,rip1,ripl,
		ritemp,rk1,rkmu,rkmup,rktemp,s,sum,sum1,x2,xi,xi2,xmu,xmu2;

	if (x <= 0.0 || xnu < 0.0) nrerror("bad arguments in bessik");
	/* Split the order: xnu = nl + xmu with |xmu| <= 1/2. */
	nl=(int)(xnu+0.5);
	xmu=xnu-nl;
	xmu2=xmu*xmu;
	xi=1.0/x;
	xi2=2.0*xi;
	h=xnu*xi;
	if (h < FPMIN) h=FPMIN;
	/* Continued fraction CF1 for I'_nu/I_nu, evaluated by the modified
	 * Lentz method (b, c, d are the usual Lentz work variables). */
	b=xi2*xnu;
	d=0.0;
	c=h;
	for (i=1;i<=MAXIT;i++) {
		b += xi2;
		d=1.0/(b+d);
		c=b+1.0/c;
		del=c*d;
		h=del*h;
		if (fabs(del-1.0) < EPS) break;
	}
	if (i > MAXIT) nrerror("x too large in bessik; try asymptotic expansion");
	/* Downward recurrence for I (unnormalized, seeded with FPMIN). */
	ril=FPMIN;
	ripl=h*ril;
	ril1=ril;
	rip1=ripl;
	fact=xnu*xi;
	for (l=nl;l>=1;l--) {
		ritemp=fact*ril+ripl;
		fact -= xi;
		ripl=fact*ritemp+ril;
		ril=ritemp;
	}
	f=ripl/ril;
	if (x < XMIN) {
		/* Small x: power series for K_mu and K_{mu+1}. */
		x2=0.5*x;
		pimu=PI*xmu;
		fact = (fabs(pimu) < EPS ? 1.0 : pimu/sin(pimu));
		d = -log(x2);
		e=xmu*d;
		fact2 = (fabs(e) < EPS ? 1.0 : sinh(e)/e);
		beschb(xmu,&gam1,&gam2,&gampl,&gammi);
		ff=fact*(gam1*cosh(e)+gam2*fact2*d);
		sum=ff;
		e=exp(e);
		p=0.5*e/gampl;
		q=0.5/(e*gammi);
		c=1.0;
		d=x2*x2;
		sum1=p;
		for (i=1;i<=MAXIT;i++) {
			ff=(i*ff+p+q)/(i*i-xmu2);
			c *= (d/i);
			p /= (i-xmu);
			q /= (i+xmu);
			del=c*ff;
			sum += del;
			del1=c*(p-i*ff);
			sum1 += del1;
			if (fabs(del) < fabs(sum)*EPS) break;
		}
		if (i > MAXIT) nrerror("bessk series failed to converge");
		rkmu=sum;
		rk1=sum1*xi2;
	} else {
		/* Large x: continued fraction CF2 for K_mu. */
		b=2.0*(1.0+x);
		d=1.0/b;
		h=delh=d;
		q1=0.0;
		q2=1.0;
		a1=0.25-xmu2;
		q=c=a1;
		a = -a1;
		s=1.0+q*delh;
		for (i=2;i<=MAXIT;i++) {
			a -= 2*(i-1);
			c = -a*c/i;
			qnew=(q1-b*q2)/a;
			q1=q2;
			q2=qnew;
			q += c*qnew;
			b += 2.0;
			d=1.0/(b+a*d);
			delh=(b*d-1.0)*delh;
			h += delh;
			dels=q*delh;
			s += dels;
			if (fabs(dels/s) < EPS) break;
		}
		if (i > MAXIT) nrerror("bessik: failure to converge in cf2");
		h=a1*h;
		rkmu=sqrt(PI/(2.0*x))*exp(-x)/s;
		rk1=rkmu*(xmu+x+0.5-h)*xi;
	}
	/* Combine via the Wronskian to normalize I, then recur K upward. */
	rkmup=xmu*xi*rkmu-rk1;
	rimu=xi/(f*rkmu-rkmup);
	if (ri != NULL)
		*ri=(rimu*ril1)/ril;
	if (rip != NULL)
		*rip=(rimu*rip1)/ril;
	for (i=1;i<=nl;i++) {
		rktemp=(xmu+i)*xi2*rk1+rkmu;
		rkmu=rk1;
		rk1=rktemp;
	}
	if (rk != NULL)
		*rk=rkmu;
	if (rkp != NULL)
		*rkp=xnu*xi*rkmu-rk1;
}
#undef EPS
#undef FPMIN
#undef MAXIT
#undef XMIN
|
using Archiving.Core.Operation;
using Archiving.Entity;
using Archiving.Operation;
using Archiving.Operation.FileExt.Core;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Archiving.Core.Options;
namespace Archiving.Operation.FileExt
{
/// <summary>
/// Operation that deletes the file referenced by a <see cref="FileEntity"/>
/// through the supplied <see cref="IFileTask"/>.
/// </summary>
public class DeleteFileOperation : IObserverOperation
{
    // File-system task used to perform the physical delete.
    private readonly IFileTask _man;

    /// <summary>
    /// Validates dependencies. <paramref name="options"/> is used for
    /// validation only (DoDelete must be enabled) and is not stored.
    /// </summary>
    public DeleteFileOperation(
        IFileTask man,
        OperationOptions options)
    {
        _man = NamedNullException.Assert(man, nameof(man));
        NamedNullException.Assert(options, nameof(options));
        NotTrueException.Assert(options.DoDelete, nameof(options.DoDelete));
    }

    /// <summary>Nothing to release.</summary>
    public void Dispose() { }

    // Deletion requires exclusive access to the entity's resource.
    public bool LockResource => true;

    public int Priority => PriorityDefinationRefTable.FileDelete;

    /// <summary>
    /// Deletes the file at <c>entity.FullPath</c>, then calls
    /// <c>entity.LockBy(this)</c>.
    /// NOTE(review): exact LockBy semantics assumed (marks the entity as
    /// handled by this operation) — confirm against FileEntity.
    /// </summary>
    public void Handle(FileEntity entity)
    {
        _man.Delete(entity.FullPath);
        entity.LockBy(this);
    }
}
}
|
TestBattle:
; Debug battle routine. The immediate `ret` below disables it: everything
; from .loop on is unreachable until the `ret` is removed.
	ret

.loop
	call GBPalNormal

; Don't mess around
; with obedience.
	ld a, %10000000 ; EARTHBADGE
	ld [wObtainedBadges], a

	ld hl, wFlags_D733
	set BIT_TEST_BATTLE, [hl]

; Reset the party.
	ld hl, wPartyCount
	xor a
	ld [hli], a
	dec a
	ld [hl], a

; Give the player a
; level 20 Rhydon.
	ld a, RHYDON
	ld [wcf91], a
	ld a, 20
	ld [wCurEnemyLVL], a
	xor a
	ld [wMonDataLocation], a
	ld [wCurMap], a
	call AddPartyMon

; Fight against a
; level 20 Rhydon.
	ld a, RHYDON
	ld [wCurOpponent], a

	predef InitOpponent

; When the battle ends,
; do it all again.
	ld a, 1
	ld [wUpdateSpritesEnabled], a
	ld [H_AUTOBGTRANSFERENABLED], a
	jr .loop
|
-- Unused file
-- Schema for reservation records.
CREATE TABLE RESERVATION (
    ID BIGINT PRIMARY KEY AUTO_INCREMENT NOT NULL,
    RESERVATION_NAME VARCHAR(255) NOT NULL,
    -- NOTE(review): "AS CURRENT_TIMESTAMP" declares a computed column
    -- (H2 syntax); if a creation timestamp was intended, the usual form is
    -- "DEFAULT CURRENT_TIMESTAMP" -- confirm before reusing this schema.
    ADDED TIMESTAMP AS CURRENT_TIMESTAMP NOT NULL
);
|
package queue
// Tube is a named sub-queue within a Queue.
type Tube struct {
	name     string // tube identifier
	queue    *Queue // owning queue
	reserved int    // count of reserved items — TODO confirm exact semantics against Queue usage
}
|
# 广告平台业务说明
广告平台使用 redis 存储用户点击广告数据,数据结构如下
```python
DeviceID: {
    aid: {
        "view": 10,       //剩余访问次数(int)
        "clicked": False, //是否点击过(boolean)
    },
    aid: {
        "view": 10,
        "clicked": True,
    },
    ...
}
数据以 *key:value* 的形式存储于 redis 中,key 是设备的 id,value 为广告数组
## 存储数据量评估
### 缓存数据存储量(redis)
- 单条广告数据 **37 字节**
- 每个设备持有的广告数目商务决定,现阶段不超 **100**
- 日活跃用户 **500万**
- 数据只存储一天,即一天一清
总存储数据量 37 × 100 × 500万 = 18,500,000,000 字节 ≈ **18500MB(约18.5GB)**
### 持久化数据存储量(mongodb)
- 单条广告数据 **最大10k**
- 广告数据总数(当前8,预计不超过1000条)
## 数据访问量评估
### 广告请求规则
单个设备每日每次从接口最多取**20**条广告数据用以用户点击展示,当用户完成下列动作表示单条广告生命
周期结束:
- 用户点击完成
- 展示次数完成
当用户设备的所有广告生命周期结束,开始请求下一批广告
因此接口**访问频率**由以下因素决定
- 用户点击广告频率 (user_click_frequency)
- 广告展示频率 (ad_show_frequency)
- 总的广告数据量(由商务决定) (ad_count)
- 广告位数量 (ad_postion_count)
### 预计每个设备每日最大请求数:
- 单组广告生命周期 (user_click_frequency, ad_show_frequency)
- 用户活跃度
这两个因素决定单个设备对广告接口的每日访问频度
## 广告服务当前状态
- 单机tornado, 单进程服务
- 单点redis
- mongodb adesk01 集群
|
#!/bin/bash

# Test suite for ../checks/swap-usage-max-percent.
# Each case runs the check with a mocked swap usage and a configured limit,
# then verifies the check's exit code.

# run_case <description> <mock-usage> <max-percent> <expected-exit-code>
run_case() {
    echo " >> $1"
    MOCK_SWAP_USAGE="$2" MAX_ALLOWED_PERCENTAGE="$3" ../checks/swap-usage-max-percent
    if [[ $? != "$4" ]]; then
        echo " >> Test failed."
        exit 1
    fi
}

run_case "Assert that current usage of 50% is higher than maximum of 40%" 50 40 1
run_case "Assert that swap is not used" 0 0 0
run_case "Assert 10% is ok, when maximum is defined at 15%" 10 15 0

exit 0
|
#!/usr/bin/env bash
# Suspends computer and automatically waits it up at specified date/time parameter
# Source: http://askubuntu.com/questions/61708/automatically-sleep-and-wake-up-at-specific-times
#
# Takes a 24hour time HH:MM and date YYYY-MM-DD as its arguments
# Example:
# suspend_until 9:30
# suspend_until 18:45
# suspend_until 18:45 2016-02-05
# suspend_until 14:23 && echo "Finished"
# Suspends the computer and wakes it via the RTC at the given time.
#
# Usage:
#   suspend_until HH:MM [YYYY-MM-DD]
#
# With only a time argument the wakeup is scheduled for today, or for
# tomorrow when that time has already passed.
#
# Fixes over the previous revision:
#  - `exit` inside a function kills the caller's shell when this file is
#    sourced; usage errors now `return 1` instead;
#  - the usage text named the wrong function ("sleep_until");
#  - command substitutions and variable expansions are quoted.
function suspend_until() {
    # Argument check
    if [ $# -lt 1 ] || [ $# -gt 2 ]; then
        echo "Usage: suspend_until HH:MM "
        echo "... or: suspend_until HH:MM YYYY-MM-DD"
        return 1
    fi

    local NOW DESIRED
    NOW=$(date +%s)
    if [ $# -eq 1 ]; then
        # Time only: schedule for today, or for tomorrow if already past.
        DESIRED=$(date +%s -d "$1")
        if [ "$DESIRED" -lt "$NOW" ]; then
            DESIRED=$((DESIRED + 24 * 60 * 60))
        fi
    else
        DESIRED=$(date +%s -d "$1 $2")
    fi
    echo "Wakeup time: $(date -d "@$DESIRED")"

    # Kill rtcwake if already running
    sudo killall rtcwake 2>/dev/null || true

    # Set RTC wakeup time.
    # N.B. change "mem" for the suspend option; see "man rtcwake".
    sudo rtcwake -l -m mem -t "$DESIRED" 1>/dev/null &

    # feedback
    echo "Suspending..."

    # give rtcwake some time to make its stuff
    sleep 2

    # then suspend
    # N.B. dont usually require this bit
    #sudo pm-suspend

    # Any commands you want to launch after wakeup can be placed here.
    # Remember: sudo may have expired by now.
    # Wake up with monitor enabled; change "on" for "off" if you want the
    # monitor to be disabled on wake.
    xset dpms force on
}
|
<?php
// Entry point: if the request carries a "data" payload, run it through the
// parser and echo whatever the parser returns.
// NOTE(review): $_REQUEST['data'] is used unsanitized and the parser output
// is echoed straight back — review for XSS before exposing this publicly.
$result = '';
if ( !empty($_REQUEST['data']) ) {
    $app = new PARSER_DOCX();
    $result = $app->parse($_REQUEST['data']);
}
echo $result;
//
class PARSER_DOCX {
    function __construct() {
    }

    /**
     * Extract the text of the first <h1> element from the given markup.
     *
     * Fixes over the previous revision:
     *  - the pattern '~/<h1>(.?*)</h1>~imsU' was invalid (stray "/" after
     *    the delimiter and the illegal quantifier sequence ".?*"), so
     *    preg_match() always failed;
     *  - the captured heading is now returned (the method previously
     *    always returned '') and the debugging print_r() call is removed.
     *
     * @access public
     * @param string $data raw markup to scan
     * @return string body of the first <h1> element, or '' when none found
     */
    public function parse($data) {
        $result = '';
        if ( !empty($data) ) {
            $matches = array();
            // The U modifier makes (.*) ungreedy, stopping at the first
            // closing </h1> tag.
            if ( preg_match('~<h1>(.*)</h1>~imsU', $data, $matches) ) {
                $result = $matches[1];
            }
        }
        return $result;
    }
}
?>
|
!> 通过简单的粒子间距与光滑长度相匹配进行最近相邻粒子搜索的子程序。详见第 4 章 148 页。
!> subroutine to calculate the smoothing funciton for each particle and
!> the interaction parameters used by the sph algorithm. interaction
!> pairs are determined by directly comparing the particle distance
!> with the corresponding smoothing length.
!> see p.148 in chapter 4
!> Nearest-neighbour particle search by direct all-pairs comparison: each
!> pair's separation is checked against the scaled smoothing length, and the
!> kernel and its derivatives are evaluated for interacting pairs.
!> O(ntotal**2); see p.148 in chapter 4.
subroutine direct_find(itimestep, ntotal, hsml, x, niac, pair_i, pair_j, w, dwdx, countiac)

   use sph_kind, only: rk
   use parameter
   implicit none

   !> current time step (only used to decide when to print statistics)
   integer, intent(in) :: itimestep
   !> number of particles in simulation
   integer, intent(in) :: ntotal
   !> smoothing length of each particle
   real(rk), intent(in) :: hsml(maxn)
   !> coordinates of all particles
   real(rk), intent(in) :: x(dim, maxn)
   !> number of interaction pairs found
   integer, intent(out) :: niac
   !> first partner of each interaction pair
   integer, intent(out) :: pair_i(max_interaction)
   !> second partner of each interaction pair
   integer, intent(out) :: pair_j(max_interaction)
   !> kernel value for each interaction pair
   real(rk), intent(out) :: w(max_interaction)
   !> derivative of the kernel with respect to x, y and z
   real(rk), intent(out) :: dwdx(dim, max_interaction)
   !> number of neighbouring particles per particle
   integer, intent(out) :: countiac(ntotal)

   integer :: i, j, d, sumiac, maxiac, miniac, noiac, maxp, minp, scale_k
   real(rk) :: dxiac(dim), driac, r, mhsml, tdwdx(dim)

   !> support-domain scale factor of the selected smoothing kernel:
   !> skf = 1, cubic spline kernel by w4 - spline (monaghan 1985)
   !>     = 2, gauss kernel (gingold and monaghan 1981)
   !>     = 3, quintic kernel (morris 1997)
   !> NOTE(review): scale_k stays undefined for any other skf value —
   !> confirm skf is validated elsewhere.
   if (skf == 1) then; scale_k = 2
   else if (skf == 2) then; scale_k = 3
   else if (skf == 3) then; scale_k = 3
   end if

   countiac(1:ntotal) = 0
   niac = 0

   ! all-pairs sweep over i < j
   do i = 1, ntotal - 1
      do j = i + 1, ntotal
         ! squared particle separation
         dxiac(1) = x(1, i) - x(1, j)
         driac = dxiac(1)*dxiac(1)
         do d = 2, dim
            dxiac(d) = x(d, i) - x(d, j)
            driac = driac + dxiac(d)*dxiac(d)
         end do
         ! pair-averaged smoothing length
         mhsml = (hsml(i) + hsml(j))/2.0_rk
         if (sqrt(driac) < scale_k*mhsml) then
            if (niac < max_interaction) then

               !> neighboring pair list, and total interaction number and
               !> the interaction number for each particle
               niac = niac + 1
               pair_i(niac) = i
               pair_j(niac) = j
               r = sqrt(driac)
               countiac(i) = countiac(i) + 1
               countiac(j) = countiac(j) + 1

               !> kernel and derivatives of kernel
               call kernel(r, dxiac, mhsml, w(niac), tdwdx)
               do d = 1, dim
                  dwdx(d, niac) = tdwdx(d)
               end do
            else
               error stop ' >>> error <<< : too many interactions'
            end if
         end if
      end do
   end do

   !> statistics for the interaction
   sumiac = 0
   maxiac = 0
   miniac = 1000
   noiac = 0
   do i = 1, ntotal
      sumiac = sumiac + countiac(i)
      if (countiac(i) > maxiac) then
         maxiac = countiac(i)
         maxp = i
      end if
      if (countiac(i) < miniac) then
         miniac = countiac(i)
         minp = i
      end if
      if (countiac(i) == 0) noiac = noiac + 1
   end do

   ! periodic report of neighbour statistics
   if (mod(itimestep, print_step) == 0) then
      if (int_stat) then
         print *, ' >> statistics: interactions per particle:'
         print 100, '**** particle: ', maxp, ' maximal interactions: ', maxiac
         print 100, '**** particle: ', minp, ' minimal interactions: ', miniac
         print 101, '**** average : ', real(sumiac)/real(ntotal)
         print 100, '**** total pairs : ', niac
         print 100, '**** particles with no interactions: ', noiac
      end if
   end if

101 format(1x,*(a,g0.2))
100 format(1x,*(a,i0))

end subroutine direct_find
|
module AmaLayout
  # Renders BreadcrumbsOnRails elements as a sequence of <li> links.
  class BreadcrumbBuilder < BreadcrumbsOnRails::Breadcrumbs::Builder
    # Joins every rendered crumb with the configured separator.
    def render
      rendered = @elements.map do |element|
        render_element(element)
      end
      rendered.join(@options[:separator])
    end

    private

    # Resolves an element's label and target path, then renders it.
    # A missing path (or a path helper returning nil/false) falls back to '#'.
    def render_element(element)
      name = compute_name(element)
      path = element.path ? (compute_path(element) || '#') : '#'
      render_list_element(name, path, element)
    end

    # Disabled crumbs render as inert '#' links; everything else is a
    # normal link with the element's remaining options.
    # Note: delete(:disabled) intentionally strips the flag before the
    # options reach link_to.
    def render_list_element(name, path, element)
      disabled = element.options.delete(:disabled)
      if disabled
        @context.content_tag :li, @context.link_to(name, '#', class: 'breadcrumbs__link--disabled', rel: 'nofollow')
      else
        @context.content_tag :li, @context.link_to(name, path, element.options)
      end
    end
  end
end
|
library stamp_image;
import 'dart:io';
import 'dart:typed_data';
import 'dart:ui';
import 'package:flutter/material.dart';
import 'package:flutter/rendering.dart';
import 'package:path_provider/path_provider.dart';
class StampImage {
  /// Creates a watermarked copy of the existing [image] file, stacking the
  /// given [children] widgets on top of it, and hands the resulting file to
  /// [onSuccess]. When [saveFile] is true and [savePath] is set, a copy is
  /// also written there.
  ///
  /// Fixes over the previous revision:
  ///  - the wait for the overlay's initial entries was a synchronous
  ///    `while (true)` busy-loop that could hang the UI thread; it now
  ///    yields to the event loop between polls;
  ///  - [saveFile] and [savePath] were accepted but never forwarded to the
  ///    underlying [StampWidget], so the optional save never happened.
  static void create({
    required BuildContext context,
    required File image,
    required List<Widget> children,
    bool? saveFile = false,
    String? savePath,
    required Function(File) onSuccess,
  }) async {
    OverlayState? overlayState = Overlay.of(context, rootOverlay: true);
    OverlayEntry? entry;

    /// Wait until initial entries are available, yielding between polls.
    while (overlayState!.widget.initialEntries.isEmpty) {
      await Future.delayed(const Duration(milliseconds: 16));
    }

    OverlayEntry? lastEntry = overlayState.widget.initialEntries.first;
    entry = OverlayEntry(
      builder: (context) {
        return StampWidget(
          image: image,
          children: children,
          saveFile: saveFile,
          savePath: savePath,
          onSuccess: (file) => onSuccess(file),
        );
      },
    );

    /// Keep the root overlay on top and the watermark entry below it, so
    /// the watermark entry itself stays invisible while it renders.
    overlayState.insert(entry, above: lastEntry);
  }
}
/// Helper widget that renders [image] with [children] stacked on top inside
/// a RepaintBoundary so the composited result can be captured as an image.
class StampWidget extends StatefulWidget {
  /// Watermark widgets drawn over the image.
  final List<Widget> children;

  /// Source image file to watermark.
  final File? image;

  /// When true, additionally save a copy to [savePath].
  final bool? saveFile;

  /// Optional extra destination path for the composited image.
  final String? savePath;

  /// Receives the composited image file once it has been written.
  final Function(File) onSuccess;

  StampWidget({
    required this.children,
    required this.image,
    this.saveFile,
    this.savePath,
    required this.onSuccess,
  });

  @override
  _StampWidgetState createState() => _StampWidgetState();
}
class _StampWidgetState extends State<StampWidget> {
  /// Key for the RepaintBoundary framing the image plus watermark.
  final frameKey = GlobalKey();

  /// Captures the RepaintBoundary as PNG bytes, writes them to a file in
  /// the platform documents directory, and reports it via [onSuccess].
  Future showResult() async {
    Uint8List? currentFrame = await getUint8List(frameKey);
    Directory? dir = Platform.isAndroid
        ? await getExternalStorageDirectory()
        : await getApplicationDocumentsDirectory();
    String? path = dir?.path;
    final file = createFile(
      '$path/stamp_image_${DateTime.now().toString()}.png',
      currentFrame,
    );

    /// When the caller requested it, persist a second copy at the
    /// caller-provided location.
    if (widget.saveFile == true && widget.savePath != null) {
      createFile(widget.savePath, currentFrame);
    }
    widget.onSuccess(file);
  }

  /// Writes [data] to [path] and returns the resulting file.
  ///
  /// Fix: the previous revision fired an unawaited async `file.create()`
  /// immediately before the synchronous write, racing it needlessly;
  /// writeAsBytesSync already creates the file.
  File createFile(String? path, Uint8List? data) {
    final file = File(path!);
    file.writeAsBytesSync(data!);
    return file;
  }

  /// Renders the widget under [widgetKey] to PNG bytes.
  Future<Uint8List?> getUint8List(GlobalKey widgetKey) async {
    // Give the frame a moment to paint before capturing it.
    await Future.delayed(Duration(milliseconds: 500));
    RenderObject? renderObject = widgetKey.currentContext?.findRenderObject();
    RenderRepaintBoundary boundary = renderObject as RenderRepaintBoundary;
    var image = await boundary.toImage(pixelRatio: 5.0);
    ByteData? byteData = await (image.toByteData(
      format: ImageByteFormat.png,
    ));
    return byteData?.buffer.asUint8List();
  }

  /// The caller-supplied watermark widgets (the old `.map((e) => e)`
  /// identity mapping is gone).
  List<Widget> generateWidget() => List<Widget>.of(widget.children);

  @override
  void initState() {
    super.initState();
    this.showResult();
  }

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      body: RepaintBoundary(
        key: frameKey,
        child: Stack(
          children: [
            Image.file(
              widget.image!,
              fit: BoxFit.cover,
            ),
            ...generateWidget()
          ],
        ),
      ),
    );
  }
}
|
# Publish script: builds the distribution on master and force-pushes the
# artifacts to the "builds" branch.
#
# Fixes over the previous revision:
#  - command substitutions inside [ ... ] are now quoted (the unquoted
#    `git log -1 --pretty=%B` comparison broke on any multi-word or
#    multi-line commit message);
#  - non-portable `exit -1` replaced with `exit 1`.
if [ "$(git branch --show-current)" != 'master' ]; then
    echo 'you are not in master branch' >&2
    exit 1
fi
yarn dist
rm -rf temp/*
mv dist temp/
mv bundles temp/
git checkout builds || exit 1
rm -rf bundles dist
cp -r temp/* ./
git checkout master README.md &&
git checkout master LICENSE.txt &&
git checkout master source &&
git checkout master inc-version.js &&
git checkout master package.json &&
git restore --staged . &&
node inc-version.js &&
git add . &&
# Amend when the previous commit already carries this version message,
# otherwise create a fresh commit from version.out.
if [ "$(git log -1 --pretty=%B)" = "$(cat version.out)" ]; then
    git commit -C HEAD --amend
else
    git commit -F version.out
fi &&
rm version.out &&
git push -f
git checkout .
git checkout master
rm -rf bundles dist
mv temp/* ./
|
import 'package:ui_bits/src/ui_bits_internal.dart';
/// Password text field with a tap-to-toggle visibility icon.
class BitInputPasswordField extends StatefulWidget {
  /// Supplies the field's label text and prefix icon.
  final FieldLabels messages;

  /// Backing field whose controller receives the typed value.
  final Field<String> field;

  /// Entry animation applied to the whole field; defaults to none.
  final BitAnimation animation;

  const BitInputPasswordField(
    this.messages, {
    this.field,
    this.animation = const BitNoAnimation(),
  });

  @override
  _BitInputPasswordFieldState createState() => _BitInputPasswordFieldState();
}
class _BitInputPasswordFieldState extends State<BitInputPasswordField> {
  // Whether the typed text is currently masked; flipped by the suffix icon.
  var _obscureText = true;

  @override
  Widget build(BuildContext context) {
    return widget.animation.wrapWidget(
      child: TextFormField(
        obscureText: _obscureText,
        controller: widget.field?.controller,
        decoration: InputDecoration(
          labelText: widget.messages.label,
          prefixIcon: BitInputFieldIcon(widget.messages.icon),
          suffixIcon: _buildSuffixIcon(context),
        ),
      ),
    );
  }

  // Eye icon that toggles _obscureText on tap, switching between the
  // "visibility" and "visibility_off" glyphs to mirror the current state.
  Widget _buildSuffixIcon(BuildContext context) {
    return GestureDetector(
      onTap: () => setState(() => _obscureText = !_obscureText),
      child: BitFadeInAnimationWidget(
        duration: context.animation.short,
        animateAfter: widget.animation.animateAfter,
        child: BitToggleAnimation(
          _obscureText,
          BitInputFieldIcon(Icons.visibility),
          BitInputFieldIcon(Icons.visibility_off),
        ),
      ),
    );
  }
}
|
use std::io::Cursor;
use rocket::request::Request;
use rocket::response::{Response, Responder};
use rocket::http::ContentType;
use rocket::http::Status;
/// API error payload: the HTTP status to respond with plus a
/// human-readable message (serialized to JSON by the `Responder` impl).
#[derive(Debug)]
pub struct ApiResponder {
    /// Status applied to the HTTP response.
    pub error: Status,
    /// Message placed in the JSON body.
    pub message: String
}
/// Wire shape of the JSON error body: `{"message": "..."}`.
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
struct RespBody {
    message: String
}
impl RespBody {
    /// Builds the serializable body from an `ApiResponder`'s message.
    pub fn new(api_error: &ApiResponder) -> RespBody {
        RespBody {
            message: api_error.message.clone(),
        }
    }
}
impl<'a> Responder<'a> for ApiResponder {
    /// Serializes the message as a JSON body and responds with
    /// `self.error` as the HTTP status.
    fn respond_to(self, _request: &Request) -> Result<Response<'a>, Status> {
        let body = serde_json::to_string(&RespBody::new(&self)).unwrap_or_default();
        Response::build()
            .header(ContentType::JSON)
            .status(self.error)
            .sized_body(Cursor::new(body))
            .ok()
    }
}
pub fn handle_404<'r>(req: &'r rocket::Request) -> rocket::response::Result<'r> {
let api_responder = ApiResponder {error: Status::NotFound, message: "Not Found.".to_string()};
let resp_body = serde_json::to_string(&RespBody::new(&api_responder)).unwrap_or("".to_string());
let resp = rocket::response::Response::build()
.header(ContentType::JSON)
.status(Status::NotFound)
.sized_body(Cursor::new(resp_body))
.finalize();
resp.respond_to(req)
}
pub fn handle_403<'r>(req: &'r rocket::Request) -> rocket::response::Result<'r> {
let api_responder = ApiResponder {error: Status::NotFound, message: "Access Denied.".to_string()};
let resp_body = serde_json::to_string(&RespBody::new(&api_responder)).unwrap_or("".to_string());
let resp = rocket::response::Response::build()
.header(ContentType::JSON)
.status(Status::Forbidden)
.sized_body(Cursor::new(resp_body))
.finalize();
resp.respond_to(req)
}
pub fn handle_401<'r>(req: &'r rocket::Request) -> rocket::response::Result<'r> {
let api_responder = ApiResponder {error: Status::NotFound, message: "Not Authorized.".to_string()};
let resp_body = serde_json::to_string(&RespBody::new(&api_responder)).unwrap_or("".to_string());
let resp = rocket::response::Response::build()
.header(ContentType::JSON)
.status(Status::Unauthorized)
.sized_body(Cursor::new(resp_body))
.finalize();
resp.respond_to(req)
}
pub fn handle_500<'r>(req: &'r rocket::Request) -> rocket::response::Result<'r> {
let api_responder = ApiResponder {error: Status::NotFound, message: "Server Error.".to_string()};
let resp_body = serde_json::to_string(&RespBody::new(&api_responder)).unwrap_or("".to_string());
let resp = rocket::response::Response::build()
.header(ContentType::JSON)
.status(Status::Unauthorized)
.sized_body(Cursor::new(resp_body))
.finalize();
resp.respond_to(req)
}
|
// Whether the lightbox modal is currently shown.
var modalOpen = false;
// DOM id of the car modal currently displayed (set by openModal, hidden
// again by closeModal).
var currentModal;

// Open the Modal: show the shared lightbox container plus the per-car
// modal identified by carLightbox (a DOM id), hide the main nav, and
// freeze page/table scrolling behind the overlay.
function openModal(carLightbox) {
    modalOpen = true;
    document.getElementById('lightBoxModal').style.display = "block";
    document.body.style.overflow = "hidden";
    document.getElementById("mainNav").style.display = "none";
    $(".table-large")[0].style.overflow = "hidden";
    currentModal = carLightbox;
    document.getElementById(currentModal).style.display = "block";
}
// Close the Modal: undo everything openModal changed — hide the lightbox
// container, restore scrolling and the main nav, and hide the last open
// car modal.
function closeModal() {
    modalOpen = false;
    document.getElementById('lightBoxModal').style.display = "none";
    document.body.style.overflow = "auto";
    document.getElementById("mainNav").style.display = "initial";
    $(".table-large")[0].style.overflow = "auto";
    // hide last open car modal
    document.getElementById(currentModal).style.display = "none";
}
// Current 1-based slide index.
// Fix: the declaration was commented out, so plusSlides() read an
// undeclared `slideIndex` and threw a ReferenceError on first use. The
// initial showSlides(slideIndex) call stays disabled because no modal is
// open at load time.
var slideIndex = 1;

// Next/previous controls
function plusSlides(n) {
    showSlides(slideIndex += n);
}

// Thumbnail image controls
function currentSlide(n) {
    showSlides(slideIndex = n);
}

// Console logging shorthand.
function print(args) {
    console.log(args);
}
// Displays slide n of the currently-open car modal, wrapping the index
// around at both ends, and highlights the matching thumbnail.
function showSlides(n) {
    var i;
    var curr_modal = document.getElementById(currentModal);
    var curr_imgs = curr_modal.getElementsByClassName("slideImgs")[0].children;
    var curr_slide = curr_modal.getElementsByClassName('slide')[0];
    var curr_thumbs = curr_modal.getElementsByClassName("thumbnails");

    // wrap the index: past the end -> first slide, before 1 -> last slide
    if (n > curr_imgs.length) {
        slideIndex = 1
    }
    if (n < 1) {
        slideIndex = curr_imgs.length
    }

    // deactivate all thumbnails (the slides themselves are swapped via src
    // below, not hidden)
    for (i = 0; i < curr_thumbs.length; i++) {
        curr_thumbs[i].className = curr_thumbs[i].className.replace(" active", "");
    }

    // show current slide
    curr_slide.children[0].src = curr_imgs[slideIndex - 1].src;
    // curr_slide.children[0].style.display = "block";
    curr_thumbs[slideIndex - 1].className += " active";
}
// close modal if it's open:
// a click on the dimmed backdrop itself (not on any child element)
// dismisses the lightbox
$("#lightBoxModal").on("click", function (e) {
    if (e.target === this) {
        closeModal();
    }
});

// Keyboard controls: Esc closes the modal, arrow keys page the slides.
$(document).on("keydown", function (e) {
    switch (e.keyCode) {
        case 27: // esc
            closeModal();
            break;
        case 37: // left
            plusSlides(-1);
            break;
        case 39: // right
            plusSlides(1);
            break;
        default:
    }
});
|
-- -----------------------------------------------------------------------------
--
-- DFA.hs, part of Alex
--
-- (c) Chris Dornan 1995-2000, Simon Marlow 2003
--
-- This module generates a DFA from a scanner by first converting it
-- to an NFA and then converting the NFA with the subset construction.
--
-- See the chapter on `Finite Automata and Lexical Analysis' in the
-- dragon book for an excellent overview of the algorithms in this
-- module.
--
-- ----------------------------------------------------------------------------}
module DFA(scanner2dfa) where
import AbsSyn
import qualified Map
import qualified Data.IntMap as IntMap
import NFA
import Sort ( msort, nub' )
import CharSet
import Data.Array ( (!) )
import Data.Maybe ( fromJust )
{- Defined in the Scan Module
-- (This section should logically belong to the DFA module but it has been
-- placed here to make this module self-contained.)
--
-- `DFA' provides an alternative to `Scanner' (described in the RExp module);
-- it can be used directly to scan text efficiently. Additionally it has an
-- extra place holder for holding action functions for generating
-- application-specific tokens. When this place holder is not being used, the
-- unit type will be used.
--
-- Each state in the automaton consist of a list of `Accept' values, descending
-- in priority, and an array mapping characters to new states. As the array
-- may only cover a sub-range of the characters, a default state number is
-- given in the third field. By convention, all transitions to the -1 state
-- represent invalid transitions.
--
-- A list of accept states is provided for as the original specification may
-- have been ambiguous, in which case the highest priority token should be
-- taken (the one appearing earliest in the specification); this can not be
-- calculated when the DFA is generated in all cases as some of the tokens may
-- be associated with leading or trailing context or start codes.
--
-- `scan_token' (see above) can deal with unconditional accept states more
-- efficiently than those associated with context; to save it testing each time
-- whether the list of accept states contains an unconditional state, the flag
-- in the first field of `St' is set to true whenever the list contains an
-- unconditional state.
--
-- The `Accept' structure contains the priority of the token being accepted
-- (lower numbers => higher priorities), the name of the token, a place holder
-- that can be used for storing the `action' function for constructing the
-- token from the input text and thge scanner's state, a list of start codes
-- (listing the start codes that the scanner must be in for the token to be
-- accepted; empty => no restriction), the leading and trailing context (both
-- `Nothing' if there is none).
--
-- The leading context consists simply of a character predicate that will
-- return true if the last character read is acceptable. The trailing context
-- consists of an alternative starting state within the DFA; if this `sub-dfa'
-- turns up any accepting state when applied to the residual input then the
-- trailing context is acceptable (see `scan_token' above).
type DFA a = Array SNum (State a)
type SNum = Int
data State a = St Bool [Accept a] SNum (Array Char SNum)
data Accept a = Acc Int String a [StartCode] (MB(Char->Bool)) (MB SNum)
type StartCode = Int
-}
-- Scanners are converted to DFAs by converting them to NFAs first. Converting
-- an NFA to a DFA works by identifying the states of the DFA with subsets of
-- the NFA. The PartDFA is used to construct the DFA; it is essentially a DFA
-- in which the states are represented directly by state sets of the NFA.
-- `nfa2pdfa' constructs the partial DFA from the NFA by searching for all the
-- transitions from a given list of state sets, initially containing the start
-- state of the partial DFA, until all possible state sets have been considered
-- The final DFA is then constructed with a `mk_dfa'.
-- | Convert a scanner to a DFA: build an NFA first, then determinise it
-- via the subset construction.
scanner2dfa:: Encoding -> Scanner -> [StartCode] -> DFA SNum Code
scanner2dfa enc scanner scs = nfa2dfa scs (scanner2nfa enc scanner scs)
-- | Determinise an NFA: construct the partial DFA (whose states are NFA
-- state sets) from its start states, then renumber the states to ints.
nfa2dfa:: [StartCode] -> NFA -> DFA SNum Code
nfa2dfa scs nfa = mk_int_dfa nfa (nfa2pdfa nfa pdfa (dfa_start_states pdfa))
  where
    pdfa = new_pdfa n_starts nfa
    n_starts = length scs -- number of start states
-- `nfa2pdfa' works by taking the next outstanding state set to be considered
-- and and ignoring it if the state is already in the partial DFA, otherwise
-- generating all possible transitions from it, adding the new state to the
-- partial DFA and continuing the closure with the extra states. Note the way
-- it incorporates the trailing context references into the search (by
-- including `rctx_ss' in the search).
-- | Worklist closure of the subset construction: take the next state set,
-- skip it if already in the partial DFA, otherwise add its transitions and
-- push the newly reachable sets (including trailing-context start sets).
nfa2pdfa:: NFA -> DFA StateSet Code -> [StateSet] -> DFA StateSet Code
nfa2pdfa _ pdfa [] = pdfa
nfa2pdfa nfa pdfa (ss:umkd)
  | ss `in_pdfa` pdfa = nfa2pdfa nfa pdfa umkd
  | otherwise = nfa2pdfa nfa pdfa' umkd'
  where
    pdfa' = add_pdfa ss (State accs (IntMap.fromList ss_outs)) pdfa

    umkd' = rctx_sss ++ map snd ss_outs ++ umkd

    -- for each character, the set of states that character would take
    -- us to from the current set of states in the NFA.
    ss_outs :: [(Int, StateSet)]
    ss_outs = [ (fromIntegral ch, mk_ss nfa ss')
              | ch <- byteSetElems $ setUnions [p | (p,_) <- outs],
                let ss' = [ s' | (p,s') <- outs, byteSetElem p ch ],
                not (null ss')
              ]

    -- start sets of trailing-context sub-DFAs must also be explored
    rctx_sss = [ mk_ss nfa [s]
               | Acc _ _ _ (RightContextRExp s) <- accs ]

    outs :: [(ByteSet,SNum)]
    outs = [ out | s <- ss, out <- nst_outs (nfa!s) ]

    accs = sort_accs [acc| s<-ss, acc<-nst_accs (nfa!s)]
-- `sort_accs' sorts a list of accept values into descending order of priority,
-- eliminating any elements that follow an unconditional accept value.
sort_accs:: [Accept a] -> [Accept a]
sort_accs accs = foldr chk [] (msort le accs)
  where
    -- an unconditional accept (no left or right context) shadows
    -- everything that follows it
    chk acc@(Acc _ _ Nothing NoRightContext) _ = [acc]
    chk acc rst = acc:rst
    -- lower priority number = higher priority, so ascending numeric order
    le (Acc{accPrio = n}) (Acc{accPrio=n'}) = n<=n'
{------------------------------------------------------------------------------
State Sets and Partial DFAs
------------------------------------------------------------------------------}
-- A `PartDFA' is a partially constructed DFA in which the states are
-- represented by sets of states of the original NFA. It is represented by a
-- triple consisting of the start state of the partial DFA, the NFA from which
-- it is derived and a map from state sets to states of the partial DFA. The
-- state set for a given list of NFA states is calculated by taking the epsilon
-- closure of all the states, sorting the result with duplicates eliminated.
type StateSet = [SNum]
-- Create an empty partial DFA holding only the start state sets: the
-- epsilon-closure of each of the first 'starts' NFA states.
new_pdfa:: Int -> NFA -> DFA StateSet a
new_pdfa starts nfa
  = DFA { dfa_start_states = start_ss,
          dfa_states = Map.empty
        }
  where
    -- one canonical (sorted) state set per NFA start state
    start_ss = [ msort (<=) (nst_cl(nfa!n)) | n <- [0..(starts-1)]]
    -- starts is the number of start states
-- Build the canonical StateSet for a list of NFA states by taking the
-- epsilon-closure of each state and removing duplicates.
mk_ss:: NFA -> [SNum] -> StateSet
mk_ss nfa states = nub' (<=) (concatMap (\s -> nst_cl (nfa ! s)) states)
-- Insert (or overwrite) the partial-DFA state associated with a state set.
add_pdfa:: StateSet -> State StateSet a -> DFA StateSet a -> DFA StateSet a
add_pdfa ss st dfa = dfa { dfa_states = Map.insert ss st (dfa_states dfa) }
-- Has a partial-DFA state already been created for this state set?
in_pdfa:: StateSet -> DFA StateSet a -> Bool
in_pdfa ss dfa = Map.member ss (dfa_states dfa)
-- Construct a DFA with numbered states, from a DFA whose states are
-- sets of states from the original NFA.
mk_int_dfa:: NFA -> DFA StateSet a -> DFA SNum a
mk_int_dfa nfa (DFA start_states mp)
  = DFA [0 .. length start_states-1]
        (Map.fromList [ (lookup' st, cnv pds) | (st, pds) <- Map.toAscList mp ])
  where
    -- numbering: start states first (in order), then the remaining state
    -- sets in ascending order
    mp' = Map.fromList (zip (start_states ++
            (map fst . Map.toAscList) (foldr Map.delete mp start_states)) [0..])
    -- total by construction: every state set reachable below was numbered above
    lookup' = fromJust . flip Map.lookup mp'
    -- renumber one state: rewrite transition targets and right-context refs
    cnv :: State StateSet a -> State SNum a
    cnv (State accs as) = State accs' as'
      where
        as' = IntMap.mapWithKey (\_ch s -> lookup' s) as
        accs' = map cnv_acc accs
        -- a right-context accept holds an NFA state; map it through its
        -- epsilon-closure state set to get the DFA state number
        cnv_acc (Acc p a lctx rctx) = Acc p a lctx rctx'
          where rctx' =
                  case rctx of
                    RightContextRExp s ->
                      RightContextRExp (lookup' (mk_ss nfa [s]))
                    other -> other
{-
-- `mk_st' constructs a state node from the list of accept values and a list of
-- transitions. The transitions list all the valid transitions out of the
-- node; all invalid transitions should be represented in the array by state
-- -1. `mk_st' has to work out whether the accept states contain an
-- unconditional entry, in which case the first field of `St' should be true,
-- and which default state to use in constructing the array (the array may span
-- a sub-range of the character set, the state number given the third argument
-- of `St' being taken as the default if an input character lies outside the
-- range). The default values is chosen to minimise the bounds of the array
-- and so there are two candidates: the value that 0 maps to (in which case
-- some initial segment of the array may be omitted) or the value that 255 maps
-- to (in which case a final segment of the array may be omitted), hence the
-- calculation of `(df,bds)'.
--
-- Note that empty arrays are avoided as they can cause severe problems for
-- some popular Haskell compilers.
mk_st:: [Accept Code] -> [(Char,Int)] -> State Code
mk_st accs as =
if null as
then St accs (-1) (listArray ('0','0') [-1])
else St accs df (listArray bds [arr!c| c<-range bds])
where
bds = if sz==0 then ('0','0') else bds0
(sz,df,bds0) | sz1 < sz2 = (sz1,df1,bds1)
| otherwise = (sz2,df2,bds2)
(sz1,df1,bds1) = mk_bds(arr!chr 0)
(sz2,df2,bds2) = mk_bds(arr!chr 255)
mk_bds df = (t-b, df, (chr b, chr (255-t)))
where
b = length (takeWhile id [arr!c==df| c<-['\0'..'\xff']])
t = length (takeWhile id [arr!c==df| c<-['\xff','\xfe'..'\0']])
arr = listArray ('\0','\xff') (take 256 (repeat (-1))) // as
-}
|
package com.kenny.tripscout.ui.city
import android.os.Bundle
import androidx.fragment.app.Fragment
import android.view.LayoutInflater
import android.view.View
import android.view.ViewGroup
import androidx.core.view.children
import androidx.lifecycle.Observer
import androidx.navigation.fragment.findNavController
import androidx.viewpager2.adapter.FragmentStateAdapter
import com.bumptech.glide.Glide
import com.google.android.material.tabs.TabItem
import com.google.android.material.tabs.TabLayout
import com.google.android.material.tabs.TabLayoutMediator
import com.kenny.tripscout.R
import com.kenny.tripscout.dataC.cityDestList
import com.kenny.tripscout.dataC.destKey
import com.kenny.tripscout.databinding.FragmentCityBinding
import com.kenny.tripscout.ui.city.tab.TabFragment
import com.kenny.tripscout.ui.home.personalize.bucket.BucketListFragment
import com.kenny.tripscout.ui.home.personalize.topic.TopicFragment
import com.kenny.tripscout.ui.trip.TripFragment
import org.koin.androidx.viewmodel.ext.android.viewModel
import org.koin.core.parameter.parametersOf
import timber.log.Timber
/**
 * Screen for a single destination city: a banner image at the top plus a
 * tab pager whose pages are [TabFragment]s, one per entry in [cityDestList].
 */
class CityFragment : Fragment() {

    private lateinit var binding: FragmentCityBinding

    // ViewModel scoped to this fragment. The destination path is currently
    // hard-coded; the argument-driven variant is kept commented for reference.
    private val viewModel by viewModel<CityViewModel> {
        // parametersOf(arguments?.getString(destKey) ?: "Netherlands/Amsterdam")
        parametersOf("Netherlands/Amsterdam")
    }

    // NOTE(review): also hard-coded — presumably should come from the
    // navigation arguments via destKey; confirm before shipping.
    private var dest = "Netherlands/Amsterdam"

    override fun onCreateView(
        inflater: LayoutInflater, container: ViewGroup?,
        savedInstanceState: Bundle?
    ): View? {
        // Inflate the layout for this fragment
        return inflater.inflate(R.layout.fragment_city, container, false)
    }

    override fun onViewCreated(view: View, savedInstanceState: Bundle?) {
        binding = FragmentCityBinding.bind(view)
        initUI()
        initObserver()
    }

    // Observe the banner data and load its image into the header view.
    private fun initObserver() {
        viewModel.bannerData.observe(viewLifecycleOwner, Observer {
            Glide.with(requireContext())
                .load(it.img)
                .centerCrop()
                .into(binding.cityBackImg)
        })
    }

    // Wire the attractions-map button and attach the tab layout to the pager.
    private fun initUI() {
        binding.cityAttractionMap.setOnClickListener {
            findNavController().navigate(R.id.action_cityFragment_to_cityAttractionsMapFragment)
        }
        binding.cityPager.adapter = CityAdapter(this)
        TabLayoutMediator(binding.cityTabLayout, binding.cityPager) { tab, position ->
            tab.text = cityDestList[position]
        }.attach()
    }

    // Pager adapter: one TabFragment per destination category.
    // NOTE(review): constructor arguments on Fragments are lost when the
    // system recreates them — consider passing a Bundle instead; confirm.
    inner class CityAdapter(fragment: Fragment) : FragmentStateAdapter(fragment) {
        override fun getItemCount(): Int = cityDestList.size
        override fun createFragment(position: Int): Fragment {
            return TabFragment("/$dest", cityDestList[position])
        }
    }
}
|
# Test fixture: source the workshop libraries under test, located
# relative to this test file via $POSIT_DIR.
setup ()
{
  lib_path="$POSIT_DIR/../../lib/workshop"

  . "$lib_path/common.sh"
  . "$lib_path/dispatch.sh"
}
# dispatch with no arguments should fall through to the empty
# placeholder function (example_).
test_dispatch_with_empty_placeholder ()
{
  expected_string="Called empty placeholder"
  example () ( dispatch example "${@:-}" )
  example_ () ( echo "$expected_string" )

  OLDPS4="$PS4" # Prevent debugger from changing the output on MinGW
  set +e        # We care only about the output on this test
  help_call="$(: | dispatch example)"
  set -e
  PS4="$OLDPS4"

  [ "$expected_string" = "$help_call" ]
}
# An unknown command should land on the command placeholder (example_call_),
# which receives the dispatch name and the unrecognized word.
test_dispatch_with_call_placeholder ()
{
  expected_string="Called empty command placeholder"
  example () ( dispatch example "$@" )
  example_call_ () ( echo "$expected_string $@" )

  OLDPS4="$PS4" # Prevent debugger from changing the output on MinGW
  set +e        # We care only about the output on this test
  help_call="$(: | dispatch example foaks)"
  set -e
  PS4="$OLDPS4"

  [ "$expected_string example foaks" = "$help_call" ]
}
# A known command name routes to example_command_<name>, with the
# remaining words passed through as its arguments.
test_dispatch_command ()
{
  expected_string="Called command"
  example () ( dispatch example "$@" )
  example_command_foo () ( echo "$expected_string $@")

  OLDPS4="$PS4" # Prevent debugger from changing the output on MinGW
  set +e        # We care only about the output on this test
  command_call="$(: | dispatch example foo bar baz)"
  set -e
  PS4="$OLDPS4"

  [ "$expected_string bar baz" = "$command_call" ]
}
# A short option (-f) routes to example_option_f with the rest of the
# arguments passed through.
test_dispatch_option_short ()
{
  expected_string="Called option"
  example () ( dispatch example "$@" )
  example_option_f () ( echo "$expected_string $@"; shift )

  short_call="$(: | example -f bar baz)"

  [ "$expected_string bar baz" = "$short_call" ]
}
# A short option handler can re-dispatch its remaining arguments, so
# "-f foo" runs the option handler and then the foo command.
test_dispatch_option_short_repassing ()
{
  expected_string="Called!"
  example () ( dispatch example "$@" )
  example_command_foo () ( printf %s "Command $expected_string" )
  example_option_f ()
  {
    printf %s "Option $expected_string $@"
    dispatch example "$@"
  }

  short_repassing_call="$(: | example -f foo)"

  [ "Option ${expected_string} fooCommand ${expected_string}" = "$short_repassing_call" ]
}
# A long option (--fanz) routes to example_option_fanz with the rest of
# the arguments passed through.
test_dispatch_option_long ()
{
  expected_string="Called option"
  example () ( dispatch example "$@" )
  example_option_fanz () ( echo "$expected_string $@"; shift )

  long_call="$(: | example --fanz bar baz)"

  [ "$expected_string bar baz" = "$long_call" ]
}
# A long option handler can re-dispatch its remaining arguments, so
# "--fanz foo" runs the option handler and then the foo command.
test_dispatch_option_long_repassing ()
{
  expected_string="Called!"
  example () ( dispatch example "$@" )
  example_command_foo () ( printf %s "Command $expected_string $@" )
  example_option_fanz ()
  {
    printf %s "Option $expected_string $@"
    dispatch example "$@"
  }

  short_repassing_call="$(: | example --fanz foo)"

  [ "Option ${expected_string} fooCommand ${expected_string} " = "$short_repassing_call" ]
}
# "--fanz=bar" is split at the equal sign: the option handler receives
# the value as its first argument.
test_dispatch_option_long_with_equal_sign ()
{
  expected_string="Called option"
  example () ( dispatch example "$@" )
  example_option_fanz () ( echo "$expected_string $@"; shift )

  long_call="$(: | example --fanz=bar baz)"

  [ "$expected_string bar baz" = "$long_call" ]
}
# A quoted value containing whitespace survives the equal-sign split
# as a single argument.
test_dispatch_option_long_with_equal_sign_and_quotes ()
{
  expected_string="Called option"
  example () ( dispatch example "$@" )
  example_option_fanz () ( echo "$expected_string $@"; shift )

  long_call="$(: | example --fanz="bar baz")"

  [ "$expected_string bar baz " = "$long_call" ]
}
# Only the first equal sign splits the option: equal signs inside the
# value are preserved.
test_dispatch_option_long_with_equal_sign_quotes_and_equal_value ()
{
  expected_string="Called option"
  example () ( dispatch example "$@" )
  example_option_fanz () ( echo "$expected_string $@"; shift )

  long_call="$(: | example --fanz="bar=baz")"

  [ "$expected_string bar=baz " = "$long_call" ]
}
# An equal-sign option handler can shift off its value and re-dispatch
# the remaining arguments as a command.
test_dispatch_option_long_repassing_with_equal_sign ()
{
  expected_string="Called!"
  example () ( dispatch example "$@" )
  example_command_foo () ( printf %s "Command $expected_string $@" )
  example_option_fanz ()
  {
    printf %s "Option $expected_string $@"
    shift
    dispatch example "$@"
  }

  long_repassing_call="$(: | example --fanz="bla bar" foo)"

  [ "Option ${expected_string} bla barfooCommand ${expected_string} " = "$long_repassing_call" ]
}
|
# Write Any File!
[![NPM version][npm-image]][npm-url]
[![Build Status][travis-image]][travis-url]
[![Test Coverage][cov-image]][cov-url]
[![Dependency Status][daviddm-image]][daviddm-url]
[![DevDependency Status][daviddm-image-dev]][daviddm-url-dev]
[![License][license-image]][license-url]
[![PR Welcome][pr-image]][pr-url]
Write to any file you want in any format.
## Design Philosophy
This is a handy tool for saving user configuration files in the user's preferred
format. Use it with [Load Any File!](https://github.com/zhangkaiyulw/load-any-file)
for a better coding experience.
## Installation
```bash
npm i write-any-file --save
```
## Usage
To write to a file asynchronously:
```js
await writeFile(data, '/home/john/config.json');
await writeFile(data, '/home/john/config.coffee');
await writeFile(data, '/home/john/config.yaml');
```
To write to a file synchronously:
```ts
writeFile.sync(data, '/home/john/config.json');
writeFile.sync(data, '/home/john/config.coffee');
writeFile.sync(data, '/home/john/config.yaml');
```
## API
### writeFile(data: any, location: string): Promise<void>;
Write `data` to `location` asynchronously.
### writeFile.sync(data: any, location: string): void;
Write `data` to `location` synchronously.
## Change Log
- 0.1.0 (2019-05-23)
- basic functionality
## License
MIT © [Zhang Kai Yu][license-url]
[npm-image]: https://badge.fury.io/js/write-any-file.svg
[npm-url]: https://npmjs.org/package/write-any-file
[travis-image]: https://travis-ci.org/zhangkaiyulw/write-any-file.svg?branch=master
[travis-url]: https://travis-ci.org/zhangkaiyulw/write-any-file
[cov-image]: https://codecov.io/gh/zhangkaiyulw/write-any-file/branch/master/graph/badge.svg
[cov-url]: https://codecov.io/gh/zhangkaiyulw/write-any-file
[daviddm-image]: https://david-dm.org/zhangkaiyulw/write-any-file.svg?theme=shields.io
[daviddm-url]: https://david-dm.org/zhangkaiyulw/write-any-file
[daviddm-image-dev]: https://david-dm.org/zhangkaiyulw/write-any-file/dev-status.svg
[daviddm-url-dev]: https://david-dm.org/zhangkaiyulw/write-any-file?type=dev
[license-image]: https://img.shields.io/github/license/zhangkaiyulw/write-any-file.svg
[license-url]: https://github.com/zhangkaiyulw/write-any-file/blob/master/LICENSE
[pr-image]: https://img.shields.io/badge/PRs-welcome-brightgreen.svg
[pr-url]: https://github.com/zhangkaiyulw/write-any-file/blob/master/CONTRIBUTING.md
|
using System.Collections.Generic;
namespace Fhi.HelseId.Web.Hpr.Core
{
/// <summary>
/// Code constants for the OId9060 set (appears to be the Norwegian health
/// personnel category code set — confirm against the official code registry):
/// one <c>OId9060</c> value per category, plus a list of all known codes.
/// </summary>
public static partial class Kodekonstanter
{
    public static OId9060 OId9060Ambulansearbeider = new OId9060("AA", "Ambulansearbeider");
    public static OId9060 OId9060Apotektekniker = new OId9060("AT", "Apotektekniker");
    public static OId9060 OId9060Audiograf = new OId9060("AU", "Audiograf");
    public static OId9060 OId9060Bioingeniør = new OId9060("BI", "Bioingeniør");
    public static OId9060 OId9060Ergoterapeut = new OId9060("ET", "Ergoterapeut");
    public static OId9060 OId9060Provisorfarmasøyt = new OId9060("FA1", "Provisorfarmasøyt");
    public static OId9060 OId9060Reseptarfarmasøyt = new OId9060("FA2", "Reseptarfarmasøyt");
    public static OId9060 OId9060Fiskehelsebiolog = new OId9060("FB", "Fiskehelsebiolog");
    public static OId9060 OId9060Fotterapeut = new OId9060("FO", "Fotterapeut");
    public static OId9060 OId9060Fysioterapeut = new OId9060("FT", "Fysioterapeut");
    public static OId9060 OId9060Helsesekretær = new OId9060("HE", "Helsesekretær");
    public static OId9060 OId9060Helsefagarbeider = new OId9060("HF", "Helsefagarbeider");
    public static OId9060 OId9060Hjelpepleier = new OId9060("HP", "Hjelpepleier");
    public static OId9060 OId9060Jordmor = new OId9060("JO", "Jordmor");
    // NOTE(review): description keeps the underscore ("Klinisk_ernæringsfysiolog") —
    // looks like it should be a space; confirm against the source registry.
    public static OId9060 OId9060Klinisk_ernæringsfysiolog = new OId9060("KE", "Klinisk_ernæringsfysiolog");
    public static OId9060 OId9060Kiropraktor = new OId9060("KI", "Kiropraktor");
    public static OId9060 OId9060Lege = new OId9060("LE", "Lege");
    public static OId9060 OId9060Omsorgsarbeider = new OId9060("OA", "Omsorgsarbeider");
    public static OId9060 OId9060Ortopediingeniør = new OId9060("OI", "Ortopediingeniør");
    public static OId9060 OId9060Optiker = new OId9060("OP", "Optiker");
    public static OId9060 OId9060Ortoptist = new OId9060("OR", "Ortoptist");
    public static OId9060 OId9060Perfusjonist = new OId9060("PE", "Perfusjonist");
    public static OId9060 OId9060Psykolog = new OId9060("PS", "Psykolog");
    public static OId9060 OId9060Radiograf = new OId9060("RA", "Radiograf");
    public static OId9060 OId9060Sykepleier = new OId9060("SP", "Sykepleier");
    public static OId9060 OId9060Tannhelsesekretær = new OId9060("TH", "Tannhelsesekretær");
    public static OId9060 OId9060Tannlege = new OId9060("TL", "Tannlege");
    public static OId9060 OId9060Tannpleier = new OId9060("TP", "Tannpleier");
    public static OId9060 OId9060Tanntekniker = new OId9060("TT", "Tanntekniker");
    public static OId9060 OId9060Veterinær = new OId9060("VE", "Veterinær");
    public static OId9060 OId9060Vernepleier = new OId9060("VP", "Vernepleier");
    public static OId9060 OId9060Ukjent_uspesifisert = new OId9060("XX", "Ukjent/uspesifisert");

    // All codes above, collected for enumeration and lookup.
    public static List<OId9060> KodeList = new List<OId9060> {
        OId9060Ambulansearbeider,
        OId9060Apotektekniker,
        OId9060Audiograf,
        OId9060Bioingeniør,
        OId9060Ergoterapeut,
        OId9060Provisorfarmasøyt,
        OId9060Reseptarfarmasøyt,
        OId9060Fiskehelsebiolog,
        OId9060Fotterapeut,
        OId9060Fysioterapeut,
        OId9060Helsesekretær,
        OId9060Helsefagarbeider,
        OId9060Hjelpepleier,
        OId9060Jordmor,
        OId9060Klinisk_ernæringsfysiolog,
        OId9060Kiropraktor,
        OId9060Lege,
        OId9060Omsorgsarbeider,
        OId9060Ortopediingeniør,
        OId9060Optiker,
        OId9060Ortoptist,
        OId9060Perfusjonist,
        OId9060Psykolog,
        OId9060Radiograf,
        OId9060Sykepleier,
        OId9060Tannhelsesekretær,
        OId9060Tannlege,
        OId9060Tannpleier,
        OId9060Tanntekniker,
        OId9060Veterinær,
        OId9060Vernepleier,
        OId9060Ukjent_uspesifisert,
    };
}
}
|
/* eslint-disable import/prefer-default-export */
import forge from 'node-forge';
import axios from 'axios';
import { WriteStream } from 'fs';
import scrypt from 'scrypt-js';
import { CursorBuffer } from './CursorBuffer';
// Layout constants of the encrypted file format (all sizes in bytes).
// IV used for decryption is 16 bytes
const IVLength = 16;
// Salt is 32 bytes
const SaltLength = 32;
const AesKeyLength = 32;
const HmacKeyLength = 32;
const HmacLength = 64;
// scrypt output is split into an AES key followed by an HMAC key
const ScryptKeyLength = AesKeyLength + HmacKeyLength;
/// Decodes data using decryption key and writes its value to writestream
// NOTE(review): on decipher failure this only logs — the possibly corrupt
// bytes already written to `writer` are not rolled back; confirm callers
// expect best-effort semantics here.
export const writeDecodedData = async (
  decryptionKey: Uint8Array,
  ivBytes: Uint8Array,
  data: Uint8Array,
  writer: WriteStream
): Promise<void> => {
  const decipher = forge.cipher.createDecipher(
    'AES-CTR',
    forge.util.createBuffer(decryptionKey)
  );
  decipher.start({ iv: forge.util.createBuffer(ivBytes) });
  decipher.update(forge.util.createBuffer(data));
  // write what has been decrypted so far before finalizing
  writer.write(forge.util.binary.raw.decode(decipher.output.getBytes()));
  const decipherSuccess = decipher.finish();
  if (!decipherSuccess) {
    console.error('Error decrypting', decipherSuccess);
  }
};
// Computes the HMAC-SHA512 over the file body (everything between the 4-byte
// prefix and the trailing digest) and also extracts the digest stored in the
// file, so the caller can compare the two for password/integrity checking.
const computeDataHMAC = (
  scryptKeys: Uint8Array,
  data: Uint8Array
): {
  computedDigest: forge.util.ByteStringBuffer;
  digestInFile: forge.util.ByteStringBuffer;
} => {
  // the second half of the scrypt output is the HMAC key
  const hmacKey = scryptKeys.slice(AesKeyLength, AesKeyLength + HmacKeyLength);
  const hmacReader = new CursorBuffer(data);
  hmacReader.skipXBytes(4); // <--- usually empty
  const hmac = forge.hmac.create();
  hmac.start(
    'sha512' as forge.hmac.Algorithm,
    forge.util.createBuffer(hmacKey)
  );
  // hash everything up to the trailing HMAC
  // NOTE(review): the "+ 1" (also in downloadEncryptedFile) looks like an
  // off-by-one relative to bytesLeft - HmacLength — verify against the
  // file format / CursorBuffer semantics.
  hmac.update(
    forge.util
      .createBuffer(
        hmacReader.readXBytes(hmacReader.bytesLeft - HmacLength + 1)
      )
      .getBytes()
  );
  const hmacBytes = hmacReader.readXBytes(HmacLength);
  return {
    computedDigest: hmac.digest(),
    digestInFile: forge.util.createBuffer(hmacBytes),
  };
};
// Result of downloading and parsing an encrypted file: the derived AES key,
// the IV read from the file header, and the still-encrypted payload.
interface EncryptedFileInfo {
  key: Uint8Array;
  iv: Uint8Array;
  encryptedData: Uint8Array;
}
// Build the gateway download URL for an IPFS hash. The gateway base can be
// overridden through the GATSBY_IPFS_GATEWAY_BASE_URL environment variable;
// it defaults to the Textile hub.
const getHashDownloadUrl = (hash: string): string =>
  `${process.env.GATSBY_IPFS_GATEWAY_BASE_URL ||
    'https://hub.textile.io'}/ipfs/${hash}`;
// Fetch hash from ipfs and parses decryption information
//
// File layout (per the reads below): [4 skipped bytes][int32 iterations]
// [32-byte salt][16-byte IV][ciphertext][64-byte HMAC]. The scrypt key is
// split into an AES key (first 32 bytes) and an HMAC key (last 32 bytes).
// NOTE(review): the declared `| null` is never returned — failures throw;
// consider narrowing the return type.
export const downloadEncryptedFile = async (
  hash: string,
  password: string
): Promise<EncryptedFileInfo | null> => {
  let res;
  try {
    res = await axios.get<ArrayBuffer>(getHashDownloadUrl(hash), {
      responseType: 'arraybuffer',
      maxRedirects: 0,
    });
  } catch (err) {
    throw new Error(
      'Downloading file failed. Confirm you have a correct share link and try again later.'
    );
  }
  const resultReader = new CursorBuffer(new Uint8Array(res.data));
  resultReader.skipXBytes(4); // <-- skip 4 bytes (usually blank)
  // read int32 (4 bytes) for -> iterations
  const iterations = resultReader.read32();
  // read SaltLength bytes from res.data -> salt
  const saltBytes = resultReader.readXBytes(SaltLength);
  // read IVLength bytes from res.data -> iv
  const ivBytes = resultReader.readXBytes(IVLength);
  // use password, salt, and iterations
  // (NFKC normalization keeps pass-phrases stable across Unicode forms)
  const scryptKeys = scrypt.syncScrypt(
    new TextEncoder().encode(password.normalize('NFKC')),
    saltBytes,
    iterations,
    8,
    1,
    ScryptKeyLength
  );
  const decryptionKey = scryptKeys.slice(0, AesKeyLength);
  // verify password/integrity before handing the key back
  const { computedDigest, digestInFile } = computeDataHMAC(
    scryptKeys,
    new Uint8Array(res.data)
  );
  if (computedDigest.toHex() !== digestInFile.toHex()) {
    throw new Error('Incorrect password provided.');
  }
  return {
    key: decryptionKey,
    iv: ivBytes,
    encryptedData: resultReader.readXBytes(
      resultReader.bytesLeft - HmacLength + 1
    ),
  };
};
|
AgentX
===========
AgentX是alinode团队开发的agent命令程序,用于协助alinode的性能数据上报和问题诊断。
- [](https://travis-ci.org/aliyun-node/agentx)
- [](https://david-dm.org/aliyun-node/agentx)
- [](https://codecov.io/gh/aliyun-node/agentx)
## Installation
```
$ npm install agentx -g
```
以上命令会将agentx安装为一个全局的命令行工具。
## Usage
agentx需要一个配置文件来进行使用,agentx仅会在配置指定下的目录执行命令或读取日志。
该配置格式如下:
```
{
"server": "<SERVER IP>:8080",
"appid": "<YOUR APPID>",
"secret": "<YOUR SECRET>",
"cmddir": "</path/to/your/command/dir>",
"logdir": "</path/to/your/log/dir>",
"reconnectDelay": 10,
"heartbeatInterval": 60,
"reportInterval": 60,
"error_log": [
"</path/to/your/error.log>",
"您的应用在业务层面产生的异常日志的路径",
"例如:/root/.logs/error.#YYYY#-#MM#-#DD#-#HH#.log",
"可选"
],
"packages": [
"</path/to/your/package.json>",
"可以输入多个package.json的路径",
"可选"
]
}
```
> 配置中的#YYYY#、#MM#、#DD#、#HH#是通配符,如果您的异常日志是按时间生成的,请使用它。
保存为`config.json`。上述不明确的地方请咨询旺旺群:1406236180。
完成配置后,请使用以下命令进行执行:
```
$ nohup agentx config.json &
```
agentx将以常驻进程的方式执行。部署完成后,请访问<http://alinode.aliyun.com/dashboard>查看您的应用详情。如果一切正常,稍等片刻(1分钟)即可收到你的应用性能数据。
## License
The agentx is released under the MIT license.
|
use rustc_serialize::Encodable;
use rustc_serialize::json;
use iron::prelude::*;
use hyper::status::StatusCode;
/// Conversion of a value into an HTTP [`Response`] for the JSON API.
pub trait AsApiResponse {
    /// Build a `200 OK` response whose body is the JSON encoding of `self`
    /// (see the blanket impl below).
    fn as_response(&self) -> Response;
}
impl<D: Sized + Encodable> AsApiResponse for D {
fn as_response(&self) -> Response {
let mut response = Response::with(StatusCode::Ok);
response.body = Some(box json::encode(self).unwrap());
response
}
}
|
# encoding: utf-8
require 'test_helper'
# Exercises Erlang::Associable behaviour (update_in / dig) on both Map and
# Tuple: nested access, auto-created sub-maps, persistence of the original,
# and error cases.
class Erlang::AssociableTest < Minitest::Test

  def test_update_in
    # Fixtures: a nested Map (containing a Tuple) and a nested Tuple
    # (containing a Map) so both directions of mixing are covered.
    map = Erlang::Map[
      "A" => "aye",
      "B" => Erlang::Map["C" => "see", "D" => Erlang::Map["E" => "eee"]],
      "F" => Erlang::Tuple["G", Erlang::Map["H" => "eitch"], "I"]
    ]
    tuple = Erlang::Tuple[
      100,
      101,
      102,
      Erlang::Tuple[200, 201, Erlang::Tuple[300, 301, 302]],
      Erlang::Map["A" => "alpha", "B" => "bravo"],
      [400, 401, 402]
    ]

    # Context: with one level on existing key
    ## Map passes the value to the block
    map.update_in("A") { |value| assert_equal("aye", value) }
    ## Tuple passes the value to the block
    tuple.update_in(1) { |value| assert_equal(101, value) }
    ## Map replaces the value with the result of the block
    result = map.update_in("A") { |value| "FLIBBLE" }
    assert_equal "FLIBBLE", result.get("A")
    ## Tuple replaces the value with the result of the block
    result = tuple.update_in(1) { |value| "FLIBBLE" }
    assert_equal "FLIBBLE", result.get(1)
    ## Map should preserve the original
    result = map.update_in("A") { |value| "FLIBBLE" }
    assert_equal "aye", map.get("A")
    ## Tuple should preserve the original
    result = tuple.update_in(1) { |value| "FLIBBLE" }
    assert_equal 101, tuple.get(1)

    # Context: with multi-level on existing keys
    ## Map passes the value to the block
    map.update_in("B", "D", "E") { |value| assert_equal("eee", value) }
    ## Tuple passes the value to the block
    tuple.update_in(3, 2, 0) { |value| assert_equal(300, value) }
    ## Map replaces the value with the result of the block
    result = map.update_in("B", "D", "E") { |value| "FLIBBLE" }
    assert_equal "FLIBBLE", result["B"]["D"]["E"]
    ## Tuple replaces the value with the result of the block
    result = tuple.update_in(3, 2, 0) { |value| "FLIBBLE" }
    assert_equal "FLIBBLE", result[3][2][0]
    ## Map should preserve the original
    result = map.update_in("B", "D", "E") { |value| "FLIBBLE" }
    assert_equal "eee", map["B"]["D"]["E"]
    ## Tuple should preserve the original
    result = tuple.update_in(3, 2, 0) { |value| "FLIBBLE" }
    assert_equal 300, tuple[3][2][0]

    # Context: with multi-level creating sub-maps when keys don't exist
    ## Map passes nil to the block
    map.update_in("B", "X", "Y") { |value| assert value == nil }
    ## Tuple passes nil to the block
    tuple.update_in(3, 3, "X", "Y") { |value| assert value == nil }
    ## Map creates submaps on the way to set the value
    result = map.update_in("B", "X", "Y") { |value| "NEWVALUE" }
    assert_equal "NEWVALUE", result["B"]["X"]["Y"]
    assert_equal "eee", result["B"]["D"]["E"]
    ## Tuple creates submaps on the way to set the value
    result = tuple.update_in(3, 3, "X", "Y") { |value| "NEWVALUE" }
    assert_equal "NEWVALUE", result[3][3]["X"]["Y"]
    assert_equal 300, result[3][2][0]

    # Context: Map with multi-level including Tuple with existing keys
    ## passes the value to the block
    map.update_in("F", 1, "H") { |value| assert_equal("eitch", value) }
    ## replaces the value with the result of the block
    result = map.update_in("F", 1, "H") { |value| "FLIBBLE" }
    assert_equal "FLIBBLE", result["F"][1]["H"]
    ## should preserve the original
    result = map.update_in("F", 1, "H") { |value| "FLIBBLE" }
    assert_equal "eitch", map["F"][1]["H"]

    # Context: Tuple with multi-level including Map with existing keys
    ## passes the value to the block
    tuple.update_in(4, "B") { |value| assert_equal("bravo", value) }
    ## replaces the value with the result of the block
    result = tuple.update_in(4, "B") { |value| "FLIBBLE" }
    assert_equal "FLIBBLE", result[4]["B"]
    ## should preserve the original
    result = tuple.update_in(4, "B") { |value| "FLIBBLE" }
    assert_equal "bravo", tuple[4]["B"]

    # Context: with empty key_path
    ## Map raises ArgumentError
    assert_raises(ArgumentError) { map.update_in() { |v| 42 } }
    ## Tuple raises ArgumentError
    assert_raises(ArgumentError) { tuple.update_in() { |v| 42 } }
  end

  def test_dig
    # Context: Map
    m = Erlang::Map[:a => 9, :b => Erlang::Map[:c => 'a', :d => 4], :e => nil]
    ## returns the value with one argument to dig
    assert_equal 9, m.dig(:a)
    ## returns the value in nested maps
    assert_equal 'a', m.dig(:b, :c)
    ## returns nil if the key is not present
    assert m.dig(:f, :foo) == nil
    ## returns nil if you dig out the end of the map
    assert m.dig(:f, :foo, :bar) == nil
    ## returns nil if a value does not support dig
    assert m.dig(:a, :foo) == nil
    ## returns the correct value when there is a default proc
    default_map = Erlang::Map.new { |k| "#{k}-default" }
    assert_equal "a-default", default_map.dig(:a)

    # Context: Tuple
    t = Erlang::Tuple[1, 2, Erlang::Tuple[3, 4]]
    ## returns value at the index with one argument
    assert_equal 1, t.dig(0)
    ## returns value at index in nested arrays
    assert_equal 3, t.dig(2, 0)
    ## returns nil when indexing deeper than possible
    assert t.dig(0, 0) == nil
    ## returns nil if you index past the end of an array
    assert t.dig(5) == nil
    ## raises a type error when indexing with a key arrays don't understand
    assert_raises(ArgumentError) { t.dig(:foo) }
  end
end
|
# Rake tasks for the autoclose plugin; both require the Rails environment.
namespace :autoclose do
  desc <<~END_DESC
    Find affected issues and update them
  END_DESC
  # Runs the real autoclose pass (updates issues).
  task :autoclose => :environment do
    RedmineAutoclose::Autoclose.autoclose
  end

  desc <<~END_DESC
    Find affected issues and preview them without updating
  END_DESC
  # Dry run: shows which issues would be affected, without updating them.
  task :preview => :environment do
    RedmineAutoclose::Autoclose.preview
  end
end
|
use crate::protos::protobuf::pulsar_api::CompressionType as Protobuf_CompressionType;
/// Message compression algorithms supported by this build.
///
/// Variants other than `NONE` are gated behind the corresponding
/// `with-compression-*` Cargo features.
#[derive(PartialEq, Eq, Debug, Clone)]
pub enum CompressionType {
    NONE,
    #[cfg(feature = "with-compression-lz4")]
    LZ4,
    #[cfg(feature = "with-compression-zlib")]
    ZLIB,
}
// Infallible mapping to the protobuf wire enum: every variant enabled in
// this build has a protobuf counterpart.
impl From<CompressionType> for Protobuf_CompressionType {
    fn from(ct: CompressionType) -> Self {
        match ct {
            CompressionType::NONE => Protobuf_CompressionType::NONE,
            #[cfg(feature = "with-compression-lz4")]
            CompressionType::LZ4 => Protobuf_CompressionType::LZ4,
            #[cfg(feature = "with-compression-zlib")]
            CompressionType::ZLIB => Protobuf_CompressionType::ZLIB,
        }
    }
}
// Fallible mapping from the protobuf wire enum: compression types that this
// build does not support (feature disabled, or unknown to us) are rejected
// with the unit error.
impl std::convert::TryFrom<Protobuf_CompressionType> for CompressionType {
    type Error = ();

    fn try_from(ct: Protobuf_CompressionType) -> Result<Self, Self::Error> {
        match ct {
            Protobuf_CompressionType::NONE => Ok(Self::NONE),
            #[cfg(feature = "with-compression-lz4")]
            Protobuf_CompressionType::LZ4 => Ok(Self::LZ4),
            #[cfg(feature = "with-compression-zlib")]
            Protobuf_CompressionType::ZLIB => Ok(Self::ZLIB),
            // anything else is unsupported in this build
            _ => Err(()),
        }
    }
}
|
package com.timzowen.idoctor.data
import com.timzowen.idoctor.R
import com.timzowen.idoctor.model.DoctorsProfile
/**
 * Static sample data source for the doctors list (temp data for lead
 * counselors until a real backend is wired in).
 */
class DataDoctorsProfile {

    /**
     * Returns the hard-coded list of doctor profiles shown in the UI.
     * Typos in the displayed slogans are fixed here ("welath" -> "wealth",
     * "Health eating" -> "Healthy eating") and "DR." is normalized to "Dr."
     * for consistency with the other entries.
     */
    fun loadDoctorsProfile(): List<DoctorsProfile> {
        return listOf(
            DoctorsProfile(
                R.drawable.doctor1,
                "Dr. Timz Owen",
                R.drawable.ic_baseline_chat_bubble_24,
                "Health is wealth",
            ),
            DoctorsProfile(
                R.drawable.doctor2,
                "Dr. Allan Kipkosgei",
                R.drawable.ic_baseline_chat_bubble_24,
                "Health is everything",
            ),
            DoctorsProfile(
                R.drawable.doctor3,
                "Dr. Marini Clement",
                R.drawable.ic_baseline_chat_bubble_24,
                "We cure God heals",
            ),
            DoctorsProfile(
                R.drawable.doctor4,
                "Dr. Julia",
                R.drawable.ic_baseline_chat_bubble_24,
                "Therapy solves it",
            ),
            DoctorsProfile(
                R.drawable.doctor5,
                "Dr. Esther",
                R.drawable.ic_baseline_chat_bubble_24,
                "Healthy eating is all",
            ),
            DoctorsProfile(
                R.drawable.group1,
                "Dr. Irene",
                R.drawable.ic_baseline_chat_bubble_24,
                "We are family medicine",
            ),
            DoctorsProfile(
                R.drawable.group3,
                "Dr. Kamau",
                R.drawable.ic_baseline_chat_bubble_24,
                "Let's talk about us",
            ),
        )
    }
}
|
-- Schema for the juvenile rewards/points system.
-- Drops are ordered children-first so foreign keys never dangle.

-- reset reward claims and point assignments
drop table if exists Transactions;
drop table if exists RewardClaim;
drop table if exists PointAssignment;
drop table if exists PointChange;

-- reset person/reward tables
drop table if exists Rewards;
drop table if exists RewardCategory;
drop table if exists JuvenileEvent;
drop table if exists Juvenile;

-- reset behaviors
drop table if exists Behaviors;
drop table if exists Location;
drop table if exists BehaviorCategory;

-- relations for corresponding parties
create table Juvenile (
    Id integer primary key,
    FirstName varchar(100) not null,
    LastName varchar(100) not null
);

-- One row per enrolment/event of a juvenile; TotalPoints is the running balance.
create table JuvenileEvent (
    Id integer primary key,
    JuvenileId integer,
    Active boolean not null,
    TotalPoints integer default 0,
    EDateTime datetime not null, -- used to determine the most recent event id for a given juvenile
    foreign key (JuvenileId) references Juvenile(Id)
);

-- definition of rewards and their specifications
create table RewardCategory (
    Id integer primary key,
    Description varchar(100)
);

-- NOTE(review): primary key is composite (Id, Category), but Transactions
-- references Rewards(Id) alone — confirm Id is unique by itself, or make
-- that foreign key composite as well.
create table Rewards (
    Id integer,
    Item varchar(100) not null,
    Category integer,
    Price integer not null,
    MaxQuantity integer not null,
    Image varchar(100) not null,
    primary key (Id, Category),
    foreign key (Category) references RewardCategory(Id)
);

-- definition of behaviors and their specifications
create table Location (
    Id integer primary key,
    Name varchar(100) not null
);

create table BehaviorCategory (
    Id integer primary key,
    Name varchar(100) not null
);

create table Behaviors (
    Id integer primary key,
    CategoryId integer,
    LocationId integer,
    Description varchar(200) not null,
    foreign key (CategoryId) references BehaviorCategory(Id),
    foreign key (LocationId) references Location(Id)
);

-- relations to track gaining and spending points

-- A behavior observed by an officer, earning the juvenile points.
create table PointAssignment (
    JuvenileId integer,
    OfficerName varchar(50),
    Behavior integer,
    ADateTime datetime,
    primary key (OfficerName, JuvenileId, ADateTime),
    foreign key (JuvenileId) references Juvenile(Id),
    foreign key (Behavior) references Behaviors(Id)
);

-- A redemption of points; its line items live in Transactions.
create table RewardClaim (
    Id integer primary key,
    JuvenileId integer,
    OfficerName varchar(50),
    Points int not null,
    CDateTime datetime,
    foreign key (JuvenileId) references Juvenile(Id)
);

-- Line items of a RewardClaim (reward, quantity, subtotal in points).
create table Transactions (
    ClaimId integer,
    RewardId integer,
    Quantity integer not null,
    Subtotal integer not null,
    primary key (ClaimId, RewardId),
    foreign key (ClaimId) references RewardClaim(Id),
    foreign key (RewardId) references Rewards(Id)
);

-- Manual point adjustment made by an administrator.
create table PointChange (
    AdminName varchar(50),
    JuvenileId integer,
    Points integer not null,
    PDateTime datetime not null,
    primary key(AdminName, JuvenileId, PDateTime),
    foreign key (JuvenileId) references Juvenile(Id)
);
|
# Copyright 2020 (c) Netguru S.A.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations # for correct annotation of forward reference in Split
from dataclasses import dataclass
from enum import IntEnum
import pandas as pd
from opoca.data.dataset import Dataset
SPLIT_FLAG_NAME = 'split_flag'
class SplitFlag(IntEnum):
    """Integer flags marking which subset a data row belongs to."""
    TRAIN = 0
    VAL = 1
    TEST = 2
    # IGNORE can be used to mark rows that should be omitted completely,
    # e.g. with wrong labels
    IGNORE = -1
@dataclass
class Split:
    """ A simple dataclass used for handling train/val/test splits

    `to_split_table` allows exporting the current split to a human-readable
    format and further saving it as csv. If a file name corresponds to data set
    file name, the split can be automatically reloaded with `DataHandler.load`
    method with `return_as='split'`.
    """
    train: Dataset
    val: Dataset
    test: Dataset

    def __iter__(self):
        # Iterate the subsets in canonical train/val/test order.
        return iter([self.train, self.val, self.test])

    def __str__(self):
        return f'Split\n * train: {self.train}\n * val: {self.val}\n ' \
               f'* test: {self.test}\n'

    @classmethod
    def from_split_table(cls, dataset: Dataset, split_table: pd.Series) -> Split:
        """ Factory method to create a Split object from split table

        Parameters
        ----------
        dataset: Dataset
            Dataset that contains all the data or its subsample.
        split_table: pd.Series
            Series with int values indicating to which dataset a given row
            belongs. See `SplitFlag` enum for details. Any value that is not in
            SplitFlag is ignored.

        Notes
        -----
        Every element of the index in `dataset` should have a corresponding
        element in the index of `split_table`. Opposite is not necessary, i.e.
        `split_table` can have elements that are absent in `dataset` (for
        example when subsampling).

        Returns
        -------
        split: Split
            Split object with dataset split according to the `split_table`.
        """
        # Boolean masks select the rows carrying each flag; rows flagged
        # IGNORE (or anything unknown) fall through all three masks.
        train_dataset = dataset[split_table == SplitFlag.TRAIN]
        train_dataset.name += '/train'
        val_dataset = dataset[split_table == SplitFlag.VAL]
        val_dataset.name += '/val'
        test_dataset = dataset[split_table == SplitFlag.TEST]
        test_dataset.name += '/test'
        return cls(train=train_dataset, val=val_dataset, test=test_dataset)

    def to_split_table(self, sort_index: bool = True) -> pd.Series:
        """ Generate a split table from a Split object

        Parameters
        ----------
        sort_index: bool, optional
            If True (default) the index is be sorted before returning the split
            table. Consider setting to False for huge data sets.

        Returns
        -------
        split_table: pd.Series
            Series with int flags indicating to which dataset each row belongs.
            See `SplitFlag` enum for exact meanings of the flags.
        """
        ind_train = pd.Series(SplitFlag.TRAIN, index=self.train.x.index)
        ind_val = pd.Series(SplitFlag.VAL, index=self.val.x.index)
        ind_test = pd.Series(SplitFlag.TEST, index=self.test.x.index)
        # Concatenate in canonical train/val/test order so the unsorted table
        # is ordered consistently with the rest of the class (the previous
        # implementation concatenated train/test/val, which only showed up
        # when sort_index=False).
        split_table = pd.concat([ind_train, ind_val, ind_test])
        if sort_index:
            split_table.sort_index(inplace=True)
        return split_table
|
import IUser from '@domain/entities/IUser'
/**
 * Persistence contract for User entities (CRUD operations).
 */
export default interface IUserRepository {
  /** Persists a new user and resolves with the stored record. */
  insert(user: IUser): Promise<IUser>
  /** Resolves with every stored user. */
  getAll(): Promise<IUser[]>
  /** Resolves with the user identified by `id`. */
  getOne(id: number): Promise<IUser>
  /** Replaces the user identified by `id` and resolves with the result. */
  update(id: number, user: IUser): Promise<IUser>
  /** Removes the user identified by `id`; resolves with an affected count. */
  delete(id: number): Promise<number>
}
|
package com.ybx.xiangxue.kotlin.kt.core
/**
* @Author 55HAITAO
* @Date 2020/7/6 4:52 PM
*/
// Lambdas: 15 different ways to write them.
fun main() {
    // ':'  type declared, no implementation
    // ':'  type declared, with implementation
    // '='  type inferred, with implementation

    // Declarations only (no implementation assigned yet).
    var m01: () -> Unit
    var m02: (Int) -> String
    var m03: (Int, Int, String?) -> String
    var m04: (Int, Int) -> String

    // With implementations.
    var m09: () -> Unit = {
        println("m09")
    }
    m09()
    var m10: () -> String = {
        "我是m10的返回值呀"
    }
    println("m10 ${m10()}")
    var m11: (Int) -> String = {
        // `it` is the implicit single parameter.
        when (it) {
            1 -> "我是1"
            2 -> "我是2"
            else -> "我不是"
        }
    }
    println("m11 ${m11(11)}")
    var m12: (Int, Int, String) -> String = { n1, n2, s3 ->
        var result = "我是无结果"
        if (!s3.isNullOrEmpty()) {
            result = "我是结果 ${n1 + n2}"
        }
        // Last expression is the lambda's return value.
        result
    }
    println("m12 : ${m12(3, 3, " ")}")

    // Declarations using '=' carry an implementation: they can be both
    // defined and invoked directly.
    var m05 = { n: Int -> Unit }
    var m06 = { n: Int -> 8 + n }
    println("m06 ${m06(4)}")
    var m07 = { n1: Int, n2: Int -> n1 + n2 }
    println("m07 ${m07(3, 4)}")
    var m08 = { n1: Int, n2: Int -> "我是两数的和 ${n1 + n2}" }
    println("m08 ${m08(6, 9)}")

    //        fun m06() = { n1: Int, s1: String -> Unit }
    //        fun m06() = { n1: Int, s1: String -> String() }
    //
    //
    //        m03()
    //        m05(8)

    var m13 = {}
    var m14 = { it: Boolean -> if (it) "是对的" else "是错的" }
    var m15 = { b: Boolean, int: Int -> if (b) 100 + int else "没结果" }
    println("m15: ${m15(false, 10)}")

    //    loginService("ybx945", "123455") { name, pwd ->
    //        println("用户名 $name , 密码 $pwd 登录了 ")
    //    }

    // Trailing-lambda call syntax: the last function parameter moves
    // outside the parentheses.
    loginService2("ybx", "123456") {
        if (it) {
            println("登录成功了")
        } else {
            println("登录失败了")
        }
    }

    val result = loginTest("ybx","123456"){name,pwd->
        // Qualified return targets the lambda, not main().
        return@loginTest name == "ybx" && pwd == "123456"
    }
    println("logtest $result")
}
/** Callback type receiving the raw (name, password) pair. */
typealias Request = (String, String) -> Unit

/**
 * Hands the supplied credentials straight to [req] without validating them.
 */
fun loginService(name: String, pwd: String, req: Request) = req(name, pwd)
/**
 * Checks the credentials against the hard-coded account and reports the
 * outcome through [respon] (true on success, false otherwise).
 */
fun loginService2(name: String, pwd: String, respon: (Boolean) -> Unit) {
    respon(name == "ybx" && pwd == "123456")
}
/**
 * Runs the caller-supplied check [mm] against the credentials, logs the
 * verdict, and maps it to a status code (666 on success, 0 on failure).
 */
fun loginTest(name: String, pwd: String, mm: (String, String) -> Boolean): Int {
    val ok = mm(name, pwd)
    println("mm $ok")
    return when {
        ok -> 666
        else -> 0
    }
}
|
import express from 'express';
/**
 * Shape every Express route controller must expose to the application.
 */
export interface IController {
  /** Base URL path the controller's routes are mounted under. */
  path: string,
  /** Express router holding the controller's route handlers. */
  router: express.Router,
  // NOTE(review): "intializeRoutes" is misspelled ("initializeRoutes");
  // renaming would break every implementer, so fix it in a coordinated
  // change across the codebase rather than here.
  intializeRoutes: () => void
}
|
---
title: List of Algorithms
category: Tutorial
order: 4
---
- TOC
{:toc}
### Global optimization
|Name|Applicable Problems' Tags|
|-|-|
|[Simulated Annealing](../../Instance/algorithm/simulated annealing)|`ConOP`|
|[DE/rand/1](../../Instance/algorithm/canonical DE/#derand1)|`ConOP` `GOP` `MMOP`|
|[DE/best/2](../../Instance/algorithm/canonical DE/#debest2)|`ConOP` `GOP` `MMOP`|
|[SPSO-07](../../Instance/algorithm/standard PSO/#spso-07)|`ConOP` `GOP` `MMOP`|
|[SPSO-11](../../Instance/algorithm/standard PSO/#spso-11)|`ConOP` `GOP` `MMOP`|
### Multi-modal optimization
|Name|Applicable Problems' Tags|
|-|-|
|[DE/nrand/1](../../Instance/algorithm/canonical DE/#denrand1)|`ConOP` `MMOP` `GOP`|
### Multi-objective optimization
|Name|Applicable Problems' Tags|
|-|-|
|[NSGAII-SBX](../../Instance/algorithm/NSGAII/#nsgaii-sbx)|`ConOP` `MOP` |
|[NSGAIII-SBX](../../Instance/algorithm/NSGAIII/#nsgaiii-sbx)|`ConOP` `MOP`|
### Constraint optimization
|Name|Applicable Problems' Tags|
|-|-|
|||
### Dynamic optimization
|Name|Applicable Problems' Tags|
|-|-|
|||
### Expensive optimization
|Name|Applicable Problems' Tags|
|-|-|
|||
### Large-scale optimization
|Name|Applicable Problems' Tags|
|-|-|
|||
### Robust optimization over time
|Name|Applicable Problems' Tags|
|-|-|
|||
### Combinatorial optimization
|Name|Applicable Problems' Tags|
|-|-|
|||
### Real world optimization
|Name|Applicable Problems' Tags|
|-|-|
|||
|
# Renders an HTTP request as the note and message text of an interaction
# (sequence) diagram, delegating multi-line layout to the injected formatter.
class HttpRequestInteractionDiagramMapper
  # formatter          - must respond to #multiline_text_from(lines)
  # write_request_body - include the request body (and content type) in notes
  # display_cookies    - include the Cookie header in notes
  def initialize(formatter, write_request_body, display_cookies)
    @formatter = formatter
    @write_request_body = write_request_body
    @display_cookies = display_cookies
  end

  # Builds the diagram note: optional cookie line, then (when enabled and
  # present) the content type and body, each preceded by a literal ' \n'
  # spacer line the formatter understands.
  def note_from(http_request)
    lines = []
    lines << "Cookie: #{http_request.cookie_header}" if http_request.cookie_header && @display_cookies
    if @write_request_body && http_request.body
      lines.push(' \n', http_request.content_type_header) if http_request.content_type_header
      lines.push(' \n', http_request.body)
    end
    @formatter.multiline_text_from(lines).strip
  end

  # Builds the diagram message label, e.g. "GET /users".
  def message_from(http_request)
    "#{http_request.method} #{http_request.path}"
  end
end
|
/*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
#include "SliderPage.h"
#include <Gallery/ui_SliderPage.h>
#include <AzQtComponents/Components/Widgets/Slider.h>
// Gallery page demonstrating the AzQtComponents slider variants: plain,
// mid-point, vertical, double-precision, and curve-midpoint sliders.
SliderPage::SliderPage(QWidget* parent)
    : QWidget(parent)
    , ui(new Ui::SliderPage)
{
    ui->setupUi(this);

    // Hover tooltip shows the value as "Opacity N%".
    ui->percentage->setToolTipFormatting("Opacity ", "%");
    ui->percentage->setRange(0, 100);

    // Apply the mid-point visual style (fill grows from the center) to all
    // mid-point demo sliders, enabled and disabled, horizontal and vertical.
    AzQtComponents::Slider::applyMidPointStyle(ui->midPointSliderDisabled);
    AzQtComponents::Slider::applyMidPointStyle(ui->midPointSliderEnabled);
    AzQtComponents::Slider::applyMidPointStyle(ui->verticalMidPointSliderEnabled);
    AzQtComponents::Slider::applyMidPointStyle(ui->verticalMidPointSliderDisabled);
    AzQtComponents::Slider::applyMidPointStyle(ui->doubleMidPointSliderDisabled);
    AzQtComponents::Slider::applyMidPointStyle(ui->doubleMidPointSliderEnabled);
    AzQtComponents::Slider::applyMidPointStyle(ui->doubleVerticalMidPointSliderEnabled);
    AzQtComponents::Slider::applyMidPointStyle(ui->doubleVerticalMidPointSliderDisabled);

    // Plain sliders: seed with assorted values.
    {
        ui->sliderEnabled->setValue(20);
        ui->sliderDisabled->setValue(40);
        ui->verticalSliderEnabled->setValue(20);
        ui->verticalSliderDisabled->setValue(0);

        ui->doubleSliderEnabled->setRange(0, 10);
        ui->doubleSliderEnabled->setValue(6.0);
        ui->doubleSliderDisabled->setValue(0);
    }

    // Integer mid-point sliders: symmetric range around zero.
    {
        int min = -10;
        int max = 10;
        ui->midPointSliderDisabled->setRange(min, max);
        ui->midPointSliderDisabled->setValue(5);
        ui->midPointSliderEnabled->setRange(min, max);
        ui->midPointSliderEnabled->setValue(2);

        ui->verticalMidPointSliderEnabled->setRange(min, max);
        ui->verticalMidPointSliderEnabled->setValue(-2);
        ui->verticalMidPointSliderDisabled->setRange(min, max);
        ui->verticalMidPointSliderDisabled->setValue(-5);
    }

    // Double mid-point sliders: wider symmetric range.
    {
        int min = -20;
        int max = 20;
        ui->doubleMidPointSliderDisabled->setRange(min, max);
        ui->doubleMidPointSliderDisabled->setValue(10.0);
        ui->doubleMidPointSliderEnabled->setRange(min, max);
        ui->doubleMidPointSliderEnabled->setValue(4);

        ui->doubleVerticalMidPointSliderEnabled->setRange(min, max);
        ui->doubleVerticalMidPointSliderEnabled->setValue(-4.0);
        ui->doubleVerticalMidPointSliderDisabled->setRange(min, max);
        ui->doubleVerticalMidPointSliderDisabled->setValue(-10.0);
    }

    // Curve-midpoint sliders: identical range/value, differing only in the
    // curve midpoint (0.25 / default 0.5 / 0.75) to show the response curve.
    {
        double min = 0.0;
        double max = 1.0;
        double value = 0.5;

        const auto curveMidpointSliders =
        {
            ui->curveMidpoint25,
            ui->curveMidpoint5,
            ui->curveMidpoint75,
            ui->verticalcurveMidpoint25,
            ui->verticalcurveMidpoint5,
            ui->verticalcurveMidpoint75
        };
        for (auto slider : curveMidpointSliders)
        {
            slider->setRange(min, max);
            slider->setValue(value);
            AzQtComponents::Slider::applyMidPointStyle(slider);
        }

        ui->curveMidpoint25->setCurveMidpoint(0.25);
        ui->verticalcurveMidpoint25->setCurveMidpoint(0.25);
        ui->curveMidpoint75->setCurveMidpoint(0.75);
        ui->verticalcurveMidpoint75->setCurveMidpoint(0.75);
    }

    // Usage documentation rendered in the page's rich-text pane.
    QString exampleText = R"(

A Slider is a wrapper around a QSlider.<br/>
There are two variants: SliderInt and SliderDouble, for working with signed integers and doubles, respectively.<br/>
They add tooltip functionality, as well as conversion to the proper data types (int and double).
<br/>
<pre>
#include &lt;AzQtComponents/Components/Widgets/Slider.h&gt;
#include &lt;QDebug&gt;

// Here's an example that creates a slider and sets the hover/tooltip indicator to display as a percentage
SliderInt* sliderInt = new SliderInt();
sliderInt->setRange(0, 100);
sliderInt->setToolTipFormatting("", "%");

// Assuming you've created a slider already (either in code or via .ui file), give it the mid point style like this:
Slider::applyMidPointStyle(sliderInt);

// Disable it like this:
sliderInt->setEnabled(false);

// Here's an example of creating a SliderDouble and setting it up:
SliderDouble* sliderDouble = new SliderDouble();
double min = -10.0;
double max = 10.0;
int numSteps = 21;
sliderDouble->setRange(min, max, numSteps);
sliderDouble->setValue(0.0);

// Listen for changes; same format for SliderInt as SliderDouble
connect(sliderDouble, &SliderDouble::valueChanged, sliderDouble, [sliderDouble](double newValue){
    qDebug() << "Slider value changed to " << newValue;
});

</pre>

)";

    ui->exampleText->setHtml(exampleText);
}
// NOTE(review): 'ui' is allocated with new in the constructor but never
// released here. If the member is a raw 'Ui::SliderPage*' this leaks on every
// page destruction — confirm the member type in the header and add
// 'delete ui;' if it is a raw pointer (not needed for QScopedPointer).
SliderPage::~SliderPage()
{
}
#include <Gallery/SliderPage.moc>
|
# pkg:
## 说明:
- 本目录, 用于存放与`业务无关`的通用代码
- 常用的 utility, wrap, hook, 中间件等等
- 后期可逐步迁移到外部, 供其他项目使用.
## 区别 app/std 目录:
- app/std 是`项目内`全局`公共依赖`: 标准状态码, 标准配置
## 目录结构建议:
- 参考 go 标准库目录组织方式.
|
# restapi-rust
Este projeto foi criado para estudo de Rust; é uma API REST usando a linguagem Rust
## Build a CRUD API with Rust
Escrever uma API em Rust é mais complicado do que escrever com Node.js ou Golang.
## Frameworks
Frameworks que serão usados para auxiliar no projeto
* [Rocket](https://rocket.rs/) -- web framework for writing fast web applications
* [Serde ](https://serde.rs/) -- framework for serializing and deserializing Rust data structures
* [Diesel ](http://diesel.rs/) -- safe, extensible ORM and query builder
## Criando o projeto
Crie o projeto usando o gerenciador de dependências cargo
```
$ cargo new hero-api
```
Depois siga o processo abaixo, porque o Rocket requer o uso da versão nightly do Rust.
```
$ rustup default nightly
$ rustup update && cargo update
```
|
using System;
using System.Threading.Tasks;
using Common.Log;
using AzureStorage;
using AzureStorage.Tables;
using Lykke.SettingsReader;
using Lykke.Service.Stellar.Api.Core.Domain.Transaction;
namespace Lykke.Service.Stellar.Api.AzureRepositories.Transaction
{
/// <summary>
/// Azure Table Storage persistence for transaction broadcasts. Rows live in
/// a single table under the "Broadcast" partition, keyed by operation id;
/// a secondary index in the same table maps transaction hash -> operation id.
/// </summary>
public class TxBroadcastRepository : ITxBroadcastRepository
{
    private const string TableName = "Transaction";

    private static string GetPartitionKey() => "Broadcast";
    private static string GetRowKey(Guid operationId) => operationId.ToString();

    // Assigned once in the constructor and never reassigned; readonly
    // documents and enforces that invariant.
    private readonly INoSQLTableStorage<TxBroadcastEntity> _table;
    private readonly INoSQLTableStorage<IndexEntity> _tableIndex;

    public TxBroadcastRepository(IReloadingManager<string> dataConnStringManager, ILog log)
    {
        _table = AzureTableStorage<TxBroadcastEntity>.Create(dataConnStringManager, TableName, log);
        _tableIndex = AzureTableStorage<IndexEntity>.Create(dataConnStringManager, TableName, log);
    }

    /// <summary>Loads a broadcast by operation id, or null when absent.</summary>
    public async Task<TxBroadcast> GetAsync(Guid operationId)
    {
        var entity = await _table.GetDataAsync(GetPartitionKey(), GetRowKey(operationId));
        return entity?.ToDomain();
    }

    /// <summary>Resolves a transaction hash to its operation id via the index, or null.</summary>
    public async Task<Guid?> GetOperationId(string hash)
    {
        var index = await _tableIndex.GetDataAsync(IndexEntity.GetPartitionKeyHash(), hash);
        return index != null ? Guid.Parse(index.Value) : (Guid?)null;
    }

    /// <summary>
    /// Upserts the broadcast row and, when a transaction hash is known,
    /// the hash -> operation id index entry alongside it.
    /// </summary>
    public async Task InsertOrReplaceAsync(TxBroadcast broadcast)
    {
        var entity = broadcast.ToEntity(GetPartitionKey(), GetRowKey(broadcast.OperationId));
        await _table.InsertOrReplaceAsync(entity);

        // add index
        if (!string.IsNullOrEmpty(broadcast.Hash))
        {
            var index = new IndexEntity
            {
                PartitionKey = IndexEntity.GetPartitionKeyHash(),
                RowKey = broadcast.Hash,
                Value = entity.RowKey
            };
            await _tableIndex.InsertOrReplaceAsync(index);
        }
    }

    /// <summary>Merges the mutable broadcast fields into the stored row.</summary>
    public async Task MergeAsync(TxBroadcast broadcast)
    {
        TxBroadcastEntity MergeAction(TxBroadcastEntity entity)
        {
            entity.State = broadcast.State;
            entity.Amount = broadcast.Amount;
            entity.Fee = broadcast.Fee;
            entity.Ledger = broadcast.Ledger;
            entity.CreatedAt = broadcast.CreatedAt;
            entity.Error = broadcast.Error;
            entity.ErrorCode = broadcast.ErrorCode;
            return entity;
        }

        await _table.MergeAsync(GetPartitionKey(), GetRowKey(broadcast.OperationId), MergeAction);
    }

    /// <summary>Deletes the broadcast row and its hash index entry, if any.</summary>
    public async Task DeleteAsync(Guid operationId)
    {
        var entity = await _table.DeleteAsync(GetPartitionKey(), GetRowKey(operationId));

        // delete index
        if (entity != null && !string.IsNullOrEmpty(entity.Hash))
        {
            await _tableIndex.DeleteIfExistAsync(IndexEntity.GetPartitionKeyHash(), entity.Hash);
        }
    }
}
}
|
require 'test_helper'
# Model-level validation tests for Entry.
#
# NOTE(review): all three tests below build a completely empty Entry, so each
# fails validation for *every* missing attribute at once — none of them
# isolates the attribute named in its description. To test each validation
# independently, populate the other required attributes and omit only the one
# under test.
class EntryTest < ActiveSupport::TestCase
  test "should not save entry without content" do
    entry = Entry.new
    assert_not entry.save
  end

  test "should not save entry without day_id" do
    entry = Entry.new
    assert_not entry.save
  end

  test "should not save entry without category_id" do
    entry = Entry.new
    assert_not entry.save
  end

  # test "the truth" do
  #   assert true
  # end
end
|
package com.fmt.kotlin.eyepetizer.home
import androidx.lifecycle.LiveData
import androidx.lifecycle.MutableLiveData
import androidx.lifecycle.SavedStateHandle
import com.fmt.kotlin.eyepetizer.common.base.viewmodel.BaseViewModel
import dagger.hilt.android.lifecycle.HiltViewModel
import javax.inject.Inject
@HiltViewModel
class HomeViewModel @Inject constructor(
    private val savedStateHandle: SavedStateHandle
) : BaseViewModel() {

    private val HOME_PAGE_INDEX = "home_page_index"

    // Backing holder for the currently selected bottom-tab index.
    private val mLiveData = MutableLiveData<Int>()

    /**
     * Returns the selected tab index, restoring it from [SavedStateHandle] on
     * first access. Restoring from the saved state prevents overlapping
     * fragments when the Activity is destroyed (e.g. under memory pressure)
     * and later recreated.
     */
    fun getSelected(): LiveData<Int> {
        if (mLiveData.value == null) {
            mLiveData.postValue(savedStateHandle.get<Int>(HOME_PAGE_INDEX) ?: 0)
        }
        return mLiveData
    }

    /** Persists every tab selection so it survives process recreation. */
    fun saveSelect(selectIndex: Int) {
        savedStateHandle.set(HOME_PAGE_INDEX, selectIndex)
    }
}
|
#[allow(unused_imports)]
use tokio::sync::oneshot::{Receiver as OneshotReceiver, Sender as OneshotSender};
#[allow(unused_imports)]
use tokio::sync::watch::{Receiver as WatchReceiver, Sender as WatchSender};
pub mod cli;
pub mod client;
pub mod config;
pub mod database;
pub mod mainactor;
pub mod stats;
pub mod metrics;
pub use metrics::METRICS;
/// Wire envelope for JSON messages: a short type tag, an optional id, and an
/// arbitrary payload captured/flattened as raw JSON.
#[derive(serde_derive::Serialize, serde_derive::Deserialize)]
struct Message {
    /// Message type discriminator; serialized under the short key `"T"`.
    #[serde(rename = "T")]
    tag: String,
    /// Optional correlation id; omitted from the JSON output when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    id: Option<u64>,
    /// Probably contains `MinutelyData`.
    #[serde(flatten)]
    pub rest: serde_json::Value,
}
/// Process entry point: all argument parsing and work is delegated to the
/// `cli` module; its error (if any) is propagated as the process result.
fn main() -> anyhow::Result<()> {
    cli::main()
}
|
package utf.commons.http
/**
* Created by Sławomir Kluz on 04/10/2017.
*/
/** Enumeration of the HTTP request methods supported by this package. */
object Method extends Enumeration {
  type Method = Value
  val GET, POST, PUT, PATCH, DELETE = Value
}
|
/*-
* Copyright (c) 1999 MAEKAWA Masahide <bishop@rr.iij4u.or.jp>,
* Nick Hibma <n_hibma@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
* $NetBSD: umass.c,v 1.28 2000/04/02 23:46:53 augustss Exp $
*/
/* Also already merged from NetBSD:
* $NetBSD: umass.c,v 1.67 2001/11/25 19:05:22 augustss Exp $
* $NetBSD: umass.c,v 1.90 2002/11/04 19:17:33 pooka Exp $
* $NetBSD: umass.c,v 1.108 2003/11/07 17:03:25 wiz Exp $
* $NetBSD: umass.c,v 1.109 2003/12/04 13:57:31 keihan Exp $
*/
/*
* Universal Serial Bus Mass Storage Class specs:
* http://www.usb.org/developers/devclass_docs/usb_msc_overview_1.2.pdf
* http://www.usb.org/developers/devclass_docs/usbmassbulk_10.pdf
* http://www.usb.org/developers/devclass_docs/usb_msc_cbi_1.1.pdf
* http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf
*/
/*
* Ported to NetBSD by Lennart Augustsson <augustss@NetBSD.org>.
* Parts of the code written by Jason R. Thorpe <thorpej@shagadelic.org>.
*/
/*
* The driver handles 3 Wire Protocols
* - Command/Bulk/Interrupt (CBI)
* - Command/Bulk/Interrupt with Command Completion Interrupt (CBI with CCI)
* - Mass Storage Bulk-Only (BBB)
* (BBB refers Bulk/Bulk/Bulk for Command/Data/Status phases)
*
* Over these wire protocols it handles the following command protocols
* - SCSI
* - UFI (floppy command set)
* - 8070i (ATAPI)
*
* UFI and 8070i (ATAPI) are transformed versions of the SCSI command set. The
* sc->sc_transform method is used to convert the commands into the appropriate
* format (if at all necessary). For example, UFI requires all commands to be
* 12 bytes in length amongst other things.
*
* The source code below is marked and can be split into a number of pieces
* (in this order):
*
* - probe/attach/detach
* - generic transfer routines
* - BBB
* - CBI
* - CBI_I (in addition to functions from CBI)
* - CAM (Common Access Method)
* - SCSI
* - UFI
* - 8070i (ATAPI)
*
* The protocols are implemented using a state machine, for the transfers as
* well as for the resets. The state machine is contained in umass_t_*_callback.
* The state machine is started through either umass_command_start() or
* umass_reset().
*
* The reason for doing this is a) CAM performs a lot better this way and b) it
* avoids using tsleep from interrupt context (for example after a failed
* transfer).
*/
/*
* The SCSI related part of this driver has been derived from the
* dev/ppbus/vpo.c driver, by Nicolas Souchu (nsouch@FreeBSD.org).
*
* The CAM layer uses so called actions which are messages sent to the host
* adapter for completion. The actions come in through umass_cam_action. The
* appropriate block of routines is called depending on the transport protocol
* in use. When the transfer has finished, these routines call
* umass_cam_cb again to complete the CAM command.
*/
#include <sys/stdint.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/priv.h>
#include <bus/u4b/usb.h>
#include <bus/u4b/usbdi.h>
#include <bus/u4b/usbdi_util.h>
#include "usbdevs.h"
#include <bus/u4b/quirk/usb_quirk.h>
#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_da.h>
#include <bus/cam/cam_periph.h>
#if 0
#define UMASS_EXT_BUFFER
#ifdef UMASS_EXT_BUFFER
/* this enables loading of virtual buffers into DMA */
#define UMASS_USB_FLAGS .ext_buffer=1,
#else
#define UMASS_USB_FLAGS
#endif
#endif
#ifdef USB_DEBUG
#define DIF(m, x) \
do { \
if (umass_debug & (m)) { x ; } \
} while (0)
#define DPRINTF(sc, m, fmt, ...) \
do { \
if (umass_debug & (m)) { \
kprintf("%s:%s: " fmt, \
(sc) ? (const char *)(sc)->sc_name : \
(const char *)"umassX", \
__func__ ,## __VA_ARGS__); \
} \
} while (0)
#define UDMASS_GEN 0x00010000 /* general */
#define UDMASS_SCSI 0x00020000 /* scsi */
#define UDMASS_UFI 0x00040000 /* ufi command set */
#define UDMASS_ATAPI 0x00080000 /* 8070i command set */
#define UDMASS_CMD (UDMASS_SCSI|UDMASS_UFI|UDMASS_ATAPI)
#define UDMASS_USB 0x00100000 /* USB general */
#define UDMASS_BBB 0x00200000 /* Bulk-Only transfers */
#define UDMASS_CBI 0x00400000 /* CBI transfers */
#define UDMASS_WIRE (UDMASS_BBB|UDMASS_CBI)
#define UDMASS_ALL 0xffff0000 /* all of the above */
static int umass_debug = 0;
static int umass_throttle = 0;
static SYSCTL_NODE(_hw_usb, OID_AUTO, umass, CTLFLAG_RW, 0, "USB umass");
SYSCTL_INT(_hw_usb_umass, OID_AUTO, debug, CTLFLAG_RW,
&umass_debug, 0, "umass debug level");
TUNABLE_INT("hw.usb.umass.debug", &umass_debug);
SYSCTL_INT(_hw_usb_umass, OID_AUTO, throttle, CTLFLAG_RW,
&umass_throttle, 0, "Forced delay between commands in milliseconds");
TUNABLE_INT("hw.usb.umass.throttle", &umass_throttle);
#else
#define DIF(...) do { } while (0)
#define DPRINTF(...) do { } while (0)
#endif
#define UMASS_BULK_SIZE (1 << 17)
#define UMASS_CBI_DIAGNOSTIC_CMDLEN 12 /* bytes */
#define UMASS_MAX_CMDLEN MAX(12, CAM_MAX_CDBLEN) /* bytes */
/* USB transfer definitions */
#define UMASS_T_BBB_RESET1 0 /* Bulk-Only */
#define UMASS_T_BBB_RESET2 1
#define UMASS_T_BBB_RESET3 2
#define UMASS_T_BBB_COMMAND 3
#define UMASS_T_BBB_DATA_READ 4
#define UMASS_T_BBB_DATA_RD_CS 5
#define UMASS_T_BBB_DATA_WRITE 6
#define UMASS_T_BBB_DATA_WR_CS 7
#define UMASS_T_BBB_STATUS 8
#define UMASS_T_BBB_MAX 9
#define UMASS_T_CBI_RESET1 0 /* CBI */
#define UMASS_T_CBI_RESET2 1
#define UMASS_T_CBI_RESET3 2
#define UMASS_T_CBI_COMMAND 3
#define UMASS_T_CBI_DATA_READ 4
#define UMASS_T_CBI_DATA_RD_CS 5
#define UMASS_T_CBI_DATA_WRITE 6
#define UMASS_T_CBI_DATA_WR_CS 7
#define UMASS_T_CBI_STATUS 8
#define UMASS_T_CBI_RESET4 9
#define UMASS_T_CBI_MAX 10
#define UMASS_T_MAX MAX(UMASS_T_CBI_MAX, UMASS_T_BBB_MAX)
/* Generic definitions */
/* Direction for transfer */
#define DIR_NONE 0
#define DIR_IN 1
#define DIR_OUT 2
/* device name */
#define DEVNAME "umass"
#define DEVNAME_SIM "umass-sim"
/* Approximate maximum transfer speeds (assumes 33% overhead). */
#define UMASS_FULL_TRANSFER_SPEED 1000
#define UMASS_HIGH_TRANSFER_SPEED 40000
#define UMASS_SUPER_TRANSFER_SPEED 400000
#define UMASS_FLOPPY_TRANSFER_SPEED 20
#define UMASS_TIMEOUT 5000 /* ms */
/* CAM specific definitions */
#define UMASS_SCSIID_MAX 1 /* maximum number of drives expected */
#define UMASS_SCSIID_HOST UMASS_SCSIID_MAX
/* Bulk-Only features */
#define UR_BBB_RESET 0xff /* Bulk-Only reset */
#define UR_BBB_GET_MAX_LUN 0xfe /* Get maximum lun */
/* Command Block Wrapper */
/*
 * Command Block Wrapper (CBW) of the Bulk-Only ("BBB") transport: the
 * fixed-layout header that carries each SCSI/UFI/ATAPI command block.
 * Layout is dictated by the USB MSC Bulk-Only spec, hence __packed.
 */
typedef struct {
	uDWord	dCBWSignature;		/* must equal CBWSIGNATURE ("USBC") */
#define	CBWSIGNATURE	0x43425355
	uDWord	dCBWTag;		/* echoed back in the matching CSW */
	uDWord	dCBWDataTransferLength;	/* expected bytes in the data phase */
	uByte	bCBWFlags;		/* data phase direction */
#define	CBWFLAGS_OUT	0x00
#define	CBWFLAGS_IN	0x80
	uByte	bCBWLUN;
	uByte	bCDBLength;		/* valid bytes in CBWCDB */
#define	CBWCDBLENGTH	16
	uByte	CBWCDB[CBWCDBLENGTH];	/* the command block itself */
} __packed umass_bbb_cbw_t;
#define UMASS_BBB_CBW_SIZE 31
/* Command Status Wrapper */
/*
 * Command Status Wrapper (CSW) of the Bulk-Only transport: returned by the
 * device after the data phase to report command completion status.
 * The non-standard signatures below cover devices with the WRONG_CSWSIG
 * quirk (see quirk flags above).
 */
typedef struct {
	uDWord	dCSWSignature;		/* normally CSWSIGNATURE ("USBS") */
#define	CSWSIGNATURE	0x53425355
#define	CSWSIGNATURE_IMAGINATION_DBX1	0x43425355
#define	CSWSIGNATURE_OLYMPUS_C1	0x55425355
	uDWord	dCSWTag;		/* must match the CBW's dCBWTag */
	uDWord	dCSWDataResidue;	/* bytes expected minus bytes processed */
	uByte	bCSWStatus;
#define	CSWSTATUS_GOOD	0x0
#define	CSWSTATUS_FAILED	0x1
#define	CSWSTATUS_PHASE	0x2
} __packed umass_bbb_csw_t;
#define UMASS_BBB_CSW_SIZE 13
/* CBI features */
#define UR_CBI_ADSC 0x00
/*
 * CBI status block, delivered on the interrupt pipe. Interpreted either as
 * a Command Completion Interrupt block (type/value) or, for UFI devices,
 * as an ASC/ASCQ sense-code pair.
 */
typedef union {
	struct {
		uint8_t	type;
#define	IDB_TYPE_CCI	0x00
		uint8_t	value;
#define	IDB_VALUE_PASS	0x00
#define	IDB_VALUE_FAIL	0x01
#define	IDB_VALUE_PHASE	0x02
#define	IDB_VALUE_PERSISTENT	0x03
#define	IDB_VALUE_STATUS_MASK	0x03
	} __packed common;

	struct {
		uint8_t	asc;	/* additional sense code */
		uint8_t	ascq;	/* additional sense code qualifier */
	} __packed ufi;
} __packed umass_cbi_sbl_t;
struct umass_softc; /* see below */
typedef void (umass_callback_t)(struct umass_softc *sc, union ccb *ccb,
uint32_t residue, uint8_t status);
#define STATUS_CMD_OK 0 /* everything ok */
#define STATUS_CMD_UNKNOWN 1 /* will have to fetch sense */
#define STATUS_CMD_FAILED 2 /* transfer was ok, command failed */
#define STATUS_WIRE_FAILED 3 /* couldn't even get command across */
typedef uint8_t (umass_transform_t)(struct umass_softc *sc, uint8_t *cmd_ptr,
uint8_t cmd_len);
/* Wire and command protocol */
#define UMASS_PROTO_BBB 0x0001 /* USB wire protocol */
#define UMASS_PROTO_CBI 0x0002
#define UMASS_PROTO_CBI_I 0x0004
#define UMASS_PROTO_WIRE 0x00ff /* USB wire protocol mask */
#define UMASS_PROTO_SCSI 0x0100 /* command protocol */
#define UMASS_PROTO_ATAPI 0x0200
#define UMASS_PROTO_UFI 0x0400
#define UMASS_PROTO_RBC 0x0800
#define UMASS_PROTO_COMMAND 0xff00 /* command protocol mask */
/* Device specific quirks */
#define NO_QUIRKS 0x0000
/*
* The drive does not support Test Unit Ready. Convert to Start Unit
*/
#define NO_TEST_UNIT_READY 0x0001
/*
* The drive does not reset the Unit Attention state after REQUEST
* SENSE has been sent. The INQUIRY command does not reset the UA
* either, and so CAM runs in circles trying to retrieve the initial
* INQUIRY data.
*/
#define RS_NO_CLEAR_UA 0x0002
/* The drive does not support START STOP. */
#define NO_START_STOP 0x0004
/* Don't ask for full inquiry data (255b). */
#define FORCE_SHORT_INQUIRY 0x0008
/* Needs to be initialised the Shuttle way */
#define SHUTTLE_INIT 0x0010
/* Drive needs to be switched to alternate iface 1 */
#define ALT_IFACE_1 0x0020
/* Drive does not do 1Mb/s, but just floppy speeds (20kb/s) */
#define FLOPPY_SPEED 0x0040
/* The device can't count and gets the residue of transfers wrong */
#define IGNORE_RESIDUE 0x0080
/* No GetMaxLun call */
#define NO_GETMAXLUN 0x0100
/* The device uses a weird CSWSIGNATURE. */
#define WRONG_CSWSIG 0x0200
/* Device cannot handle INQUIRY so fake a generic response */
#define NO_INQUIRY 0x0400
/* Device cannot handle INQUIRY EVPD, return CHECK CONDITION */
#define NO_INQUIRY_EVPD 0x0800
/* Pad all RBC requests to 12 bytes. */
#define RBC_PAD_TO_12 0x1000
/*
* Device reports number of sectors from READ_CAPACITY, not max
* sector number.
*/
#define READ_CAPACITY_OFFBY1 0x2000
/*
* Device cannot handle a SCSI synchronize cache command. Normally
* this quirk would be handled in the cam layer, but for IDE bridges
* we need to associate the quirk with the bridge and not the
* underlying disk device. This is handled by faking a success
* result.
*/
#define NO_SYNCHRONIZE_CACHE 0x4000
/* Device does not support 'PREVENT/ALLOW MEDIUM REMOVAL'. */
#define NO_PREVENT_ALLOW 0x8000
/*
 * Per-device software state for one umass instance: the transfer currently
 * in progress, the wire-protocol wrappers, the USB transfer set, and the
 * probe-time protocol/quirk selection.
 */
struct umass_softc {

	struct scsi_sense cam_scsi_sense;
	struct scsi_test_unit_ready cam_scsi_test_unit_ready;
	struct lock sc_lock;

	/* State of the single outstanding command/transfer. */
	struct {
		uint8_t *data_ptr;
		union ccb *ccb;
		umass_callback_t *callback;

		uint32_t data_len;	/* bytes */
		uint32_t data_rem;	/* bytes */
		uint32_t data_timeout;	/* ms */
		uint32_t actlen;	/* bytes */

		uint8_t	cmd_data[UMASS_MAX_CMDLEN];
		uint8_t	cmd_len;	/* bytes */
		uint8_t	dir;		/* DIR_NONE / DIR_IN / DIR_OUT */
		uint8_t	lun;
	}	sc_transfer;

	/* Bulk specific variables for transfers in progress */
	umass_bbb_cbw_t cbw;	/* command block wrapper */
	umass_bbb_csw_t csw;	/* command status wrapper */

	/* CBI specific variables for transfers in progress */
	umass_cbi_sbl_t sbl;	/* status block */

	device_t sc_dev;
	struct usb_device *sc_udev;
	struct cam_sim *sc_sim;	/* SCSI Interface Module */
	struct usb_xfer *sc_xfer[UMASS_T_MAX];

	/*
	 * The command transform function is used to convert the SCSI
	 * commands into their derivatives, like UFI, ATAPI, and friends.
	 */
	umass_transform_t *sc_transform;

	uint32_t sc_unit;
	uint32_t sc_quirks;	/* they got it almost right */
	uint32_t sc_proto;	/* wire and cmd protocol */

	uint8_t	sc_name[16];
	uint8_t	sc_iface_no;	/* interface number */
	uint8_t	sc_maxlun;	/* maximum LUN number, inclusive */
	uint8_t	sc_last_xfer_index;
	uint8_t	sc_status_try;

	struct usb_callout sc_rescan_timeout;
};
/*
 * Result of probe-time protocol matching: the device quirks and the
 * selected wire/command protocol, plus the probe status for the bus.
 */
struct umass_probe_proto {
	uint32_t quirks;	/* NO_QUIRKS or a mask of the flags above */
	uint32_t proto;		/* UMASS_PROTO_* wire | command selection */
	int	error;		/* nonzero when the device was not matched */
};
/* prototypes */
static device_probe_t umass_probe;
static device_attach_t umass_attach;
static device_detach_t umass_detach;
static usb_callback_t umass_tr_error;
static usb_callback_t umass_t_bbb_reset1_callback;
static usb_callback_t umass_t_bbb_reset2_callback;
static usb_callback_t umass_t_bbb_reset3_callback;
static usb_callback_t umass_t_bbb_command_callback;
static usb_callback_t umass_t_bbb_data_read_callback;
/* USB transfer callbacks for the Bulk-Only (BBB) transport state machine. */
static usb_callback_t umass_t_bbb_data_rd_cs_callback;
static usb_callback_t umass_t_bbb_data_write_callback;
static usb_callback_t umass_t_bbb_data_wr_cs_callback;
static usb_callback_t umass_t_bbb_status_callback;
/* USB transfer callbacks for the Control/Bulk/Interrupt (CBI) transport. */
static usb_callback_t umass_t_cbi_reset1_callback;
static usb_callback_t umass_t_cbi_reset2_callback;
static usb_callback_t umass_t_cbi_reset3_callback;
static usb_callback_t umass_t_cbi_reset4_callback;
static usb_callback_t umass_t_cbi_command_callback;
static usb_callback_t umass_t_cbi_data_read_callback;
static usb_callback_t umass_t_cbi_data_rd_cs_callback;
static usb_callback_t umass_t_cbi_data_write_callback;
static usb_callback_t umass_t_cbi_data_wr_cs_callback;
static usb_callback_t umass_t_cbi_status_callback;
/* Driver-internal helpers. */
static void umass_cancel_ccb(struct umass_softc *);
static void umass_init_shuttle(struct umass_softc *);
static void umass_reset(struct umass_softc *);
static void umass_t_bbb_data_clear_stall_callback(struct usb_xfer *,
    uint8_t, uint8_t, usb_error_t);
static void umass_command_start(struct umass_softc *, uint8_t, void *,
    uint32_t, uint32_t, umass_callback_t *, union ccb *);
static uint8_t umass_bbb_get_max_lun(struct umass_softc *);
static void umass_cbi_start_status(struct umass_softc *);
static void umass_t_cbi_data_clear_stall_callback(struct usb_xfer *,
    uint8_t, uint8_t, usb_error_t);
/* CAM SIM glue. */
static int umass_cam_attach_sim(struct umass_softc *);
static void umass_cam_attach(struct umass_softc *);
static void umass_cam_detach_sim(struct umass_softc *);
static void umass_cam_action(struct cam_sim *, union ccb *);
static void umass_cam_poll(struct cam_sim *);
static void umass_cam_cb(struct umass_softc *, union ccb *, uint32_t,
    uint8_t);
static void umass_cam_sense_cb(struct umass_softc *, union ccb *, uint32_t,
    uint8_t);
static void umass_cam_quirk_cb(struct umass_softc *, union ccb *, uint32_t,
    uint8_t);
/* Per-protocol CDB transform hooks (see umass_std_transform()). */
static uint8_t umass_scsi_transform(struct umass_softc *, uint8_t *, uint8_t);
static uint8_t umass_rbc_transform(struct umass_softc *, uint8_t *, uint8_t);
static uint8_t umass_ufi_transform(struct umass_softc *, uint8_t *, uint8_t);
static uint8_t umass_atapi_transform(struct umass_softc *, uint8_t *,
    uint8_t);
static uint8_t umass_no_transform(struct umass_softc *, uint8_t *, uint8_t);
static uint8_t umass_std_transform(struct umass_softc *, union ccb *, uint8_t
    *, uint8_t);
#ifdef USB_DEBUG
/* Debug-only dump helpers for CBWs, CSWs, CBI commands and raw buffers. */
static void umass_bbb_dump_cbw(struct umass_softc *, umass_bbb_cbw_t *);
static void umass_bbb_dump_csw(struct umass_softc *, umass_bbb_csw_t *);
static void umass_cbi_dump_cmd(struct umass_softc *, void *, uint8_t);
static void umass_dump_buffer(struct umass_softc *, uint8_t *, uint32_t,
    uint32_t);
#endif
/*
 * USB transfer configuration for the Bulk-Only (BBB) transport.
 *
 * The array is indexed by the UMASS_T_BBB_* state machine states.  The
 * three reset transfers implement the Reset Recovery sequence; COMMAND,
 * DATA_READ/DATA_WRITE and STATUS implement the CBW/data/CSW phases.
 * The DATA_RD_CS/DATA_WR_CS entries clear a stalled bulk endpoint.
 * Data transfer timeouts are overwritten per command at runtime.
 */
static struct usb_config umass_bbb_config[UMASS_T_BBB_MAX] = {
	[UMASS_T_BBB_RESET1] = {
		.type = UE_CONTROL,
		.endpoint = 0x00,	/* Control pipe */
		.direction = UE_DIR_ANY,
		.bufsize = sizeof(struct usb_device_request),
		.callback = &umass_t_bbb_reset1_callback,
		.timeout = 5000,	/* 5 seconds */
		.interval = 500,	/* 500 milliseconds */
	},

	[UMASS_T_BBB_RESET2] = {
		.type = UE_CONTROL,
		.endpoint = 0x00,	/* Control pipe */
		.direction = UE_DIR_ANY,
		.bufsize = sizeof(struct usb_device_request),
		.callback = &umass_t_bbb_reset2_callback,
		.timeout = 5000,	/* 5 seconds */
		.interval = 50,	/* 50 milliseconds */
	},

	[UMASS_T_BBB_RESET3] = {
		.type = UE_CONTROL,
		.endpoint = 0x00,	/* Control pipe */
		.direction = UE_DIR_ANY,
		.bufsize = sizeof(struct usb_device_request),
		.callback = &umass_t_bbb_reset3_callback,
		.timeout = 5000,	/* 5 seconds */
		.interval = 50,	/* 50 milliseconds */
	},

	[UMASS_T_BBB_COMMAND] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.bufsize = sizeof(umass_bbb_cbw_t),
		.callback = &umass_t_bbb_command_callback,
		.timeout = 5000,	/* 5 seconds */
	},

	[UMASS_T_BBB_DATA_READ] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = UMASS_BULK_SIZE,
		.flags = {.proxy_buffer = 1,.short_xfer_ok = 1,.ext_buffer=1,},
		.callback = &umass_t_bbb_data_read_callback,
		.timeout = 0,	/* overwritten later */
	},

	[UMASS_T_BBB_DATA_RD_CS] = {
		.type = UE_CONTROL,
		.endpoint = 0x00,	/* Control pipe */
		.direction = UE_DIR_ANY,
		.bufsize = sizeof(struct usb_device_request),
		.callback = &umass_t_bbb_data_rd_cs_callback,
		.timeout = 5000,	/* 5 seconds */
	},

	[UMASS_T_BBB_DATA_WRITE] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.bufsize = UMASS_BULK_SIZE,
		.flags = {.proxy_buffer = 1,.short_xfer_ok = 1,.ext_buffer=1,},
		.callback = &umass_t_bbb_data_write_callback,
		.timeout = 0,	/* overwritten later */
	},

	[UMASS_T_BBB_DATA_WR_CS] = {
		.type = UE_CONTROL,
		.endpoint = 0x00,	/* Control pipe */
		.direction = UE_DIR_ANY,
		.bufsize = sizeof(struct usb_device_request),
		.callback = &umass_t_bbb_data_wr_cs_callback,
		.timeout = 5000,	/* 5 seconds */
	},

	[UMASS_T_BBB_STATUS] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = sizeof(umass_bbb_csw_t),
		.flags = {.short_xfer_ok = 1,},
		.callback = &umass_t_bbb_status_callback,
		.timeout = 5000,	/* ms */
	},
};
/*
 * USB transfer configuration for the Control/Bulk/Interrupt (CBI)
 * transport.  Indexed by the UMASS_T_CBI_* state machine states.
 * Commands go over the control pipe (ADSC), data over the bulk pipes,
 * and completion status over an optional interrupt pipe (CBI with CCI);
 * hence .no_pipe_ok on the STATUS entry and the extra RESET4 step that
 * clears a stalled interrupt pipe.
 */
static struct usb_config umass_cbi_config[UMASS_T_CBI_MAX] = {
	[UMASS_T_CBI_RESET1] = {
		.type = UE_CONTROL,
		.endpoint = 0x00,	/* Control pipe */
		.direction = UE_DIR_ANY,
		.bufsize = (sizeof(struct usb_device_request) +
		    UMASS_CBI_DIAGNOSTIC_CMDLEN),
		.callback = &umass_t_cbi_reset1_callback,
		.timeout = 5000,	/* 5 seconds */
		.interval = 500,	/* 500 milliseconds */
	},

	[UMASS_T_CBI_RESET2] = {
		.type = UE_CONTROL,
		.endpoint = 0x00,	/* Control pipe */
		.direction = UE_DIR_ANY,
		.bufsize = sizeof(struct usb_device_request),
		.callback = &umass_t_cbi_reset2_callback,
		.timeout = 5000,	/* 5 seconds */
		.interval = 50,	/* 50 milliseconds */
	},

	[UMASS_T_CBI_RESET3] = {
		.type = UE_CONTROL,
		.endpoint = 0x00,	/* Control pipe */
		.direction = UE_DIR_ANY,
		.bufsize = sizeof(struct usb_device_request),
		.callback = &umass_t_cbi_reset3_callback,
		.timeout = 5000,	/* 5 seconds */
		.interval = 50,	/* 50 milliseconds */
	},

	[UMASS_T_CBI_COMMAND] = {
		.type = UE_CONTROL,
		.endpoint = 0x00,	/* Control pipe */
		.direction = UE_DIR_ANY,
		.bufsize = (sizeof(struct usb_device_request) +
		    UMASS_MAX_CMDLEN),
		.callback = &umass_t_cbi_command_callback,
		.timeout = 5000,	/* 5 seconds */
	},

	[UMASS_T_CBI_DATA_READ] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = UMASS_BULK_SIZE,
		.flags = {.proxy_buffer = 1,.short_xfer_ok = 1,.ext_buffer=1,},
		.callback = &umass_t_cbi_data_read_callback,
		.timeout = 0,	/* overwritten later */
	},

	[UMASS_T_CBI_DATA_RD_CS] = {
		.type = UE_CONTROL,
		.endpoint = 0x00,	/* Control pipe */
		.direction = UE_DIR_ANY,
		.bufsize = sizeof(struct usb_device_request),
		.callback = &umass_t_cbi_data_rd_cs_callback,
		.timeout = 5000,	/* 5 seconds */
	},

	[UMASS_T_CBI_DATA_WRITE] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.bufsize = UMASS_BULK_SIZE,
		.flags = {.proxy_buffer = 1,.short_xfer_ok = 1,.ext_buffer=1,},
		.callback = &umass_t_cbi_data_write_callback,
		.timeout = 0,	/* overwritten later */
	},

	[UMASS_T_CBI_DATA_WR_CS] = {
		.type = UE_CONTROL,
		.endpoint = 0x00,	/* Control pipe */
		.direction = UE_DIR_ANY,
		.bufsize = sizeof(struct usb_device_request),
		.callback = &umass_t_cbi_data_wr_cs_callback,
		.timeout = 5000,	/* 5 seconds */
	},

	[UMASS_T_CBI_STATUS] = {
		.type = UE_INTERRUPT,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.flags = {.short_xfer_ok = 1,.no_pipe_ok = 1,},
		.bufsize = sizeof(umass_cbi_sbl_t),
		.callback = &umass_t_cbi_status_callback,
		.timeout = 5000,	/* ms */
	},

	[UMASS_T_CBI_RESET4] = {
		.type = UE_CONTROL,
		.endpoint = 0x00,	/* Control pipe */
		.direction = UE_DIR_ANY,
		.bufsize = sizeof(struct usb_device_request),
		.callback = &umass_t_cbi_reset4_callback,
		.timeout = 5000,	/* ms */
	},
};
/* If device cannot return valid inquiry data, fake it */
static const uint8_t fake_inq_data[SHORT_INQUIRY_LENGTH] = {
	0,				/* removable */
	0x80, SCSI_REV_2, SCSI_REV_2,
	/* additional_length */ 31, 0, 0, 0
};

#define	UFI_COMMAND_LENGTH	12	/* UFI commands are always 12 bytes */
#define	ATAPI_COMMAND_LENGTH	12	/* ATAPI commands are always 12 bytes */

/* newbus plumbing: devclass, method table and driver declaration */
static devclass_t umass_devclass;

static device_method_t umass_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, umass_probe),
	DEVMETHOD(device_attach, umass_attach),
	DEVMETHOD(device_detach, umass_detach),

	DEVMETHOD_END
};

static driver_t umass_driver = {
	.name = "umass",
	.methods = umass_methods,
	.size = sizeof(struct umass_softc),
};

DRIVER_MODULE(umass, uhub, umass_driver, umass_devclass, NULL, NULL);
MODULE_DEPEND(umass, usb, 1, 1, 1);
MODULE_DEPEND(umass, cam, 1, 1, 1);
MODULE_VERSION(umass, 1);

/*
 * USB device probe/attach/detach
 */

/* Host ID table used by tools to match this driver to the MSC class. */
static const STRUCT_USB_HOST_ID __used umass_devs[] = {
	/* generic mass storage class */
	{USB_IFACE_CLASS(UICLASS_MASS),},
};
/*
 * umass_get_proto - derive the command and wire protocol bits from the
 * USB interface descriptor of a standards compliant device.
 *
 * Returns a bitmask combining one UMASS_PROTO_* command protocol and
 * one UMASS_PROTO_* wire protocol.  If the interface is not a
 * recognized mass storage interface, or only the command set could be
 * identified, the corresponding bits are left clear.
 */
static uint16_t
umass_get_proto(struct usb_interface *iface)
{
	struct usb_interface_descriptor *id;
	uint16_t retval = 0;

	/* Only standards compliant mass storage interfaces qualify. */
	id = usbd_get_interface_descriptor(iface);
	if (id == NULL || id->bInterfaceClass != UICLASS_MASS)
		return (0);

	/* Map the interface subclass onto a command protocol. */
	if (id->bInterfaceSubClass == UISUBCLASS_SCSI)
		retval |= UMASS_PROTO_SCSI;
	else if (id->bInterfaceSubClass == UISUBCLASS_UFI)
		retval |= UMASS_PROTO_UFI;
	else if (id->bInterfaceSubClass == UISUBCLASS_RBC)
		retval |= UMASS_PROTO_RBC;
	else if (id->bInterfaceSubClass == UISUBCLASS_SFF8020I ||
	    id->bInterfaceSubClass == UISUBCLASS_SFF8070I)
		retval |= UMASS_PROTO_ATAPI;
	else
		return (retval);	/* unknown command set */

	/* Map the interface protocol onto a wire protocol. */
	if (id->bInterfaceProtocol == UIPROTO_MASS_CBI)
		retval |= UMASS_PROTO_CBI;
	else if (id->bInterfaceProtocol == UIPROTO_MASS_CBI_I)
		retval |= UMASS_PROTO_CBI_I;
	else if (id->bInterfaceProtocol == UIPROTO_MASS_BBB_OLD ||
	    id->bInterfaceProtocol == UIPROTO_MASS_BBB)
		retval |= UMASS_PROTO_BBB;
	/* else: unknown wire protocol; leave the wire bits clear */

	return (retval);
}
/*
 * Match the device we are seeing with the devices supported.
 *
 * Determines the wire/command protocol from the interface descriptor,
 * applies any protocol-forcing USB quirks, and collects the remaining
 * behavioral quirks.  Returns the protocol bits, quirk bits, and a
 * probe error code (ENXIO when no usable protocol could be found).
 */
static struct umass_probe_proto
umass_probe_proto(device_t dev, struct usb_attach_arg *uaa)
{
	/* Table translating USB quirk identifiers to driver quirk bits. */
	static const struct {
		uint16_t uq;	/* usb_test_quirk() identifier */
		uint32_t flag;	/* corresponding umass quirk bit */
	} quirk_map[] = {
		{UQ_MSC_NO_TEST_UNIT_READY, NO_TEST_UNIT_READY},
		{UQ_MSC_NO_RS_CLEAR_UA, RS_NO_CLEAR_UA},
		{UQ_MSC_NO_START_STOP, NO_START_STOP},
		{UQ_MSC_NO_GETMAXLUN, NO_GETMAXLUN},
		{UQ_MSC_NO_INQUIRY, NO_INQUIRY},
		{UQ_MSC_NO_INQUIRY_EVPD, NO_INQUIRY_EVPD},
		{UQ_MSC_NO_PREVENT_ALLOW, NO_PREVENT_ALLOW},
		{UQ_MSC_NO_SYNC_CACHE, NO_SYNCHRONIZE_CACHE},
		{UQ_MSC_SHUTTLE_INIT, SHUTTLE_INIT},
		{UQ_MSC_ALT_IFACE_1, ALT_IFACE_1},
		{UQ_MSC_FLOPPY_SPEED, FLOPPY_SPEED},
		{UQ_MSC_IGNORE_RESIDUE, IGNORE_RESIDUE},
		{UQ_MSC_WRONG_CSWSIG, WRONG_CSWSIG},
		{UQ_MSC_RBC_PAD_TO_12, RBC_PAD_TO_12},
		{UQ_MSC_READ_CAP_OFFBY1, READ_CAPACITY_OFFBY1},
		{UQ_MSC_FORCE_SHORT_INQ, FORCE_SHORT_INQUIRY},
	};
	struct umass_probe_proto ret;
	uint32_t proto;
	size_t i;

	memset(&ret, 0, sizeof(ret));
	ret.error = BUS_PROBE_GENERIC;

	proto = umass_get_proto(uaa->iface);

	/* Quirks may force a specific wire protocol ... */
	if (usb_test_quirk(uaa, UQ_MSC_FORCE_WIRE_BBB)) {
		proto &= ~UMASS_PROTO_WIRE;
		proto |= UMASS_PROTO_BBB;
	} else if (usb_test_quirk(uaa, UQ_MSC_FORCE_WIRE_CBI)) {
		proto &= ~UMASS_PROTO_WIRE;
		proto |= UMASS_PROTO_CBI;
	} else if (usb_test_quirk(uaa, UQ_MSC_FORCE_WIRE_CBI_I)) {
		proto &= ~UMASS_PROTO_WIRE;
		proto |= UMASS_PROTO_CBI_I;
	}

	/* ... and/or a specific command protocol. */
	if (usb_test_quirk(uaa, UQ_MSC_FORCE_PROTO_SCSI)) {
		proto &= ~UMASS_PROTO_COMMAND;
		proto |= UMASS_PROTO_SCSI;
	} else if (usb_test_quirk(uaa, UQ_MSC_FORCE_PROTO_ATAPI)) {
		proto &= ~UMASS_PROTO_COMMAND;
		proto |= UMASS_PROTO_ATAPI;
	} else if (usb_test_quirk(uaa, UQ_MSC_FORCE_PROTO_UFI)) {
		proto &= ~UMASS_PROTO_COMMAND;
		proto |= UMASS_PROTO_UFI;
	} else if (usb_test_quirk(uaa, UQ_MSC_FORCE_PROTO_RBC)) {
		proto &= ~UMASS_PROTO_COMMAND;
		proto |= UMASS_PROTO_RBC;
	}

	/* Reject devices lacking either a command or a wire protocol. */
	if (((proto & UMASS_PROTO_COMMAND) == 0) ||
	    ((proto & UMASS_PROTO_WIRE) == 0)) {
		ret.error = ENXIO;
		ret.quirks = NO_QUIRKS;
		ret.proto = proto;
		return (ret);
	}

	/* Collect the remaining behavioral quirks. */
	ret.quirks = NO_QUIRKS;
	for (i = 0; i != sizeof(quirk_map) / sizeof(quirk_map[0]); i++) {
		if (usb_test_quirk(uaa, quirk_map[i].uq))
			ret.quirks |= quirk_map[i].flag;
	}
	ret.proto = proto;
	return (ret);
}
/*
 * umass_probe - newbus probe method.
 *
 * Only host-mode devices are eligible; the actual matching is done by
 * umass_probe_proto(), whose error field becomes the probe result.
 */
static int
umass_probe(device_t dev)
{
	struct usb_attach_arg *uaa = device_get_ivars(dev);

	if (uaa->usb_mode != USB_MODE_HOST)
		return (ENXIO);

	return (umass_probe_proto(dev, uaa).error);
}
/*
 * umass_attach - newbus attach method.
 *
 * Initializes the softc, allocates the USB transfers for the detected
 * wire protocol (BBB or CBI/CBI-I), selects the CDB transform routine
 * for the command protocol, queries the maximum LUN, and finally
 * registers and scans the CAM SIM.  Any failure jumps to "detach",
 * which undoes whatever was set up so far.
 */
static int
umass_attach(device_t dev)
{
	struct umass_softc *sc = device_get_softc(dev);
	struct usb_attach_arg *uaa = device_get_ivars(dev);
	struct umass_probe_proto temp = umass_probe_proto(dev, uaa);
	struct usb_interface_descriptor *id;
	int err;

	/*
	 * NOTE: the softc struct is cleared in device_set_driver.
	 * We can safely call umass_detach without specifically
	 * initializing the struct.
	 */

	sc->sc_dev = dev;
	sc->sc_udev = uaa->device;
	sc->sc_proto = temp.proto;
	sc->sc_quirks = temp.quirks;
	sc->sc_unit = device_get_unit(dev);

	ksnprintf(sc->sc_name, sizeof(sc->sc_name),
	    "%s", device_get_nameunit(dev));

	device_set_usb_desc(dev);

	/* The lock protects the transfer state machine and CCB handoff. */
	lockinit(&sc->sc_lock, device_get_nameunit(dev), 0, LK_CANRECURSE);

	/* get interface index */

	id = usbd_get_interface_descriptor(uaa->iface);
	if (id == NULL) {
		device_printf(dev, "failed to get "
		    "interface number\n");
		goto detach;
	}
	sc->sc_iface_no = id->bInterfaceNumber;

#ifdef USB_DEBUG
	/* Announce the detected command/wire protocol combination. */
	device_printf(dev, " ");

	switch (sc->sc_proto & UMASS_PROTO_COMMAND) {
	case UMASS_PROTO_SCSI:
		kprintf("SCSI");
		break;
	case UMASS_PROTO_ATAPI:
		kprintf("8070i (ATAPI)");
		break;
	case UMASS_PROTO_UFI:
		kprintf("UFI");
		break;
	case UMASS_PROTO_RBC:
		kprintf("RBC");
		break;
	default:
		kprintf("(unknown 0x%02x)",
		    sc->sc_proto & UMASS_PROTO_COMMAND);
		break;
	}

	kprintf(" over ");

	switch (sc->sc_proto & UMASS_PROTO_WIRE) {
	case UMASS_PROTO_BBB:
		kprintf("Bulk-Only");
		break;
	case UMASS_PROTO_CBI:		/* uses Command/Bulk pipes */
		kprintf("CBI");
		break;
	case UMASS_PROTO_CBI_I:	/* uses Command/Bulk/Interrupt pipes */
		kprintf("CBI with CCI");
		break;
	default:
		kprintf("(unknown 0x%02x)",
		    sc->sc_proto & UMASS_PROTO_WIRE);
	}

	kprintf("; quirks = 0x%04x\n", sc->sc_quirks);
#endif

	/* Some devices expose the MSC interface on alternate setting 1. */
	if (sc->sc_quirks & ALT_IFACE_1) {
		err = usbd_set_alt_interface_index
		    (uaa->device, uaa->info.bIfaceIndex, 1);

		if (err) {
			DPRINTF(sc, UDMASS_USB, "could not switch to "
			    "Alt Interface 1\n");
			goto detach;
		}
	}
	/* allocate all required USB transfers */

	if (sc->sc_proto & UMASS_PROTO_BBB) {
		err = usbd_transfer_setup(uaa->device,
		    &uaa->info.bIfaceIndex, sc->sc_xfer, umass_bbb_config,
		    UMASS_T_BBB_MAX, sc, &sc->sc_lock);

		/* skip reset first time */
		sc->sc_last_xfer_index = UMASS_T_BBB_COMMAND;

	} else if (sc->sc_proto & (UMASS_PROTO_CBI | UMASS_PROTO_CBI_I)) {
		err = usbd_transfer_setup(uaa->device,
		    &uaa->info.bIfaceIndex, sc->sc_xfer, umass_cbi_config,
		    UMASS_T_CBI_MAX, sc, &sc->sc_lock);

		/* skip reset first time */
		sc->sc_last_xfer_index = UMASS_T_CBI_COMMAND;

	} else {
		err = USB_ERR_INVAL;
	}

	if (err) {
		device_printf(dev, "could not setup required "
		    "transfers, %s\n", usbd_errstr(err));
		goto detach;
	}
#ifdef USB_DEBUG
	/* Optional debug knob: clamp all transfer intervals to throttle I/O. */
	if (umass_throttle > 0) {
		uint8_t x;
		int iv;

		iv = umass_throttle;

		if (iv < 1)
			iv = 1;
		else if (iv > 8000)
			iv = 8000;

		for (x = 0; x != UMASS_T_MAX; x++) {
			if (sc->sc_xfer[x] != NULL)
				usbd_xfer_set_interval(sc->sc_xfer[x], iv);
		}
	}
#endif
	/* Select the CDB transform matching the command protocol. */
	sc->sc_transform =
	    (sc->sc_proto & UMASS_PROTO_SCSI) ? &umass_scsi_transform :
	    (sc->sc_proto & UMASS_PROTO_UFI) ? &umass_ufi_transform :
	    (sc->sc_proto & UMASS_PROTO_ATAPI) ? &umass_atapi_transform :
	    (sc->sc_proto & UMASS_PROTO_RBC) ? &umass_rbc_transform :
	    &umass_no_transform;

	/* from here onwards the device can be used. */

	if (sc->sc_quirks & SHUTTLE_INIT) {
		umass_init_shuttle(sc);
	}
	/* get the maximum LUN supported by the device */

	if (((sc->sc_proto & UMASS_PROTO_WIRE) == UMASS_PROTO_BBB) &&
	    !(sc->sc_quirks & NO_GETMAXLUN))
		sc->sc_maxlun = umass_bbb_get_max_lun(sc);
	else
		sc->sc_maxlun = 0;

	/* Prepare the SCSI command block */
	sc->cam_scsi_sense.opcode = REQUEST_SENSE;
	sc->cam_scsi_test_unit_ready.opcode = TEST_UNIT_READY;

	/* register the SIM */
	err = umass_cam_attach_sim(sc);
	if (err) {
		goto detach;
	}
	/* scan the SIM */
	umass_cam_attach(sc);

	DPRINTF(sc, UDMASS_GEN, "Attach finished\n");

	return (0);			/* success */

detach:
	umass_detach(dev);
	return (ENXIO);			/* failure */
}
/*
 * umass_detach - newbus detach method.
 *
 * Tears down the transfer state machine first (outside the lock, as
 * required by usbd_transfer_unsetup), then cancels any outstanding CCB
 * and removes the CAM SIM under the softc lock before destroying it.
 * Also used by umass_attach() to unwind a partial attach.
 */
static int
umass_detach(device_t dev)
{
	struct umass_softc *sc = device_get_softc(dev);

	DPRINTF(sc, UDMASS_USB, "\n");

	/* teardown our statemachine */

	usbd_transfer_unsetup(sc->sc_xfer, UMASS_T_MAX);

	lockmgr(&sc->sc_lock, LK_EXCLUSIVE);

	/* cancel any leftover CCBs */
	umass_cancel_ccb(sc);

	umass_cam_detach_sim(sc);

	lockmgr(&sc->sc_lock, LK_RELEASE);
	lockuninit(&sc->sc_lock);

	return (0);			/* success */
}
/*
 * umass_init_shuttle - vendor-specific initialization for Shuttle
 * devices (SHUTTLE_INIT quirk).  Issues an undocumented vendor read
 * request; the result is only logged, and errors are ignored on
 * purpose (best effort).
 */
static void
umass_init_shuttle(struct umass_softc *sc)
{
	struct usb_device_request req;
	usb_error_t err;
	uint8_t status[2] = {0, 0};

	/*
	 * The Linux driver does this, but no one can tell us what the
	 * command does.
	 */
	req.bmRequestType = UT_READ_VENDOR_DEVICE;
	req.bRequest = 1;		/* XXX unknown command */
	USETW(req.wValue, 0);
	req.wIndex[0] = sc->sc_iface_no;
	req.wIndex[1] = 0;
	USETW(req.wLength, sizeof(status));
	err = usbd_do_request(sc->sc_udev, NULL, &req, &status);

	DPRINTF(sc, UDMASS_GEN, "Shuttle init returned 0x%02x%02x\n",
	    status[0], status[1]);
}
/*
* Generic functions to handle transfers
*/
/*
 * umass_transfer_start - start the USB transfer at "xfer_index" and
 * remember it as the most recently started one.  If that transfer was
 * never allocated, the current CCB is cancelled instead.
 */
static void
umass_transfer_start(struct umass_softc *sc, uint8_t xfer_index)
{
	struct usb_xfer *xfer = sc->sc_xfer[xfer_index];

	DPRINTF(sc, UDMASS_GEN, "transfer index = "
	    "%d\n", xfer_index);

	if (xfer == NULL) {
		/* No such pipe/transfer on this device; fail the CCB. */
		umass_cancel_ccb(sc);
		return;
	}
	sc->sc_last_xfer_index = xfer_index;
	usbd_transfer_start(xfer);
}
/*
 * umass_reset - abort the current transfer and start the protocol
 * reset sequence (transfer index 0 is the first reset state for both
 * the BBB and the CBI state machine).
 */
static void
umass_reset(struct umass_softc *sc)
{
	DPRINTF(sc, UDMASS_GEN, "resetting device\n");

	/*
	 * stop the last transfer, if not already stopped:
	 */
	usbd_transfer_stop(sc->sc_xfer[sc->sc_last_xfer_index]);
	umass_transfer_start(sc, 0);
}
/*
 * umass_cancel_ccb - complete the currently pending CCB, if any, with
 * STATUS_WIRE_FAILED and rewind the state machine to the first state.
 * Must be called with the softc lock held.
 */
static void
umass_cancel_ccb(struct umass_softc *sc)
{
	union ccb *ccb;

#if 0
	KKASSERT(lockstatus(&sc->sc_lock, curthread) != 0);
#endif

	/* Detach the CCB from the softc before invoking the callback. */
	ccb = sc->sc_transfer.ccb;
	sc->sc_transfer.ccb = NULL;
	sc->sc_last_xfer_index = 0;

	if (ccb) {
		/* Residue is whatever part of the data was not transferred. */
		(sc->sc_transfer.callback)
		    (sc, ccb, (sc->sc_transfer.data_len -
		    sc->sc_transfer.actlen), STATUS_WIRE_FAILED);
	}
}
/*
 * umass_tr_error - common transfer-error handler: log the error
 * (unless it is a deliberate cancellation) and fail the pending CCB.
 */
static void
umass_tr_error(struct usb_xfer *xfer, usb_error_t error)
{
	struct umass_softc *sc = usbd_xfer_softc(xfer);

	if (error != USB_ERR_CANCELLED) {
		DPRINTF(sc, UDMASS_GEN, "transfer error, %s -> "
		    "reset\n", usbd_errstr(error));
	}
	umass_cancel_ccb(sc);
}
/*
* BBB protocol specific functions
*/
/*
 * umass_t_bbb_reset1_callback - first step of BBB Reset Recovery:
 * send the class-specific Bulk-Only Mass Storage Reset request, then
 * chain to RESET2 (clear Bulk-In stall) on completion.
 */
static void
umass_t_bbb_reset1_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct umass_softc *sc = usbd_xfer_softc(xfer);
	struct usb_device_request req;
	struct usb_page_cache *pc;

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		umass_transfer_start(sc, UMASS_T_BBB_RESET2);
		return;

	case USB_ST_SETUP:
		/*
		 * Reset recovery (5.3.4 in Universal Serial Bus Mass Storage Class)
		 *
		 * For Reset Recovery the host shall issue in the following order:
		 * a) a Bulk-Only Mass Storage Reset
		 * b) a Clear Feature HALT to the Bulk-In endpoint
		 * c) a Clear Feature HALT to the Bulk-Out endpoint
		 *
		 * This is done in 3 steps, using 3 transfers:
		 * UMASS_T_BBB_RESET1
		 * UMASS_T_BBB_RESET2
		 * UMASS_T_BBB_RESET3
		 */

		DPRINTF(sc, UDMASS_BBB, "BBB reset!\n");

		req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
		req.bRequest = UR_BBB_RESET;	/* bulk only reset */
		USETW(req.wValue, 0);
		req.wIndex[0] = sc->sc_iface_no;
		req.wIndex[1] = 0;
		USETW(req.wLength, 0);

		pc = usbd_xfer_get_frame(xfer, 0);
		usbd_copy_in(pc, 0, &req, sizeof(req));

		usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
		usbd_xfer_set_frames(xfer, 1);
		usbd_transfer_submit(xfer);
		return;

	default:			/* Error */
		umass_tr_error(xfer, error);
		return;
	}
}
/* Reset Recovery step 2: clear a stalled Bulk-In pipe, then RESET3. */
static void
umass_t_bbb_reset2_callback(struct usb_xfer *xfer, usb_error_t error)
{
	umass_t_bbb_data_clear_stall_callback(xfer, UMASS_T_BBB_RESET3,
	    UMASS_T_BBB_DATA_READ, error);
}
/* Reset Recovery step 3: clear a stalled Bulk-Out pipe, then resume
 * normal operation at the COMMAND state. */
static void
umass_t_bbb_reset3_callback(struct usb_xfer *xfer, usb_error_t error)
{
	umass_t_bbb_data_clear_stall_callback(xfer, UMASS_T_BBB_COMMAND,
	    UMASS_T_BBB_DATA_WRITE, error);
}
/*
 * umass_t_bbb_data_clear_stall_callback - helper that clears an
 * endpoint stall on the transfer "stall_xfer" and then advances the
 * BBB state machine to "next_xfer".  If no clear-stall request is
 * actually needed, it advances immediately.
 */
static void
umass_t_bbb_data_clear_stall_callback(struct usb_xfer *xfer,
    uint8_t next_xfer, uint8_t stall_xfer, usb_error_t error)
{
	struct umass_softc *sc = usbd_xfer_softc(xfer);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
tr_transferred:
		umass_transfer_start(sc, next_xfer);
		return;

	case USB_ST_SETUP:
		/* Returns non-zero when no clear-stall is required. */
		if (usbd_clear_stall_callback(xfer, sc->sc_xfer[stall_xfer])) {
			goto tr_transferred;
		}
		return;

	default:			/* Error */
		umass_tr_error(xfer, error);
		return;
	}
}
/*
 * umass_t_bbb_command_callback - build and send the Command Block
 * Wrapper (CBW) for the current CCB, then advance to the data phase
 * (read or write) or directly to the status phase when there is no
 * data to move.
 */
static void
umass_t_bbb_command_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct umass_softc *sc = usbd_xfer_softc(xfer);
	union ccb *ccb = sc->sc_transfer.ccb;
	struct usb_page_cache *pc;
	uint32_t tag;

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		/* CBW accepted; move on to the data or status phase. */
		umass_transfer_start
		    (sc, ((sc->sc_transfer.dir == DIR_IN) ? UMASS_T_BBB_DATA_READ :
		    (sc->sc_transfer.dir == DIR_OUT) ? UMASS_T_BBB_DATA_WRITE :
		    UMASS_T_BBB_STATUS));
		return;

	case USB_ST_SETUP:

		sc->sc_status_try = 0;

		if (ccb) {
			/*
			 * the initial value is not important,
			 * as long as the values are unique:
			 */
			tag = UGETDW(sc->cbw.dCBWTag) + 1;

			USETDW(sc->cbw.dCBWSignature, CBWSIGNATURE);
			USETDW(sc->cbw.dCBWTag, tag);

			/*
			 * dCBWDataTransferLength:
			 * This field indicates the number of bytes of data that the host
			 * intends to transfer on the IN or OUT Bulk endpoint(as indicated by
			 * the Direction bit) during the execution of this command. If this
			 * field is set to 0, the device will expect that no data will be
			 * transferred IN or OUT during this command, regardless of the value
			 * of the Direction bit defined in dCBWFlags.
			 */
			USETDW(sc->cbw.dCBWDataTransferLength, sc->sc_transfer.data_len);

			/*
			 * dCBWFlags:
			 * The bits of the Flags field are defined as follows:
			 *     Bits 0-6  reserved
			 *     Bit  7    Direction - this bit shall be ignored if the
			 *               dCBWDataTransferLength field is zero.
			 *               0 = data Out from host to device
			 *               1 = data In from device to host
			 */
			sc->cbw.bCBWFlags = ((sc->sc_transfer.dir == DIR_IN) ?
			    CBWFLAGS_IN : CBWFLAGS_OUT);
			sc->cbw.bCBWLUN = sc->sc_transfer.lun;

			if (sc->sc_transfer.cmd_len > sizeof(sc->cbw.CBWCDB)) {
				sc->sc_transfer.cmd_len = sizeof(sc->cbw.CBWCDB);
				DPRINTF(sc, UDMASS_BBB, "Truncating long command!\n");
			}
			sc->cbw.bCDBLength = sc->sc_transfer.cmd_len;

			/* copy SCSI command data */
			memcpy(sc->cbw.CBWCDB, sc->sc_transfer.cmd_data,
			    sc->sc_transfer.cmd_len);

			/* clear remaining command area */
			memset(sc->cbw.CBWCDB +
			    sc->sc_transfer.cmd_len, 0,
			    sizeof(sc->cbw.CBWCDB) -
			    sc->sc_transfer.cmd_len);

			DIF(UDMASS_BBB, umass_bbb_dump_cbw(sc, &sc->cbw));

			pc = usbd_xfer_get_frame(xfer, 0);
			usbd_copy_in(pc, 0, &sc->cbw, sizeof(sc->cbw));
			usbd_xfer_set_frame_len(xfer, 0, sizeof(sc->cbw));

			usbd_transfer_submit(xfer);
		}
		/* NB: without a CCB nothing is submitted here. */
		return;

	default:			/* Error */
		umass_tr_error(xfer, error);
		return;
	}
}
/*
 * umass_t_bbb_data_read_callback - bulk-IN data phase.  Moves at most
 * usbd_xfer_max_len() bytes per round trip, accounting progress in
 * sc_transfer, and proceeds to the STATUS phase when the remainder
 * reaches zero (a short transfer ends the data phase early).  On a
 * transfer error (other than cancellation) the stalled pipe is
 * cleared via DATA_RD_CS before reading the CSW.
 */
static void
umass_t_bbb_data_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct umass_softc *sc = usbd_xfer_softc(xfer);
	uint32_t max_bulk = usbd_xfer_max_len(xfer);
	int actlen, sumlen;

	usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		sc->sc_transfer.data_rem -= actlen;
		sc->sc_transfer.data_ptr += actlen;
		sc->sc_transfer.actlen += actlen;

		if (actlen < sumlen) {
			/* short transfer */
			sc->sc_transfer.data_rem = 0;
		}
		/* FALLTHROUGH: submit the next chunk (or go to STATUS) */
	case USB_ST_SETUP:
		DPRINTF(sc, UDMASS_BBB, "max_bulk=%d, data_rem=%d\n",
		    max_bulk, sc->sc_transfer.data_rem);

		if (sc->sc_transfer.data_rem == 0) {
			umass_transfer_start(sc, UMASS_T_BBB_STATUS);
			return;
		}
		if (max_bulk > sc->sc_transfer.data_rem) {
			max_bulk = sc->sc_transfer.data_rem;
		}
		usbd_xfer_set_timeout(xfer, sc->sc_transfer.data_timeout);

		usbd_xfer_set_frame_data(xfer, 0, sc->sc_transfer.data_ptr,
		    max_bulk);

		usbd_transfer_submit(xfer);
		return;

	default:			/* Error */
		if (error == USB_ERR_CANCELLED) {
			umass_tr_error(xfer, error);
		} else {
			umass_transfer_start(sc, UMASS_T_BBB_DATA_RD_CS);
		}
		return;
	}
}
/* Clear a stalled Bulk-In pipe after a read error, then fetch the CSW. */
static void
umass_t_bbb_data_rd_cs_callback(struct usb_xfer *xfer, usb_error_t error)
{
	umass_t_bbb_data_clear_stall_callback(xfer, UMASS_T_BBB_STATUS,
	    UMASS_T_BBB_DATA_READ, error);
}
/*
 * umass_t_bbb_data_write_callback - bulk-OUT data phase; mirror image
 * of the read callback.  Chunks the outgoing data, advances to the
 * STATUS phase when done, and clears a stalled pipe via DATA_WR_CS on
 * error (except cancellation).
 */
static void
umass_t_bbb_data_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct umass_softc *sc = usbd_xfer_softc(xfer);
	uint32_t max_bulk = usbd_xfer_max_len(xfer);
	int actlen, sumlen;

	usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		sc->sc_transfer.data_rem -= actlen;
		sc->sc_transfer.data_ptr += actlen;
		sc->sc_transfer.actlen += actlen;

		if (actlen < sumlen) {
			/* short transfer */
			sc->sc_transfer.data_rem = 0;
		}
		/* FALLTHROUGH: submit the next chunk (or go to STATUS) */
	case USB_ST_SETUP:
		DPRINTF(sc, UDMASS_BBB, "max_bulk=%d, data_rem=%d\n",
		    max_bulk, sc->sc_transfer.data_rem);

		if (sc->sc_transfer.data_rem == 0) {
			umass_transfer_start(sc, UMASS_T_BBB_STATUS);
			return;
		}
		if (max_bulk > sc->sc_transfer.data_rem) {
			max_bulk = sc->sc_transfer.data_rem;
		}
		usbd_xfer_set_timeout(xfer, sc->sc_transfer.data_timeout);

		usbd_xfer_set_frame_data(xfer, 0, sc->sc_transfer.data_ptr,
		    max_bulk);

		usbd_transfer_submit(xfer);
		return;

	default:			/* Error */
		if (error == USB_ERR_CANCELLED) {
			umass_tr_error(xfer, error);
		} else {
			umass_transfer_start(sc, UMASS_T_BBB_DATA_WR_CS);
		}
		return;
	}
}
/* Clear a stalled Bulk-Out pipe after a write error, then fetch the CSW. */
static void
umass_t_bbb_data_wr_cs_callback(struct usb_xfer *xfer, usb_error_t error)
{
	umass_t_bbb_data_clear_stall_callback(xfer, UMASS_T_BBB_STATUS,
	    UMASS_T_BBB_DATA_WRITE, error);
}
/*
 * umass_t_bbb_status_callback - read and validate the Command Status
 * Wrapper (CSW).  Checks signature, tag, status code and residue; a
 * malformed CSW triggers one retry via DATA_RD_CS (clear stall and
 * re-read) and, failing that, a CCB cancellation.  A valid CSW
 * completes the CCB with STATUS_CMD_OK or STATUS_CMD_FAILED.
 */
static void
umass_t_bbb_status_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct umass_softc *sc = usbd_xfer_softc(xfer);
	union ccb *ccb = sc->sc_transfer.ccb;
	struct usb_page_cache *pc;
	uint32_t residue;
	int actlen;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:

		/*
		 * Do a full reset if there is something wrong with the CSW:
		 */
		sc->sc_status_try = 1;

		/* Zero missing parts of the CSW: */

		if (actlen < (int)sizeof(sc->csw))
			memset(&sc->csw, 0, sizeof(sc->csw));

		pc = usbd_xfer_get_frame(xfer, 0);
		usbd_copy_out(pc, 0, &sc->csw, actlen);

		DIF(UDMASS_BBB, umass_bbb_dump_csw(sc, &sc->csw));

		/*
		 * Fall back to the locally-accounted residue when the
		 * device reports none or is known to report garbage.
		 */
		residue = UGETDW(sc->csw.dCSWDataResidue);

		if ((!residue) || (sc->sc_quirks & IGNORE_RESIDUE)) {
			residue = (sc->sc_transfer.data_len -
			    sc->sc_transfer.actlen);
		}
		if (residue > sc->sc_transfer.data_len) {
			DPRINTF(sc, UDMASS_BBB, "truncating residue from %d "
			    "to %d bytes\n", residue, sc->sc_transfer.data_len);
			residue = sc->sc_transfer.data_len;
		}
		/* translate weird command-status signatures: */
		if (sc->sc_quirks & WRONG_CSWSIG) {
			uint32_t temp = UGETDW(sc->csw.dCSWSignature);

			if ((temp == CSWSIGNATURE_OLYMPUS_C1) ||
			    (temp == CSWSIGNATURE_IMAGINATION_DBX1)) {
				USETDW(sc->csw.dCSWSignature, CSWSIGNATURE);
			}
		}
		/* check CSW and handle eventual error */
		if (UGETDW(sc->csw.dCSWSignature) != CSWSIGNATURE) {
			DPRINTF(sc, UDMASS_BBB, "bad CSW signature 0x%08x != 0x%08x\n",
			    UGETDW(sc->csw.dCSWSignature), CSWSIGNATURE);
			/*
			 * Invalid CSW: Wrong signature or wrong tag might
			 * indicate that we lost synchronization. Reset the
			 * device.
			 */
			goto tr_error;
		} else if (UGETDW(sc->csw.dCSWTag) != UGETDW(sc->cbw.dCBWTag)) {
			DPRINTF(sc, UDMASS_BBB, "Invalid CSW: tag 0x%08x should be "
			    "0x%08x\n", UGETDW(sc->csw.dCSWTag),
			    UGETDW(sc->cbw.dCBWTag));
			goto tr_error;
		} else if (sc->csw.bCSWStatus > CSWSTATUS_PHASE) {
			DPRINTF(sc, UDMASS_BBB, "Invalid CSW: status %d > %d\n",
			    sc->csw.bCSWStatus, CSWSTATUS_PHASE);
			goto tr_error;
		} else if (sc->csw.bCSWStatus == CSWSTATUS_PHASE) {
			DPRINTF(sc, UDMASS_BBB, "Phase error, residue = "
			    "%d\n", residue);
			goto tr_error;
		} else if (sc->sc_transfer.actlen > sc->sc_transfer.data_len) {
			DPRINTF(sc, UDMASS_BBB, "Buffer overrun %d > %d\n",
			    sc->sc_transfer.actlen, sc->sc_transfer.data_len);
			goto tr_error;
		} else if (sc->csw.bCSWStatus == CSWSTATUS_FAILED) {
			DPRINTF(sc, UDMASS_BBB, "Command failed, residue = "
			    "%d\n", residue);

			sc->sc_transfer.ccb = NULL;

			sc->sc_last_xfer_index = UMASS_T_BBB_COMMAND;

			(sc->sc_transfer.callback)
			    (sc, ccb, residue, STATUS_CMD_FAILED);
		} else {
			/* Command succeeded; hand the CCB back. */
			sc->sc_transfer.ccb = NULL;

			sc->sc_last_xfer_index = UMASS_T_BBB_COMMAND;

			(sc->sc_transfer.callback)
			    (sc, ccb, residue, STATUS_CMD_OK);
		}
		return;

	case USB_ST_SETUP:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		return;

	default:
tr_error:
		DPRINTF(sc, UDMASS_BBB, "Failed to read CSW: %s, try %d\n",
		    usbd_errstr(error), sc->sc_status_try);

		/* Retry the CSW read once; after that, give up on the CCB. */
		if ((error == USB_ERR_CANCELLED) ||
		    (sc->sc_status_try)) {
			umass_tr_error(xfer, error);
		} else {
			sc->sc_status_try = 1;
			umass_transfer_start(sc, UMASS_T_BBB_DATA_RD_CS);
		}
		return;
	}
}
/*
 * umass_command_start - arm the transfer bookkeeping for one CCB and
 * kick the state machine at its current (last) transfer index.
 *
 * "dir" is only honored when there is data to move; data_timeout is
 * padded by the driver-wide UMASS_TIMEOUT slack.  The caller must
 * already have filled in sc_transfer.cmd_data/cmd_len.
 */
static void
umass_command_start(struct umass_softc *sc, uint8_t dir,
    void *data_ptr, uint32_t data_len,
    uint32_t data_timeout, umass_callback_t *callback,
    union ccb *ccb)
{
	sc->sc_transfer.lun = ccb->ccb_h.target_lun;

	/*
	 * NOTE: assumes that "sc->sc_transfer.cmd_data" and
	 * "sc->sc_transfer.cmd_len" has been properly
	 * initialized.
	 */

	sc->sc_transfer.dir = data_len ? dir : DIR_NONE;
	sc->sc_transfer.data_ptr = data_ptr;
	sc->sc_transfer.data_len = data_len;
	sc->sc_transfer.data_rem = data_len;
	sc->sc_transfer.data_timeout = (data_timeout + UMASS_TIMEOUT);

	sc->sc_transfer.actlen = 0;
	sc->sc_transfer.callback = callback;
	sc->sc_transfer.ccb = ccb;

	if (sc->sc_xfer[sc->sc_last_xfer_index]) {
		usbd_transfer_start(sc->sc_xfer[sc->sc_last_xfer_index]);
	} else {
		/* Transfer was never allocated; fail the CCB right away. */
		umass_cancel_ccb(sc);
	}
}
/*
 * umass_bbb_get_max_lun - issue the class-specific Get Max Lun
 * control request.
 *
 * Returns the highest LUN number reported by the device, or zero when
 * the device does not implement the request.
 */
static uint8_t
umass_bbb_get_max_lun(struct umass_softc *sc)
{
	struct usb_device_request req;
	usb_error_t err;
	uint8_t buf = 0;

	/* The Get Max Lun command is a class-specific request. */
	req.bmRequestType = UT_READ_CLASS_INTERFACE;
	req.bRequest = UR_BBB_GET_MAX_LUN;
	USETW(req.wValue, 0);
	req.wIndex[0] = sc->sc_iface_no;
	req.wIndex[1] = 0;
	USETW(req.wLength, 1);

	err = usbd_do_request(sc->sc_udev, NULL, &req, &buf);
	if (err == 0)
		return (buf);

	/* Device doesn't support Get Max Lun request. */
	kprintf("%s: Get Max Lun not supported (%s)\n",
	    sc->sc_name, usbd_errstr(err));
	return (0);
}
/*
* Command/Bulk/Interrupt (CBI) specific functions
*/
/*
 * umass_cbi_start_status - enter the CBI status phase.  With an
 * interrupt pipe (CBI with CCI) the completion interrupt is read;
 * without one, the command outcome cannot be known, so the CCB is
 * completed immediately with STATUS_CMD_UNKNOWN.
 */
static void
umass_cbi_start_status(struct umass_softc *sc)
{
	if (sc->sc_xfer[UMASS_T_CBI_STATUS]) {
		umass_transfer_start(sc, UMASS_T_CBI_STATUS);
	} else {
		union ccb *ccb = sc->sc_transfer.ccb;

		sc->sc_transfer.ccb = NULL;

		sc->sc_last_xfer_index = UMASS_T_CBI_COMMAND;

		(sc->sc_transfer.callback)
		    (sc, ccb, (sc->sc_transfer.data_len -
		    sc->sc_transfer.actlen), STATUS_CMD_UNKNOWN);
	}
}
/*
 * umass_t_cbi_reset1_callback - first step of the CBI Command Block
 * Reset: send a SEND DIAGNOSTIC command block over the control pipe
 * (ADSC) with the reset pattern, then chain to RESET2.  Errors other
 * than cancellation still continue with the stall-clearing steps.
 */
static void
umass_t_cbi_reset1_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct umass_softc *sc = usbd_xfer_softc(xfer);
	struct usb_device_request req;
	struct usb_page_cache *pc;
	uint8_t buf[UMASS_CBI_DIAGNOSTIC_CMDLEN];

	uint8_t i;

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		umass_transfer_start(sc, UMASS_T_CBI_RESET2);
		break;

	case USB_ST_SETUP:
		/*
		 * Command Block Reset Protocol
		 *
		 * First send a reset request to the device. Then clear
		 * any possibly stalled bulk endpoints.
		 *
		 * This is done in 3 steps, using 3 transfers:
		 * UMASS_T_CBI_RESET1
		 * UMASS_T_CBI_RESET2
		 * UMASS_T_CBI_RESET3
		 * UMASS_T_CBI_RESET4 (only if there is an interrupt endpoint)
		 */

		DPRINTF(sc, UDMASS_CBI, "CBI reset!\n");

		req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
		req.bRequest = UR_CBI_ADSC;
		USETW(req.wValue, 0);
		req.wIndex[0] = sc->sc_iface_no;
		req.wIndex[1] = 0;
		USETW(req.wLength, UMASS_CBI_DIAGNOSTIC_CMDLEN);

		/*
		 * The 0x1d code is the SEND DIAGNOSTIC command. To
		 * distinguish between the two, the last 10 bytes of the CBL
		 * is filled with 0xff (section 2.2 of the CBI
		 * specification)
		 */
		buf[0] = 0x1d;		/* Command Block Reset */
		buf[1] = 0x04;

		for (i = 2; i < UMASS_CBI_DIAGNOSTIC_CMDLEN; i++) {
			buf[i] = 0xff;
		}

		/* frame 0: control request, frame 1: command block */
		pc = usbd_xfer_get_frame(xfer, 0);
		usbd_copy_in(pc, 0, &req, sizeof(req));
		pc = usbd_xfer_get_frame(xfer, 1);
		usbd_copy_in(pc, 0, buf, sizeof(buf));

		usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
		usbd_xfer_set_frame_len(xfer, 1, sizeof(buf));
		usbd_xfer_set_frames(xfer, 2);
		usbd_transfer_submit(xfer);
		break;

	default:			/* Error */
		if (error == USB_ERR_CANCELLED)
			umass_tr_error(xfer, error);
		else
			umass_transfer_start(sc, UMASS_T_CBI_RESET2);
		break;
	}
}
/* CBI reset step 2: clear a stalled Bulk-In pipe, then RESET3. */
static void
umass_t_cbi_reset2_callback(struct usb_xfer *xfer, usb_error_t error)
{
	umass_t_cbi_data_clear_stall_callback(xfer, UMASS_T_CBI_RESET3,
	    UMASS_T_CBI_DATA_READ, error);
}
/*
 * CBI reset step 3: clear a stalled Bulk-Out pipe, then either clear
 * the interrupt pipe as well (RESET4, only when both the RESET4 and
 * STATUS transfers exist) or resume at the COMMAND state.
 */
static void
umass_t_cbi_reset3_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct umass_softc *sc = usbd_xfer_softc(xfer);

	umass_t_cbi_data_clear_stall_callback
	    (xfer, (sc->sc_xfer[UMASS_T_CBI_RESET4] &&
	    sc->sc_xfer[UMASS_T_CBI_STATUS]) ?
	    UMASS_T_CBI_RESET4 : UMASS_T_CBI_COMMAND,
	    UMASS_T_CBI_DATA_WRITE, error);
}
/* CBI reset step 4: clear a stalled interrupt pipe, then COMMAND. */
static void
umass_t_cbi_reset4_callback(struct usb_xfer *xfer, usb_error_t error)
{
	umass_t_cbi_data_clear_stall_callback(xfer, UMASS_T_CBI_COMMAND,
	    UMASS_T_CBI_STATUS, error);
}
/*
 * umass_t_cbi_data_clear_stall_callback - clear an endpoint stall on
 * "stall_xfer" and advance the CBI state machine to "next_xfer".
 * Advancing to the STATUS state goes through umass_cbi_start_status()
 * so that devices without an interrupt pipe are handled.
 */
static void
umass_t_cbi_data_clear_stall_callback(struct usb_xfer *xfer,
    uint8_t next_xfer, uint8_t stall_xfer, usb_error_t error)
{
	struct umass_softc *sc = usbd_xfer_softc(xfer);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
tr_transferred:
		if (next_xfer == UMASS_T_CBI_STATUS) {
			umass_cbi_start_status(sc);
		} else {
			umass_transfer_start(sc, next_xfer);
		}
		break;

	case USB_ST_SETUP:
		if (usbd_clear_stall_callback(xfer, sc->sc_xfer[stall_xfer])) {
			goto tr_transferred;	/* should not happen */
		}
		break;

	default:			/* Error */
		umass_tr_error(xfer, error);
		break;
	}
}
/*
 * umass_t_cbi_command_callback - send the command block over the
 * control pipe (ADSC request) and then advance to the data phase or,
 * for data-less commands, directly to the status phase.
 */
static void
umass_t_cbi_command_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct umass_softc *sc = usbd_xfer_softc(xfer);
	union ccb *ccb = sc->sc_transfer.ccb;
	struct usb_device_request req;
	struct usb_page_cache *pc;

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:

		if (sc->sc_transfer.dir == DIR_NONE) {
			umass_cbi_start_status(sc);
		} else {
			umass_transfer_start
			    (sc, (sc->sc_transfer.dir == DIR_IN) ?
			    UMASS_T_CBI_DATA_READ : UMASS_T_CBI_DATA_WRITE);
		}
		break;

	case USB_ST_SETUP:

		if (ccb) {
			/*
			 * do a CBI transfer with cmd_len bytes from
			 * cmd_data, possibly a data phase of data_len
			 * bytes from/to the device and finally a status
			 * read phase.
			 */
			req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
			req.bRequest = UR_CBI_ADSC;
			USETW(req.wValue, 0);
			req.wIndex[0] = sc->sc_iface_no;
			req.wIndex[1] = 0;
			req.wLength[0] = sc->sc_transfer.cmd_len;
			req.wLength[1] = 0;

			/* frame 0: control request, frame 1: command block */
			pc = usbd_xfer_get_frame(xfer, 0);
			usbd_copy_in(pc, 0, &req, sizeof(req));

			pc = usbd_xfer_get_frame(xfer, 1);
			usbd_copy_in(pc, 0, sc->sc_transfer.cmd_data,
			    sc->sc_transfer.cmd_len);

			usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
			usbd_xfer_set_frame_len(xfer, 1, sc->sc_transfer.cmd_len);
			usbd_xfer_set_frames(xfer,
			    sc->sc_transfer.cmd_len ? 2 : 1);

			DIF(UDMASS_CBI,
			    umass_cbi_dump_cmd(sc,
			    sc->sc_transfer.cmd_data,
			    sc->sc_transfer.cmd_len));

			usbd_transfer_submit(xfer);
		}
		break;

	default:			/* Error */
		/*
		 * STALL on the control pipe can be result of the command error.
		 * Attempt to clear this STALL same as for bulk pipe also
		 * results in command completion interrupt, but ASC/ASCQ there
		 * look like not always valid, so don't bother about it.
		 */
		if ((error == USB_ERR_STALLED) ||
		    (sc->sc_transfer.callback == &umass_cam_cb)) {
			sc->sc_transfer.ccb = NULL;
			(sc->sc_transfer.callback)
			    (sc, ccb, sc->sc_transfer.data_len,
			    STATUS_CMD_UNKNOWN);
		} else {
			umass_tr_error(xfer, error);
			/* skip reset */
			sc->sc_last_xfer_index = UMASS_T_CBI_COMMAND;
		}
		break;
	}
}
/*
 * CBI data-in phase.  Transfers the remaining data in chunks of at most
 * the transfer's maximum length, then starts the status phase.  On a
 * short transfer the remaining byte count is forced to zero so the
 * status phase starts on the next pass.
 */
static void
umass_t_cbi_data_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
    struct umass_softc *sc = usbd_xfer_softc(xfer);
    uint32_t max_bulk = usbd_xfer_max_len(xfer);
    int actlen, sumlen;

    usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);

    switch (USB_GET_STATE(xfer)) {
    case USB_ST_TRANSFERRED:
        /* account for the data just received */
        sc->sc_transfer.data_rem -= actlen;
        sc->sc_transfer.data_ptr += actlen;
        sc->sc_transfer.actlen += actlen;

        if (actlen < sumlen) {
            /* short transfer */
            sc->sc_transfer.data_rem = 0;
        }
        /* FALLTHROUGH: submit the next chunk (or finish) */
    case USB_ST_SETUP:
        DPRINTF(sc, UDMASS_CBI, "max_bulk=%d, data_rem=%d\n",
            max_bulk, sc->sc_transfer.data_rem);

        if (sc->sc_transfer.data_rem == 0) {
            umass_cbi_start_status(sc);
            break;
        }
        if (max_bulk > sc->sc_transfer.data_rem) {
            max_bulk = sc->sc_transfer.data_rem;
        }
        usbd_xfer_set_timeout(xfer, sc->sc_transfer.data_timeout);

        usbd_xfer_set_frame_data(xfer, 0, sc->sc_transfer.data_ptr,
            max_bulk);
        usbd_transfer_submit(xfer);
        break;

    default:            /* Error */
        /*
         * For CAM-originated transfers, try to recover by clearing a
         * possible stall; otherwise report the error.
         */
        if ((error == USB_ERR_CANCELLED) ||
            (sc->sc_transfer.callback != &umass_cam_cb)) {
            umass_tr_error(xfer, error);
        } else {
            umass_transfer_start(sc, UMASS_T_CBI_DATA_RD_CS);
        }
        break;
    }
}
/*
 * Clear a stall on the data-read transfer's endpoint after a failed
 * data-in phase, then proceed to the status phase.
 */
static void
umass_t_cbi_data_rd_cs_callback(struct usb_xfer *xfer, usb_error_t error)
{
    umass_t_cbi_data_clear_stall_callback(xfer, UMASS_T_CBI_STATUS,
        UMASS_T_CBI_DATA_READ, error);
}
/*
 * CBI data-out phase.  Mirror image of umass_t_cbi_data_read_callback():
 * transfers the remaining data in chunks, then starts the status phase.
 */
static void
umass_t_cbi_data_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
    struct umass_softc *sc = usbd_xfer_softc(xfer);
    uint32_t max_bulk = usbd_xfer_max_len(xfer);
    int actlen, sumlen;

    usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);

    switch (USB_GET_STATE(xfer)) {
    case USB_ST_TRANSFERRED:
        /* account for the data just sent */
        sc->sc_transfer.data_rem -= actlen;
        sc->sc_transfer.data_ptr += actlen;
        sc->sc_transfer.actlen += actlen;

        if (actlen < sumlen) {
            /* short transfer */
            sc->sc_transfer.data_rem = 0;
        }
        /* FALLTHROUGH: submit the next chunk (or finish) */
    case USB_ST_SETUP:
        DPRINTF(sc, UDMASS_CBI, "max_bulk=%d, data_rem=%d\n",
            max_bulk, sc->sc_transfer.data_rem);

        if (sc->sc_transfer.data_rem == 0) {
            umass_cbi_start_status(sc);
            break;
        }
        if (max_bulk > sc->sc_transfer.data_rem) {
            max_bulk = sc->sc_transfer.data_rem;
        }
        usbd_xfer_set_timeout(xfer, sc->sc_transfer.data_timeout);

        usbd_xfer_set_frame_data(xfer, 0, sc->sc_transfer.data_ptr,
            max_bulk);
        usbd_transfer_submit(xfer);
        break;

    default:            /* Error */
        /*
         * For CAM-originated transfers, try to recover by clearing a
         * possible stall; otherwise report the error.
         */
        if ((error == USB_ERR_CANCELLED) ||
            (sc->sc_transfer.callback != &umass_cam_cb)) {
            umass_tr_error(xfer, error);
        } else {
            umass_transfer_start(sc, UMASS_T_CBI_DATA_WR_CS);
        }
        break;
    }
}
/*
 * Clear a stall on the data-write transfer's endpoint after a failed
 * data-out phase, then proceed to the status phase.
 */
static void
umass_t_cbi_data_wr_cs_callback(struct usb_xfer *xfer, usb_error_t error)
{
    umass_t_cbi_data_clear_stall_callback(xfer, UMASS_T_CBI_STATUS,
        UMASS_T_CBI_DATA_WRITE, error);
}
/*
 * CBI status phase: read the interrupt-pipe status block (sc->sbl),
 * decode it either as a UFI ASC/ASCQ pair or as a Command Interrupt
 * Data Block, and complete the command via the registered callback.
 * A short read re-submits the status transfer.
 */
static void
umass_t_cbi_status_callback(struct usb_xfer *xfer, usb_error_t error)
{
    struct umass_softc *sc = usbd_xfer_softc(xfer);
    union ccb *ccb = sc->sc_transfer.ccb;
    struct usb_page_cache *pc;
    uint32_t residue;
    uint8_t status;
    int actlen;

    usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

    switch (USB_GET_STATE(xfer)) {
    case USB_ST_TRANSFERRED:
        /* a complete status block is required; retry on short reads */
        if (actlen < (int)sizeof(sc->sbl)) {
            goto tr_setup;
        }
        pc = usbd_xfer_get_frame(xfer, 0);
        usbd_copy_out(pc, 0, &sc->sbl, sizeof(sc->sbl));

        /* bytes requested minus bytes actually moved */
        residue = (sc->sc_transfer.data_len -
            sc->sc_transfer.actlen);

        /* dissect the information in the buffer */

        if (sc->sc_proto & UMASS_PROTO_UFI) {
            /*
             * Section 3.4.3.1.3 specifies that the UFI command
             * protocol returns an ASC and ASCQ in the interrupt
             * data block.
             */

            DPRINTF(sc, UDMASS_CBI, "UFI CCI, ASC = 0x%02x, "
                "ASCQ = 0x%02x\n", sc->sbl.ufi.asc,
                sc->sbl.ufi.ascq);

            /* zero ASC/ASCQ means success */
            status = (((sc->sbl.ufi.asc == 0) &&
                (sc->sbl.ufi.ascq == 0)) ?
                STATUS_CMD_OK : STATUS_CMD_FAILED);

            sc->sc_transfer.ccb = NULL;

            sc->sc_last_xfer_index = UMASS_T_CBI_COMMAND;

            (sc->sc_transfer.callback)
                (sc, ccb, residue, status);

            break;

        } else {

            /* Command Interrupt Data Block */

            DPRINTF(sc, UDMASS_CBI, "type=0x%02x, value=0x%02x\n",
                sc->sbl.common.type, sc->sbl.common.value);

            if (sc->sbl.common.type == IDB_TYPE_CCI) {

                /* map the CIDB status code onto driver status */
                status = (sc->sbl.common.value & IDB_VALUE_STATUS_MASK);

                status = ((status == IDB_VALUE_PASS) ? STATUS_CMD_OK :
                    (status == IDB_VALUE_FAIL) ? STATUS_CMD_FAILED :
                    (status == IDB_VALUE_PERSISTENT) ? STATUS_CMD_FAILED :
                    STATUS_WIRE_FAILED);

                sc->sc_transfer.ccb = NULL;

                sc->sc_last_xfer_index = UMASS_T_CBI_COMMAND;

                (sc->sc_transfer.callback)
                    (sc, ccb, residue, status);

                break;
            }
        }

        /* fallthrough */

    case USB_ST_SETUP:
tr_setup:
        usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
        usbd_transfer_submit(xfer);
        break;

    default:            /* Error */
        DPRINTF(sc, UDMASS_CBI, "Failed to read CSW: %s\n",
            usbd_errstr(error));
        umass_tr_error(xfer, error);
        break;
    }
}
/*
* CAM specific functions (used by SCSI, UFI, 8070i (ATAPI))
*/
/*
 * Allocate a CAM SIM plus device queue for this umass instance and
 * register a new bus with the transport layer.
 *
 * Returns 0 on success, ENOMEM if any allocation or the bus
 * registration fails.
 */
static int
umass_cam_attach_sim(struct umass_softc *sc)
{
    struct cam_devq *devq;      /* Per device Queue */

    /*
     * A HBA is attached to the CAM layer.
     *
     * The CAM layer will then after a while start probing for devices on
     * the bus. The number of SIMs is limited to one.
     */

    devq = cam_simq_alloc(1 /* maximum openings */ );
    if (devq == NULL) {
        return (ENOMEM);
    }
    sc->sc_sim = cam_sim_alloc
        (umass_cam_action, umass_cam_poll,
        DEVNAME_SIM,
        sc /* priv */ ,
        sc->sc_unit /* unit number */ ,
        &sc->sc_lock /* mutex */ ,
        1 /* maximum device openings */ ,
        0 /* maximum tagged device openings */ ,
        devq);

    /* cam_sim_alloc() holds its own reference on the devq */
    cam_simq_release(devq);

    if (sc->sc_sim == NULL) {
        return (ENOMEM);
    }

    /* timer used to defer the initial bus rescan (see umass_cam_attach) */
    usb_callout_init_mtx(&sc->sc_rescan_timeout, &sc->sc_lock, 0);

    /* xpt_bus_register() must be called with the SIM lock held */
    lockmgr(&sc->sc_lock, LK_EXCLUSIVE);

    if (xpt_bus_register(sc->sc_sim, sc->sc_unit) != CAM_SUCCESS) {
        lockmgr(&sc->sc_lock, LK_RELEASE);
        return (ENOMEM);
    }
    lockmgr(&sc->sc_lock, LK_RELEASE);

    return (0);
}
/*
* (mp) We need this for DragonflyBSD to realise that there
* is a new device present
*/
/*
 * Completion handler for the async bus rescan started by
 * umass_cam_rescan(): release the path and the CCB that were allocated
 * there.  The result is only reported in debug builds.
 */
static void
umass_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
{
#ifdef USB_DEBUG
    if (ccb->ccb_h.status != CAM_REQ_CMP) {
        kprintf("%s:%d Rescan failed, 0x%04x\n",
            periph->periph_name, periph->unit_number,
            ccb->ccb_h.status);
    } else {
        kprintf("%s%d: Rescan succeeded\n",
            periph->periph_name, periph->unit_number);
    }
#endif

    xpt_free_path(ccb->ccb_h.path);
    kfree(ccb, M_USBDEV);
}
/*
* Rescan the SCSI bus to detect newly added devices. We use
* an async rescan to avoid reentrancy issues.
*/
/*
 * Kick off an asynchronous XPT_SCAN_BUS for this SIM (callout handler
 * armed in umass_cam_attach()).  The CCB and path allocated here are
 * freed by umass_cam_rescan_callback() when the scan completes.
 */
static void
umass_cam_rescan(void *addr)
{
    struct umass_softc *sc = (struct umass_softc *) addr;
    struct cam_path *path;
    union ccb *ccb;

    ccb = kmalloc(sizeof(union ccb), M_USBDEV, M_INTWAIT|M_ZERO);

    DPRINTF(sc, UDMASS_SCSI, "scbus%d: scanning for %s:%d:%d:%d\n",
        cam_sim_path(sc->sc_sim),
        device_get_nameunit(sc->sc_dev), cam_sim_path(sc->sc_sim),
        device_get_unit(sc->sc_dev), CAM_LUN_WILDCARD);

    /* wildcard path: scan every target/LUN on this bus */
    if (xpt_create_path(&path, xpt_periph, cam_sim_path(sc->sc_sim),
        CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP)
    {
        kfree(ccb, M_USBDEV);
        return;
    }

    xpt_setup_ccb(&ccb->ccb_h, path, 5/*priority (low)*/);
    ccb->ccb_h.func_code = XPT_SCAN_BUS;
    ccb->ccb_h.cbfcnp = umass_cam_rescan_callback;
    ccb->crcn.flags = CAM_FLAG_NONE;
    xpt_action_async(ccb);

    /* The scan is in progress now. */
}
/*
 * Announce the attachment to the console and, unless we are still in
 * early boot (cold), schedule a deferred bus rescan so CAM picks up the
 * newly attached device.
 */
static void
umass_cam_attach(struct umass_softc *sc)
{
#ifndef USB_DEBUG
    if (bootverbose)
#endif
        kprintf("%s:%d:%d:%d: Attached to scbus%d\n",
            sc->sc_name, cam_sim_path(sc->sc_sim),
            sc->sc_unit, CAM_LUN_WILDCARD,
            cam_sim_path(sc->sc_sim));

    if(!cold) {
        /* defer the rescan; xpt is not ready for it synchronously here */
        usb_callout_reset(&sc->sc_rescan_timeout, USB_MS_TO_TICKS(200),
            umass_cam_rescan, sc);
    }
}
/* umass_cam_detach
* detach from the CAM layer
*/
/*
 * Undo umass_cam_attach_sim(): stop the rescan callout, deregister the
 * bus and free the SIM.  Panics if CAM refuses to deregister the bus,
 * since continuing would leave CAM with a dangling softc pointer.
 */
static void
umass_cam_detach_sim(struct umass_softc *sc)
{
    if (sc->sc_sim != NULL) {
        usb_callout_stop(&sc->sc_rescan_timeout);
        if (xpt_bus_deregister(cam_sim_path(sc->sc_sim))) {
            /* accessing the softc is not possible after this */
            sc->sc_sim->softc = NULL;
            cam_sim_free(sc->sc_sim);
        } else {
            panic("%s: CAM layer is busy\n",
                sc->sc_name);
        }
        sc->sc_sim = NULL;
    }
}
/* umass_cam_action
* CAM requests for action come through here
*/
/*
 * CAM action entry point for the umass SIM.
 *
 * Dispatches on ccb_h.func_code: XPT_SCSI_IO is translated through the
 * protocol-specific command transform and queued for USB transfer;
 * the remaining cases answer CAM inquiries (path/transfer settings,
 * geometry) or perform a device reset.  Several quirky devices are
 * handled inline by faking or rewriting responses before any USB
 * traffic happens.
 */
static void
umass_cam_action(struct cam_sim *sim, union ccb *ccb)
{
    struct umass_softc *sc = (struct umass_softc *)sim->softc;

    /* softc is cleared on detach; fail any late requests */
    if (sc == NULL) {
        ccb->ccb_h.status = CAM_SEL_TIMEOUT;
        xpt_done(ccb);
        return;
    }

    /* Perform the requested action */
    switch (ccb->ccb_h.func_code) {
    case XPT_SCSI_IO:
        {
            uint8_t *cmd;
            uint8_t dir;

            /* the CDB may be stored inline or behind a pointer */
            if (ccb->csio.ccb_h.flags & CAM_CDB_POINTER) {
                cmd = (uint8_t *)(ccb->csio.cdb_io.cdb_ptr);
            } else {
                cmd = (uint8_t *)(ccb->csio.cdb_io.cdb_bytes);
            }

            DPRINTF(sc, UDMASS_SCSI, "%d:%d:%jx:XPT_SCSI_IO: "
                "cmd: 0x%02x, flags: 0x%02x, "
                "%db cmd/%db data/%db sense\n",
                cam_sim_path(sc->sc_sim), ccb->ccb_h.target_id,
                (uintmax_t)ccb->ccb_h.target_lun, cmd[0],
                ccb->ccb_h.flags & CAM_DIR_MASK, ccb->csio.cdb_len,
                ccb->csio.dxfer_len, ccb->csio.sense_len);

            /* only one outstanding command per device */
            if (sc->sc_transfer.ccb) {
                DPRINTF(sc, UDMASS_SCSI, "%d:%d:%jx:XPT_SCSI_IO: "
                    "I/O in progress, deferring\n",
                    cam_sim_path(sc->sc_sim), ccb->ccb_h.target_id,
                    (uintmax_t)ccb->ccb_h.target_lun);
                ccb->ccb_h.status = CAM_SCSI_BUSY;
                xpt_done(ccb);
                goto done;
            }
            switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
            case CAM_DIR_IN:
                dir = DIR_IN;
                break;
            case CAM_DIR_OUT:
                dir = DIR_OUT;
                DIF(UDMASS_SCSI,
                    umass_dump_buffer(sc, ccb->csio.data_ptr,
                    ccb->csio.dxfer_len, 48));
                break;
            default:
                dir = DIR_NONE;
            }

            ccb->ccb_h.status = CAM_REQ_INPROG | CAM_SIM_QUEUED;

            /*
             * sc->sc_transform will convert the command to the
             * command format needed by the specific command set
             * and return the converted command in
             * "sc->sc_transfer.cmd_data"
             */
            if (umass_std_transform(sc, ccb, cmd, ccb->csio.cdb_len)) {

                if (sc->sc_transfer.cmd_data[0] == INQUIRY) {
                    const char *pserial;

                    pserial = usb_get_serial(sc->sc_udev);

                    /*
                     * Umass devices don't generally report their serial numbers
                     * in the usual SCSI way. Emulate it here.
                     */
                    if ((sc->sc_transfer.cmd_data[1] & SI_EVPD) &&
                        (sc->sc_transfer.cmd_data[2] == SVPD_UNIT_SERIAL_NUMBER) &&
                        (pserial[0] != '\0')) {
                        struct scsi_vpd_unit_serial_number *vpd_serial;

                        vpd_serial = (struct scsi_vpd_unit_serial_number *)ccb->csio.data_ptr;
                        vpd_serial->length = strlen(pserial);
                        if (vpd_serial->length > sizeof(vpd_serial->serial_num))
                            vpd_serial->length = sizeof(vpd_serial->serial_num);
                        memcpy(vpd_serial->serial_num, pserial, vpd_serial->length);
                        ccb->csio.scsi_status = SCSI_STATUS_OK;
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        xpt_done(ccb);
                        goto done;
                    }

                    /*
                     * Handle EVPD inquiry for broken devices first
                     * NO_INQUIRY also implies NO_INQUIRY_EVPD
                     */
                    if ((sc->sc_quirks & (NO_INQUIRY_EVPD | NO_INQUIRY)) &&
                        (sc->sc_transfer.cmd_data[1] & SI_EVPD)) {

                        /* fabricate an ILLEGAL REQUEST sense reply */
                        struct scsi_sense_data *sense;

                        sense = &ccb->csio.sense_data;
                        bzero(sense, sizeof(*sense));
                        sense->error_code = SSD_CURRENT_ERROR;
                        sense->flags = SSD_KEY_ILLEGAL_REQUEST;
                        sense->add_sense_code = 0x24;
                        sense->extra_len = 10;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        ccb->ccb_h.status =
                            CAM_SCSI_STATUS_ERROR |
                            CAM_AUTOSNS_VALID |
                            CAM_DEV_QFRZN;
                        xpt_freeze_devq(ccb->ccb_h.path, 1);
                        xpt_done(ccb);
                        goto done;
                    }
                    /*
                     * Return fake inquiry data for
                     * broken devices
                     */
                    if (sc->sc_quirks & NO_INQUIRY) {
                        memcpy(ccb->csio.data_ptr, &fake_inq_data,
                            sizeof(fake_inq_data));
                        ccb->csio.scsi_status = SCSI_STATUS_OK;
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        xpt_done(ccb);
                        goto done;
                    }
                    if (sc->sc_quirks & FORCE_SHORT_INQUIRY) {
                        ccb->csio.dxfer_len = SHORT_INQUIRY_LENGTH;
                    }
                } else if (sc->sc_transfer.cmd_data[0] == PREVENT_ALLOW) {

                    /* some devices choke on PREVENT_ALLOW; fake success */
                    if (sc->sc_quirks & NO_PREVENT_ALLOW) {
                        ccb->csio.scsi_status = SCSI_STATUS_OK;
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        xpt_done(ccb);
                        goto done;
                    }
                } else if (sc->sc_transfer.cmd_data[0] == SYNCHRONIZE_CACHE) {

                    /* likewise for SYNCHRONIZE_CACHE */
                    if (sc->sc_quirks & NO_SYNCHRONIZE_CACHE) {
                        ccb->csio.scsi_status = SCSI_STATUS_OK;
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        xpt_done(ccb);
                        goto done;
                    }
                }
                /* hand off to the USB state machine */
                umass_command_start(sc, dir, ccb->csio.data_ptr,
                    ccb->csio.dxfer_len,
                    ccb->ccb_h.timeout,
                    &umass_cam_cb, ccb);
            }
            break;
        }
    case XPT_PATH_INQ:
        {
            struct ccb_pathinq *cpi = &ccb->cpi;

            /* NB: sc is known non-NULL here; the checks below are defensive */
            DPRINTF(sc, UDMASS_SCSI, "%d:%d:%jx:XPT_PATH_INQ:.\n",
                sc ? cam_sim_path(sc->sc_sim) : -1, ccb->ccb_h.target_id,
                (uintmax_t)ccb->ccb_h.target_lun);

            /* host specific information */
            cpi->version_num = 1;
            cpi->hba_inquiry = 0;
            cpi->target_sprt = 0;
            cpi->hba_misc = PIM_NO_6_BYTE;
            cpi->hba_eng_cnt = 0;
            cpi->max_target = UMASS_SCSIID_MAX; /* one target */
            cpi->initiator_id = UMASS_SCSIID_HOST;
            strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
            strlcpy(cpi->hba_vid, "USB SCSI", HBA_IDLEN);
            strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
            cpi->unit_number = cam_sim_unit(sim);
            cpi->bus_id = sc->sc_unit;
            cpi->protocol = PROTO_SCSI;
            cpi->protocol_version = SCSI_REV_2;
            cpi->transport = XPORT_USB;
            cpi->transport_version = 0;

            if (sc == NULL) {
                cpi->base_transfer_speed = 0;
                cpi->max_lun = 0;
            } else {
                /* advertise a nominal speed matching the USB link */
                if (sc->sc_quirks & FLOPPY_SPEED) {
                    cpi->base_transfer_speed =
                        UMASS_FLOPPY_TRANSFER_SPEED;
                } else {
                    switch (usbd_get_speed(sc->sc_udev)) {
                    case USB_SPEED_SUPER:
                        cpi->base_transfer_speed =
                            UMASS_SUPER_TRANSFER_SPEED;
#if 0 /* XXX */
                        cpi->maxio = MAXPHYS;
#endif
                        break;
                    case USB_SPEED_HIGH:
                        cpi->base_transfer_speed =
                            UMASS_HIGH_TRANSFER_SPEED;
                        break;
                    default:
                        cpi->base_transfer_speed =
                            UMASS_FULL_TRANSFER_SPEED;
                        break;
                    }
                }
                cpi->max_lun = sc->sc_maxlun;
            }

            cpi->ccb_h.status = CAM_REQ_CMP;
            xpt_done(ccb);
            break;
        }
    case XPT_RESET_DEV:
        {
            DPRINTF(sc, UDMASS_SCSI, "%d:%d:%jx:XPT_RESET_DEV:.\n",
                cam_sim_path(sc->sc_sim), ccb->ccb_h.target_id,
                (uintmax_t)ccb->ccb_h.target_lun);

            umass_reset(sc);

            ccb->ccb_h.status = CAM_REQ_CMP;
            xpt_done(ccb);
            break;
        }
    case XPT_GET_TRAN_SETTINGS:
        {
            struct ccb_trans_settings *cts = &ccb->cts;

            DPRINTF(sc, UDMASS_SCSI, "%d:%d:%jx:XPT_GET_TRAN_SETTINGS:.\n",
                cam_sim_path(sc->sc_sim), ccb->ccb_h.target_id,
                (uintmax_t)ccb->ccb_h.target_lun);

            cts->protocol = PROTO_SCSI;
            cts->protocol_version = SCSI_REV_2;
            cts->transport = XPORT_USB;
            cts->transport_version = 0;
            cts->xport_specific.valid = 0;

            ccb->ccb_h.status = CAM_REQ_CMP;
            xpt_done(ccb);
            break;
        }
    case XPT_SET_TRAN_SETTINGS:
        {
            /* transfer settings are fixed for USB; refuse changes */
            DPRINTF(sc, UDMASS_SCSI, "%d:%d:%jx:XPT_SET_TRAN_SETTINGS:.\n",
                cam_sim_path(sc->sc_sim), ccb->ccb_h.target_id,
                (uintmax_t)ccb->ccb_h.target_lun);

            ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
            xpt_done(ccb);
            break;
        }
    case XPT_CALC_GEOMETRY:
        {
            cam_calc_geometry(&ccb->ccg, /* extended */ 1);
            xpt_done(ccb);
            break;
        }
    case XPT_NOOP:
        {
            DPRINTF(sc, UDMASS_SCSI, "%d:%d:%jx:XPT_NOOP:.\n",
                sc ? cam_sim_path(sc->sc_sim) : -1, ccb->ccb_h.target_id,
                (uintmax_t)ccb->ccb_h.target_lun);

            ccb->ccb_h.status = CAM_REQ_CMP;
            xpt_done(ccb);
            break;
        }
    default:
        DPRINTF(sc, UDMASS_SCSI, "%d:%d:%jx:func_code 0x%04x: "
            "Not implemented\n",
            sc ? cam_sim_path(sc->sc_sim) : -1, ccb->ccb_h.target_id,
            (uintmax_t)ccb->ccb_h.target_lun, ccb->ccb_h.func_code);

        ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
        xpt_done(ccb);
        break;
    }

done:
    return;
}
/*
 * CAM poll entry point: drive all of this device's USB transfers by
 * polling, for use when interrupts are not available (e.g. dumping).
 */
static void
umass_cam_poll(struct cam_sim *sim)
{
    struct umass_softc *sc = (struct umass_softc *)sim->softc;

    if (sc == NULL)
        return;

    DPRINTF(sc, UDMASS_SCSI, "CAM poll\n");

    usbd_transfer_poll(sc->sc_xfer, UMASS_T_MAX);
}
/* umass_cam_cb
* finalise a completed CAM command
*/
/*
 * Completion callback for normal CAM commands.
 *
 * On success, applies two post-processing quirks (READ CAPACITY
 * off-by-one correction and injection of the unit-serial-number VPD
 * page into the supported-pages list) and completes the CCB.  On a
 * failed/unknown command status it launches a REQUEST SENSE; on a wire
 * failure it freezes the device queue and returns an error to CAM.
 */
static void
umass_cam_cb(struct umass_softc *sc, union ccb *ccb, uint32_t residue,
    uint8_t status)
{
    ccb->csio.resid = residue;

    switch (status) {
    case STATUS_CMD_OK:
        ccb->ccb_h.status = CAM_REQ_CMP;
        if ((sc->sc_quirks & READ_CAPACITY_OFFBY1) &&
            (ccb->ccb_h.func_code == XPT_SCSI_IO) &&
            (ccb->csio.cdb_io.cdb_bytes[0] == READ_CAPACITY)) {
            struct scsi_read_capacity_data *rcap;
            uint32_t maxsector;

            /* device reports one sector too many; correct it */
            rcap = (void *)(ccb->csio.data_ptr);
            maxsector = scsi_4btoul(rcap->addr) - 1;
            scsi_ulto4b(maxsector, rcap->addr);
        }
        /*
         * We have to add SVPD_UNIT_SERIAL_NUMBER to the list
         * of pages supported by the device - otherwise, CAM
         * will never ask us for the serial number if the
         * device cannot handle that by itself.
         */
        if (ccb->ccb_h.func_code == XPT_SCSI_IO &&
            sc->sc_transfer.cmd_data[0] == INQUIRY &&
            (sc->sc_transfer.cmd_data[1] & SI_EVPD) &&
            sc->sc_transfer.cmd_data[2] == SVPD_SUPPORTED_PAGE_LIST &&
            (usb_get_serial(sc->sc_udev)[0] != '\0')) {
            struct ccb_scsiio *csio;
            struct scsi_vpd_supported_page_list *page_list;

            csio = &ccb->csio;
            page_list = (struct scsi_vpd_supported_page_list *)csio->data_ptr;
            if (page_list->length + 1 < SVPD_SUPPORTED_PAGES_SIZE) {
                page_list->list[page_list->length] = SVPD_UNIT_SERIAL_NUMBER;
                page_list->length++;
            }
        }
        xpt_done(ccb);
        break;

    case STATUS_CMD_UNKNOWN:
    case STATUS_CMD_FAILED:

        /* fetch sense data */

        /* the rest of the command was filled in at attach */
        sc->cam_scsi_sense.length = ccb->csio.sense_len;

        DPRINTF(sc, UDMASS_SCSI, "Fetching %d bytes of "
            "sense data\n", ccb->csio.sense_len);

        if (umass_std_transform(sc, ccb, &sc->cam_scsi_sense.opcode,
            sizeof(sc->cam_scsi_sense))) {

            if ((sc->sc_quirks & FORCE_SHORT_INQUIRY) &&
                (sc->sc_transfer.cmd_data[0] == INQUIRY)) {
                ccb->csio.sense_len = SHORT_INQUIRY_LENGTH;
            }
            umass_command_start(sc, DIR_IN, &ccb->csio.sense_data.error_code,
                ccb->csio.sense_len, ccb->ccb_h.timeout,
                &umass_cam_sense_cb, ccb);
        }
        break;

    default:
        /*
         * The wire protocol failed and will hopefully have
         * recovered. We return an error to CAM and let CAM
         * retry the command if necessary.
         */
        xpt_freeze_devq(ccb->ccb_h.path, 1);
        ccb->ccb_h.status = CAM_REQ_CMP_ERR | CAM_DEV_QFRZN;
        xpt_done(ccb);
        break;
    }
}
/*
* Finalise a completed autosense operation
*/
/*
 * Completion callback for the automatic REQUEST SENSE issued by
 * umass_cam_cb().  Decodes the sense data and maps it onto a CAM
 * status, applying the RS_NO_CLEAR_UA quirk handling (ignore unit
 * attention on INQUIRY; insert a TEST UNIT READY after READ CAPACITY).
 */
static void
umass_cam_sense_cb(struct umass_softc *sc, union ccb *ccb, uint32_t residue,
    uint8_t status)
{
    uint8_t *cmd;

    switch (status) {
    case STATUS_CMD_OK:
    case STATUS_CMD_UNKNOWN:
    case STATUS_CMD_FAILED:
        {
            int error, key, asc, ascq;
            uint8_t sense_len;

            ccb->csio.sense_resid = residue;
            sense_len = ccb->csio.sense_len - ccb->csio.sense_resid;
            scsi_extract_sense(&ccb->csio.sense_data,
                &error,
                &key,
                &asc, &ascq);

            /* locate the original CDB to identify the failed command */
            if (ccb->csio.ccb_h.flags & CAM_CDB_POINTER) {
                cmd = (uint8_t *)(ccb->csio.cdb_io.cdb_ptr);
            } else {
                cmd = (uint8_t *)(ccb->csio.cdb_io.cdb_bytes);
            }

            /*
             * Getting sense data always succeeds (apart from wire
             * failures):
             */
            if ((sc->sc_quirks & RS_NO_CLEAR_UA) &&
                (cmd[0] == INQUIRY) &&
                (key == SSD_KEY_UNIT_ATTENTION)) {
                /*
                 * Ignore unit attention errors in the case where
                 * the Unit Attention state is not cleared on
                 * REQUEST SENSE. They will appear again at the next
                 * command.
                 */
                ccb->ccb_h.status = CAM_REQ_CMP;
            } else if (key == SSD_KEY_NO_SENSE) {
                /*
                 * No problem after all (in the case of CBI without
                 * CCI)
                 */
                ccb->ccb_h.status = CAM_REQ_CMP;
            } else if ((sc->sc_quirks & RS_NO_CLEAR_UA) &&
                    (cmd[0] == READ_CAPACITY) &&
                (key == SSD_KEY_UNIT_ATTENTION)) {

                /*
                 * Some devices do not clear the unit attention error
                 * on request sense. We insert a test unit ready
                 * command to make sure we clear the unit attention
                 * condition, then allow the retry to proceed as
                 * usual.
                 */

                xpt_freeze_devq(ccb->ccb_h.path, 1);
                ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR
                    | CAM_AUTOSNS_VALID | CAM_DEV_QFRZN;
                ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;

#if 0
                DELAY(300000);
#endif
                DPRINTF(sc, UDMASS_SCSI, "Doing a sneaky"
                    "TEST_UNIT_READY\n");

                /* the rest of the command was filled in at attach */

                if ((sc->sc_transform)(sc,
                    &sc->cam_scsi_test_unit_ready.opcode,
                    sizeof(sc->cam_scsi_test_unit_ready)) == 1) {
                    umass_command_start(sc, DIR_NONE, NULL, 0,
                        ccb->ccb_h.timeout,
                        &umass_cam_quirk_cb, ccb);
                    /* CCB is completed by umass_cam_quirk_cb() */
                    break;
                }
            } else {
                xpt_freeze_devq(ccb->ccb_h.path, 1);
                if (key >= 0) {
                    ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR
                        | CAM_AUTOSNS_VALID | CAM_DEV_QFRZN;
                    ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                } else
                    ccb->ccb_h.status = CAM_AUTOSENSE_FAIL
                        | CAM_DEV_QFRZN;
            }
            xpt_done(ccb);
            break;
        }
    default:
        DPRINTF(sc, UDMASS_SCSI, "Autosense failed, "
            "status %d\n", status);
        xpt_freeze_devq(ccb->ccb_h.path, 1);
        ccb->ccb_h.status = CAM_AUTOSENSE_FAIL | CAM_DEV_QFRZN;
        xpt_done(ccb);
    }
}
/*
* This completion code just handles the fact that we sent a test-unit-ready
* after having previously failed a READ CAPACITY with CHECK_COND. The CCB
* status for CAM is already set earlier.
*/
/*
 * This completion code just handles the fact that we sent a test-unit-ready
 * after having previously failed a READ CAPACITY with CHECK_COND. The CCB
 * status for CAM is already set earlier (in umass_cam_sense_cb()), so we
 * only log the TUR result and complete the CCB here.
 */
static void
umass_cam_quirk_cb(struct umass_softc *sc, union ccb *ccb, uint32_t residue,
    uint8_t status)
{
    DPRINTF(sc, UDMASS_SCSI, "Test unit ready "
        "returned status %d\n", status);

    xpt_done(ccb);
}
/*
* SCSI specific functions
*/
/*
 * Transparent SCSI command transform: copies the CDB verbatim into
 * sc_transfer.cmd_data, applying two quirks on the way
 * (TEST UNIT READY -> START UNIT, and forcing short INQUIRY data).
 *
 * Returns 1 on success, 0 on an invalid command length.
 */
static uint8_t
umass_scsi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
    uint8_t cmd_len)
{
    if ((cmd_len == 0) ||
        (cmd_len > sizeof(sc->sc_transfer.cmd_data))) {
        DPRINTF(sc, UDMASS_SCSI, "Invalid command "
            "length: %d bytes\n", cmd_len);
        return (0);     /* failure */
    }
    sc->sc_transfer.cmd_len = cmd_len;

    switch (cmd_ptr[0]) {
    case TEST_UNIT_READY:
        if (sc->sc_quirks & NO_TEST_UNIT_READY) {
            DPRINTF(sc, UDMASS_SCSI, "Converted TEST_UNIT_READY "
                "to START_UNIT\n");
            memset(sc->sc_transfer.cmd_data, 0, cmd_len);
            sc->sc_transfer.cmd_data[0] = START_STOP_UNIT;
            sc->sc_transfer.cmd_data[4] = SSS_START;
            return (1);
        }
        break;

    case INQUIRY:
        /*
         * some drives wedge when asked for full inquiry
         * information.
         */
        if (sc->sc_quirks & FORCE_SHORT_INQUIRY) {
            memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
            /* clamp the allocation length (CDB byte 4) */
            sc->sc_transfer.cmd_data[4] = SHORT_INQUIRY_LENGTH;
            return (1);
        }
        break;
    }

    /* default: pass the command through unchanged */
    memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
    return (1);
}
/*
 * RBC command transform: accepts only the commands defined by RBC
 * (plus a few that CAM/devices use in practice), copies them through,
 * and optionally pads the CDB to 12 bytes for RBC_PAD_TO_12 devices.
 *
 * Returns 1 on success, 0 for invalid lengths or unsupported commands.
 */
static uint8_t
umass_rbc_transform(struct umass_softc *sc, uint8_t *cmd_ptr, uint8_t cmd_len)
{
    if ((cmd_len == 0) ||
        (cmd_len > sizeof(sc->sc_transfer.cmd_data))) {
        DPRINTF(sc, UDMASS_SCSI, "Invalid command "
            "length: %d bytes\n", cmd_len);
        return (0);     /* failure */
    }
    switch (cmd_ptr[0]) {
        /* these commands are defined in RBC: */
    case READ_10:
    case READ_CAPACITY:
    case START_STOP_UNIT:
    case SYNCHRONIZE_CACHE:
    case WRITE_10:
    case 0x2f:          /* VERIFY_10 is absent from
                         * scsi_all.h??? */
    case INQUIRY:
    case MODE_SELECT_10:
    case MODE_SENSE_10:
    case TEST_UNIT_READY:
    case WRITE_BUFFER:
        /*
         * The following commands are not listed in my copy of the
         * RBC specs. CAM however seems to want those, and at least
         * the Sony DSC device appears to support those as well
         */
    case REQUEST_SENSE:
    case PREVENT_ALLOW:

        memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);

        /* some devices require a full 12-byte CDB; zero-pad */
        if ((sc->sc_quirks & RBC_PAD_TO_12) && (cmd_len < 12)) {
            memset(sc->sc_transfer.cmd_data + cmd_len,
                0, 12 - cmd_len);
            cmd_len = 12;
        }
        sc->sc_transfer.cmd_len = cmd_len;
        return (1);     /* success */

        /* All other commands are not legal in RBC */
    default:
        DPRINTF(sc, UDMASS_SCSI, "Unsupported RBC "
            "command 0x%02x\n", cmd_ptr[0]);
        return (0);     /* failure */
    }
}
/*
 * UFI command transform: rewrites the CDB into the fixed 12-byte UFI
 * command block, rejecting commands not supported by UFI.
 *
 * Returns 1 on success, 2 to fake success without sending anything
 * (SYNCHRONIZE_CACHE), 0 for invalid lengths or unsupported commands.
 */
static uint8_t
umass_ufi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
    uint8_t cmd_len)
{
    if ((cmd_len == 0) ||
        (cmd_len > sizeof(sc->sc_transfer.cmd_data))) {
        DPRINTF(sc, UDMASS_SCSI, "Invalid command "
            "length: %d bytes\n", cmd_len);
        return (0);     /* failure */
    }
    /* An UFI command is always 12 bytes in length */
    sc->sc_transfer.cmd_len = UFI_COMMAND_LENGTH;

    /* Zero the command data */
    memset(sc->sc_transfer.cmd_data, 0, UFI_COMMAND_LENGTH);

    switch (cmd_ptr[0]) {
        /*
         * Commands of which the format has been verified. They
         * should work. Copy the command into the (zeroed out)
         * destination buffer.
         */
    case TEST_UNIT_READY:
        if (sc->sc_quirks & NO_TEST_UNIT_READY) {
            /*
             * Some devices do not support this command. Start
             * Stop Unit should give the same results
             */
            DPRINTF(sc, UDMASS_UFI, "Converted TEST_UNIT_READY "
                "to START_UNIT\n");

            sc->sc_transfer.cmd_data[0] = START_STOP_UNIT;
            sc->sc_transfer.cmd_data[4] = SSS_START;
            return (1);
        }
        break;

    case REZERO_UNIT:
    case REQUEST_SENSE:
    case FORMAT_UNIT:
    case INQUIRY:
    case START_STOP_UNIT:
    case SEND_DIAGNOSTIC:
    case PREVENT_ALLOW:
    case READ_CAPACITY:
    case READ_10:
    case WRITE_10:
    case POSITION_TO_ELEMENT:   /* SEEK_10 */
    case WRITE_AND_VERIFY:
    case VERIFY:
    case MODE_SELECT_10:
    case MODE_SENSE_10:
    case READ_12:
    case WRITE_12:
    case READ_FORMAT_CAPACITIES:
        break;

        /*
         * SYNCHRONIZE_CACHE isn't supported by UFI, nor should it be
         * required for UFI devices, so it is appropriate to fake
         * success.
         */
    case SYNCHRONIZE_CACHE:
        return (2);

    default:
        DPRINTF(sc, UDMASS_SCSI, "Unsupported UFI "
            "command 0x%02x\n", cmd_ptr[0]);
        return (0);     /* failure */
    }

    memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
    return (1);         /* success */
}
/*
* 8070i (ATAPI) specific functions
*/
/*
 * ATAPI (8070i) command transform: rewrites the CDB into the fixed
 * 12-byte ATAPI command block.  Unknown commands are passed through
 * with a debug warning rather than rejected.
 *
 * Returns 1 on success, 0 on an invalid command length.
 */
static uint8_t
umass_atapi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
    uint8_t cmd_len)
{
    if ((cmd_len == 0) ||
        (cmd_len > sizeof(sc->sc_transfer.cmd_data))) {
        DPRINTF(sc, UDMASS_SCSI, "Invalid command "
            "length: %d bytes\n", cmd_len);
        return (0);     /* failure */
    }
    /* An ATAPI command is always 12 bytes in length. */
    sc->sc_transfer.cmd_len = ATAPI_COMMAND_LENGTH;

    /* Zero the command data */
    memset(sc->sc_transfer.cmd_data, 0, ATAPI_COMMAND_LENGTH);

    switch (cmd_ptr[0]) {
        /*
         * Commands of which the format has been verified. They
         * should work. Copy the command into the destination
         * buffer.
         */
    case INQUIRY:
        /*
         * some drives wedge when asked for full inquiry
         * information.
         */
        if (sc->sc_quirks & FORCE_SHORT_INQUIRY) {
            memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);

            /* clamp the allocation length (CDB byte 4) */
            sc->sc_transfer.cmd_data[4] = SHORT_INQUIRY_LENGTH;
            return (1);
        }
        break;

    case TEST_UNIT_READY:
        if (sc->sc_quirks & NO_TEST_UNIT_READY) {
            DPRINTF(sc, UDMASS_SCSI, "Converted TEST_UNIT_READY "
                "to START_UNIT\n");
            sc->sc_transfer.cmd_data[0] = START_STOP_UNIT;
            sc->sc_transfer.cmd_data[4] = SSS_START;
            return (1);
        }
        break;

    case REZERO_UNIT:
    case REQUEST_SENSE:
    case START_STOP_UNIT:
    case SEND_DIAGNOSTIC:
    case PREVENT_ALLOW:
    case READ_CAPACITY:
    case READ_10:
    case WRITE_10:
    case POSITION_TO_ELEMENT:   /* SEEK_10 */
    case SYNCHRONIZE_CACHE:
    case MODE_SELECT_10:
    case MODE_SENSE_10:
    case READ_BUFFER:
    case 0x42:          /* READ_SUBCHANNEL */
    case 0x43:          /* READ_TOC */
    case 0x44:          /* READ_HEADER */
    case 0x47:          /* PLAY_MSF (Play Minute/Second/Frame) */
    case 0x48:          /* PLAY_TRACK */
    case 0x49:          /* PLAY_TRACK_REL */
    case 0x4b:          /* PAUSE */
    case 0x51:          /* READ_DISK_INFO */
    case 0x52:          /* READ_TRACK_INFO */
    case 0x54:          /* SEND_OPC */
    case 0x59:          /* READ_MASTER_CUE */
    case 0x5b:          /* CLOSE_TR_SESSION */
    case 0x5c:          /* READ_BUFFER_CAP */
    case 0x5d:          /* SEND_CUE_SHEET */
    case 0xa1:          /* BLANK */
    case 0xa5:          /* PLAY_12 */
    case 0xa6:          /* EXCHANGE_MEDIUM */
    case 0xad:          /* READ_DVD_STRUCTURE */
    case 0xbb:          /* SET_CD_SPEED */
    case 0xe5:          /* READ_TRACK_INFO_PHILIPS */
        break;

    case READ_12:
    case WRITE_12:
    default:
        /* best effort: forward the command anyway */
        DPRINTF(sc, UDMASS_SCSI, "Unsupported ATAPI "
            "command 0x%02x - trying anyway\n",
            cmd_ptr[0]);
        break;
    }

    memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
    return (1);         /* success */
}
/*
 * Placeholder transform that rejects every command; used when no
 * protocol-specific transform applies.
 */
static uint8_t
umass_no_transform(struct umass_softc *sc, uint8_t *cmd,
    uint8_t cmdlen)
{
    return (0);         /* failure */
}
/*
 * Run the protocol-specific command transform and translate its result
 * codes: 2 = fake success (complete the CCB immediately), 0 = invalid
 * command (fail the CCB and freeze the queue), 1 = proceed.
 *
 * Returns 1 when the (transformed) command should be executed, 0 when
 * the CCB was already completed here.
 */
static uint8_t
umass_std_transform(struct umass_softc *sc, union ccb *ccb,
    uint8_t *cmd, uint8_t cmdlen)
{
    uint8_t retval;

    retval = (sc->sc_transform) (sc, cmd, cmdlen);

    if (retval == 2) {
        ccb->ccb_h.status = CAM_REQ_CMP;
        xpt_done(ccb);
        return (0);
    } else if (retval == 0) {
        xpt_freeze_devq(ccb->ccb_h.path, 1);
        ccb->ccb_h.status = CAM_REQ_INVALID | CAM_DEV_QFRZN;
        xpt_done(ccb);
        return (0);
    }
    /* Command should be executed */
    return (1);
}
#ifdef USB_DEBUG
/*
 * Debug helper: pretty-print a Bulk-Only (BBB) Command Block Wrapper.
 * Shows the first six CDB bytes, data length, LUN and direction flag.
 */
static void
umass_bbb_dump_cbw(struct umass_softc *sc, umass_bbb_cbw_t *cbw)
{
    uint8_t *c = cbw->CBWCDB;

    uint32_t dlen = UGETDW(cbw->dCBWDataTransferLength);
    uint32_t tag = UGETDW(cbw->dCBWTag);

    uint8_t clen = cbw->bCDBLength;
    uint8_t flags = cbw->bCBWFlags;
    uint8_t lun = cbw->bCBWLUN;

    DPRINTF(sc, UDMASS_BBB, "CBW %d: cmd = %db "
        "(0x%02x%02x%02x%02x%02x%02x%s), "
        "data = %db, lun = %d, dir = %s\n",
        tag, clen,
        c[0], c[1], c[2], c[3], c[4], c[5], (clen > 6 ? "..." : ""),
        dlen, lun, (flags == CBWFLAGS_IN ? "in" :
        (flags == CBWFLAGS_OUT ? "out" : "<invalid>")));
}
/*
 * Debug helper: pretty-print a Bulk-Only (BBB) Command Status Wrapper,
 * including signature validity, residue and decoded status byte.
 */
static void
umass_bbb_dump_csw(struct umass_softc *sc, umass_bbb_csw_t *csw)
{
    uint32_t sig = UGETDW(csw->dCSWSignature);
    uint32_t tag = UGETDW(csw->dCSWTag);
    uint32_t res = UGETDW(csw->dCSWDataResidue);
    uint8_t status = csw->bCSWStatus;

    DPRINTF(sc, UDMASS_BBB, "CSW %d: sig = 0x%08x (%s), tag = 0x%08x, "
        "res = %d, status = 0x%02x (%s)\n",
        tag, sig, (sig == CSWSIGNATURE ? "valid" : "invalid"),
        tag, res,
        status, (status == CSWSTATUS_GOOD ? "good" :
        (status == CSWSTATUS_FAILED ? "failed" :
        (status == CSWSTATUS_PHASE ? "phase" : "<invalid>"))));
}
/*
 * Debug helper: pretty-print a CBI command block before it is sent.
 * NOTE(review): the debug category is UDMASS_BBB although this is a
 * CBI path — presumably intentional/historic; confirm before changing.
 */
static void
umass_cbi_dump_cmd(struct umass_softc *sc, void *cmd, uint8_t cmdlen)
{
    uint8_t *c = cmd;
    uint8_t dir = sc->sc_transfer.dir;

    DPRINTF(sc, UDMASS_BBB, "cmd = %db "
        "(0x%02x%02x%02x%02x%02x%02x%s), "
        "data = %db, dir = %s\n",
        cmdlen,
        c[0], c[1], c[2], c[3], c[4], c[5], (cmdlen > 6 ? "..." : ""),
        sc->sc_transfer.data_len,
        (dir == DIR_IN ? "in" :
        (dir == DIR_OUT ? "out" :
        (dir == DIR_NONE ? "no data phase" : "<invalid>"))));
}
/*
 * Debug helper: hex-dump up to "printlen" bytes of "buffer" in rows of
 * 16 bytes.  The first row is annotated with the buffer address and
 * total length; a trailing " ..." marks a truncated dump.
 */
static void
umass_dump_buffer(struct umass_softc *sc, uint8_t *buffer, uint32_t buflen,
    uint32_t printlen)
{
    uint32_t i, j;
    char s1[40];
    char s2[40];
    char s3[5];

    s1[0] = '\0';
    s3[0] = '\0';

    ksprintf(s2, " buffer=%p, buflen=%d", buffer, buflen);
    for (i = 0; (i < buflen) && (i < printlen); i++) {
        j = i % 16;
        if (j == 0 && i != 0) {
            /* flush the completed row; the address tag prints once */
            DPRINTF(sc, UDMASS_GEN, "0x %s%s\n",
                s1, s2);
            s2[0] = '\0';
        }
        ksprintf(&s1[j * 2], "%02x", buffer[i] & 0xff);
    }
    if (buflen > printlen)
        ksprintf(s3, " ...");
    DPRINTF(sc, UDMASS_GEN, "0x %s%s%s\n",
        s1, s2, s3);
}
#endif
|
+++
title = "Радио–Т 115"
date = "2008-12-07T07:47:00"
categories = ["podcast"]
filename = "rt_podcast115"
+++

- Главный мышиный юбилей
- Python 3.0 тут. Ура, товарищи!
- Австралийский телефон
- Выход JavaFX
- Как–бы поиск по книгам Яндекса
- Последствия улучшений Google
- Выход новой версии OpenSolaris
- Поет ли "поющая птичка"?
- Чудовищные трафики Google
- Около–эпловые слухи и новинки
- Темы наших слушателей
[аудио](http://cdn.radio-t.com/rt_podcast115.mp3)
<audio src="http://cdn.radio-t.com/rt_podcast115.mp3" preload="none"></audio>
|
# Jaraw
This is the automatically-generated documentation for Jaraw.
For a brief overview, look at the [README](https://github.com/Thimoteus/jaraw/blob/master/README.md).
|
/**
 * Demonstrates that the {@code +} operator is evaluated left to right:
 * integer addition is performed until a String operand is encountered,
 * after which every further {@code +} is string concatenation.
 */
public class StringConcat {
    public static void main(String[] args) {
        // 1 + 2 is int addition first, then "3" + "Hello".
        System.out.println(1 + 2 + "Hello");
        // the output is 3Hello
        System.out.println("Hello" + 1 + 2);
        // the output is Hello12 ("Hello" + 1 -> "Hello1", then + 2 -> "Hello12")
    }
}
|
#!/bin/bash
# Install the Azure CLI on a CentOS/RHEL host and copy an OS-disk VHD
# blob into the "img" container of the given storage account.
#
# Arguments:
#   $1 - storage account name
#   $2 - storage account access key
#   $3 - source URI of the VHD blob to copy

# Some provisioning environments start us without a usable $HOME,
# which the Azure CLI needs for its configuration directory.
if [[ -z "$HOME" || ! -d "$HOME" ]]; then
    echo 'fixing $HOME'
    HOME=/root
fi
export HOME

# Node.js and npm come from EPEL on this platform.
yum install -y epel-release
yum install -y nodejs
yum install -y npm
npm install -g azure-cli

azure config mode arm

export AZURE_STORAGE_ACCOUNT="$1"
export AZURE_STORAGE_ACCESS_KEY="$2"

azure storage container create img
azure storage blob copy start --source-uri="$3" --dest-container img --dest-blob os-disk-img.vhd
logger -t imghelper "copy started: $?"

# Poll every 10 seconds until the server-side blob copy reports success.
# "copyStatus": "success", "copyStatus": "pending"
while :; do
    sleep 10
    if azure storage blob copy show --json img os-disk-img.vhd | grep '"copyStatus": "success"' >/dev/null; then
        break
    fi
done
logger -t imghelper "success"
exit 0
|
use petgraph::stable_graph::NodeIndex;
use crate::graph::CachedStableGraph;
use anyhow::Result;
/// Book-keeping entry for one node on the current DFS path.
struct VisitCount {
    /// The graph node this entry tracks.
    node: NodeIndex,
    /// How many times the traversal has "touched" this node; starts at 1
    /// when the node is entered and is bumped once per finished child.
    touch: usize,
    /// Total number of outgoing edges (children) of `node`.
    children: usize,
}
/// Performs a depth-first search with duplicates
pub struct Dfs<'a> {
    /// Nodes yet to be visited; children are pushed in descending
    /// include-line order so the first include is popped (visited) first.
    stack: Vec<NodeIndex>,
    /// The graph being traversed, borrowed for the iterator's lifetime.
    graph: &'a CachedStableGraph,
    /// The current root-to-node path, used for cycle detection.
    cycle: Vec<VisitCount>
}
impl <'a> Dfs<'a> {
    /// Creates a new traversal rooted at `start`.
    pub fn new(graph: &'a CachedStableGraph, start: NodeIndex) -> Self {
        Dfs {
            stack: vec![start],
            graph,
            cycle: Vec::new()
        }
    }

    /// Called after visiting a leaf: walks back up the recorded path,
    /// bumping each ancestor's visit counter and popping ancestors whose
    /// children have all been visited, stopping at the first ancestor that
    /// still has unvisited children (the branch the DFS resumes from).
    fn reset_path_to_branch(&mut self) {
        while let Some(par) = self.cycle.last_mut() {
            par.touch += 1;
            if par.touch > par.children {
                self.cycle.pop();
            } else {
                break;
            }
        }
    }

    /// Returns a `CycleError` if any of `children` already appears on the
    /// current root-to-node path, i.e. descending into it would revisit an
    /// ancestor and form an include cycle.
    fn check_for_cycle(&self, children: &[NodeIndex]) -> Result<(), error::CycleError> {
        for prev in &self.cycle {
            for child in children {
                if prev.node == *child {
                    // Snapshot the offending path for the error message.
                    let cycle_nodes: Vec<NodeIndex> = self.cycle.iter().map(|n| n.node).collect();
                    return Err(
                        error::CycleError::new(&cycle_nodes, *child, self.graph)
                    );
                }
            }
        }
        Ok(())
    }
}
impl <'a> Iterator for Dfs<'a> {
    type Item = Result<(NodeIndex, Option<NodeIndex>), error::CycleError>;

    /// Yields `(node, parent)` pairs in depth-first order, or a
    /// `CycleError` as soon as an include cycle is detected. `parent` is
    /// `None` only for the root node.
    fn next(&mut self) -> Option<Result<(NodeIndex, Option<NodeIndex>), error::CycleError>> {
        // The last entry on the current path (if any) is the parent of
        // whatever node we pop next.
        let parent = self.cycle.last().map(|p| p.node);

        if let Some(node) = self.stack.pop() {
            self.cycle.push(VisitCount{
                node,
                children: self.graph.graph.edges(node).count(),
                touch: 1,
            });

            let mut children = self.graph.child_node_indexes(node);

            if !children.is_empty() {
                // Sort descending by include-line in the parent so that,
                // once pushed onto the stack, the child appearing first in
                // the parent file is popped (visited) first.
                children.sort_by(|x, y| {
                    let graph = &self.graph.graph;
                    let edge1 = graph.edge_weight(graph.find_edge(node, *x).unwrap()).unwrap();
                    let edge2 = graph.edge_weight(graph.find_edge(node, *y).unwrap()).unwrap();
                    edge2.line.cmp(&edge1.line)
                });

                if let Err(e) = self.check_for_cycle(&children) {
                    return Some(Err(e));
                }

                for child in children {
                    self.stack.push(child);
                }
            } else {
                // Leaf node: unwind the recorded path back to the nearest
                // ancestor that still has unvisited children.
                self.reset_path_to_branch();
            }

            Some(Ok((node, parent)))
        } else {
            None
        }
    }
}
pub mod error {
    use petgraph::stable_graph::NodeIndex;
    use std::{fmt::{Debug, Display}, path::PathBuf, error::Error as StdError};
    use crate::{graph::CachedStableGraph, consts};
    use rust_lsp::lsp_types::{Diagnostic, DiagnosticSeverity, Position, Range};

    /// Error signalling that the include graph contains a cycle.
    /// Holds the chain of file paths forming the cycle, in traversal order.
    #[derive(Debug)]
    pub struct CycleError(Vec<PathBuf>);

    impl StdError for CycleError {}

    impl CycleError {
        /// Builds a `CycleError` from the node path leading up to the cycle
        /// plus the node that closed it, resolving each node index to its
        /// file path via the graph.
        pub fn new(nodes: &[NodeIndex], current_node: NodeIndex, graph: &CachedStableGraph) -> Self {
            let mut resolved_nodes: Vec<PathBuf> = nodes.iter().map(|i| graph.get_node(*i)).collect();
            resolved_nodes.push(graph.get_node(current_node));
            CycleError(resolved_nodes)
        }
    }

    impl Display for CycleError {
        // Renders "A imports B, which imports C" style text.
        // NOTE(review): indexes self.0[0] and self.0[len-1] directly, so it
        // assumes at least two paths. CycleError::new always appends the
        // closing node, so this holds for current callers - confirm if new
        // call sites are added.
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            let mut disp = String::new();
            disp.push_str(format!("Include cycle detected:\n{:?} imports ", self.0[0]).as_str());
            for p in &self.0[1..self.0.len()-1] {
                disp.push_str(format!("\n{:?}, which imports ", *p).as_str())
;
            }
            disp.push_str(format!("\n{:?}", self.0[self.0.len()-1]).as_str());
            f.write_str(disp.as_str())
        }
    }

    impl From<CycleError> for Diagnostic {
        /// Converts the error into an LSP diagnostic anchored at the top of
        /// the file (row 0), since a cycle has no single offending line.
        fn from(e: CycleError) -> Diagnostic {
            Diagnostic{
                severity: Some(DiagnosticSeverity::Error),
                range: Range::new(Position::new(0, 0), Position::new(0, 500)),
                source: Some(consts::SOURCE.into()),
                message: e.into(),
                code: None,
                tags: None,
                related_information: None,
                code_description: Option::None,
                data: Option::None,
            }
        }
    }

    impl From<CycleError> for String {
        /// Uses the `Display` rendering as the string form.
        fn from(e: CycleError) -> String {
            format!("{}", e)
        }
    }
}
|
<?php
namespace Gaw508\Queue;
use Aws\Sqs\SqsClient;
use Gaw508\Queue\Exception\QueueException;
/**
 * Sqs queue class
 *
 * Queue interface implementation for Amazon SQS
 *
 * @author George Webb <george@webb.uno>
 * @package Gaw508\Queue
 */
class Sqs implements QueueInterface
{
    /**
     * The sqs url
     *
     * @var string
     */
    private $sqs_url;

    /**
     * The sqs client
     *
     * @var SqsClient
     */
    private $sqs_client;

    /**
     * The long poll time in seconds - reduces number of requests to SQS API, defaults to 10 seconds
     *
     * @var int
     */
    private $long_poll_time;

    /**
     * Sqs constructor.
     *
     * @param array $config Config data for the queue: 'sqs_client' (SqsClient),
     *                      'queue_url' (string) and 'long_poll_time' (int) keys.
     */
    public function __construct($config)
    {
        $this->sqs_client = isset($config['sqs_client']) ? $config['sqs_client'] : null;
        $this->sqs_url = isset($config['queue_url']) ? $config['queue_url'] : '';
        $this->long_poll_time = isset($config['long_poll_time']) ? $config['long_poll_time'] : 10;
    }

    /**
     * Puts a message object into the queue
     *
     * @param Message $message The message object to insert
     * @throws QueueException When the operation fails
     * @return void
     */
    public function put(Message $message)
    {
        try {
            // Send the message
            $this->sqs_client->sendMessage(array(
                'QueueUrl' => $this->sqs_url,
                'MessageBody' => $message->getDataAsJson()
            ));
        } catch (\Exception $e) {
            throw new QueueException('Error putting message to SQS queue: ' . $e->getMessage());
        }
    }

    /**
     * Gets a message off the queue and returns a Message object
     *
     * @throws QueueException When the operation fails
     * @return Message|bool The message object received from the queue or false if no message available
     */
    public function get()
    {
        try {
            // Receive a message from the queue, long polling to reduce the
            // number of empty responses
            $result = $this->sqs_client->receiveMessage(array(
                'QueueUrl' => $this->sqs_url,
                'WaitTimeSeconds' => $this->long_poll_time
            ));

            if (empty($result['Messages'])) {
                // No message to process
                return false;
            }

            // Get the message and return it
            $result_message = array_pop($result['Messages']);
            return Message::createFromQueue(
                $result_message['ReceiptHandle'],
                $result_message['Body']
            );
        } catch (\Exception $e) {
            throw new QueueException('Error getting message from SQS queue: ' . $e->getMessage());
        }
    }

    /**
     * Deletes a message off the queue
     *
     * @param Message $message The message object to delete
     * @throws QueueException When the operation fails
     * @return void
     */
    public function delete(Message $message)
    {
        try {
            // Delete the message
            $this->sqs_client->deleteMessage(array(
                'QueueUrl' => $this->sqs_url,
                'ReceiptHandle' => $message->getHandle()
            ));
        } catch (\Exception $e) {
            throw new QueueException('Error deleting message from SQS queue: ' . $e->getMessage());
        }
    }

    /**
     * Releases a message back to the queue, so it can be picked up again
     *
     * @param Message $message The message object to release
     * @throws QueueException When the operation fails
     * @return void
     */
    public function release(Message $message)
    {
        try {
            // Set the visibility timeout to 0 to make the message visible in the queue again straight away
            $this->sqs_client->changeMessageVisibility(array(
                'QueueUrl' => $this->sqs_url,
                'ReceiptHandle' => $message->getHandle(),
                'VisibilityTimeout' => 0
            ));
        } catch (\Exception $e) {
            throw new QueueException('Error releasing message back to SQS queue: ' . $e->getMessage());
        }
    }
}
|
use crate::tags::CompressionMethod;
use std::io::{self, Write};
mod deflate;
mod lzw;
mod packbits;
mod uncompressed;
pub use self::deflate::{Deflate, DeflateLevel};
pub use self::lzw::Lzw;
pub use self::packbits::Packbits;
pub use self::uncompressed::Uncompressed;
/// An algorithm used for compression
pub trait CompressionAlgorithm {
    /// The algorithm writes data directly into the writer.
    /// It returns the total number of bytes written, or the underlying
    /// writer's `io::Error` on failure.
    fn write_to<W: Write>(&mut self, writer: &mut W, bytes: &[u8]) -> Result<u64, io::Error>;
}
/// An algorithm used for compression with associated enums and optional configurations.
pub trait Compression: CompressionAlgorithm {
    /// The corresponding tag to the algorithm.
    const COMPRESSION_METHOD: CompressionMethod;

    /// Method to obtain a type that can store each variant of compression algorithm.
    fn get_algorithm(&self) -> Compressor;
}
/// An enum to store each compression algorithm.
pub enum Compressor {
    /// No compression; bytes are written through as-is.
    Uncompressed(Uncompressed),
    /// LZW compression.
    Lzw(Lzw),
    /// Deflate compression (level configurable via `DeflateLevel`).
    Deflate(Deflate),
    /// PackBits run-length encoding.
    Packbits(Packbits),
}
impl Default for Compressor {
    /// The default compression strategy does not apply any compression.
    fn default() -> Self {
        // Wrap the no-op algorithm so callers always get valid output.
        Compressor::Uncompressed(Uncompressed::default())
    }
}
impl CompressionAlgorithm for Compressor {
    /// Forwards the call to whichever algorithm this enum currently holds
    /// and returns the number of bytes it wrote.
    fn write_to<W: Write>(&mut self, writer: &mut W, bytes: &[u8]) -> Result<u64, io::Error> {
        match self {
            Compressor::Uncompressed(inner) => inner.write_to(writer, bytes),
            Compressor::Lzw(inner) => inner.write_to(writer, bytes),
            Compressor::Deflate(inner) => inner.write_to(writer, bytes),
            Compressor::Packbits(inner) => inner.write_to(writer, bytes),
        }
    }
}
#[cfg(test)]
mod tests {
    // Shared fixture: the input bytes used by the per-algorithm
    // compression tests in the submodules.
    pub const TEST_DATA: &'static [u8] =
        b"This is a string for checking various compression algorithms.";
}
|
-- Fetch the tariff rows (id, name, rate) matching a vehicle type and day type.
-- Named parameters: :tipoVehiculo (vehicle type), :tipoDia (day type).
select id, nombre, valor, tipo_vehiculo, tipo_dia
from public.tarifa
where tipo_vehiculo = :tipoVehiculo
and tipo_dia = :tipoDia
|
package io.github.cottonmc.jsonfactory.gui
import java.io.FileNotFoundException
import javax.sound.sampled.AudioSystem
import javax.sound.sampled.Clip
import javax.sound.sampled.LineEvent
/**
 * Preloaded UI sound clips, loaded once from classpath resources.
 */
internal object Sounds {
    // Sound by Headphaze
    // https://freesound.org/people/Headphaze/sounds/277032/
    val finished = load("finished")

    // Sound by original_sound
    // https://freesound.org/people/original_sound/sounds/366102/
    val confirm = load("confirm")

    /**
     * Loads a classpath WAV resource into a [Clip] that rewinds itself
     * after each playback so it can be replayed.
     *
     * @throws FileNotFoundException if the resource is missing
     */
    private fun load(path: String): Clip {
        val url = Sounds::class.java.getResource("/json-factory/sounds/$path.wav")
            ?: throw FileNotFoundException("Sound clip $path not found")

        val clip = AudioSystem.getClip()
        // Clip.open loads the stream's audio data, so the stream can (and
        // should) be closed afterwards - the original code leaked it.
        AudioSystem.getAudioInputStream(url).use { stream ->
            clip.open(stream)
        }

        clip.addLineListener {
            // Rewind on stop so the next start() plays from the beginning.
            if (it.type == LineEvent.Type.STOP)
                clip.framePosition = 0
        }
        return clip
    }
}
|
package top.srsea.lever.common
import android.util.Log
/**
 * Curried form of Log#println(int, String, String): supply a tag to get a
 * function of priority, which in turn yields a function of message.
 */
fun logger(tag: String = Logger.GLOBAL_TAG): (priority: Int) -> (msg: String) -> Unit = { priority ->
    { msg ->
        Log.println(priority, tag, msg)
    }
}

/**
 * Creates a new logger which uses the receiver's simple class name as tag.
 */
fun Any.logger(priority: Int) = logger(tag = javaClass.simpleName)(priority)

// Priority-specific shortcuts for the class-name-tagged logger above.
fun Any.debugLogger() = logger(Log.DEBUG)
fun Any.errorLogger() = logger(Log.ERROR)
fun Any.warnLogger() = logger(Log.WARN)
fun Any.infoLogger() = logger(Log.INFO)
fun Any.verboseLogger() = logger(Log.VERBOSE)
fun Any.assertLogger() = logger(Log.ASSERT)

/**
 * Each access will create a new logger, just like Any#logger(Int).
 * It is recommended to use these extension properties only when debugging.
 */
inline val Any.debug: (msg: String) -> Unit get() = debugLogger()
inline val Any.error: (msg: String) -> Unit get() = errorLogger()
inline val Any.warn: (msg: String) -> Unit get() = warnLogger()
inline val Any.info: (msg: String) -> Unit get() = infoLogger()
inline val Any.verbose: (msg: String) -> Unit get() = verboseLogger()
inline val Any.assert: (msg: String) -> Unit get() = assertLogger()
|
//! Benchmark packet serialization and deserialization.
use bytes::BytesMut;
use chrono::{DateTime, Utc};
use criterion::{criterion_group, criterion_main, Criterion};
use lazy_static::lazy_static;
use ilp::{ErrorCode, Fulfill, Prepare, Reject};
use ilp::{FulfillBuilder, PrepareBuilder, RejectBuilder};
use interledger_packet as ilp;
lazy_static! {
    // Representative Prepare packet: fixed amount and expiry, a 32-byte
    // execution condition and 32 bytes of payload data.
    static ref PREPARE: PrepareBuilder<'static> = PrepareBuilder {
        amount: 107,
        expires_at: DateTime::parse_from_rfc3339("2017-12-23T01:21:40.549Z")
            .unwrap()
            .with_timezone(&Utc)
            .into(),
        execution_condition: b"\
            \x74\xe1\x13\x6d\xc7\x1c\x9e\x5f\x28\x3b\xec\x83\x46\x1c\xbf\x12\
            \x61\xc4\x01\x4f\x72\xd4\x8f\x8d\xd6\x54\x53\xa0\xb8\x4e\x7d\xe1\
        ",
        destination: b"example.alice",
        data: b"\
            \x5d\xb3\x43\xfd\xc4\x18\x98\xf6\xdf\x42\x02\x32\x91\x39\xdc\x24\
            \x2d\xd0\xf5\x58\xa8\x11\xb4\x6b\x28\x91\x8f\xda\xb3\x7c\x6c\xb0\
        ",
    };
    // Matching Fulfill packet: 32-byte fulfillment plus the same payload.
    static ref FULFILL: FulfillBuilder<'static> = FulfillBuilder {
        fulfillment: b"\
            \x11\x7b\x43\x4f\x1a\x54\xe9\x04\x4f\x4f\x54\x92\x3b\x2c\xff\x9e\
            \x4a\x6d\x42\x0a\xe2\x81\xd5\x02\x5d\x7b\xb0\x40\xc4\xb4\xc0\x4a\
        ",
        data: b"\
            \x5d\xb3\x43\xfd\xc4\x18\x98\xf6\xdf\x42\x02\x32\x91\x39\xdc\x24\
            \x2d\xd0\xf5\x58\xa8\x11\xb4\x6b\x28\x91\x8f\xda\xb3\x7c\x6c\xb0\
        ",
    };
    // Reject packet with an application-level error code and payload.
    static ref REJECT: RejectBuilder<'static> = RejectBuilder {
        code: ErrorCode::F99_APPLICATION_ERROR,
        message: b"Some error",
        triggered_by: b"example.connector",
        data: b"\
            \x5d\xb3\x43\xfd\xc4\x18\x98\xf6\xdf\x42\x02\x32\x91\x39\xdc\x24\
            \x2d\xd0\xf5\x58\xa8\x11\xb4\x6b\x28\x91\x8f\xda\xb3\x7c\x6c\xb0\
        ",
    };
}
/// Benchmarks serialization of the three ILP packet types by rebuilding
/// each packet inside the hot loop and comparing it against a pre-built
/// reference buffer (the assert also keeps the work from being optimized
/// away).
fn benchmark_serialize(c: &mut Criterion) {
    let expected_prepare = BytesMut::from(PREPARE.build());
    c.bench_function("Prepare (serialize)", move |b| {
        b.iter(|| assert_eq!(BytesMut::from(PREPARE.build()), expected_prepare));
    });

    let expected_fulfill = BytesMut::from(FULFILL.build());
    c.bench_function("Fulfill (serialize)", move |b| {
        b.iter(|| assert_eq!(BytesMut::from(FULFILL.build()), expected_fulfill));
    });

    let expected_reject = BytesMut::from(REJECT.build());
    c.bench_function("Reject (serialize)", move |b| {
        b.iter(|| assert_eq!(BytesMut::from(REJECT.build()), expected_reject));
    });
}
/// Benchmarks deserialization of the three ILP packet types from
/// pre-serialized buffers, spot-checking a field or two of each parsed
/// packet so the parse is not optimized away.
fn benchmark_deserialize(c: &mut Criterion) {
    let prepare_buf = BytesMut::from(PREPARE.build());
    c.bench_function("Prepare (deserialize)", move |b| {
        b.iter(|| {
            let packet = Prepare::try_from(prepare_buf.clone()).unwrap();
            assert_eq!(packet.amount(), PREPARE.amount);
            assert_eq!(packet.destination(), PREPARE.destination);
        });
    });

    let fulfill_buf = BytesMut::from(FULFILL.build());
    c.bench_function("Fulfill (deserialize)", move |b| {
        b.iter(|| {
            let packet = Fulfill::try_from(fulfill_buf.clone()).unwrap();
            assert_eq!(packet.fulfillment(), FULFILL.fulfillment);
        });
    });

    let reject_buf = BytesMut::from(REJECT.build());
    c.bench_function("Reject (deserialize)", move |b| {
        b.iter(|| {
            let packet = Reject::try_from(reject_buf.clone()).unwrap();
            assert_eq!(packet.code(), REJECT.code);
        });
    });
}
// Register both benchmark suites with a larger-than-default sample size
// for more stable measurements of these fast operations.
criterion_group! {
    name = benches;
    config = Criterion::default()
        .sample_size(1000);
    targets =
        benchmark_serialize,
        benchmark_deserialize,
}
criterion_main!(benches);
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.