text
stringlengths
27
775k
#!/usr/bin/env bash
# Shrink every PDF changed in a pull request.
#   $1 - GitHub API URL returning the PR's file list (JSON objects with .filename).
# Already-processed PDFs are recorded in "shrank-pdfs" so reruns skip them.

script_path='./scripts/shrink-pr-pdfs'
record_file="${script_path}/shrank-pdfs"

# Make sure the record file exists; otherwise grep errors on the first run.
touch "${record_file}"

# Names of changed files that end in .pdf, one per line.
FILES=$(curl -s -X GET -G "$1" | jq -r '.[] | .filename' | grep '\.pdf$')

if [ -n "$FILES" ]; then
  # read line-by-line instead of word-splitting, so paths with spaces survive
  while IFS= read -r file; do
    if grep -qF "$file" "${record_file}"; then
      echo "already shrank '$file'"
    else
      echo "shrinking '$file'..."
      # shrink into a temp file, then replace the original atomically
      tmpfile=$(mktemp)
      "${script_path}/shrinkpdf.sh" "$file" "${tmpfile}"
      mv "${tmpfile}" "${file}"
      echo "${file}" >> "${record_file}"
      echo "shrank '$file'"
    fi
  done <<< "$FILES"
else
  echo "no pdf files changed!"
fi
/* example code for cc65, for NES
 * testing the zapper gun on controller slot 2
 * using neslib
 * Doug Fraker 2018
 */

#include "LIB/neslib.h"
#include "LIB/nesdoug.h"
#include "LIB/zaplib.h"
#include "Zapper.h"
#include "NES_ST/Zap_Test.h"
#include "Sprites.h"

/* BG palette: only the first 4-color group is populated. */
const unsigned char pal1[]={
0x0f, 0x00, 0x10, 0x30,
0,0,0,0,
0,0,0,0,
0,0,0,0
};

/* Sprite palette: four 4-color groups of standard NES color indexes. */
const unsigned char pal2[]={
0x0f, 0x12, 0x22, 0x30,
0x0f, 0x15, 0x25, 0x30,
0x0f, 0x17, 0x27, 0x30,
0x0f, 0x19, 0x29, 0x30,
};

/* Main game loop: spawn stars, detect zapper hits, keep score. */
void main (void) {

	ppu_off(); // screen off

	pal_bg(pal1); // load the palette
	pal_spr(pal2); // load the palette

	bank_spr(1); // sprites use the 2nd tileset

	vram_adr(NAMETABLE_A);
	// this sets a start position on the BG, top left of screen
	// vram_adr() and vram_unrle() need to be done with the screen OFF
	vram_unrle(Zap_Test);
	// this unpacks an rle compressed full nametable
	// created by NES Screen Tool

	ppu_wait_nmi(); // wait

	// music_play(0); // silence

	set_vram_buffer(); // points ppu update to vram_buffer, do this at least once

	ppu_on_all(); // turn on screen

	while (1){ // infinite loop

		ppu_wait_nmi(); // wait till beginning of the frame

		oam_clear();

		zapper_ready = pad2_zapper^1; // XOR last frame, make sure not held down still

		// is trigger pulled?
		pad2_zapper = zap_shoot(1); // controller slot 2

		if(star_active){
			move_star();
			draw_star();

			if((pad2_zapper)&&(zapper_ready)){
				// trigger pulled, play bang sound
				sfx_play(0,0);

				// bg off, project white boxes
				oam_clear();
				draw_box(); // redraw the star as a box
				ppu_mask(0x16); // BG off, won't happen till NEXT frame
				ppu_wait_nmi(); // wait till the top of the next frame
				// this frame will display no BG and a white box

				oam_clear(); // clear the NEXT frame
				draw_star(); // draw a star on the NEXT frame
				ppu_mask(0x1e); // bg on, won't happen till NEXT frame

				hit_detected = zap_read(1); // look for light in zapper, port 2
				if(hit_detected){
					++score1;
					adjust_score();

					// delete object, set wait
					star_active = 0;
					star_wait = 20; // to time the next spawn
				}
				// if hit failed, it should have already ran into the next nmi
			}
		}
		else if(star_wait){
			--star_wait;
		}
		else{
			new_star();
		}
	}
}

/* Applies gravity + horizontal motion; deactivates the star off-screen. */
void move_star(void){
	// gravity, star_y_speed = 16 bit, upper 8 bits = pixel, lower = subpixel
	star_y_speed += 0x0010;
	// clamp falling speed at 4 px/frame; values >= 0x8000 are "negative" (rising)
	if((star_y_speed < 0x8000) && (star_y_speed > 0x0400)) star_y_speed = 0x0400;

	star_x += star_x_speed;
	if(star_x >= 0xf000) star_active = 0;

	star_y += star_y_speed;
	if(star_y >= 0xe000) star_active = 0;
}

/* Increments the four decimal score digits with carry, caps at 9999,
 * and pushes the digits to the BG via the vram buffer. */
void adjust_score(void){
	if(score1 >= 10){
		score1 = 0;
		++score10;
		if(score10 >= 10){
			score10 = 0;
			++score100;
			if(score100 >= 10){
				score100 = 0;
				++score1000;
			}
		}
	}
	if(score1000 >= 10){ // maximum 9999
		score1000 = 9;
		score100 = 9;
		score10 = 9;
		score1 = 9;
	}

	// copy score to BG; +0x30 converts a digit to its '0'-based tile index
	temp1 = score1000 + 0x30;
	one_vram_buffer(temp1, NTADR_A(9,4));
	temp1 = score100 + 0x30;
	one_vram_buffer(temp1, NTADR_A(10,4));
	temp1 = score10 + 0x30;
	one_vram_buffer(temp1, NTADR_A(11,4));
	temp1 = score1 + 0x30;
	one_vram_buffer(temp1, NTADR_A(12,4));
}

/* Spawns a star with a random x position/speed and an initial upward speed. */
void new_star(void){
	star_active = 1;
	star_color = (star_color + 1) & 1; // 0 or 1
	temp1 = rand8();
	star_x = (temp1 << 7) + 0x4000; // should give 0x4000-0xbf80
	star_y = 0xd000;
	// int
	temp1 = rand8();
	star_x_speed = ((temp1 & 0x1f) - 0x0f) << 4;
	star_y_speed = 0xfc00; // 16-bit -0x0400, i.e. initial upward motion
}

/* Draws the star's position as a solid white box (for zapper light detection). */
void draw_box(void){
	temp1 = high_byte(star_x);
	temp2 = high_byte(star_y);
	oam_meta_spr(temp1, temp2, WhiteBox);
}

/* Draws the star metasprite, alternating between the two color variants. */
void draw_star(void){
	temp1 = high_byte(star_x);
	temp2 = high_byte(star_y);
	if(star_color == 0){
		oam_meta_spr(temp1, temp2, StarDark);
	}
	else{
		oam_meta_spr(temp1, temp2, StarLight);
	}
}
#!/bin/sh
#
# Script to install ansible in the vagrant local environment.
# Notice: No virtualenv here because virtualenvs are not working with mapped vagrant folders...
#
# import helper function (these will provide section_echo and the install* functions)
#
source /opt/ansible/scripts/helper_functions.sh # need to use full path here or vagrant won't find it...

section_echo "Ansible Controller setup started"
echo .

PLAYBOOKS="/opt/ansible"
echo "playbooks directory='$PLAYBOOKS'"

# STORAGE="$PLAYBOOKS/storage"
# echo "storage directory='$STORAGE'"
# PYTHON_INDEX_URL="file://$STORAGE/python/repo/simple"
# echo "python package index='$PYTHON_INDEX_URL'"
# NOTE(review): despite the old "without internet connection" claim, the pip
# installs below pull from the public index — the offline index above is
# commented out. Confirm which behavior is intended.

# -y everywhere so provisioning never blocks on an interactive prompt.
sudo apt-get update
sudo apt-get install -y software-properties-common
sudo apt-get install -y python3-pip nano

# Install ansible and its python dependencies in one pass
# (the original installed "ansible" twice).
sudo pip3 install "ansible" "piprepo" "paramiko" "yamllint" "requests" "jinja2" "pyYAML" "docker"
sudo ansible --version

section_echo "creating ansible config for current environment..."
cp $PLAYBOOKS/ansible.vagrant_dev.cfg $PLAYBOOKS/ansible.cfg

section_echo "changing some default settings..."
echo "export EDITOR=nano" | tee -a /home/vagrant/.bashrc
echo "alias a=ansible-playbook" | tee -a /home/vagrant/.bashrc
echo "cd $PLAYBOOKS" | tee -a /home/vagrant/.bashrc

section_echo "Ansible Controller setup finished."
use crate::architecture::arm::ArmChipInfo;

/// Information about a chip which is used
/// for automatic detection of the connected chip.
///
/// For ARM-based chips, the function [ArmProbeInterface::read_from_rom_table] is
/// used to read the information from the target.
///
/// [ArmProbeInterface::read_from_rom_table]: crate::architecture::arm::communication_interface::ArmProbeInterface::read_from_rom_table
#[derive(Debug)]
pub(crate) enum ChipInfo {
    /// ARM specific information for chip
    /// auto-detection. See [ArmChipInfo].
    Arm(ArmChipInfo),
}

/// Convenience conversion so ARM detection results can be returned
/// as the generic [`ChipInfo`] directly.
impl From<ArmChipInfo> for ChipInfo {
    fn from(info: ArmChipInfo) -> Self {
        ChipInfo::Arm(info)
    }
}
-- vim: set ts=2 sw=2 sts=0 ff=unix foldmethod=indent:
{-# LANGUAGE OverloadedStrings #-}

-- | A small CSV parser built on Parsec (CRLF line endings, double-quote
-- escaping). Failures in 'parseCsv' are reported via 'error'.
module MixKenallGeocode.Csv
( Csv, CsvRow, csv, withCsv, parseCsv )
where

import System.IO
import Control.Applicative
import qualified Data.Text as T
import qualified Text.Parsec as P
import qualified Text.Parsec.Text as P
import MixKenallGeocode.Util

type CsvParser = P.Parsec T.Text ()
type Csv = [CsvRow]
type CsvRow = [String]

-- | A CSV document: zero or more rows.
csv :: CsvParser Csv
csv = P.many csvRow

-- | One row: comma-separated fields terminated by CRLF.
csvRow :: CsvParser CsvRow
csvRow = csvField `P.sepBy1` sep <* eol

-- | A field is either a quoted (escaped) field or a run of plain characters.
-- NOTE(review): (<#>) comes from MixKenallGeocode.Util — presumably a choice
-- combinator like (<|>); confirm its definition.
csvField :: CsvParser String
csvField = escapedField <#> (P.many normalText)

-- | A double-quoted field; a doubled quote, separator, CR or LF are
-- all allowed inside the quotes.
escapedField :: CsvParser String
escapedField = esc *> _field <* esc
  where
    _field = P.many $ (esc >> esc) <#> sep <#> cr <#> lf <#> normalText

-- | Any character that needs no quoting.
normalText :: CsvParser Char
normalText = P.noneOf ['"', ',', '\r', '\n']

-- CSV field separator
sep :: CsvParser Char
sep = P.char ','

-- | End of row: CRLF only.
eol :: CsvParser Char
eol = cr >> lf

cr :: CsvParser Char
cr = P.char '\r'

lf :: CsvParser Char
lf = P.char '\n'

-- | CSV escape (quote) character
esc :: CsvParser Char
esc = P.char '"'

-- | Run an IO action over the parsed contents of a CSV file.
withCsv :: FilePath -> (Csv -> IO a) -> IO()
withCsv path task = withContents path $ \txt -> task (parseCsv txt) >> return ()

withContents :: FilePath -> (String -> IO a) -> IO()
withContents path task =
  withFile path ReadMode $ \h -> hGetContents h >>= task >> return ()

-- | Parse a CSV string; calls 'error' with the parse error on failure.
parseCsv :: String -> Csv
parseCsv txt =
  case P.parse csv "* Parse Error *" (T.pack txt) of
    Left err -> error $ show err
    Right x -> x
# Serializes a Recipe with flattened association data
# (category name, ingredient names, step id/text pairs).
class RecipeSerializer < ActiveModel::Serializer
  attributes :name, :description, :category, :portions, :tip, :time,
             :ingredients, :steps, :image, :reviews_count, :reviews_average

  belongs_to :user
  has_many :reviews

  # Name of the recipe's category record.
  def category
    object.category.name
  end

  # Ingredient names only, without the full records.
  def ingredients
    object.ingredients.pluck(:name)
  end

  # Steps as an array of { id:, step: } hashes.
  def steps
    object.steps.pluck(:id, :step).map { |id, step| { id: id, step: step } }
  end

  # Include the scope so cached output is keyed per viewer.
  def cache_key
    [object, scope]
  end
end
<?php

declare(strict_types=1);

namespace Pt\LaravelAdminWebUpload\Form;

/**
 * Shared helpers for upload form fields: default data-attributes,
 * storage-platform switches, and the multi-file "kit" view setup.
 */
trait BaseKit
{
    /**
     * Sets an attribute only when it has not already been set.
     *
     * @param string $attribute
     * @param string $value
     *
     * @return $this
     */
    protected function defaultAttribute($attribute, $value): static
    {
        if (!array_key_exists($attribute, $this->attributes)) {
            $this->attribute($attribute, $value);
        }
        return $this;
    }

    /** Applies the baseline upload data-attributes (size limit, scene, platform). */
    protected function setDefaultAttribute(): static
    {
        // 30 * 1024 * 1024 == 31457280 (max upload size in bytes)
        return $this->defaultAttribute('data-size', 31457280)
            ->defaultAttribute('data-scene', '')
            ->defaultAttribute('data-other', '')
            ->defaultAttribute('data-platform', 'local');
    }

    /** Selects Qiniu as the storage platform (unless one was already set). */
    public function toQiniu(): static
    {
        return $this->defaultAttribute('data-platform', 'qiniu');
    }

    /** Selects Aliyun as the storage platform (unless one was already set). */
    public function toAliyun(): static
    {
        return $this->defaultAttribute('data-platform', 'aliyun');
    }

    /** Selects local storage as the platform (unless one was already set). */
    public function toLocal(): static
    {
        return $this->defaultAttribute('data-platform', 'local');
    }

    /** upload-type: 1 = image, 2 = audio, 3 = video, 4 = file */
    protected function uploadType(int $type = 1): static
    {
        return $this->addVariables(['uploadType' => $type]);
    }

    /** Renders the multi-file upload view with the given settings merged in. */
    public function kitFiles(array $data): static
    {
        $this->view = 'laravel-admin-webupload::files';
        $this->addVariables(array_merge([
            'src' => 'src',
            // maximum number of files
            'max'=>12,
            // minimum number of files; -1 = unlimited
            'min'=>1,
            // file list
            // 'files' => [
            //     [
            //         'other'=>'',
            //         'src'=>'',
            //     ]
            // ]
        ],$data));
        return $this;
    }
}
// Copyright 2021 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef THIRD_PARTY_BLINK_RENDERER_EXTENSIONS_CHROMEOS_SYSTEM_EXTENSIONS_WINDOW_MANAGEMENT_CROS_WINDOW_H_ #define THIRD_PARTY_BLINK_RENDERER_EXTENSIONS_CHROMEOS_SYSTEM_EXTENSIONS_WINDOW_MANAGEMENT_CROS_WINDOW_H_ #include "third_party/blink/renderer/platform/bindings/script_wrappable.h" namespace blink { class DOMPoint; class DOMRect; class CrosWindowManagement; class CrosWindow : public ScriptWrappable { DEFINE_WRAPPERTYPEINFO(); public: CrosWindow(CrosWindowManagement* manager); void Trace(Visitor*) const override; size_t hash(); String title(); String appId(); bool isFullscreen(); bool isMinimised(); bool isVisible(); DOMPoint* origin(); DOMRect* bounds(); bool setOrigin(double x, double y); bool setBounds(double x, double y, double width, double height); bool setFullscreen(bool value); bool maximize(); bool minimize(); bool raise(); bool focus(); bool close(); private: Member<CrosWindowManagement> manager_; }; } // namespace blink #endif // THIRD_PARTY_BLINK_RENDERER_EXTENSIONS_CHROMEOS_SYSTEM_EXTENSIONS_WINDOW_MANAGEMENT_CROS_WINDOW_H_
package org.mjstudio.gfree.data.database

import androidx.room.Dao
import androidx.room.Insert
import androidx.room.Query
import org.mjstudio.gfree.domain.dto.NotiDTO

/**
 * Room DAO for notification rows.
 */
@Dao
interface NotiDAO {
    /** Inserts a single notification. */
    @Insert
    suspend fun insertNoti(item : NotiDTO)

    /** Inserts all given notifications. */
    @Insert
    suspend fun insertAll(vararg items : NotiDTO)

    /**
     * Returns the newest notifications first, capped at [limit].
     * NOTE(review): queries table "Notification" — confirm NotiDTO's
     * @Entity tableName matches (the DTO itself is not visible here).
     */
    @Query("SELECT * FROM Notification ORDER BY created DESC LIMIT :limit")
    suspend fun getRecentNotifications(limit : Int) : List<NotiDTO>
}
import asyncio
import datetime
import threading
import time

from twitter_listener import TwitterListener
from StreamerTests.twitter_keys_hidden import api_key, api_secret, access_token_secret, access_token
import tweepy


# Callback invoked by the listener for every received tweet.
def push_tweet(author: str, text: str, created_at: datetime.datetime):
    print("{author} tweeted at {at}: {text}".format(author=author, at=created_at, text=text))


# Callback invoked by the listener on stream errors.
def on_error(msg: str):
    print("Error: " + msg)


# Callback invoked by the listener on stream warnings.
def on_warning(msg: str):
    print("Warning: " + msg)


# NOTE(review): nothing awaits this coroutine — the stream below is started
# with is_async=True instead. Looks like dead code; confirm before removing.
async def listen(source, track):
    source.filter(track=track)


# Manual smoke test: stream matching tweets for 30 seconds, then disconnect.
auth = tweepy.OAuthHandler(api_key, api_secret)
auth.set_access_token(access_token, access_token_secret)

listener = TwitterListener(push_tweet, on_error, on_warning)
stream = tweepy.Stream(auth, listener)
stream.filter(track=['flutter', 'alteryx', 'Elon'], is_async=True)

time.sleep(30)
stream.disconnect()
/**
 * Copyright 2019, OpenCensus Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * This module contains the functions for serializing and deserializing
 * TagMap (TagContext) with the binary format. It allows tags to propagate
 * across requests.
 *
 * <p>OpenCensus tag context encoding:
 *
 * <ul>
 *   <li>Tags are encoded in single byte sequence. The version 0 format is:
 *   <li>{@code <version_id><encoded_tags>}
 *   <li>{@code <version_id> -> a single byte, value 0}
 *   <li>{@code <encoded_tags> -> (<tag_field_id><tag_encoding>)*}
 *     <ul>
 *       <li>{@code <tag_field_id>} -> a single byte, value 0
 *       <li>{@code <tag_encoding>}:
 *         <ul>
 *           <li>{@code <tag_key_len><tag_key><tag_val_len><tag_val>}
 *             <ul>
 *               <li>{@code <tag_key_len>} -> varint encoded integer
 *               <li>{@code <tag_key>} -> tag_key_len bytes comprising tag name
 *               <li>{@code <tag_val_len>} -> varint encoded integer
 *               <li>{@code <tag_val>} -> tag_val_len bytes comprising tag value
 *             </ul>
 *           </li>
 *         </ul>
 *       </li>
 *     </ul>
 *   </li>
 * </ul>
 */
import { TagMap } from '../tag-map';
import { TagKey, TagValue } from '../types';
import { DecodeVarint, EncodeVarint } from './variant-encoding';

// This size limit only applies to the bytes representing tag keys and values.
export const TAG_MAP_SERIALIZED_SIZE_LIMIT = 8192;

const ENCODING = 'utf8';
const VERSION_ID = 0;
const TAG_FIELD_ID = 0;
const VERSION_ID_INDEX = 0;

/**
 * Serializes a given TagMap to the on-the-wire format.
 * Throws when the combined key/value character count exceeds
 * TAG_MAP_SERIALIZED_SIZE_LIMIT.
 * @param tagMap The TagMap to serialize.
 */
export function serializeBinary(tagMap: TagMap): Buffer {
  const byteArray: number[] = [];
  byteArray.push(VERSION_ID);
  let totalChars = 0;
  const tags = tagMap.tags;
  tags.forEach((tagValue: TagValue, tagKey: TagKey) => {
    totalChars += tagKey.name.length;
    totalChars += tagValue.value.length;
    encodeTag(tagKey, tagValue, byteArray);
  });
  if (totalChars > TAG_MAP_SERIALIZED_SIZE_LIMIT) {
    throw new Error(
      `Size of TagMap exceeds the maximum serialized size ${TAG_MAP_SERIALIZED_SIZE_LIMIT}`
    );
  }
  return Buffer.from(byteArray);
}

/**
 * Deserializes input to TagMap based on the binary format standard.
 * Throws on an empty buffer or an unsupported version byte.
 * @param buffer The TagMap to deserialize.
 */
export function deserializeBinary(buffer: Buffer): TagMap {
  if (buffer.length === 0) {
    throw new Error('Input buffer can not be empty.');
  }
  const versionId = buffer.readInt8(VERSION_ID_INDEX);
  if (versionId > VERSION_ID) {
    throw new Error(
      `Wrong Version ID: ${versionId}. Currently supports version up to: ${VERSION_ID}`
    );
  }
  return parseTags(buffer);
}

/** Appends one <tag_field_id><tag_encoding> record to byteArray. */
function encodeTag(tagKey: TagKey, tagValue: TagValue, byteArray: number[]) {
  byteArray.push(TAG_FIELD_ID);
  encodeString(tagKey.name, byteArray);
  encodeString(tagValue.value, byteArray);
}

/**
 * Appends a varint length prefix followed by the string's char codes.
 * NOTE(review): uses charCodeAt per character, so non-ASCII code points are
 * truncated to one byte each — confirm tag keys/values are ASCII-only.
 */
function encodeString(input: string, byteArray: number[]) {
  byteArray.push(...EncodeVarint(input.length));
  byteArray.push(...input.split('').map(unicode));
  return byteArray;
}

/** Walks the encoded tag records following the version byte. */
function parseTags(buffer: Buffer): TagMap {
  const tags = new TagMap();
  const limit = buffer.length;
  let totalChars = 0;
  let currentIndex = 1;
  while (currentIndex < limit) {
    const fieldId = buffer.readInt8(currentIndex);
    if (fieldId > TAG_FIELD_ID) {
      // Stop parsing at the first unknown field ID, since there is no way to
      // know its length.
      break;
    }
    // NOTE(review): each `currentIndex += 1` below skips the length varint as
    // if it were exactly one byte, i.e. strings longer than 127 chars would be
    // mis-parsed — confirm this limit is acceptable/intended.
    currentIndex += 1;
    const key = decodeString(buffer, currentIndex);
    currentIndex += key.length;
    totalChars += key.length;
    currentIndex += 1;
    const val = decodeString(buffer, currentIndex);
    currentIndex += val.length;
    totalChars += val.length;
    currentIndex += 1;
    if (totalChars > TAG_MAP_SERIALIZED_SIZE_LIMIT) {
      throw new Error(
        `Size of TagMap exceeds the maximum serialized size ${TAG_MAP_SERIALIZED_SIZE_LIMIT}`
      );
    } else {
      tags.set({ name: key }, { value: val });
    }
  }
  return tags;
}

/** Reads a varint length at offset, then that many bytes as a utf8 string. */
function decodeString(buffer: Buffer, offset: number): string {
  const length = DecodeVarint(buffer, offset);
  return buffer.toString(ENCODING, offset + 1, offset + 1 + length);
}

/** First UTF-16 code unit of a single-character string. */
function unicode(x: string) {
  return x.charCodeAt(0);
}
-- Exercise: which Papu constructions type-check, and which comparisons compile.
data Rocks = Rocks String deriving (Eq, Show)
data Yeah = Yeah Bool deriving (Eq, Show)
data Papu = Papu Rocks Yeah deriving (Eq, Show)
data Papu2 = Papu21 String Bool deriving (Eq, Show)

-- 1.
-- phew = Papu "chases" False -- ERROR: string and bool aren't equal to Rocks and Yeah
-- Papu2 takes the raw String/Bool directly, so this version compiles.
phew = Papu21 "chases" False

-- 2.
-- Wrapping the values in their newtype-like constructors satisfies Papu.
truth = Papu (Rocks "chases") (Yeah True)

-- 3.
-- Compiles because Papu derives Eq.
equalityForAll :: Papu -> Papu -> Bool
equalityForAll p p' = p == p'

-- 4.
-- comparePapus :: Papu -> Papu -> Bool
-- comparePapus p p' = p > p' -- ERROR: No Ord
using System;

/// <summary>
/// A ranked participant (name, points, id). Sorts in DESCENDING order of
/// points via IComparable, so leaderboards list the highest score first.
/// </summary>
[Serializable]
public class Colocado : IComparable
{
    public string Nome;
    public int Pontos;
    public string Id;

    public Colocado(string nome, int pontos, string id)
    {
        Nome = nome;
        Pontos = pontos;
        Id = id;
    }

    /// <summary>
    /// Descending comparison by <see cref="Pontos"/>.
    /// Fixes the original, which dereferenced the result of <c>as</c> without
    /// a check and threw NullReferenceException on null or foreign objects.
    /// </summary>
    /// <exception cref="ArgumentException">obj is not a Colocado.</exception>
    public int CompareTo(object obj)
    {
        // IComparable contract: any instance compares greater than null.
        if (obj is null) return 1;

        var outro = obj as Colocado;
        if (outro == null)
            throw new ArgumentException("Object is not a Colocado");

        // Comparing the OTHER's points to ours inverts the order => descending.
        return outro.Pontos.CompareTo(Pontos);
    }
}
package com.infinum.sentinel.data.sources.raw.collectors

import android.content.Context
import android.provider.Settings
import com.infinum.sentinel.data.models.raw.DeviceData
import com.infinum.sentinel.domain.collectors.Collectors

/**
 * Collects device clock settings into a [DeviceData] snapshot.
 */
internal class DeviceCollector(
    private val context: Context
) : Collectors.Device {

    override fun invoke() = DeviceData(
        // Settings value 1 == automatic date/time enabled; default 0 when unset.
        autoTime = Settings.Global.getInt(
            context.contentResolver,
            Settings.Global.AUTO_TIME,
            0
        ) == 1,
        // Settings value 1 == automatic time zone enabled; default 0 when unset.
        autoTimezone = Settings.Global.getInt(
            context.contentResolver,
            Settings.Global.AUTO_TIME_ZONE,
            0
        ) == 1
    )
}
# Workstation defaults: provisioning user and Homebrew cask packages.
default["user"] = "di"

default["brew_cask"]["packages"] = %w[
  iterm2
  google-chrome
  slack
  1password
  docker-toolbox
  skitch
  viscosity
  vagrant
  aws-vault
]
// Barrel file: re-exports the complex-field module, pipe and components.
export * from './complex.module';
export * from './fields-filter.pipe';
export * from './read-complex-field-raw.component';
export * from './read-complex-field-table.component';
export * from './read-complex-field-collection-table.component';
export * from './read-complex-field.component';
export * from './write-complex-field.component';
// ***********************************************************************
// Assembly : PureActive.Hosting
// Author : SteveBu
// Created : 11-03-2018
// License : Licensed under MIT License, see https://github.com/PureActive/PureActive/blob/master/LICENSE
//
// Last Modified By : SteveBu
// Last Modified On : 11-20-2018
// ***********************************************************************
// <copyright file="WebHostBuilderExtensions.cs" company="BushChang Corporation">
//     © 2018 BushChang Corporation. All rights reserved.
// </copyright>
// <summary></summary>
// ***********************************************************************

using System;
using Microsoft.AspNetCore.Hosting;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;
using PureActive.Core.Abstractions.System;
using PureActive.Core.System;
using PureActive.Logger.Provider.Serilog.Configuration;
using PureActive.Logger.Provider.Serilog.Settings;
using PureActive.Logging.Abstractions.Types;
using Serilog.Events;
using OperatingSystem = PureActive.Core.System.OperatingSystem;

namespace PureActive.Hosting.Configuration
{
    /// <summary>
    /// IWebHostBuilder extensions that wire up configuration, file system,
    /// OS abstraction and Serilog logging as host services.
    /// </summary>
    /// <autogeneratedoc />
    public static class WebHostBuilderExtensions
    {
        /// <summary>
        /// Returns the configuration for the webapp
        /// (appsettings + environment variables, rooted at the current directory).
        /// </summary>
        /// <returns>IConfigurationRoot.</returns>
        private static IConfigurationRoot GetAppConfiguration()
        {
            var fileSystem = new FileSystem();

            return new ConfigurationBuilder()
                .SetBasePath(fileSystem.GetCurrentDirectory())
                .AddAppSettings()
                .AddEnvironmentVariables()
                .Build();
        }

        /// <summary>
        /// Uses the system settings: builds configuration, file system and a
        /// Serilog-backed logger factory, then registers them as singletons.
        /// </summary>
        /// <param name="webHostBuilder">The web host builder.</param>
        /// <param name="logFileName">Name of the log file.</param>
        /// <param name="includeLogEvent">Predicate selecting which log events to include.</param>
        /// <returns>IWebHostBuilder.</returns>
        /// <autogeneratedoc />
        public static IWebHostBuilder UseSystemSettings(this IWebHostBuilder webHostBuilder, string logFileName,
            Func<LogEvent, bool> includeLogEvent)
        {
            var operatingSystem = new OperatingSystem();
            var appConfiguration = GetAppConfiguration();
            var fileSystem = new FileSystem(appConfiguration, operatingSystem);
            var loggerSettings = new SerilogLoggerSettings(fileSystem, appConfiguration, LoggingOutputFlags.AppFull);
            var loggerConfiguration =
                LoggerConfigurationFactory.CreateLoggerConfiguration(appConfiguration, logFileName, loggerSettings,
                    includeLogEvent);
            var loggerFactory =
                LoggerConfigurationFactory.CreatePureSeriLoggerFactory(loggerSettings, loggerConfiguration);

            // Register the factory both by its concrete type and as ILoggerFactory,
            // plus the OS/file-system abstractions, so either form can be resolved.
            webHostBuilder.ConfigureServices(services => services.AddSingleton(loggerFactory));
            webHostBuilder.ConfigureServices(services => services.AddSingleton<ILoggerFactory>(loggerFactory));
            webHostBuilder.ConfigureServices(services => services.AddSingleton<IOperatingSystem>(operatingSystem));
            webHostBuilder.ConfigureServices(services => services.AddSingleton<IFileSystem>(fileSystem));

            return webHostBuilder;
        }
    }
}
const { genericService } = require("../util");
const { itemDb } = require("../db");

// CRUD service for items: each operation pairs the generic service
// factory with the matching itemDb data-access function.
const operations = ["get", "update", "create", "remove"];

const itemService = Object.fromEntries(
  operations.map((op) => [op, genericService[op](itemDb[op])])
);

module.exports = itemService;
extern crate termion;

use termion::color::{Bg, Rgb};

/// Fire-effect palette, darkest (near-black) to brightest (white).
/// Stored as a `const` array so lookups no longer heap-allocate —
/// the original rebuilt a `Vec` of all 38 colors on every call.
const PALETTE: [Rgb; 38] = [
    Rgb(7, 7, 7),
    Rgb(31, 7, 7),
    Rgb(47, 15, 7),
    Rgb(71, 15, 7),
    Rgb(87, 23, 7),
    Rgb(103, 31, 7),
    Rgb(119, 31, 7),
    Rgb(143, 39, 7),
    Rgb(159, 47, 7),
    Rgb(175, 63, 7),
    Rgb(191, 71, 7),
    Rgb(199, 71, 7),
    Rgb(223, 79, 7),
    Rgb(223, 87, 7),
    Rgb(223, 87, 7),
    Rgb(215, 95, 7),
    Rgb(215, 95, 7),
    Rgb(215, 103, 15),
    Rgb(207, 111, 15),
    Rgb(207, 119, 15),
    Rgb(207, 127, 15),
    Rgb(207, 135, 23),
    Rgb(207, 135, 23),
    Rgb(199, 135, 23),
    Rgb(199, 143, 23),
    Rgb(199, 151, 31),
    Rgb(191, 159, 31),
    Rgb(191, 159, 31),
    Rgb(191, 167, 39),
    Rgb(191, 167, 39),
    Rgb(191, 175, 47),
    Rgb(183, 175, 47),
    Rgb(183, 183, 47),
    Rgb(183, 183, 55),
    Rgb(207, 207, 111),
    Rgb(223, 223, 159),
    Rgb(239, 239, 199),
    Rgb(255, 255, 255),
];

/// Returns the palette color at `index`.
/// Panics when `index >= 38`, matching the original Vec-indexing behavior.
fn get_color(index: usize) -> Rgb {
    PALETTE[index]
}

/// Returns the background-color wrapper for palette entry `index`.
pub fn get_bg_color(index: usize) -> Bg<Rgb> {
    Bg(get_color(index))
}
require "sinatra"
require "openssl"
require "rack"
require "twitter"

# Verifies a GitHub webhook payload against its X-Hub-Signature value.
# Halts the request with a 500 when the HMAC does not match.
def verify_signature(payload_body, request_signature)
  signature = "sha1=" + OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new("sha1"), ENV["GITHUB_WEBHOOK_TOKEN"], payload_body)
  # secure_compare avoids leaking timing information during comparison.
  unless Rack::Utils.secure_compare(signature, request_signature)
    halt 500, "Signatures didn't match!"
  end
end

# Turns a DOI-like identifier into a resolvable URL; arXiv ids are special-cased.
def doi2url(doi)
  case doi
  when /^arXiv:/ then "http://arxiv.org/abs/#{doi.sub(/^arXiv:/, "")}"
  else "http://doi.org/#{doi}"
  end
end

# Builds a Twitter REST client from environment credentials. Official taps use
# per-repo access tokens selected via an env-var suffix.
# NOTE(review): TWITTER_ACCOUNT_MAP is not defined in this file — confirm it is
# loaded before this is called.
def twitter_client(tap)
  token_suffix = tap.official? ? "_#{TWITTER_ACCOUNT_MAP[tap.repo].upcase}" : ""
  Twitter::REST::Client.new do |config|
    config.consumer_key = ENV["TWITTER_CONSUMER_KEY"]
    config.consumer_secret = ENV["TWITTER_CONSUMER_SEC"]
    config.access_token = ENV["TWITTER_ACCESS_TOKEN#{token_suffix}"]
    config.access_token_secret = ENV["TWITTER_ACCESS_TOKEN_SEC#{token_suffix}"]
  end
end
package com.karasiq.shadowcloud.metadata.imageio

import com.typesafe.config.Config

import com.karasiq.shadowcloud.metadata.{MetadataParser, MetadataProvider, MimeDetector}

/**
  * Metadata provider wiring up the ImageIO thumbnail creator.
  * Supplies no MIME detectors of its own.
  */
class ImageIOMetadataProvider(rootConfig: Config) extends MetadataProvider {
  // Config subtree "metadata.imageio" with its "thumbnails" section.
  protected object imageioConfig {
    val config = rootConfig.getConfig("metadata.imageio")
    val thumbnailsConfig = config.getConfig("thumbnails")
  }

  // This provider performs no MIME detection.
  val detectors: Seq[MimeDetector] = Vector.empty

  val parsers: Seq[MetadataParser] = Vector(
    ImageIOThumbnailCreator(imageioConfig.thumbnailsConfig)
  )
}
## ShaunFlynn
# Calculus and linear-algebra practice helpers.
# Fixed from the original, which did not parse: `5x`/`2x` instead of `5*x`,
# and `def d_f:` style definitions missing parameter lists. Functions now
# take their operands explicitly instead of relying on an undefined global x.
import numpy as np

# d/dx of x**n is n * x**(n-1)


def f(x):
    """Return x squared."""
    return x ** 2


def f_2(x):
    """Return x cubed."""
    return x ** 3


def f_3(x):
    """Return x**3 + 5x."""
    return x ** 3 + 5 * x


def d_f(x):
    """Derivative of f: 2x."""
    return 2 * x


def d_f_2(x):
    """Derivative of f_2: 3x**2."""
    return 3 * x ** 2


def d_f_3(x):
    """Derivative of f_3: 3x**2 + 5."""
    return 3 * x ** 2 + 5


def vector_sum(x, y):
    """Element-wise sum of vectors x and y: (x_i + y_i)."""
    return np.add(x, y)


def vector_less(x, y):
    """Element-wise difference of vectors x and y: (x_i - y_i)."""
    return np.subtract(x, y)


def vector_magnitude(x):
    """Euclidean length (L2 norm) of vector x: sqrt(sum(v_i ** 2))."""
    return np.linalg.norm(x)


# Sample fixed vectors.
def vec5():
    return np.array([1, 1, 1, 1, 1])


def vec3():
    return np.array([0, 0, 0])


def vec2_1():
    return np.array([1, 0])


def vec2_2():
    return np.array([0, 1])


def matrix_multiply(vec, matrix):
    """Multiply a vector by a matrix via row-wise inner products.

    Matrix [m_0_0, m_0_1]   Vector [v_0, v_1]
           [m_1_0, m_1_1]
    M*v = [(v_0 * m_0_0) + (v_1 * m_0_1),
           (v_0 * m_1_0) + (v_1 * m_1_1)]
    """
    return np.inner(vec, matrix)
import 'package:bullshit/screens/home_screen.dart';
import 'package:bullshit/screens/show_todo_screen.dart';
import 'package:bullshit/screens/splash_screen.dart';
import 'package:flutter/material.dart';

/// Central route table for the app: named paths and their screen builders.
class Routes {
  // Named route paths.
  static const String splashScreen = "/";
  static const String homeScreen = "/homeScreen";
  static const String showTodoScreen = "/showTodoScreen";

  // Route-name -> widget-builder map consumed by MaterialApp.routes.
  static Map<String, Widget Function(BuildContext)> routes = {
    splashScreen: (context) => const SplashScreen(),
    homeScreen: (context) => const HomeScreen(),
    showTodoScreen: (context) => const ShowTodoScreen()
  };
}
'use strict';
/**
 * Created by Adrian on 11-Apr-16.
 *
 * Thorin model definition for account history entries: one row per
 * account event, linked back to the account model.
 */
module.exports = function(thorin, opt, AccountModel) {

  function initModel(modelObj, Seq) {
    modelObj
      .field('id', Seq.PRIMARY)
      .field('type', Seq.STRING(20)) // the history type. Types: LOGIN, PASSWORD_CHANGE, etc.
      .field('user_agent', Seq.STRING, {
        defaultValue: null
      })
      .field('ip', Seq.STRING(20));

    // Each history row belongs to an account.
    modelObj.belongsTo(AccountModel.code);
  }

  return initModel;
};
package com.andryoga.safebox.security.interfaces

/**
 * Password-based symmetric encryption contract.
 */
interface PasswordBasedEncryption {
    /**
     * Encrypts or decrypts [data] using a key derived from [password],
     * [salt] and [iv].
     *
     * @param encrypt true to encrypt, false to decrypt.
     * @return the transformed bytes.
     */
    fun encryptDecrypt(
        password: CharArray,
        data: ByteArray,
        salt: ByteArray,
        iv: ByteArray,
        encrypt: Boolean
    ): ByteArray

    /** Returns a freshly generated random salt. */
    fun getRandomSalt(): ByteArray

    /** Returns a freshly generated random IV. */
    fun getRandomIV(): ByteArray
}
// Unit tests for the StringRange validator: user-vs-developer static data
// priority, in-range successes, and out-of-range failures.
import 'models/models.dart';
import 'models/models.reflectable.dart';
import 'package:flutter_model_form_validation/flutter_model_form_validation.dart';
import 'package:flutter_test/flutter_test.dart';

void main() {
  // Reflection metadata must be initialized before ModelState inspects models.
  initializeReflectable();

  group('StringRange.', () {
    group('Test the priority between user and developer static data.', () {
      test(
          '"Min" and "max" are provided by user and developer. User data has priority.',
          () {
        StringRangeWithUserAndDeveloperValuesTest tester =
            new StringRangeWithUserAndDeveloperValuesTest('m', 'g', 'o');
        bool isValid =
            ModelState.isValid<StringRangeWithUserAndDeveloperValuesTest>(
                tester);
        expect(isValid, true);
        expect(ModelState.errors.isEmpty, true);
      });

      test('"Min" and "max" are provided by user only.', () {
        StringRangeWithUserValuesTest tester =
            new StringRangeWithUserValuesTest('m', 'g', 'o');
        bool isValid =
            ModelState.isValid<StringRangeWithUserValuesTest>(tester);
        expect(isValid, true);
        expect(ModelState.errors.isEmpty, true);
      });

      test('"Min" and "max" are provided by developer only.', () {
        StringRangeWithDeveloperValuesTest tester =
            new StringRangeWithDeveloperValuesTest('c');
        bool isValid =
            ModelState.isValid<StringRangeWithDeveloperValuesTest>(tester);
        expect(isValid, true);
        expect(ModelState.errors.isEmpty, true);
      });
    });

    group('Test the validation > success.', () {
      test('The value is equal to "min".', () {
        StringRangeTest tester = new StringRangeTest('g', 'g', 'o');
        bool isValid = ModelState.isValid<StringRangeTest>(tester);
        expect(isValid, true);
        expect(ModelState.errors.isEmpty, true);
      });

      test('The value is between "min" and "max".', () {
        StringRangeTest tester = new StringRangeTest('m', 'g', 'o');
        bool isValid = ModelState.isValid<StringRangeTest>(tester);
        expect(isValid, true);
        expect(ModelState.errors.isEmpty, true);
      });

      test('The value is equal to "max".', () {
        StringRangeTest tester = new StringRangeTest('o', 'g', 'o');
        bool isValid = ModelState.isValid<StringRangeTest>(tester);
        expect(isValid, true);
        expect(ModelState.errors.isEmpty, true);
      });
    });

    group('Test the validation > failure.', () {
      test('The value is smaller than "min".', () {
        StringRangeTest tester = new StringRangeTest('f', 'g', 'o');
        bool isValid = ModelState.isValid<StringRangeTest>(tester);
        expect(isValid, false);
        expect(ModelState.errors['value'].validatorType, StringRange);
        expect(ModelState.errors['value'].propertyName, 'value');
        expect(ModelState.errors['value'].error,
            'This string is not in the range');
      });

      test('The value is greater than "max".', () {
        StringRangeTest tester = new StringRangeTest('p', 'g', 'o');
        bool isValid = ModelState.isValid<StringRangeTest>(tester);
        expect(isValid, false);
        expect(ModelState.errors['value'].validatorType, StringRange);
        expect(ModelState.errors['value'].propertyName, 'value');
        expect(ModelState.errors['value'].error,
            'This string is not in the range');
      });
    });
  });
}
#ifndef GAME_MAPENTITYLIST_H
#define GAME_MAPENTITYLIST_H

///////////////////////////////////////////////////////////////////////////////

// Maximum number of map entities a MapEntityList can track.
#define MAX_MAPENTITIES 1024

// Fixed-capacity registry of MapEntity pointers with lookup helpers.
class MapEntityList {
public:
    MapEntityList ( );
    ~MapEntityList ( );

    // Look up an entity. NOTE(review): parameter semantics (id? index?) are
    // not visible in this header — confirm against the implementation.
    MapEntity* findEnt ( int );
    // Per-client lookup; the int& appears to be an out-parameter — confirm.
    MapEntity* findEntSingleClient ( int, int, int& );
    void reset( );

protected:
    MapEntity* teamList[MAX_MAPENTITIES];
    int lastUpdated;

private:
    void clean ( );
    void init ( );
};

///////////////////////////////////////////////////////////////////////////////

#endif // GAME_MAPENTITYLIST_H
import { Block, BlockList } from "./block-node.js";

// Every physical line is assumed to be terminated by a single '\n'.
const LINE_BREAK_LENGTH = 1;

/**
 * Used for building a block syntax tree.
 *
 * The builder is stateful: `newBlock` starts a top-level block and makes it
 * the active block; `newBlockLine` appends a child line to the active block.
 * `lastOffset` tracks the absolute character offset of the position cursor
 * and is advanced in place (note the `+=` expressions embedded inside the
 * node literals below — evaluation order is significant).
 */
export class BlockSyntaxTreeBuilder {
  private blocks: BlockList;
  private activeBlock: Block | undefined;
  private lastOffset: number;

  constructor() {
    // Assigned directly (not only via reset()) so TypeScript's strict
    // property initialization sees a definite assignment.
    this.blocks = this.emptyBlockList();
    this.lastOffset = -1;
    this.reset();
  }

  /** Discard all accumulated state so a new tree can be built. */
  reset() {
    this.blocks = this.emptyBlockList();
    this.lastOffset = -1;
    return this;
  }

  /**
   * Return the build syntax tree.
   * Call `reset` to build a new syntax tree.
   *
   * @returns Syntax tree instance.
   */
  syntaxTree() {
    return this.blocks;
  }

  private emptyBlockList(): BlockList {
    return { children: [], type: "blockList" };
  }

  newEmptyLine() {
    throw new Error("Not implemented yet.");
  }

  /**
   * Start a new top-level block from one source line and make it active.
   *
   * @param level Block nesting level recorded on the node.
   * @param indentationText Leading whitespace of the line.
   * @param lineContent Line text after the indentation.
   */
  newBlock(level: number, indentationText: string, lineContent: string) {
    // Account for the line break that precedes this line.
    this.lastOffset += LINE_BREAK_LENGTH;
    const lineNumber = this.blocks.children.length + 1;
    const lineContentLength = lineContent.length;
    const indentationLength = indentationText.length;

    const block: Block = {
      type: "block",
      level,
      children: [
        {
          type: "rootBlockLine",
          indentation: {
            type: "blockLineIndentation",
            indentation: indentationLength,
            value: indentationText,
            position: {
              start: { column: 1, line: lineNumber, offset: this.lastOffset },
              end: {
                column: 1,
                line: lineNumber,
                // Advances the shared cursor past the indentation.
                offset: (this.lastOffset += indentationLength),
              },
            },
          },
          content: {
            type: "blockLineContent",
            value: lineContent,
            position: {
              start: {
                column: 1 + indentationLength,
                line: lineNumber,
                offset: this.lastOffset,
              },
              end: {
                column: 1 + indentationLength + lineContentLength,
                line: lineNumber,
                // Advances the shared cursor past the content.
                offset: (this.lastOffset += lineContentLength),
              },
            },
          },
        },
      ],
    };

    this.activeBlock = block;
    this.blocks.children.push(block);
    return this;
  }

  /**
   * Append a child line to the currently active block.
   * Must be called only after `newBlock` (the non-null assertion below
   * otherwise dereferences undefined).
   *
   * @param indentationText Leading whitespace of the line.
   * @param lineContent Line text after the indentation.
   */
  newBlockLine(indentationText: string, lineContent: string) {
    this.lastOffset += LINE_BREAK_LENGTH;
    // NOTE(review): this counts top-level blocks, not physical lines, so a
    // child line of block N gets the same `line` as the root line of block
    // N+1 would — looks like it should also count previously added child
    // lines. Confirm against the consumers of `position.line`.
    const lineNumber = this.blocks.children.length + 1;
    const lineContentLength = lineContent.length;
    const indentationLength = indentationText.length;

    this.activeBlock!.children.push({
      type: "childBlockLine",
      indentation: {
        type: "blockLineIndentation",
        indentation: indentationLength,
        value: indentationText,
        position: {
          start: { column: 1, line: lineNumber, offset: this.lastOffset },
          end: {
            column: 1,
            line: lineNumber,
            offset: (this.lastOffset += indentationLength),
          },
        },
      },
      content: {
        type: "blockLineContent",
        value: lineContent,
        position: {
          start: {
            column: 1 + indentationLength,
            line: lineNumber,
            offset: this.lastOffset,
          },
          end: {
            column: 1 + indentationLength + lineContentLength,
            line: lineNumber,
            offset: (this.lastOffset += lineContentLength),
          },
        },
      },
    });
    return this;
  }
}
//---------------------------------------------------------------------
// <copyright file="LinqToAstoriaEvaluator.cs" company="Microsoft">
//      Copyright (C) Microsoft Corporation. All rights reserved. See License.txt in the project root for license information.
// </copyright>
//---------------------------------------------------------------------

namespace Microsoft.Test.Taupo.Astoria.LinqToAstoria
{
    using System;
    using System.Collections.Generic;
    using System.Linq;
    using Microsoft.Test.Taupo.Astoria.Contracts.EntityModel;
    using Microsoft.Test.Taupo.Astoria.Contracts.LinqToAstoria;
    using Microsoft.Test.Taupo.Common;
    using Microsoft.Test.Taupo.Contracts.EntityModel;
    using Microsoft.Test.Taupo.Query.Common;
    using Microsoft.Test.Taupo.Query.Contracts;
    using Microsoft.Test.Taupo.Query.Contracts.CommonExpressions;
    using Microsoft.Test.Taupo.Query.Contracts.Linq;
    using Microsoft.Test.Taupo.Query.Contracts.Linq.Expressions;
    using Microsoft.Test.Taupo.Query.Linq;

    /// <summary>
    /// Evaluates query expressions for a given query data set.
    /// </summary>
    [ImplementationName(typeof(ILinqToAstoriaExpressionEvaluator), "Default")]
    public class LinqToAstoriaEvaluator : ILinqToAstoriaExpressionEvaluator
    {
        /// <summary>
        /// Gets or sets the query resolver.
        /// </summary>
        [InjectDependency(IsRequired = true)]
        public ILinqToAstoriaQueryResolver QueryResolver { get; set; }

        /// <summary>
        /// Gets or sets the query data set.
        /// </summary>
        [InjectDependency(IsRequired = true)]
        public IQueryDataSet QueryDataSet { get; set; }

        /// <summary>
        /// Evaluates the specified expression.
        /// </summary>
        /// <param name="expression">The expression.</param>
        /// <returns>Value of the expression.</returns>
        public QueryValue Evaluate(QueryExpression expression)
        {
            return this.Evaluate(expression, new Dictionary<string, QueryExpression>());
        }

        /// <summary>
        /// Evaluates the specified expression.
        /// </summary>
        /// <param name="expression">The expression.</param>
        /// <param name="freeVariableAssignments">Free variable assignments.</param>
        /// <returns>Value of the expression.</returns>
        public QueryValue Evaluate(QueryExpression expression, IDictionary<string, QueryExpression> freeVariableAssignments)
        {
            ExceptionUtilities.CheckArgumentNotNull(expression, "expression");
            ExceptionUtilities.CheckArgumentNotNull(freeVariableAssignments, "freeVariableAssignments");

            var visitor = new LinqToAstoriaEvaluatingVisitor(this.QueryDataSet, freeVariableAssignments);

            // Resolve before evaluation so the visitor only ever sees fully-resolved expressions.
            expression = this.QueryResolver.Resolve(expression);
            return visitor.Evaluate(expression);
        }

        /// <summary>
        /// Temporarily replaces the evaluator's data-set with the one given
        /// </summary>
        /// <param name="temporary">The temporary query data-set</param>
        /// <returns>A token that, when disposed, will reset the query data-set back to the original.</returns>
        public IDisposable WithTemporaryDataSet(IQueryDataSet temporary)
        {
            var original = this.QueryDataSet;
            this.QueryDataSet = temporary;
            return new DelegateBasedDisposable(() => this.QueryDataSet = original);
        }

        /// <summary>
        /// Evaluates Linq-specific expression trees.
        /// </summary>
        internal class LinqToAstoriaEvaluatingVisitor : LinqEvaluatingVisitor, ILinqToAstoriaExpressionVisitor<QueryValue>
        {
            /// <summary>
            /// Initializes a new instance of the LinqToAstoriaEvaluatingVisitor class.
            /// </summary>
            /// <param name="dataSet">The data set.</param>
            /// <param name="freeVariableAssignments">Free variable assignments.</param>
            internal LinqToAstoriaEvaluatingVisitor(IQueryDataSet dataSet, IDictionary<string, QueryExpression> freeVariableAssignments)
                : base(dataSet, freeVariableAssignments)
            {
            }

            /// <summary>
            /// Evaluates the specified expression.
            /// </summary>
            /// <param name="expression">The expression to evaluate.</param>
            /// <returns>Value of the expression.</returns>
            public override QueryValue Visit(LinqCountExpression expression)
            {
                return this.VisitCollectionElementPrimitiveOrComplexTypeError(
                    expression,
                    delegate
                    {
                        return base.Visit(expression);
                    });
            }

            /// <summary>
            /// Visits a LinqDistinctExpression. This expression is not supported in Linq to Astoria.
            /// </summary>
            /// <param name="expression">The expression.</param>
            /// <returns>The result of visiting this expression.</returns>
            public override QueryValue Visit(LinqDistinctExpression expression)
            {
                var source = this.EvaluateCollection(expression.Source);
                var sourceError = QueryError.GetErrorFromValues(source.Elements);
                var error = QueryError.Combine(sourceError, new QueryError("This expression is not supported in Linq to Astoria."));
                return expression.Source.ExpressionType.CreateErrorValue(error);
            }

            /// <summary>
            /// Evaluates the specified expression.
            /// </summary>
            /// <param name="expression">The expression to evaluate.</param>
            /// <returns>Value of the expression.</returns>
            public override QueryValue Visit(LinqLongCountExpression expression)
            {
                return this.VisitCollectionElementPrimitiveOrComplexTypeError(
                    expression,
                    delegate
                    {
                        return base.Visit(expression);
                    });
            }

            /// <summary>
            /// Evaluates the specified expression.
            /// </summary>
            /// <param name="expression">The expression to evaluate.</param>
            /// <returns>Value of the expression.</returns>
            public override QueryValue Visit(LinqNewInstanceExpression expression)
            {
                ExceptionUtilities.CheckArgumentNotNull(expression, "expression");

                if (expression.ExpressionType is AstoriaQueryStreamType)
                {
                    // Handle expressions like: new DataServiceStreamLink(c, "Photo")
                    const int ExpectedArgumentCount = 2;
                    int actualArgumentCount = expression.ConstructorArguments.Count;
                    ExceptionUtilities.Assert(
                        actualArgumentCount == ExpectedArgumentCount,
                        "Expected {0} arguments in the constructor. Actual: {1}",
                        ExpectedArgumentCount,
                        actualArgumentCount);

                    var source = (QueryStructuralValue)this.Evaluate(expression.ConstructorArguments[0]);
                    string streamName = (string)((QueryScalarValue)this.Evaluate(expression.ConstructorArguments[1])).Value;
                    return source.GetValue(streamName);
                }
                else
                {
                    return base.Visit(expression);
                }
            }

            /// <summary>
            /// Evaluates the specified expression.
            /// </summary>
            /// <param name="expression">The expression to evaluate.</param>
            /// <returns>Value of the expression.</returns>
            public override QueryValue Visit(LinqOrderByExpression expression)
            {
                var value = this.VisitCollectionElementPrimitiveOrComplexTypeError(
                    expression,
                    delegate
                    {
                        return base.Visit(expression);
                    });

                var collection = value as QueryCollectionValue;

                // Fix: guard the cast before dereferencing, consistent with the
                // null check performed in Visit(LinqToAstoriaExpandExpression).
                // The helper above may return an error value instead of a
                // collection, in which case there is no ordering to adjust.
                if (collection == null)
                {
                    return value;
                }

                var strategy = collection.Type.ElementType.EvaluationStrategy as ILinqToAstoriaQueryEvaluationStrategy;
                ExceptionUtilities.CheckObjectNotNull(strategy, "Cannot get astoria-specific evaluation strategy from collection value.");

                if (strategy.IsCollectionOrderPredictable)
                {
                    return QueryCollectionValue.Create(collection.Type.ElementType, collection.Elements, true);
                }

                return value;
            }

            /// <summary>
            /// Evaluates the specified expression.
            /// </summary>
            /// <param name="expression">The expression to evaluate.</param>
            /// <returns>Value of the expression.</returns>
            public override QueryValue Visit(LinqSelectExpression expression)
            {
                return this.VisitCollectionElementPrimitiveOrComplexTypeError(
                    expression,
                    delegate
                    {
                        return base.Visit(expression);
                    });
            }

            /// <summary>
            /// Evaluates the specified expression.
            /// </summary>
            /// <param name="expression">The expression to evaluate.</param>
            /// <returns>Value of the expression.</returns>
            public override QueryValue Visit(LinqSkipExpression expression)
            {
                return this.VisitCollectionElementPrimitiveOrComplexTypeError(
                    expression,
                    delegate
                    {
                        return base.Visit(expression);
                    });
            }

            /// <summary>
            /// Evaluates the specified expression.
            /// </summary>
            /// <param name="expression">The expression to evaluate.</param>
            /// <returns>Value of the expression.</returns>
            public override QueryValue Visit(LinqTakeExpression expression)
            {
                return this.VisitCollectionElementPrimitiveOrComplexTypeError(
                    expression,
                    delegate
                    {
                        return base.Visit(expression);
                    });
            }

            /// <summary>
            /// Visits a LinqAddQueryOptionExpression.
            /// </summary>
            /// <param name="expression">The expression.</param>
            /// <returns>Value of the expression</returns>
            public QueryValue Visit(LinqToAstoriaAddQueryOptionExpression expression)
            {
                ExceptionUtilities.CheckArgumentNotNull(expression, "expression");

                string queryOption = expression.QueryOption;
                if (queryOption.Equals("$top", StringComparison.OrdinalIgnoreCase) || queryOption.Equals("$skip", StringComparison.OrdinalIgnoreCase))
                {
                    var source = this.EvaluateCollection(expression.Source);
                    var evalStrategy = new LinqToAstoriaClrQueryEvaluationStrategy();
                    QueryScalarValue count = new QueryScalarValue(evalStrategy.IntegerType, (int)expression.QueryValue, null, evalStrategy);

                    if (queryOption.Equals("$top", StringComparison.OrdinalIgnoreCase))
                    {
                        return source.Take(count);
                    }
                    else
                    {
                        return source.Skip(count);
                    }
                }
                else
                {
                    // Other query options do not affect evaluation of the data itself.
                    return this.Evaluate(expression.Source);
                }
            }

            /// <summary>
            /// Evaluates the specified expression.
            /// </summary>
            /// <param name="expression">The expression to evaluate.</param>
            /// <returns>Value of the expression.</returns>
            public virtual QueryValue Visit(LinqToAstoriaConditionalExpression expression)
            {
                var binaryQueryExpressionValue = (QueryScalarValue)expression.Condition.Accept(this);
                ExceptionUtilities.CheckObjectNotNull(binaryQueryExpressionValue, "Conditional expression evaluated to null");

                bool binaryValue = (bool)binaryQueryExpressionValue.Value;
                if (binaryValue)
                {
                    return expression.IfTrue.Accept(this);
                }
                else
                {
                    return expression.IfFalse.Accept(this);
                }
            }

            /// <summary>
            /// Visits a LinqExpandExpression.
            /// </summary>
            /// <param name="expression">The expression.</param>
            /// <returns>Value of the expression</returns>
            public QueryValue Visit(LinqToAstoriaExpandExpression expression)
            {
                // expand is not handled here, instead it's handled in the trimming phase after
                // the whole expression has been evaluated
                var expanded = this.Evaluate(expression.Source);

                // if expanding a collection using sql strategy, we do not guarantee the order of top level set.
                var collection = expanded as QueryCollectionValue;
                if (collection != null)
                {
                    var strategy = collection.Type.ElementType.EvaluationStrategy as ILinqToAstoriaQueryEvaluationStrategy;
                    ExceptionUtilities.CheckObjectNotNull(strategy, "Cannot get astoria-specific evaluation strategy from collection value.");

                    if (!strategy.IsCollectionOrderPredictable)
                    {
                        return QueryCollectionValue.Create(collection.Type.ElementType, collection.Elements, false);
                    }
                }

                return expanded;
            }

            /// <summary>
            /// Visits a LinqToAstoriaExpandLambdaExpression.
            /// </summary>
            /// <param name="expression">The expression.</param>
            /// <returns>The result of visiting this expression.</returns>
            public QueryValue Visit(LinqToAstoriaExpandLambdaExpression expression)
            {
                return this.Evaluate(expression.ToLinqToAstoriaExpandExpression());
            }

            /// <summary>
            /// Visits a LinqKeyExpression.
            /// </summary>
            /// <param name="expression">The expression.</param>
            /// <returns>Value of the expression</returns>
            public QueryValue Visit(LinqToAstoriaKeyExpression expression)
            {
                ExceptionUtilities.CheckArgumentNotNull(expression, "expression");

                var collectionType = expression.ExpressionType as QueryCollectionType;

                // A key lookup is rewritten as Where(lambda), then narrowed to either a
                // typed collection or a single element depending on the expression type.
                var replaced = expression.Source.Where(expression.Lambda);
                if (collectionType != null)
                {
                    replaced = replaced.OfType(collectionType.ElementType);
                }
                else
                {
                    replaced = replaced.SingleOrDefault().As(expression.ExpressionType);
                }

                return this.Evaluate(replaced);
            }

            /// <summary>
            /// Visits a LinqToAstoriaLinksExpression.
            /// </summary>
            /// <param name="expression">The expression.</param>
            /// <returns>The result of visiting this expression.</returns>
            public QueryValue Visit(LinqToAstoriaLinksExpression expression)
            {
                return this.Evaluate(expression.Source);
            }

            /// <summary>
            /// Visits a LinqToAstoriaValueExpression.
            /// </summary>
            /// <param name="expression">The expression.</param>
            /// <returns>The result of visiting this expression.</returns>
            public QueryValue Visit(LinqToAstoriaValueExpression expression)
            {
                return this.Evaluate(expression.Source);
            }

            /// <summary>
            /// Evaluates the specified expression.
            /// </summary>
            /// <param name="expression">The expression to evaluate.</param>
            /// <returns>Value of the expression.</returns>
            public override QueryValue Visit(LinqWhereExpression expression)
            {
                return this.VisitCollectionElementPrimitiveOrComplexTypeError(
                    expression,
                    delegate
                    {
                        return base.Visit(expression);
                    });
            }

            /// <summary>
            /// Evaluates the specified expression.
            /// </summary>
            /// <param name="expression">The expression to evaluate</param>
            /// <returns>Value of the expression</returns>
            public override QueryValue Visit(QueryCustomFunctionCallExpression expression)
            {
                var result = base.Visit(expression);

                // Legacy service operations declared as "single result" over IQueryable/IEnumerable
                // evaluate to a collection from which the single element must be extracted.
                var svcOpAnnotation = expression.Function.Annotations.OfType<LegacyServiceOperationAnnotation>().SingleOrDefault();
                if (IsSingleResultQueryableOrEnumerable(svcOpAnnotation))
                {
                    var collection = result as QueryCollectionValue;
                    ExceptionUtilities.CheckObjectNotNull(collection, "Result should have been a collection for a queryable/enumerable svc op. Result was '{0}'", result);
                    result = collection.SingleOrDefault();
                }

                return result;
            }

            /// <summary>
            /// Evaluates the specified expression.
            /// </summary>
            /// <param name="expression">The expression to evaluate</param>
            /// <returns>Value of the expression</returns>
            public override QueryValue Visit(QueryPropertyExpression expression)
            {
                if (expression.ExpressionType is AstoriaQueryStreamType)
                {
                    var originalSource = this.Evaluate(expression.Instance);
                    var source = originalSource as QueryStructuralValue;
                    return source.GetValue(expression.Name);
                }
                else
                {
                    return base.Visit(expression);
                }
            }

            /// <summary>
            /// Determines whether a legacy service operation annotation denotes a
            /// single-result operation returning IQueryable or IEnumerable.
            /// </summary>
            /// <param name="svcOpAnnotation">The annotation; may be null.</param>
            /// <returns>True when single-result and queryable/enumerable.</returns>
            internal static bool IsSingleResultQueryableOrEnumerable(LegacyServiceOperationAnnotation svcOpAnnotation)
            {
                if (svcOpAnnotation == null)
                {
                    return false;
                }

                if (!svcOpAnnotation.SingleResult)
                {
                    return false;
                }

                return svcOpAnnotation.ReturnTypeQualifier == ServiceOperationReturnTypeQualifier.IEnumerable
                    || svcOpAnnotation.ReturnTypeQualifier == ServiceOperationReturnTypeQualifier.IQueryable;
            }

            /// <summary>
            /// Creates a visitor for replacing function parameter references.
            /// </summary>
            /// <param name="customFunction">The custom function.</param>
            /// <param name="arguments">The arguments for the function call.</param>
            /// <returns>Linq to Astoria visitor for replacing function parameter references.</returns>
            protected override IQueryExpressionReplacingVisitor CreateFunctionParameterReferenceReplacingVisitor(Function customFunction, IEnumerable<QueryExpression> arguments)
            {
                return new LinqToAstoriaReplaceFunctionParameterReferenceVisitor(customFunction, arguments);
            }

            /// <summary>
            /// Determines if the source that is visiting a LinqQueryMethodExpression is a QueryCollection of Primitive or ComplexTypes
            /// and if it is it writes an error in, other wise it continues to evaluate this using the base Linq evaluator
            /// </summary>
            /// <param name="expression">The expression to evaluate.</param>
            /// <param name="entityCollectionVisitAction">Base expression to visit</param>
            /// <returns>Value of the expression.</returns>
            private QueryValue VisitCollectionElementPrimitiveOrComplexTypeError(LinqQueryMethodExpression expression, Func<QueryValue> entityCollectionVisitAction)
            {
                QueryValue queryValue = null;
                var source = this.EvaluateCollection(expression.Source);

                // Query methods over complex-typed or primitive (non-spatial) collections
                // are not supported by the Astoria server, so produce an error value.
                if (source.Type.ElementType is QueryComplexType
                    || (source.Type.ElementType is QueryScalarType && !(source.Type.ElementType is QueryClrSpatialType)))
                {
                    var sourceError = QueryError.GetErrorFromValues(source.Elements);
                    var error = QueryError.Combine(sourceError, new QueryError("This expression is not supported in Linq to Astoria."));
                    queryValue = expression.Source.ExpressionType.CreateErrorValue(error);
                }
                else
                {
                    queryValue = entityCollectionVisitAction();
                }

                return queryValue;
            }
        }
    }
}
require 'test_helper'

module PushType
  # View-helper specs for the admin area helpers (title / ficon).
  class AdminHelperTest < ActionView::TestCase

    # content_for needs an output flow; ActionView::TestCase does not set one up.
    before { @view_flow = ActionView::OutputFlow.new }

    describe '#title' do
      let(:my_title) { 'My test title' }
      before { title my_title }
      # title() stores its argument in the :title content_for buffer.
      it { content_for?(:title).must_equal true }
      it { content_for(:title).must_equal my_title }
    end

    describe '#ficon' do
      # ficon renders a Foundation icon <i> tag from a symbol.
      it 'should return an <i> element' do
        ficon(:foo).must_equal '<i class="fi-foo"></i>'
      end
      # Optional second argument is appended after the icon markup.
      it 'should return any text' do
        ficon(:foo, 'My icon').must_equal '<i class="fi-foo"></i> My icon'
      end
    end

  end
end
import {UserHighlightPayload} from '@api/payloads';

// Root of the API server every endpoint below is resolved against.
const BASE_URL = 'http://localhost:6617';

// Relative endpoint paths. An entry is either a plain string or a function
// that derives a path from a payload object.
const Mappings = {
  LIST_REPOSITORIES_URL: 'repositories',
  LIST_MODELS_URL: 'models',
  LIST_VIEWS_URL: 'views',
  LIST_PROJECTS_URL: 'projects',
  USER_HIGHLIGHT_URL: (payload: UserHighlightPayload) => `user/${payload.userName}/highlight`,
  AUTH_LOGIN_URL: 'auth/login',
  AUTH_REGISTER_URL: 'auth/register',
  AUTH_RESTORE_URL: 'auth/restore'
};

// Rewrite every entry in place so that it yields an absolute URL:
// strings are prefixed directly, functions are wrapped so their result
// is prefixed at call time.
const addBaseURL = (mappings, baseURL: string) => {
  for (const name of Object.keys(mappings)) {
    const entry = mappings[name];
    if (typeof entry === 'function') {
      mappings[name] = (p) => `${baseURL}/${entry(p)}`;
    } else {
      mappings[name] = `${baseURL}/${entry}`;
    }
  }
};

addBaseURL(Mappings, BASE_URL);

export default Mappings;
#
# Cookbook Name:: rubygems
# Recipe:: ip_security
#
# Hardens IPv4 kernel networking parameters via /proc/sys. Each resource's
# guard reads the current value (anchored whole-line match via `grep -qx`)
# so the recipe stays idempotent; the previous unanchored `cat ... | grep N`
# guards could match a digit inside a longer value.

bash "disable tcp timestamps" do
  code "echo 0 > /proc/sys/net/ipv4/tcp_timestamps"
  not_if "grep -qx 0 /proc/sys/net/ipv4/tcp_timestamps"
end

bash "enable syn cookies (prevent against the common 'syn flood attack')" do
  code "echo 1 > /proc/sys/net/ipv4/tcp_syncookies"
  not_if "grep -qx 1 /proc/sys/net/ipv4/tcp_syncookies"
end

# Fixed typo in resource name: "forwarning" -> "forwarding".
bash "disable Packet forwarding between interfaces" do
  code "echo 0 > /proc/sys/net/ipv4/ip_forward"
  not_if "grep -qx 0 /proc/sys/net/ipv4/ip_forward"
end

bash "ignore all ICMP ECHO and TIMESTAMP requests sent to it via broadcast/multicast" do
  code "echo 1 > /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts"
  not_if "grep -qx 1 /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts"
end

bash "log packets with impossible addresses to kernel log" do
  code "echo 1 > /proc/sys/net/ipv4/conf/all/log_martians"
  not_if "grep -qx 1 /proc/sys/net/ipv4/conf/all/log_martians"
end

bash "disable logging of bogus responses to broadcast frames" do
  code "echo 1 > /proc/sys/net/ipv4/icmp_ignore_bogus_error_responses"
  not_if "grep -qx 1 /proc/sys/net/ipv4/icmp_ignore_bogus_error_responses"
end

bash "do source validation by reversed path (Recommended option for single homed hosts)" do
  code "echo 1 > /proc/sys/net/ipv4/conf/all/rp_filter"
  not_if "grep -qx 1 /proc/sys/net/ipv4/conf/all/rp_filter"
end

bash "don't send redirects" do
  code "echo 0 > /proc/sys/net/ipv4/conf/all/send_redirects"
  not_if "grep -qx 0 /proc/sys/net/ipv4/conf/all/send_redirects"
end

bash "don't accept packets with SRR option" do
  code "echo 0 > /proc/sys/net/ipv4/conf/all/accept_source_route"
  not_if "grep -qx 0 /proc/sys/net/ipv4/conf/all/accept_source_route"
end
#![cfg_attr(docsrs, feature(doc_cfg))]

#[macro_use]
extern crate static_assertions;

use std::{collections::HashMap, time::Duration};

use hyper::client::connect::HttpConnector;

pub use clickhouse_derive::Row;

use self::error::Result;
pub use self::{compression::Compression, row::Row};

pub mod error;
pub mod insert;
pub mod inserter;
pub mod query;
pub mod sql;
#[cfg(feature = "test-util")]
#[cfg_attr(docsrs, doc(cfg(feature = "test-util")))]
pub mod test;
pub mod watch;

mod buflist;
mod compression;
mod response;
mod row;
mod rowbinary;

mod sealed {
    pub trait Sealed {}
}

// TCP keep-alive interval applied to the underlying hyper connector.
const TCP_KEEPALIVE: Duration = Duration::from_secs(60);

/// A ClickHouse client configured with the builder-style `with_*` methods.
/// Cheap to clone; the underlying hyper client is shared.
#[derive(Clone, Debug)]
pub struct Client {
    client: hyper::Client<HttpConnector>,
    url: String,
    database: Option<String>,
    user: Option<String>,
    password: Option<String>,
    compression: Compression,
    options: HashMap<String, String>,
}

impl Default for Client {
    /// Creates a client with an empty URL and no credentials; callers are
    /// expected to configure it via the `with_*` builders.
    fn default() -> Self {
        let mut connector = HttpConnector::new();

        // TODO: make configurable in `Client::builder()`.
        connector.set_keepalive(Some(TCP_KEEPALIVE));

        Self {
            client: hyper::Client::builder().build(connector),
            url: String::new(),
            database: None,
            user: None,
            password: None,
            compression: Compression::default(),
            options: HashMap::new(),
        }
    }
}

impl Client {
    /// Server URL this client talks to.
    pub fn url(&self) -> &String {
        &self.url
    }

    /// Configured database, if any.
    pub fn database(&self) -> Option<&String> {
        self.database.as_ref()
    }

    /// Configured user, if any.
    pub fn user(&self) -> Option<&String> {
        self.user.as_ref()
    }

    /// Configured password, if any.
    pub fn password(&self) -> Option<&String> {
        self.password.as_ref()
    }

    /// Current compression setting.
    pub fn compression(&self) -> &Compression {
        &self.compression
    }

    /// All custom ClickHouse settings attached to every request.
    pub fn options(&self) -> &HashMap<String, String> {
        &self.options
    }

    // TODO: use `url` crate?
    /// Sets the server URL.
    pub fn with_url(mut self, url: impl Into<String>) -> Self {
        self.url = url.into();
        self
    }

    /// Sets the database name.
    pub fn with_database(mut self, database: impl Into<String>) -> Self {
        self.database = Some(database.into());
        self
    }

    /// Sets the user name.
    pub fn with_user(mut self, user: impl Into<String>) -> Self {
        self.user = Some(user.into());
        self
    }

    /// Sets the password.
    pub fn with_password(mut self, password: impl Into<String>) -> Self {
        self.password = Some(password.into());
        self
    }

    /// Sets the compression mode. Under the `test-util` feature this is
    /// forced to `Compression::None` (see the TODO below).
    pub fn with_compression(mut self, compression: Compression) -> Self {
        // TODO: remove when compression will be implemented.
        self.compression = if cfg!(feature = "test-util") {
            Compression::None
        } else {
            compression
        };
        self
    }

    /// Replaces ALL custom options at once — options previously added via
    /// `with_option` are discarded.
    pub fn with_options(mut self, options: HashMap<String, String>) -> Self {
        self.options = options;
        self
    }

    /// Adds (or overwrites) a single custom option.
    pub fn with_option(mut self, name: impl Into<String>, value: impl Into<String>) -> Self {
        self.options.insert(name.into(), value.into());
        self
    }

    /// Starts a new INSERT statement.
    ///
    /// # Panics
    /// If `T` has unnamed fields, e.g. tuples.
    pub fn insert<T: Row>(&self, table: &str) -> Result<insert::Insert<T>> {
        insert::Insert::new(self, table)
    }

    /// Creates a batching inserter for `table`.
    pub fn inserter<T: Row>(&self, table: &str) -> Result<inserter::Inserter<T>> {
        inserter::Inserter::new(self, table)
    }

    /// Starts a SELECT/DDL query.
    pub fn query(&self, query: &str) -> query::Query {
        query::Query::new(self, query)
    }

    /// Starts a live-view WATCH query.
    pub fn watch(&self, query: &str) -> watch::Watch {
        watch::Watch::new(self, query)
    }
}
#ifndef _DELTAIP_ARP_H
#define _DELTAIP_ARP_H

#include "pktbuf.h"
#include "iface.h"

/* Capacity of the ARP cache. */
#define ARP_TABLE_SIZE 128

/* Cache entry states. */
#define ARP_STATE_EMPTY 0x00
#define ARP_STATE_STABLE 0x01
#define ARP_STATE_STATIC 0x02

/* ARP opcodes (RFC 826). */
#define ARP_OP_REQUEST 0x01
#define ARP_OP_REPLY 0x02

/* Entry lifetime in seconds before it is considered stale (see arp_task). */
#define ARP_TIMEOUT 300

/* Iterate over the linked list of cache entries rooted at arp_base. */
#define ARP_FOREACH(entry) FOREACH(arp_base, entry)

/* On-wire ARP packet header for Ethernet/IPv4. */
struct arp_hdr {
    uint16_t hwtype;            /* hardware type */
    uint16_t proto;             /* protocol type */
    uint8_t hwlen;              /* hardware address length */
    uint8_t protolen;           /* protocol address length */
    uint16_t opcode;            /* ARP_OP_REQUEST or ARP_OP_REPLY */
    struct eth_addr shwaddr;    /* sender hardware address */
    struct ip_addr sipaddr;     /* sender IP address */
    struct eth_addr dhwaddr;    /* target hardware address */
    struct ip_addr dipaddr;     /* target IP address */
} __attribute__((packed));

/* One IP-to-Ethernet mapping in the cache (singly linked via `next`). */
struct arp_entry {
    struct ip_addr ip_addr;
    struct eth_addr eth_addr;
    uint8_t state;              /* ARP_STATE_* */
    struct arp_entry *next;
    long update_time;           /* when the mapping was last refreshed */
    long last_time;             /* NOTE(review): presumably last use/lookup time — confirm in arp.c */
    struct iface *iface;        /* interface the mapping was learned on */
};

void arp_init();
int arp_recv(struct pktbuf *buf);
struct arp_entry *arp_add(struct iface *ifa, const struct ip_addr *ipaddr, struct eth_addr *ethaddr);
void arp_update_entry(struct iface *ifa, const struct ip_addr *ipaddr, struct eth_addr *ethaddr);
void arp_task();
struct arp_entry *arp_lookup(struct iface *ifa, const struct ip_addr *ipaddr);

/* Head of the cache entry list. */
extern struct arp_entry *arp_base;

#endif
#include <iostream>
#include <utility>

using namespace std;

constexpr int MAX_N = 1000;

// A value together with how many times it occurred in the input.
// Ordered for output: larger count first; ties broken by smaller value.
template<class T>
class Number {
public:
    T value;
    int count;

    Number() {};
    Number(T value, int count) : value(value), count(count) {};

    // "Less-or-equal" in the desired output order (descending count,
    // ascending value). Made const-correct; semantics unchanged.
    bool operator<= (const Number& n) const {
        if (this->count > n.count) {
            return true;
        } else if (this->count == n.count && this->value <= n.value) {
            return true;
        }
        return false;
    }
};

// In-place quicksort over array[start..end] (inclusive), Lomuto partition
// with the last element as pivot. Requires T::operator<=.
template<class T>
void quick_sort(T *array, int start, int end) {
    if (start >= end) return;

    // Partition around the last element.
    T pivot = array[end];
    int i = start - 1;
    for (int j = start; j < end; j++) {
        if (array[j] <= pivot) {
            i++;
            std::swap(array[i], array[j]);
        }
    }
    i++; // now i refers to the pivot's final position
    std::swap(array[i], array[end]);

    // Recurse into both halves.
    quick_sort(array, start, i - 1);
    quick_sort(array, i + 1, end);
}

template void quick_sort<int>(int *array, int start, int end);
template void quick_sort<Number<int>>(Number<int> *array, int start, int end);

// Reads n integers, then prints each distinct value with its frequency,
// most frequent first (ties: smaller value first).
int main() {
    int n;
    cin >> n;

    // Fix: guard against empty input — the original read raw_data[0]
    // unconditionally, which is undefined behavior when n == 0.
    if (n <= 0) return 0;

    int *raw_data = new int[n];
    for (int i = 0; i < n; i++) {
        cin >> raw_data[i];
    }

    // Sort first so equal values are adjacent and can be counted in one pass.
    quick_sort<int>(raw_data, 0, n - 1);

    // Run-length count the sorted data.
    Number<int> *numbers = new Number<int>[MAX_N];
    int cur = 0;
    int count = 1;
    int last = raw_data[0];
    for (int i = 1; i < n; i++) {
        if (raw_data[i] != last) {
            numbers[cur].value = last;
            numbers[cur].count = count;
            cur++;
            last = raw_data[i];
            count = 1;
        } else {
            count++;
        }
    }
    // Flush the final run (the original guarded this with `count > 0`,
    // which was always true at this point).
    numbers[cur].value = last;
    numbers[cur].count = count;

    // Sort groups into output order; cur is the index of the last group.
    quick_sort(numbers, 0, cur);

    for (int i = 0; i < cur + 1; i++)
        cout << numbers[i].value << ' ' << numbers[i].count << endl;

    // Fix: release the heap buffers (previously leaked).
    delete[] raw_data;
    delete[] numbers;
    return 0;
}
# Open For Contributions

Upload anything related to development or programming. Create your own folder and add your files inside it. Add your name to contributors.md.
#include <stddef.h>
#include <kernel/gpio.h>
#include <kernel/timer.h>
#include <common/stdio.h>

/**
 * Write 32-bit value to register
 * @param reg Register to write
 * @param data Value to write to the register
 */
void
mmio_write(uint32_t reg, uint32_t data)
{
	*(volatile uint32_t *) reg = data;
}

/**
 * Read value from register
 * @param reg Register from which to read
 */
uint32_t
mmio_read(uint32_t reg)
{
	return *(volatile uint32_t *) reg;
}

/**
 * Loop in a way the compiler will not optimise (inexact)
 * @param count Number of loops
 */
static inline void
delay(int32_t count)
{
	asm volatile(
		"__delay_%=: subs %[count], %[count], #1; bne __delay_%=\n"
		: "=r"(count): [count]"0"(count) : "cc"
	);
}

/**
 * Initialise the UART peripheral
 *
 * Sequence: disable UART, release GPIO pull-ups on pins 14/15 (TX/RX),
 * clear pending interrupts, set the baud divisors, configure the line
 * (8-bit words, FIFOs enabled), mask interrupts, then re-enable the UART
 * with TX and RX.
 * NOTE(review): IBRD=1 / FBRD=40 presumably targets 115200 baud from a
 * 3 MHz UART clock — confirm against the board's clock configuration.
 */
void
uart_init()
{
	mmio_write(UART0_CR, 0x00000000);
	mmio_write(GPPUD, 0x00000000);
	delay(150);
	mmio_write(GPPUDCLK0, (1 << 14) | (1 << 15));
	delay(150);
	mmio_write(GPPUDCLK0, 0x00000000);
	mmio_write(UART0_ICR, 0x7ff);
	mmio_write(UART0_IBRD, 1);
	mmio_write(UART0_FBRD, 40);
	mmio_write(UART0_LCRH, (1 << 4) | (1 << 5) | (1 << 6));
	mmio_write(UART0_IMSC, (1 << 1) | (1 << 4) | (1 << 5) | (1 << 6) |
	                       (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10));
	mmio_write(UART0_CR, (1 << 0) | (1 << 8) | (1 << 9));
}

/**
 * Write a character to the UART data register
 * Busy-waits while the transmit FIFO is full (FR bit 5).
 * @param c Character to send
 */
void
uart_putc(unsigned char c)
{
	while (mmio_read(UART0_FR) & (1 << 5))
		;
	mmio_write(UART0_DR, c);
}

/**
 * Retrieve a character from the UART data register
 * Busy-waits while the receive FIFO is empty (FR bit 4).
 */
unsigned char
uart_getc()
{
	while (mmio_read(UART0_FR) & (1 << 4))
		;
	return mmio_read(UART0_DR);
}

/**
 * Send a string to be printed by the UART
 * @param str NUL-terminated string to print
 */
void
uart_puts(const char *str)
{
	for (size_t i = 0; str[i] != '\0'; i++)
		uart_putc((unsigned char) str[i]);
}

/**
 * Initialise the ACT LED for use (set its GPIO pin to output)
 */
void
act_init(void)
{
	mmio_write(ACT_GPFSEL, 1 << ACT_GPFBIT);
}

/**
 * Turn the ACT LED on
 */
void
act_on(void)
{
	mmio_write(ACT_GPSET, 1 << ACT_GPBIT);
}

/**
 * Turn the ACT LED off
 */
void
act_off(void)
{
	mmio_write(ACT_GPCLR, 1 << ACT_GPBIT);
}

/**
 * Blink the ACT LED a set number of times (~200 ms on / 200 ms off)
 * @param n Number of times to blink
 */
void
act_blink(uint32_t n)
{
	while (n--) {
		act_on();
		uwait(200000);
		act_off();
		uwait(200000);
	};
}
import { ActionTree, MutationTree, GetterTree } from 'vuex'
import { augmentKeys, chainIdHexToNumber } from '~/modules/tools'
import { BSC, ETH, IMainChain, Polygon } from '~/constant/chain'
import { IAccountInfo } from '~/services/Account'

// Wallet account currently connected to the dapp.
export interface IConnectedAccount {
  address: string
  chain: IMainChain,
  walletName: string
}

// All mutation/action/getter names for this module, namespaced via
// augmentKeys at the bottom of the file.
const keys = {
  namespace: 'me',
  // mutations
  setInviter: 'setInviter',
  setChannel: 'setChannel',
  setConnectedAccount: 'setConnectedAccount',
  setLoggedIn: 'setLoggedIn',
  setRegisteringAccounts: 'setRegisteringAccounts',
  // actions
  fetchRegisteringAccounts: 'fetchRegisteringAccounts',
  // getters
  computedChainId: 'computedChainId',
  computedEvmChainId: 'computedEvmChainId'
}

export const state = () => ({
  inviter: '',
  channel: '',
  connectedAccount: {
    address: ''
  } as IConnectedAccount,
  loggedIn: false,
  registeringAccounts: [] as IAccountInfo[],
})

export type MeState = ReturnType<typeof state>

export const mutations: MutationTree<MeState> = {
  [keys.setInviter]: (state, inviter: string) => {
    state.inviter = inviter
  },
  [keys.setChannel]: (state, channel: string) => {
    state.channel = channel
  },
  // Storing a connected account implies the user is logged in.
  [keys.setConnectedAccount]: (state, connectedAccount: IConnectedAccount) => {
    state.connectedAccount = connectedAccount
    state.loggedIn = true
  },
  [keys.setLoggedIn]: (state, status: boolean) => {
    state.loggedIn = status
  },
  [keys.setRegisteringAccounts]: (state, accounts: IAccountInfo[]) => {
    state.registeringAccounts = accounts
  }
}

export const actions: ActionTree<MeState, MeState> = {
  // Loads the accounts currently being registered for the connected address.
  // No-op when no wallet is connected.
  async [keys.fetchRegisteringAccounts] ({ commit, state, getters }) {
    if (!state.connectedAccount.address) {
      return
    }
    try {
      const res = await this.$services.account.registeringAccounts({
        chain_type: getters[keys.computedChainId],
        address: state.connectedAccount.address
      })
      commit(keys.setRegisteringAccounts, res && res.registering_accounts)
    }
    catch (err) {
      console.error(err)
      throw err
    }
  }
}

export const getters: GetterTree<MeState, MeState> = {
  // Chain id used by the backend: BSC and Polygon are folded into ETH
  // (presumably because the backend treats all EVM chains as ETH — confirm
  // against the API contract).
  [keys.computedChainId] (state): number {
    const chainId = state.connectedAccount.chain?.chainId
    if ([BSC.chainId, Polygon.chainId].includes(chainId)) {
      return ETH.chainId
    }
    return chainId
  },
  // Chain id reported by the injected wallet provider; 0 when no provider.
  [keys.computedEvmChainId] (): number {
    const { ethereum } = window
    if (typeof ethereum !== 'undefined' && (ethereum.networkVersion || ethereum.chainId)) {
      return chainIdHexToNumber(ethereum.networkVersion || ethereum.chainId)
    }
    else {
      return 0
    }
  }
}

export const ME_KEYS = augmentKeys(keys, keys.namespace)
import { Memento } from "./Memento";

/**
 * Immutable-by-convention snapshot of a Position's coordinates.
 */
class PositionSnapshot {
  public constructor(public x: number, public y: number) { }

  /** Plain-object form used by JSON.stringify. */
  public toJSON() {
    const { x, y } = this;
    return { x, y };
  }
}

/**
 * A mutable 2D position implementing the Memento pattern:
 * `save` captures the current coordinates, `restore` reapplies a snapshot.
 */
class Position implements Memento<PositionSnapshot> {
  public constructor(private _x: number, private _y: number) { }

  /** Capture the current coordinates as a snapshot. */
  public save(): PositionSnapshot {
    return new PositionSnapshot(this._x, this._y);
  }

  /** Reapply previously saved coordinates. */
  public restore(snapshot: PositionSnapshot): void {
    this._x = snapshot.x;
    this._y = snapshot.y;
  }

  /** Independent copy with the same coordinates. */
  public clone(): Position {
    return new Position(this._x, this._y);
  }

  public get x() {
    return this._x;
  }

  public set x(x: number) {
    this._x = x;
  }

  public get y() {
    return this._y;
  }

  public set y(y: number) {
    this._y = y;
  }
}

export { Position, PositionSnapshot }
package com.ifanr.tangzhi.ext import com.google.gson.JsonObject import java.lang.reflect.Constructor private val CONSTRUCTS = mutableMapOf<Class<*>, Constructor<*>>() @Throws(Exception::class) private fun findSuitableConstruct(clz: Class<*>): Constructor<*> { synchronized(CONSTRUCTS) { return CONSTRUCTS.getOrPut(clz) { clz.getConstructor(JsonObject::class.java) } } } fun JsonObject.getSafeString(key: String) = try { get(key).asString ?: "" } catch (e: Exception) { "" } fun JsonObject.getSafeFloat(key: String) = try { get(key).asFloat } catch (e: Exception) { 0f } fun JsonObject.getSafeBoolean(key: String) = try { get(key).asBoolean } catch (e: Exception) { false } fun JsonObject.getSafeStringArray(key: String) = try { get(key).asJsonArray.map { it.asString } } catch (e: Exception) { listOf<String>() } fun <T> JsonObject.getSafeArrayByConstruct(prop: String, clz: Class<T>): List<T> { return try { getAsJsonArray(prop).map { findSuitableConstruct(clz).newInstance(it) as T } } catch (e: Exception) { emptyList() } } fun JsonObject.getSafeLong(key: String) = try { get(key).asLong } catch (e: Exception) { 0L } fun JsonObject.getSafeInt(key: String) = try { get(key).asInt } catch (e: Exception) { 0 }
<?php namespace app\api\model; use think\Model; /** * 用户类 model */ class Member extends Model { public function getUser(){ } public function add( $data ){ $result = $this->save( $data ); if( $result === false ){ return $this->getMessage(); }else{ return $result; } } public function edit(){ $result = $this->save( $data, ['user_id'=>$data['user_id']]); if( $result === false ){ return $this->getMessage(); }else{ return $result; } } }
<?php

namespace Drupal\Tests\feeds\Kernel;

use Drupal\feeds_test_events\EventSubscriber\FeedsSubscriber;
use Drupal\node\Entity\Node;

/**
 * Tests for dispatching feeds events.
 *
 * @group feeds
 */
class FeedsEventsTest extends FeedsKernelTestBase {

  /**
   * {@inheritdoc}
   */
  public static $modules = [
    'field',
    'node',
    'feeds',
    'text',
    'filter',
    'feeds_test_events',
  ];

  /**
   * Checks the order of event dispatching messages.
   *
   * Module feeds_test_events implements all feeds events and stores a message
   * for each in $GLOBALS['feeds_test_events'].
   *
   * @param array $messages
   *   An array of plain-text messages in the order they should appear.
   */
  protected function assertEventSubscriberMessageOrder(array $messages) {
    $positions = [];
    foreach ($messages as $message) {
      // Verify that each message is found and record its position.
      $position = array_search($message, $GLOBALS['feeds_test_events']);
      // Bug fix: assertTrue() returns NULL, so the original
      // "if ($this->assertTrue(...))" never populated $positions and the
      // order check below was vacuous. Assert explicitly, then record the
      // position unconditionally.
      $this->assertNotFalse($position, $message);
      $positions[] = $position;
    }

    // Sort the positions and ensure they remain in the same order.
    $sorted = $positions;
    sort($sorted);
    $this->assertTrue($sorted == $positions, 'The event subscriber messages appear in the correct order.');
  }

  /**
   * Ensure that the prevalidate event is dispatched at the right moment.
   */
  public function testPrevalidateEvent() {
    // Create a feed type. Do not map to 'title'.
    $feed_type = $this->createFeedTypeForCsv(['guid' => 'guid'], [
      'id' => 'my_feed_type',
      'mappings' => [
        [
          'target' => 'feeds_item',
          'map' => ['guid' => 'guid'],
          'unique' => ['guid' => TRUE],
        ],
      ],
    ]);

    // Try to import a feed.
    $feed = $this->createFeed($feed_type->id(), [
      'source' => $this->resourcesPath() . '/csv/content.csv',
    ]);
    $feed->import();

    // Ensure that the import failed because of validation errors.
    $messages = \Drupal::messenger()->all();
    $this->assertContains('This value should not be null.', (string) $messages['warning'][0]);
    $this->assertNodeCount(0);

    // Clear messages.
    \Drupal::messenger()->deleteAll();

    // Now create a feed type with the same settings. This time, ensure that
    // \Drupal\feeds_test_events\EventSubscriber::prevalidate() sets a title on
    // the entity, which it does only for the feed type 'no_title'.
    $feed_type = $this->createFeedTypeForCsv(['guid' => 'guid'], [
      'id' => 'no_title',
      'mappings' => [
        [
          'target' => 'feeds_item',
          'map' => ['guid' => 'guid'],
          'unique' => ['guid' => TRUE],
        ],
      ],
    ]);

    // Try to import a feed.
    $feed = $this->createFeed($feed_type->id(), [
      'source' => $this->resourcesPath() . '/csv/content.csv',
    ]);
    $feed->import();

    // Assert that there are no warnings this time.
    $messages = \Drupal::messenger()->all();
    $this->assertArrayNotHasKey('warning', $messages);

    // Assert that 2 nodes were created.
    $this->assertNodeCount(2);

    // Check title of the first created node.
    $node = Node::load(1);
    $this->assertEquals('foo', $node->getTitle());
  }

  /**
   * Tests skip import on presave feature.
   */
  public function testSkipImportOnPresave() {
    $feed_type = $this->createFeedTypeForCsv(['guid' => 'guid', 'title' => 'title'], [
      'id' => 'import_skip',
    ]);

    // Import feed.
    $feed = $this->createFeed($feed_type->id(), [
      'source' => $this->resourcesPath() . '/csv/content.csv',
    ]);
    $feed->import();

    // Assert that only the second item was imported.
    $this->assertNodeCount(1);
    $node = Node::load(1);
    $this->assertEquals('Ut wisi enim ad minim veniam', $node->getTitle());
  }

  /**
   * Tests the order in which events are dispatched on an import.
   */
  public function testEventDispatchOrderOnImport() {
    $GLOBALS['feeds_test_events'] = [];

    $feed_type = $this->createFeedTypeForCsv(['guid' => 'guid', 'title' => 'title']);

    // Import feed.
    $feed = $this->createFeed($feed_type->id(), [
      'source' => $this->resourcesPath() . '/csv/content.csv',
    ]);
    $feed->import();

    $this->assertEventSubscriberMessageOrder([
      // Import starts with fetching.
      FeedsSubscriber::class . '::onInitImport called',
      FeedsSubscriber::class . '::preFetch called',
      FeedsSubscriber::class . '::postFetch called',
      // Second stage is parsing.
      FeedsSubscriber::class . '::onInitImport called',
      FeedsSubscriber::class . '::preParse called',
      FeedsSubscriber::class . '::postParse called',
      // Third stage is processing, process events occur per item.
      FeedsSubscriber::class . '::onInitImport called',
      FeedsSubscriber::class . '::preProcess called',
      FeedsSubscriber::class . '::prevalidate called',
      FeedsSubscriber::class . '::preSave called',
      FeedsSubscriber::class . '::postSave called',
      FeedsSubscriber::class . '::postProcess called',
      // Second item being processed.
      FeedsSubscriber::class . '::onInitImport called',
      FeedsSubscriber::class . '::preProcess called',
      FeedsSubscriber::class . '::prevalidate called',
      FeedsSubscriber::class . '::preSave called',
      FeedsSubscriber::class . '::postSave called',
      FeedsSubscriber::class . '::postProcess called',
      // There are no items to clean, so the clean stage is completely skipped.
      FeedsSubscriber::class . '::onFinish called',
    ]);
  }

  /**
   * Tests the order in which events are dispatched on an expire.
   */
  public function testEventDispatchOrderOnExpire() {
    // Import items first.
    $feed_type = $this->createFeedTypeForCsv(['guid' => 'guid', 'title' => 'title'], [
      'processor_configuration' => [
        'authorize' => FALSE,
        'values' => [
          'type' => 'article',
        ],
        'expire' => 3600,
      ],
    ]);
    $feed = $this->createFeed($feed_type->id(), [
      'source' => $this->resourcesPath() . '/csv/content.csv',
    ]);
    $feed->import();

    // Set imported time of all imported items to a timestamp in the past so
    // that they expire.
    for ($i = 1; $i <= 2; $i++) {
      $node = Node::load($i);
      $node->feeds_item->imported = \Drupal::service('datetime.time')->getRequestTime() - 3601;
      $node->save();
    }

    // Now expire items.
    $GLOBALS['feeds_test_events'] = [];
    $feed->startBatchExpire();
    $batch =& batch_get();
    $batch['progressive'] = FALSE;
    batch_process();

    $this->assertEventSubscriberMessageOrder([
      FeedsSubscriber::class . '::onInitExpire called',
      FeedsSubscriber::class . '::onExpire called',
      FeedsSubscriber::class . '::onInitExpire called',
      FeedsSubscriber::class . '::onExpire called',
    ]);
  }

  /**
   * Tests the order in which events are dispatched when clearing items.
   */
  public function testEventDispatchOrderOnClear() {
    // Import items first.
    $feed_type = $this->createFeedTypeForCsv(['guid' => 'guid', 'title' => 'title']);
    $feed = $this->createFeed($feed_type->id(), [
      'source' => $this->resourcesPath() . '/csv/content.csv',
    ]);
    $feed->import();

    // Now delete all items using a batch.
    $GLOBALS['feeds_test_events'] = [];
    $feed->startBatchClear();
    $batch =& batch_get();
    $batch['progressive'] = FALSE;
    batch_process();

    $this->assertEventSubscriberMessageOrder([
      FeedsSubscriber::class . '::onInitClear called',
      FeedsSubscriber::class . '::onClear called',
    ]);
  }

}
# frozen_string_literal: true module Tinybucket module Resource class PullRequests < Base def initialize(repo, options) @repo = repo @args = [options] end # Create a new pull request. # # @todo to be implemented. # @raise [NotImplementedError] to be implemented. def create(_options) raise NotImplementedError end # Get the specific pull request on the repository. # # @param pullrequest_id [String] # @param options [Hash] # @return [Tinybucket::Model::PullRequest] def find(pullrequest_id, options = {}) pull_requests_api.find(pullrequest_id, options).tap do |m| inject_repo_keys(m, @repo.repo_keys) end end # Get activities on the po # # TODO: To be implemented. def activities(_options) raise NotImplementedError end private def pull_requests_api create_api('PullRequests', @repo.repo_keys) end def enumerator create_enumerator(pull_requests_api, :list, *@args) do |m| inject_repo_keys(m, @repo.repo_keys) end end end end end
1 142 150 157 304 2 3 240 4 5 136 6 98 228 7 97 214 294 8 72 259 305 9 148 190 10 202 11 12 189 339 13 348 14 145 15 54 293 329 16 17 45 56 152 170 18 19 198 276 20 24 21 132 218 289 22 23 145 188 253 24 149 302 25 125 314 26 116 178 27 28 270 29 39 283 299 30 148 258 31 1 297 32 51 33 251 306 307 34 182 244 282 289 35 69 75 171 219 324 349 36 11 185 258 37 154 193 250 38 104 137 163 39 40 39 117 247 326 41 44 172 42 25 117 133 43 90 172 323 44 45 46 179 47 69 75 48 49 56 162 50 131 241 272 51 52 7 53 124 54 70 55 283 56 199 277 296 57 52 64 58 60 80 128 59 294 60 134 208 61 2 50 52 84 62 6 63 32 161 64 116 158 265 278 65 129 66 184 67 165 187 68 14 69 247 287 70 71 258 72 122 297 73 74 179 75 89 189 206 76 37 260 77 18 217 78 57 82 244 79 1 42 80 156 305 81 48 134 82 45 70 83 183 248 83 84 55 235 268 85 69 295 86 43 61 87 88 28 89 308 90 337 91 118 328 92 85 232 238 329 93 116 94 158 283 95 96 97 146 348 98 180 306 99 100 101 144 102 11 25 186 103 285 104 92 213 105 194 284 106 147 172 107 34 108 9 109 254 110 111 112 253 265 113 111 114 5 238 115 132 116 228 302 117 82 118 119 120 98 260 121 51 100 258 122 139 168 331 123 1 279 343 124 30 125 328 126 131 215 257 127 31 311 128 111 135 156 170 129 250 130 12 74 279 291 131 64 349 132 130 133 9 50 134 149 183 186 248 135 80 170 223 136 80 251 315 137 128 138 116 143 284 139 231 303 140 231 141 49 142 42 121 180 318 341 143 41 80 289 144 154 283 336 145 7 35 113 146 147 46 202 240 148 27 149 240 324 150 86 232 151 80 157 287 336 152 146 153 82 154 155 91 308 156 60 160 157 51 128 194 158 41 159 247 350 160 161 256 262 162 163 71 164 31 165 325 166 187 300 167 124 266 286 168 52 169 170 171 172 79 282 343 173 193 200 236 174 190 175 176 177 156 343 178 64 84 140 179 44 274 180 211 312 181 95 182 129 183 139 191 184 18 146 310 185 266 186 86 332 187 10 25 76 188 129 332 189 190 56 60 250 191 100 227 192 18 37 193 75 200 222 340 194 63 72 120 204 209 195 196 8 18 197 20 87 198 32 81 111 168 199 200 27 135 201 121 202 
301 203 84 288 301 204 77 179 205 30 112 206 20 297 314 207 283 208 209 210 7 56 147 202 211 60 309 212 274 213 75 277 214 175 215 138 210 216 43 78 264 291 217 218 192 234 327 219 72 220 1 244 349 221 162 334 222 97 291 223 341 224 129 310 225 9 228 226 140 335 337 227 285 228 134 229 212 310 230 89 180 254 231 185 253 232 330 233 79 211 250 234 96 252 235 97 328 236 68 224 229 237 82 238 260 239 75 255 240 69 241 241 242 155 243 37 96 300 244 73 79 229 245 62 246 81 243 248 292 247 248 249 84 138 250 12 251 266 274 252 111 253 79 266 268 254 255 18 67 124 166 245 256 114 257 25 45 188 331 258 154 259 260 261 262 133 263 145 286 264 265 266 80 262 274 267 89 111 268 165 256 279 319 269 270 271 3 312 272 191 273 76 292 274 97 275 190 208 276 134 256 277 191 278 279 57 280 14 185 343 281 76 254 271 282 48 283 284 21 62 93 285 259 277 286 192 287 253 288 108 200 322 335 289 202 290 232 291 299 292 145 257 293 93 294 295 296 297 125 163 309 298 223 299 45 341 300 301 42 247 323 302 303 66 74 304 343 305 160 306 62 317 322 343 307 308 309 28 310 172 210 329 311 78 181 312 85 146 313 249 322 314 315 8 263 316 13 51 330 317 38 318 319 100 320 321 322 129 150 204 287 323 3 23 182 349 324 129 340 325 16 197 265 269 326 70 327 79 144 205 252 305 328 329 217 313 330 117 266 331 48 332 276 333 217 320 334 168 301 335 37 336 155 237 346 337 4 132 261 338 339 82 178 340 21 209 341 342 325 343 344 101 345 159 260 346 102 169 347 32 348 7 84 202 349 6 152 350
module HandlePolicyNotification class BrokerDetails include Virtus.model attribute :npn, String def found_broker @found_broker ||= Broker.by_npn(npn).first end end end
package com.raphtory.algorithms

import com.raphtory.core.model.algorithm.{GraphAlgorithm, GraphPerspective, Row}

import scala.collection.mutable

/**
Description
  This algorithm will return the two hop neighbours of each node in the graph.
  If the user provides a node ID, then it will only return the two hop
  neighbours of that node.

  1. In the first step each (selected) node messages all its neighbours,
     saying that it is asking for a two-hop analysis.
  2. Each vertex looks at the list of ID requests it has received, then finds
     all of its neighbours and replies to the requester in the form
     (response, neighbour, me).
  3. The requester compiles these replies into a list of results.

Parameters
  node (String) : The node ID to start with. If not specified, then this is
    run for all nodes.
  output (String) : The path where the output will be saved. If not specified,
    defaults to /tmp/twoHopNeighbour

Returns
  One row per discovered two-hop path:
  ID (Long)        : Requesting vertex ID
  First hop (Long) : The intermediate (one-hop) neighbour
  Second hop (Long): The two-hop neighbour reached through it

Warning
  As this sends a lot of messages between nodes, running this for the entire
  graph with a large number of iterations may cause you to run out of memory.
  Therefore it is most optimal to run with a select node at a time. The number
  of iterations makes a difference to ensure all messages have been read.
**/
class twoHopNeighbour(nodeID:Long = -1, output: String = "/tmp/twoHopNeighbour") extends GraphAlgorithm {

  override def algorithm(graph: GraphPerspective): Unit = {
    graph
      // Step 1: the selected vertex (or every vertex when nodeID == -1)
      // broadcasts a request on all its edges.
      .step(
        vertex =>
          if (nodeID == -1 || vertex.ID() == nodeID){
            vertex.getEdges().foreach(edge => edge.send(("twoHopRequest", vertex.ID, 0)))
          }
      )
      .iterate(
        { vertex =>
          val newMessages = vertex.messageQueue[(String, Long, Long)]
          val requests = newMessages.distinct.filter(_._1 == "twoHopRequest")
          val responses = newMessages.distinct.filter(_._1 == "twoHopResponse")
          // Step 2: answer each requester with every neighbour except the
          // requester itself, as (response, neighbour, me).
          if (requests.nonEmpty) {
            vertex.getAllNeighbours().foreach {
              neighbour => requests.foreach(msg => if (msg._2 != neighbour) {
                vertex.messageNeighbour(msg._2, ("twoHopResponse", neighbour, vertex.ID))
              })
            }
          }
          // Step 3: the requester accumulates (first hop, second hop) pairs
          // in vertex state, de-duplicated.
          if (responses.nonEmpty) {
            vertex.setState("twoHopResponse", true)
            var twoHops = vertex.getOrSetState("twoHops", mutable.ListBuffer[(Long, Long)]())
            responses.foreach(response => twoHops.append((response._2, response._3)))
            twoHops = twoHops.distinct
            vertex.setState("twoHops", twoHops)
          }
        },
        // 25 iterations to let all request/response messages drain.
        25, true
      )
      // Emit only vertices that received responses, exploding each stored
      // (firstHop, secondHop) pair into its own row.
      .select(vertex =>
        Row(
          vertex.getStateOrElse("twoHopResponse", false),
          vertex.ID(),
          vertex.getStateOrElse("twoHops", "")
        )
      )
      .filter(row => row.get(0) == true)
      .explode(row =>
        row.get(2).asInstanceOf[mutable.ListBuffer[(Long, Long)]]
          .toList.map(hops => Row(row.get(1), hops._1, hops._2))
      )
      .writeTo(output)
  }
}

object twoHopNeighbour {
  def apply(nodeID: Long= -1, output: String= "/tmp/twoHopNeighbour") =
    new twoHopNeighbour(nodeID, output)
}
package lila package game import game._ class FeaturedTest extends LilaSpec { import Featured._ "Featured" should { "box 0 to 1" in { foreach(List( 0f -> 0f, 1f -> 1f, 0.5f -> 0.5f, 0.9f -> 0.9f, -1f -> 0f, 2f -> 1f)) { case (a, b) ⇒ box(0 to 1)(a) must_== b } } "box 1200 to 2000" in { foreach(List( 1200f -> 0f, 2000f -> 1f, 1600f -> 0.5f, 1900f -> 0.875f, -1f -> 0f, 800f -> 0f, 2200f -> 1f)) { case (a, b) ⇒ box(1200 to 2000)(a) must_== b } } val game1 = DbGame( game = chess.Game(chess.Variant.default), whitePlayer = DbPlayer.white.copy(elo = 1600.some), blackPlayer = DbPlayer.black, ai = None, creatorColor = chess.Color.White, mode = chess.Mode.default, variant = chess.Variant.default) val game2 = game1.copy( clock = chess.Clock(180,0).some, turns = 11) val game3 = game1.copy( clock = chess.Clock(60,0).some, turns = 21) val games = List(game1, game2, game3) "elo" in { "game1 white" in { eloHeuristic(chess.Color.White)(game1) must_== 0.6f } "game1 black" in { eloHeuristic(chess.Color.Black)(game1) must_== 0f } } "speed" in { "game1" in { speedHeuristic(game1) must_== 0 } "game2" in { speedHeuristic(game2) must_== 0.5f } "game3" in { speedHeuristic(game3) must_== 1f } } "progress" in { "game1" in { progressHeuristic(game1) must_== 1f } "game2" in { progressHeuristic(game2) must_== 0.5f } "game3" in { progressHeuristic(game3) must_== 0f } } "score" in { "game1" in { score(game1) must_== 0.6f + 0f + 1f * 0.5f } "game2" in { score(game2) must_== 0.6f + 0.5f + 0.5f * 0.5f } "game3" in { score(game3) must_== 0.6f + 1f + 0f * 0.5f } } "best" in { "3 games" in { best(games) must_== game3.some } "3 games reversed" in { best(games.reverse) must_== game3.some } } } }
# [Efficient Comparison](https://app.codesignal.com/arcade/python-arcade/meet-python/NWtSkp4Gd8ZeKc5R5/)
# Deallocate Firewall Login-AzAccount $resourceGroup = 'rg-alias-region-networking' $firewallName = 'fw-alias-region-01' $firewall = Get-AzFirewall -Name $firewallName -ResourceGroupName $resourceGroup $firewall.Deallocate() Set-AzFirewall -AzureFirewall $firewall # Allocate Firewall Login-AzAccount $resourceGroup = 'rg-alias-region-networking' $firewallName = 'fw-alias-region-01' $firewall = Get-AzFirewall -Name $firewallName -ResourceGroupName $resourceGroup $virtualNetwork01Name = 'vnet-alias-region-01' $virtualNetwork = Get-AzVirtualNetwork -ResourceGroupName $resourceGroup -Name $virtualNetwork01Name $firewallPublicIPAddressName = 'pip-alias-region-fw01' $publicIPAddress = Get-AzPublicIpAddress -Name $firewallPublicIPAddressName -ResourceGroupName $resourceGroup $firewall.Allocate($virtualNetwork,$publicIPAddress) Set-AzFirewall -AzureFirewall $firewall
/***************************************************************************** * Copyright (C) 2003-2010 PEAK System-Technik GmbH * * linux@peak-system.com * www.peak-system.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Maintainer: Stephane Grosjean (s.grosjean@peak-system.com) * * Major contributions by: * Oliver Hartkopp (oliver.hartkopp@volkswagen.de) socketCAN * Klaus Hitschler (klaus.hitschler@gmx.de) * * Contributions: Philipp Baer (philipp.baer@informatik.uni-ulm.de) * Tom Heinrich * John Privitera (JohnPrivitera@dciautomation.com) *****************************************************************************/ /***************************************************************************** * * pcan_usb_core.c - the outer usb parts for pcan-usb and pcan-usb-pro support * * $Id: pcan_usb_core.c 626 2010-06-16 21:37:49Z khitschler $ * *****************************************************************************/ /* #define DEBUG */ /* #undef DEBUG */ #include "src/pcan_common.h" /* must always be the 1st include */ #ifdef USB_SUPPORT #include <linux/stddef.h> /* NULL */ #include <linux/errno.h> #include <linux/slab.h> /* pcan_malloc() */ #include <linux/usb.h> #include <linux/net.h> #include "src/pcan_main.h" #include "src/pcan_fops.h" #include "src/pcan_usb_core.h" #include "src/pcan_usb.h" #include "src/pcan_usbpro.h" #include 
"src/pcanfd_usb.h" #include "src/pcanfd_core.h" #include "src/pcan_filter.h" #ifdef NETDEV_SUPPORT #include "src/pcan_netdev.h" // for hotplug pcan_netdev_(un)register() #endif #ifdef DEBUG #define PCAN_USB_DEBUG_WRITE #define PCAN_USB_DEBUG_DECODE #else //#define PCAN_USB_DEBUG_WRITE //#define PCAN_USB_DEBUG_DECODE #endif #define PCAN_USB_VENDOR_ID 0x0c72 #define PCAN_USB_PRODUCT_ID 0x000c #define PCAN_USBPRO_PRODUCT_ID 0x000d #define PCAN_USB_READ_BUFFER_SIZE_OLD 64 /* used len for PCAN-USB rev < 6*/ #define PCAN_USB_READ_BUFFER_SIZE 1024 /* buffer for read URB data (IN) */ #define PCAN_USB_READ_PACKET_SIZE 64 /* always 64 (USB1 device) */ #define PCAN_USB_WRITE_BUFFER_SIZE_OLD 64 // length for PCAN-USB rev < 6 //#define PCAN_USB_WRITE_BUFFER_SIZE 128 // buffer for write URB (OUT) #define PCAN_USB_WRITE_BUFFER_SIZE 256 // ...says Win driver #define PCAN_USB_WRITE_PACKET_SIZE 64 // always 64 (USB1 device) /* Defines the size of one USB message that can be received from the device * Driver allocates one buffer of n x PCAN_USBPRO_READ_BUFFER_SIZE to handle * consecutive reads */ //#define PCAN_USBPRO_READ_BUFFER_SIZE 1024 #define PCAN_USBPRO_READ_BUFFER_SIZE 2048 //#define PCAN_USBPRO_READ_BUFFER_SIZE 4096 #define MAX_CYCLES_TO_WAIT_FOR_RELEASE 100 /* max schedules before release */ /* wait this time in seconds at startup to get first messages */ #define STARTUP_WAIT_TIME 0.01 static struct usb_device_id pcan_usb_ids[] = { { USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USB_PRODUCT_ID) }, { USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID) }, { USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID) }, { USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID) }, { USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBCHIP_PRODUCT_ID) }, #ifdef PCAN_USBX6_PRODUCT_ID { USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID) }, #endif { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, pcan_usb_ids); #ifndef PCAN_USB_DONT_REGISTER_DEV static struct usb_class_driver pcan_class = { .name = 
"pcanusb%d", .fops = &pcan_fops, #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) .mode = S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, #endif .minor_base = PCAN_USB_MINOR_BASE, }; #endif static int usb_devices = 0; /* the number of accepted usb_devices */ /* this function is global for USB adapters */ struct pcan_usb_interface *pcan_usb_get_if(struct pcandev *pdev) { #ifdef PCAN_USBX6_PRODUCT_ID return pdev->port.usb.usb_if; #else return (struct pcan_usb_interface *)pdev->adapter; #endif } /* forward declaration for chardev pcan_usb_write_notitfy() */ static int pcan_usb_write(struct pcandev *dev, struct pcan_udata *ctx); static void pcan_usb_write_notify(struct urb *purb, struct pt_regs *pregs) { struct pcandev *dev = purb->context; struct pcan_usb_interface *usb_if = pcan_usb_get_if(dev); pcan_lock_irqsave_ctxt lck_ctx; int err = purb->status; #ifdef PCAN_USB_DEBUG_WRITE printk(KERN_INFO "%s(): status=%d actual_length=%d\n", __func__, purb->status, purb->actual_length); #endif #if 1 /* SGr: useful? 
*/ #else if (!usb_if) { pr_info(DEVICE_NAME "%s(%u): usb_if=NULL\n", __func__, __LINE__); return; } #endif /* un-register outstanding urb */ atomic_dec(&usb_if->active_urbs); /* don't count interrupts - count packets */ dev->dwInterruptCounter++; pcan_lock_get_irqsave(&dev->isr_lock, lck_ctx); switch (err) { case 0: dev->tx_frames_counter++; err = pcan_usb_write(dev, NULL); if (!err) break; #ifdef PCAN_USES_OLD_TX_ENGINE_STATE /* engine stopped */ atomic_set(&dev->tx_engine_state, TX_ENGINE_STOPPED); #else /* done by pcan_usb_write() */ //dev->locked_tx_engine_state = TX_ENGINE_STOPPED; #endif if (err == -ENODATA) { /* signal I'm ready to write again */ pcan_event_signal(&dev->out_event); #ifdef NETDEV_SUPPORT netif_wake_queue(dev->netdev); #endif break; } dev->nLastError = err; /* build the error frame and put it into Rx FIFO */ if (!(dev->wCANStatus & CAN_ERR_QXMTFULL)) { struct pcanfd_msg ef; pcan_handle_error_ctrl(dev, &ef, PCANFD_TX_OVERFLOW); if (pcan_xxxdev_rx(dev, &ef) > 0) pcan_event_signal(&dev->in_event); } break; default: /* case -ECONNRESET: case -ESHUTDOWN: */ pr_err(DEVICE_NAME ": %s(%u): USB abnormal err %d\n", __func__, __LINE__, err); case -ENOENT: /* urb killed */ /* engine stopped */ pcan_set_tx_engine(dev, TX_ENGINE_STOPPED); break; } pcan_lock_put_irqrestore(&dev->isr_lock, lck_ctx); } static void pcan_usb_read_notify(struct urb *purb, struct pt_regs *pregs) { struct pcan_usb_interface *usb_if = purb->context; struct pcandev *dev; u8 *read_buffer_addr = purb->transfer_buffer; const int read_buffer_len = purb->actual_length; int read_buffer_size; int err, d; #if 0 DPRINTK(KERN_DEBUG "%s: %s() status=%d\n", DEVICE_NAME, __func__, purb->status); #endif /* un-register outstanding urb */ atomic_dec(&usb_if->active_urbs); /* do interleaving read, stop with first error */ switch (purb->status) { case 0: break; case -ECONNRESET: /* usb_unlink_urb() called */ case -ENOENT: /* urb killed */ case -EPIPE: DPRINTK(KERN_DEBUG "%s: read data stream turned 
off (err %d)\n", DEVICE_NAME, purb->status); return; #if 0 /* here also are cases that have been seen to occur */ case -EOVERFLOW: #endif /* error codes when USB device is hot unplugged */ case -ESHUTDOWN: /* the ep is being disabled */ case -EPROTO: case -EILSEQ: default: pr_err("%s: unhandled read data stream turned off (err %d)\n", DEVICE_NAME, purb->status); /* "unplug" all devices of the same USB adapter */ dev = usb_if->dev; for (d = 0; d < usb_if->can_count; dev++, d++) { /* seems that this is the most reasonable thing to do * most of the times... */ dev->ucPhysicallyInstalled = 0; /* unlock any waiting tasks */ if (dev->nOpenPaths > 0) { pcan_event_signal(&dev->out_event); pcan_event_signal(&dev->in_event); } } return; } /* buffer interleave to increase speed */ if (read_buffer_addr == usb_if->read_buffer_addr[0]) { FILL_BULK_URB(purb, usb_if->usb_dev, usb_rcvbulkpipe(usb_if->usb_dev, usb_if->pipe_read.ucNumber), usb_if->read_buffer_addr[1], usb_if->read_buffer_size, pcan_usb_read_notify, usb_if); } else { FILL_BULK_URB(purb, usb_if->usb_dev, usb_rcvbulkpipe(usb_if->usb_dev, usb_if->pipe_read.ucNumber), usb_if->read_buffer_addr[0], usb_if->read_buffer_size, pcan_usb_read_notify, usb_if); } /* start next urb */ err = __usb_submit_urb(purb); if (err) { pr_err("%s: %s() URB submit failure %d\n", DEVICE_NAME, __func__, err); } else { atomic_inc(&usb_if->active_urbs); } /* decoding the received one */ #ifdef PCAN_USB_DEBUG_DECODE pr_info("%s: got %u bytes URB, decoding it by packets of %u bytes:\n", DEVICE_NAME, read_buffer_len, usb_if->read_packet_size); #endif for (read_buffer_size = 0; read_buffer_size < read_buffer_len; ) { #ifdef PCAN_USB_DEBUG_DECODE pr_info("%s: decoding @offset %u:\n", DEVICE_NAME, read_buffer_size); #endif err = usb_if->device_msg_decode(usb_if, read_buffer_addr, usb_if->read_packet_size); if (err < 0) { #ifdef PCAN_USB_DEBUG_DECODE if (net_ratelimit()) pr_err("%s: offset %d: msg decoding error %d\n", DEVICE_NAME, read_buffer_size, 
err); #endif /* no need to continue because error can be: * - not enough space in rx fifo * - decoding is out of sync. */ break; } read_buffer_addr += usb_if->read_packet_size; read_buffer_size += usb_if->read_packet_size; } } /* USB write functions */ static int pcan_usb_write(struct pcandev *dev, struct pcan_udata *ctx) { struct pcan_usb_interface *usb_if = pcan_usb_get_if(dev); USB_PORT *u = &dev->port.usb; int err = 0; u8 *write_buffer_addr = u->write_buffer_addr; int write_packet_size; int write_buffer_size; #ifdef PCAN_USES_OLD_TX_ENGINE_STATE pcan_lock_irqsave_ctxt lck_ctx; #endif #if 0 DPRINTK(KERN_DEBUG "%s: %s(CAN%u)\n", DEVICE_NAME, __func__, dev->nChannel+1); #endif /* don't do anything with non-existent hardware */ if (!dev->ucPhysicallyInstalled) return -ENODEV; #ifdef PCAN_USES_OLD_TX_ENGINE_STATE pcan_lock_get_irqsave(&dev->wlock, lck_ctx); #endif for (write_buffer_size=0; write_buffer_size < u->write_buffer_size; ) { write_packet_size = u->write_packet_size; err = usb_if->device_ctrl_msg_encode(dev, write_buffer_addr, &write_packet_size); if (err >= 0) { write_buffer_size += u->write_packet_size; write_buffer_addr += u->write_packet_size; #ifdef PCAN_USB_DEBUG_WRITE printk(KERN_INFO "%s: encoded %u bytes in %u bytes packet\n", DEVICE_NAME, write_packet_size, u->write_packet_size); #endif continue; } #ifdef PCAN_USB_DEBUG_WRITE printk(KERN_INFO "%s: err=%d: total=%u/%u\n", DEVICE_NAME, err, write_buffer_size, u->write_buffer_size); #endif switch (err) { case -ENODATA: write_buffer_size += write_packet_size; break; case -ENOSPC: write_buffer_size += write_packet_size; err = 0; break; default: break; } break; } if (write_buffer_size > 0) { #ifdef PCAN_USB_DEBUG_WRITE dump_mem("message sent to device", u->write_buffer_addr, write_buffer_size); printk(KERN_INFO "%s: submitting %u bytes buffer to usb EP#%d\n", DEVICE_NAME, write_buffer_size, u->pipe_write.ucNumber); #endif FILL_BULK_URB(&u->write_data, usb_if->usb_dev, usb_sndbulkpipe(usb_if->usb_dev, 
u->pipe_write.ucNumber),
		u->write_buffer_addr, write_buffer_size,
		pcan_usb_write_notify, dev);

		/* remember the USB device is BUSY */
		pcan_set_tx_engine(dev, TX_ENGINE_STARTED);

		/* start next urb */
		err = __usb_submit_urb(&u->write_data);
		if (err) {
			dev->nLastError = err;
			dev->dwErrorCounter++;
			printk(KERN_ERR "%s: %s() URB submit failure %d\n",
				DEVICE_NAME, __func__, err);
		} else {
			//dev->wCANStatus &= ~CAN_ERR_QXMTFULL;
			pcan_clear_status_bit(dev, CAN_ERR_QXMTFULL);
			atomic_inc(&usb_if->active_urbs);
		}
	}

#ifdef PCAN_USES_OLD_TX_ENGINE_STATE
	pcan_lock_put_irqrestore(&dev->wlock, lck_ctx);
#else
	/* a real submit failure (anything but "already busy") stops the
	 * Tx engine; -EBUSY keeps it in its current state */
	if (err && (err != -EBUSY))
		pcan_set_tx_engine(dev, TX_ENGINE_STOPPED);
#endif
	return err;
}

/* Release the per-channel write/command buffers and the shared read buffer.
 * Counterpart of pcan_usb_alloc_resources(). Only read_buffer_addr[0] is
 * freed: [1] points into the same single allocation (see alloc below). */
static void pcan_usb_free_resources(struct pcan_usb_interface *usb_if)
{
	struct pcandev *dev = &usb_if->dev[0];
	USB_PORT *u;
	int c;

	for (c = 0; c < usb_if->can_count; c++, dev++) {
		u = &dev->port.usb;
		pcan_free(u->write_buffer_addr);
		pcan_free(u->cout_baddr);
	}

	pcan_free(usb_if->read_buffer_addr[0]);
}

/* usb resource allocation
 *
 * Selects per-product USB packet/buffer sizes, then allocates one write
 * buffer (and optional command-out buffer) per CAN channel and a single
 * double-sized read buffer shared by the whole interface.
 * Returns 0 or -ENOMEM. On failure, buffers already allocated are NOT
 * released here; the caller is expected to clean up
 * (NOTE(review): verify the caller does — see pcan_usb_plugin()'s
 * 'goto reject' path). */
static int pcan_usb_alloc_resources(struct pcan_usb_interface *usb_if)
{
	struct pcandev *dev;
	USB_PORT *u;
	int err = 0;
	int c;

	DPRINTK(KERN_DEBUG "%s: %s()\n", DEVICE_NAME, __func__);

	/* make param URB */
#ifndef PCAN_USB_CMD_PER_DEV
	usb_init_urb(&usb_if->urb_cmd_async);
#endif
	usb_init_urb(&usb_if->urb_cmd_sync);

	/* allocate write buffer
	 * Check revision according to device id. */
	switch (le16_to_cpu(usb_if->usb_dev->descriptor.idProduct)) {
	case PCAN_USBFD_PRODUCT_ID:
	case PCAN_USBCHIP_PRODUCT_ID:
		/* single-channel CAN-FD devices */
		usb_if->read_packet_size = 4096;
		usb_if->dev[0].port.usb.write_packet_size = 512;
		usb_if->read_buffer_size = usb_if->read_packet_size;
		usb_if->dev[0].port.usb.write_buffer_size =
				usb_if->dev[0].port.usb.write_packet_size;
		usb_if->dev[0].port.usb.cout_bsize = 512;
		break;

	case PCAN_USBPROFD_PRODUCT_ID:
		/* dual-channel CAN-FD device */
		usb_if->read_packet_size = 4096;
		usb_if->dev[0].port.usb.write_packet_size =
			usb_if->dev[1].port.usb.write_packet_size = 512;
		usb_if->read_buffer_size = usb_if->read_packet_size;
		usb_if->dev[0].port.usb.write_buffer_size =
				usb_if->dev[0].port.usb.write_packet_size;
		usb_if->dev[1].port.usb.write_buffer_size =
				usb_if->dev[1].port.usb.write_packet_size;
		usb_if->dev[0].port.usb.cout_bsize =
			usb_if->dev[1].port.usb.cout_bsize = 512;
		break;

#ifdef PCAN_USBX6_PRODUCT_ID
	case PCAN_USBX6_PRODUCT_ID:
		/* same layout as the PRO-FD: 2 channels per USB interface */
		usb_if->read_packet_size = 4096;
		usb_if->dev[0].port.usb.write_packet_size =
			usb_if->dev[1].port.usb.write_packet_size = 512;
		usb_if->read_buffer_size = usb_if->read_packet_size;
		usb_if->dev[0].port.usb.write_buffer_size =
				usb_if->dev[0].port.usb.write_packet_size;
		usb_if->dev[1].port.usb.write_buffer_size =
				usb_if->dev[1].port.usb.write_packet_size;
		usb_if->dev[0].port.usb.cout_bsize =
			usb_if->dev[1].port.usb.cout_bsize = 512;
		break;
#endif

	case PCAN_USBPRO_PRODUCT_ID:
		/* Rev 0x00 */
		/* Copied from Win32 Driver:
		 * DeviceContext->IsDeviceHighSpeed ? 512 : 64
		 * 512 bytes packet size leads to fragmentation issue while
		 * messages are 1024 bytes large */
		if (usb_if->usb_dev->speed == USB_SPEED_HIGH) {
			usb_if->read_packet_size = 1024;
			usb_if->dev[0].port.usb.write_packet_size =
				usb_if->dev[1].port.usb.write_packet_size = 512;
		} else {
			usb_if->read_packet_size = 64;
			usb_if->dev[0].port.usb.write_packet_size =
				usb_if->dev[1].port.usb.write_packet_size = 64;
		}

		usb_if->dev[0].port.usb.cout_bsize = PCAN_USB_WRITE_PACKET_SIZE;
		usb_if->dev[1].port.usb.cout_bsize = PCAN_USB_WRITE_PACKET_SIZE;

#ifdef PCAN_USBPRO_READ_BUFFER_SIZE
		usb_if->read_buffer_size = PCAN_USBPRO_READ_BUFFER_SIZE;
#else
		usb_if->read_buffer_size = usb_if->read_packet_size;
#endif

#ifdef PCAN_USBPRO_WRITE_BUFFER_SIZE
		usb_if->dev[0].port.usb.write_buffer_size =
					PCAN_USBPRO_WRITE_BUFFER_SIZE;
		usb_if->dev[1].port.usb.write_buffer_size =
					PCAN_USBPRO_WRITE_BUFFER_SIZE;
#else
		usb_if->dev[0].port.usb.write_buffer_size =
				usb_if->dev[0].port.usb.write_packet_size;
		usb_if->dev[1].port.usb.write_buffer_size =
				usb_if->dev[1].port.usb.write_packet_size;
#endif
		break;

	case PCAN_USB_PRODUCT_ID:
		usb_if->dev[0].port.usb.cout_bsize = PCAN_USB_WRITE_PACKET_SIZE;
		if (usb_if->ucRevision >= 7) {
			usb_if->read_buffer_size = PCAN_USB_READ_BUFFER_SIZE;
			usb_if->dev[0].port.usb.write_buffer_size =
						PCAN_USB_WRITE_BUFFER_SIZE;
			usb_if->read_packet_size = PCAN_USB_READ_PACKET_SIZE;
			usb_if->dev[0].port.usb.write_packet_size =
						PCAN_USB_WRITE_PACKET_SIZE;
			break;
		}
		/* revisions < 7 intentionally fall through to 'default'
		 * (old, smaller buffer sizes) */
	default:
		usb_if->read_buffer_size = PCAN_USB_READ_BUFFER_SIZE_OLD;
		usb_if->dev[0].port.usb.write_buffer_size =
					PCAN_USB_WRITE_BUFFER_SIZE_OLD;
		usb_if->read_packet_size = PCAN_USB_READ_PACKET_SIZE;
		usb_if->dev[0].port.usb.write_packet_size =
					PCAN_USB_WRITE_PACKET_SIZE;
		break;
	}

	dev = &usb_if->dev[0];
	for (c = 0; c < usb_if->can_count; c++, dev++) {
		u = &dev->port.usb;

#ifdef PCAN_USB_CMD_PER_DEV
		/* make param URB */
		usb_init_urb(&u->urb_cmd_sync);
		usb_init_urb(&u->urb_cmd_async);
#endif
		u->write_buffer_addr = pcan_malloc(u->write_buffer_size,
							GFP_KERNEL);
		if (!u->write_buffer_addr) {
			err = -ENOMEM;
			goto fail;
		}

		DPRINTK(KERN_DEBUG
			"%s: %s() allocate %d bytes buffer for writing\n",
			DEVICE_NAME, __func__, u->write_buffer_size);

		/* make write urb */
		usb_init_urb(&u->write_data);

		if (u->cout_bsize) {
			u->cout_baddr = pcan_malloc(u->cout_bsize, GFP_KERNEL);
			if (!u->cout_baddr) {
				err = -ENOMEM;
				goto fail;
			}
		} else {
			u->cout_baddr = NULL;
		}
	}

	/* allocate two read buffers for URB */
	usb_if->read_buffer_addr[0] = pcan_malloc(usb_if->read_buffer_size * 2,
							GFP_KERNEL);
	if (!usb_if->read_buffer_addr[0]) {
		err = -ENOMEM;
		goto fail;
	}

	DPRINTK(KERN_DEBUG
		"%s: %s() allocate %d buffers of %d bytes for reading\n",
		DEVICE_NAME, __func__, 2, usb_if->read_buffer_size);

	/* second read buffer is the upper half of the same allocation */
	usb_if->read_buffer_addr[1] = usb_if->read_buffer_addr[0] +
						usb_if->read_buffer_size;

	/* make read urb */
	usb_init_urb(&usb_if->read_data);

fail:
	return err;
}

/* Cancel a still-in-flight URB; no-op if it already completed.
 * Returns 0 on modern kernels (usb_kill_urb() returns void), or the
 * usb_unlink_urb() result on kernels < 2.6.10. */
static int pcan_kill_sync_urb(struct urb *urb)
{
	int err = 0;

	if (urb->status == -EINPROGRESS) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
		usb_kill_urb(urb);
#else
		err = usb_unlink_urb(urb);
#endif
		DPRINTK(KERN_DEBUG "%s: %s() done...\n",
			DEVICE_NAME, __func__);
	}

	return err;
}

/* Stop one CAN channel: close the controller, drop the opened count,
 * cancel its pending write URB, then set the bus off.
 * Returns 0 if the device was never opened, otherwise the bus-off result. */
static int pcan_usb_stop(struct pcandev *dev)
{
	struct pcan_usb_interface *usb_if = pcan_usb_get_if(dev);
	USB_PORT *u = &dev->port.usb;
	int err = 0;

	DPRINTK(KERN_DEBUG "%s: %s(CAN%u), minor=%d\n",
		DEVICE_NAME, __func__, dev->nChannel+1, dev->nMinor);

#if 0
	if (!usb_if) {
		pr_info(DEVICE_NAME "%s(%u): usb_if=NULL\n",
			__func__, __LINE__);
		return -ENODEV;
	}
#endif
	if (!(dev->flags & PCAN_DEV_OPENED))
		return 0;

	if (usb_if->device_ctrl_close)
		err = usb_if->device_ctrl_close(dev);

	if (usb_if->opened_count > 0)
		usb_if->opened_count--;

	/* unlink URBs for device/controller */
	pcan_kill_sync_urb(&u->write_data);

	DPRINTK(KERN_DEBUG "%s: have still %d active URBs on interface\n",
		DEVICE_NAME, atomic_read(&usb_if->active_urbs));

	return usb_if->device_ctrl_set_bus_off(dev);
}

/* remove device resources */
static int pcan_usb_cleanup(struct pcandev *dev) { if (dev) { USB_PORT *u = &dev->port.usb; DPRINTK(KERN_DEBUG "%s: %s(CAN%u): wInitStep=%d\n", DEVICE_NAME, __func__, dev->nChannel+1, dev->wInitStep); pcan_free(u->write_buffer_addr); u->write_buffer_addr = NULL; switch(dev->wInitStep) { case 4: dev->ucPhysicallyInstalled = 0; #if 1 /* Hem... These events are normally "destroyed" when * the device was closed... ("normally" because the * 'pcan_event' is in fact not destroyed nor deleted * when running in non-RT... */ #else /* New: unlock any waiting task */ pcan_event_signal(&dev->out_event); pcan_event_signal(&dev->in_event); #endif #ifdef NETDEV_SUPPORT pcan_netdev_unregister(dev); #endif case 3: usb_devices--; case 2: pcan_dev_remove_from_list(dev); case 1: case 0: dev->filter = pcan_delete_filter_chain(dev->filter); } } else { DPRINTK(KERN_DEBUG "%s: %s(NULL dev)\n", DEVICE_NAME, __func__); } return 0; } #if 0 /* dummy entries for request and free irq */ static int pcan_usb_req_irq(struct pcandev *dev, struct pcan_udata *dev_priv) { return 0; } #endif static void pcan_usb_free_irq(struct pcandev *dev, struct pcan_udata *dev_priv) { DPRINTK(KERN_DEBUG "%s: %s()\n", DEVICE_NAME, __func__); /* mis-used here for another purpose * pcan_usb_free_irq() calls when the last path to device just closing * and the device itself is already plugged out */ if ((dev) && (!dev->ucPhysicallyInstalled)) pcan_usb_cleanup(dev); } /* interface depended open and close */ static int pcan_usb_open(struct pcandev *dev) { DPRINTK(KERN_DEBUG "%s: %s(), minor = %d.\n", DEVICE_NAME, __func__, dev->nMinor); return 0; } static int pcan_usb_release(struct pcandev *dev) { DPRINTK(KERN_DEBUG "%s: %s(), minor = %d.\n", DEVICE_NAME, __func__, dev->nMinor); return 0; } static int pcan_usb_device_open_fd(struct pcandev *dev, struct pcanfd_init *pfdi) { struct pcan_usb_interface *usb_if = pcan_usb_get_if(dev); USB_PORT *u = &dev->port.usb; int err = 0; DPRINTK(KERN_DEBUG "%s: %s(), minor = %d. 
(nOpenPaths=%d)\n", DEVICE_NAME, __func__, dev->nMinor, dev->nOpenPaths); #if 1 /* TODO SGr 20160324: I think the below test is useless, since the * "device_open()" callabck is called *ONLY* once, when * "dev->nOpenPaths" is 0... */ #else /* in general, when second open() occurs * remove and unlink urbs, when interface is already running */ if ((dev->nOpenPaths) && (dev->device_release)) dev->device_release(dev); else #endif /* otherwise, first action: turn CAN off */ if ((err = usb_if->device_ctrl_set_bus_off(dev))) goto fail; memset(&u->usb_time, '\0', sizeof(PCAN_USB_TIME)); /* init hardware specific parts */ if (usb_if->device_ctrl_open_fd) { err = usb_if->device_ctrl_open_fd(dev, pfdi); } else { TPCANInit init; pcan_fd_to_init(&init, pfdi); err = usb_if->device_ctrl_open(dev, init.wBTR0BTR1, init.ucCANMsgType & MSGTYPE_EXTENDED, init.ucListenOnly); } if (err) goto fail; usb_if->opened_count++; /* last action: turn CAN on */ if ((err = usb_if->device_ctrl_set_bus_on(dev))) goto fail; /* delay to get first messages read */ set_current_state(TASK_INTERRUPTIBLE); schedule_timeout((int)(STARTUP_WAIT_TIME * HZ + 0.9)); fail: return err; } /* emulated device access functions * call is only possible if device exists */ static int pcan_usb_device_open(struct pcandev *dev, uint16_t btr0btr1, u8 bExtended, u8 bListenOnly) { struct pcanfd_init fd_init; TPCANInit init = { .wBTR0BTR1 = btr0btr1, .ucCANMsgType = bExtended ? 
MSGTYPE_EXTENDED : MSGTYPE_STANDARD, .ucListenOnly = bListenOnly }; return pcan_usb_device_open_fd(dev, pcan_init_to_fd(&fd_init, &init)); } static void pcan_usb_device_release(struct pcandev *dev) { DPRINTK(KERN_DEBUG "%s: %s(), minor=%d (nOpenPaths=%d).\n", DEVICE_NAME, __func__, dev->nMinor, dev->nOpenPaths); /* test only mdelay(100); */ pcan_usb_stop(dev); } /* get or set special device related parameters */ static int pcan_usb_device_params(struct pcandev *dev, TPEXTRAPARAMS *params) { struct pcan_usb_interface *usb_if = pcan_usb_get_if(dev); USB_PORT *u = &dev->port.usb; int err; DPRINTK(KERN_DEBUG "%s: %s(%d)\n", DEVICE_NAME, __func__, params->nSubFunction); switch (params->nSubFunction) { case SF_GET_SERIALNUMBER: err = usb_if->device_get_snr(usb_if, &params->func.dwSerialNumber); break; case SF_GET_HCDEVICENO: /* can cast to u32 * since "func" is an union with * dwSerialNumber */ err = usb_if->device_ctrl_get_dnr(dev, //(u32 *)&params->func.ucHCDeviceNo); &params->func.dwSerialNumber); break; case SF_SET_HCDEVICENO: /* * err = usb_if->device_ctrl_set_dnr(dev, * params->func.ucHCDeviceNo); */ err = usb_if->device_ctrl_set_dnr(dev, params->func.dwSerialNumber); /* Should update dev object cache with new value * (see /dev/pcan display)*/ if (!err) { u->ucHardcodedDevNr = params->func.ucHCDeviceNo; #if 1 /* why not caching full 32b value in device_alt_num? 
*/ dev->device_alt_num = params->func.dwSerialNumber; #else dev->device_alt_num = u->ucHardcodedDevNr; #endif } break; default: DPRINTK(KERN_DEBUG "%s: Unknown sub-function %d!\n", DEVICE_NAME, params->nSubFunction); return -EINVAL; } return err; } /* things to do after plugin or plugout of device (and power on too) */ #ifndef PCAN_USB_PCAN_SYSFS #define PCAN_DEVICE_ATTR(_v, _name, _show) \ struct device_attribute pcan_dev_attr_##_v = \ __ATTR(_name, S_IRUGO, _show, NULL) static struct pcandev *to_pcandev(struct device *dev, int c) { struct usb_interface *interface = to_usb_interface(dev->parent); struct pcan_usb_interface *usb_if = usb_get_intfdata(interface); return (struct pcandev *)&usb_if->dev[c]; } static ssize_t show_pcan_x_hwtype(int can_idx, struct device *dev, struct device_attribute *attr, char *buf) { return show_u32(buf, to_pcandev(dev, can_idx)->wType); } static ssize_t show_pcan_x_devid(int can_idx, struct device *dev, struct device_attribute *attr, char *buf) { return show_u32(buf, to_pcandev(dev, can_idx)->device_alt_num); } static ssize_t show_pcan_x_minor(int can_idx, struct device *dev, struct device_attribute *attr, char *buf) { return show_int(buf, to_pcandev(dev, can_idx)->nMinor); } static ssize_t show_pcan_x_ctrl_number(int can_idx, struct device *dev, struct device_attribute *attr, char *buf) { return show_int(buf, to_pcandev(dev, can_idx)->nChannel); } static ssize_t show_pcan_x_bitrate(int can_idx, struct device *dev, struct device_attribute *attr, char *buf) { return show_u32(buf, to_pcandev(dev, can_idx)->init_settings.nominal.bitrate); } static ssize_t show_pcan_x_clock(int can_idx, struct device *dev, struct device_attribute *attr, char *buf) { return show_u32(buf, to_pcandev(dev, can_idx)->init_settings.clock_Hz); } static ssize_t show_pcan_x_bus_state(int can_idx, struct device *dev, struct device_attribute *attr, char *buf) { return show_u32(buf, to_pcandev(dev, can_idx)->bus_state); } static ssize_t show_pcan_x_rx_err_cnt(int 
can_idx, struct device *dev, struct device_attribute *attr, char *buf) { return show_u32(buf, to_pcandev(dev, can_idx)->rx_error_counter); } static ssize_t show_pcan_x_tx_err_cnt(int can_idx, struct device *dev, struct device_attribute *attr, char *buf) { return show_u32(buf, to_pcandev(dev, can_idx)->tx_error_counter); } static ssize_t show_pcan_x_bus_load(int can_idx, struct device *dev, struct device_attribute *attr, char *buf) { return show_u32(buf, to_pcandev(dev, can_idx)->bus_load); } static ssize_t show_pcan_x_dbitrate(int can_idx, struct device *dev, struct device_attribute *attr, char *buf) { return show_u32(buf, to_pcandev(dev, can_idx)->init_settings.data.bitrate); } /* /proc/pcan redundant */ static ssize_t show_pcan_x_type(int can_idx, struct device *dev, struct device_attribute *attr, char *buf) { return show_str(buf, to_pcandev(dev, can_idx)->type); } #ifdef NETDEV_SUPPORT static ssize_t show_pcan_x_ndev(int can_idx, struct device *dev, struct device_attribute *attr, char *buf) { struct pcandev *pdev = to_pcandev(dev, can_idx); return show_str(buf, pdev->netdev ? 
pdev->netdev->name : "can?"); } #endif /* like with /proc/pcan, display Serial/Number instead */ static ssize_t show_pcan_x_serial_number(int can_idx, struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "0x%x\n", to_pcandev(dev, can_idx)->port.usb.usb_if->dwSerialNumber); } #if 1 /* don't display /proc/pcan useless irq column, since devid already exists */ #else static ssize_t show_pcan_x_irq(int can_idx, struct device *dev, struct device_attribute *attr, char *buf) { return show_u32(buf, to_pcandev(dev, can_idx)->port.usb.ucHardcodedDevNr); } #endif static ssize_t show_pcan_x_btr0btr1(int can_idx, struct device *dev, struct device_attribute *attr, char *buf) { struct pcandev *pdev = to_pcandev(dev, can_idx); u32 dev_btr0btr1 = sja1000_bitrate(pdev->init_settings.nominal.bitrate, pdev->init_settings.nominal.sample_point); return snprintf(buf, PAGE_SIZE, "0x%04x\n", dev_btr0btr1); } static ssize_t show_pcan_x_read(int can_idx, struct device *dev, struct device_attribute *attr, char *buf) { struct pcandev *pdev = to_pcandev(dev, can_idx); #ifdef NETDEV_SUPPORT struct net_device_stats *stats = (pdev->netdev) ? pcan_netdev_get_stats(dev->netdev) : NULL; u32 dev_read = (stats) ? stats->rx_packets : 0; #else u32 dev_read = pdev->readFifo.dwTotal; #endif return show_u32(buf, dev_read); } static ssize_t show_pcan_x_write(int can_idx, struct device *dev, struct device_attribute *attr, char *buf) { struct pcandev *pdev = to_pcandev(dev, can_idx); #ifdef NETDEV_SUPPORT struct net_device_stats *stats = (pdev->netdev) ? pcan_netdev_get_stats(dev->netdev) : NULL; u32 dev_write = (stats) ? 
stats->tx_packets : 0; #else u32 dev_write = pdev->writeFifo.dwTotal; #endif return show_u32(buf, dev_write); } static ssize_t show_pcan_x_irqs(int can_idx, struct device *dev, struct device_attribute *attr, char *buf) { return show_u32(buf, to_pcandev(dev, can_idx)->dwInterruptCounter); } static ssize_t show_pcan_x_errors(int can_idx, struct device *dev, struct device_attribute *attr, char *buf) { return show_u32(buf, to_pcandev(dev, can_idx)->dwErrorCounter); } static ssize_t show_pcan_x_status(int can_idx, struct device *dev, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "0x%04x\n", to_pcandev(dev, can_idx)->wCANStatus); } /* * per-channel 'show' callbacks */ #define PCAN_DEFINE_ATTR_SHOW(_name, _c) \ static ssize_t show_pcan_##_name##_c(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ return show_pcan_x_##_name(_c, dev, attr, buf); \ } /* Channel #0 show callbacks definitions */ PCAN_DEFINE_ATTR_SHOW(hwtype, 0) PCAN_DEFINE_ATTR_SHOW(devid, 0) PCAN_DEFINE_ATTR_SHOW(minor, 0) PCAN_DEFINE_ATTR_SHOW(ctrl_number, 0) PCAN_DEFINE_ATTR_SHOW(bitrate, 0) PCAN_DEFINE_ATTR_SHOW(clock, 0) PCAN_DEFINE_ATTR_SHOW(bus_state, 0) PCAN_DEFINE_ATTR_SHOW(rx_err_cnt, 0) PCAN_DEFINE_ATTR_SHOW(tx_err_cnt, 0) PCAN_DEFINE_ATTR_SHOW(bus_load, 0) PCAN_DEFINE_ATTR_SHOW(dbitrate, 0) /* /proc/pcan redundant */ PCAN_DEFINE_ATTR_SHOW(type, 0) #ifdef NETDEV_SUPPORT PCAN_DEFINE_ATTR_SHOW(ndev, 0) #endif PCAN_DEFINE_ATTR_SHOW(serial_number, 0) PCAN_DEFINE_ATTR_SHOW(btr0btr1, 0) PCAN_DEFINE_ATTR_SHOW(read, 0) PCAN_DEFINE_ATTR_SHOW(write, 0) PCAN_DEFINE_ATTR_SHOW(irqs, 0) PCAN_DEFINE_ATTR_SHOW(errors, 0) PCAN_DEFINE_ATTR_SHOW(status, 0) /* Channel #1 show callbacks definitions */ PCAN_DEFINE_ATTR_SHOW(hwtype, 1) PCAN_DEFINE_ATTR_SHOW(devid, 1) PCAN_DEFINE_ATTR_SHOW(minor, 1) PCAN_DEFINE_ATTR_SHOW(ctrl_number, 1) PCAN_DEFINE_ATTR_SHOW(bitrate, 1) PCAN_DEFINE_ATTR_SHOW(clock, 1) PCAN_DEFINE_ATTR_SHOW(bus_state, 1) PCAN_DEFINE_ATTR_SHOW(rx_err_cnt, 1) 
PCAN_DEFINE_ATTR_SHOW(tx_err_cnt, 1) PCAN_DEFINE_ATTR_SHOW(bus_load, 1) PCAN_DEFINE_ATTR_SHOW(dbitrate, 1) /* /proc/pcan redundant */ PCAN_DEFINE_ATTR_SHOW(type, 1) #ifdef NETDEV_SUPPORT PCAN_DEFINE_ATTR_SHOW(ndev, 1) #endif PCAN_DEFINE_ATTR_SHOW(serial_number, 1) PCAN_DEFINE_ATTR_SHOW(btr0btr1, 1) PCAN_DEFINE_ATTR_SHOW(read, 1) PCAN_DEFINE_ATTR_SHOW(write, 1) PCAN_DEFINE_ATTR_SHOW(irqs, 1) PCAN_DEFINE_ATTR_SHOW(errors, 1) PCAN_DEFINE_ATTR_SHOW(status, 1) /* Channel #0 attributes declaration */ static PCAN_DEVICE_ATTR(devid0, devid, show_pcan_devid0); static PCAN_DEVICE_ATTR(hwtype0, hwtype, show_pcan_hwtype0); static PCAN_DEVICE_ATTR(minor0, minor, show_pcan_minor0); static PCAN_DEVICE_ATTR(ctrl_number0, ctrl_number, show_pcan_ctrl_number0); static PCAN_DEVICE_ATTR(bitrate0, bitrate, show_pcan_bitrate0); static PCAN_DEVICE_ATTR(clock0, clock, show_pcan_clock0); static PCAN_DEVICE_ATTR(bus_state0, bus_state, show_pcan_bus_state0); /* /proc/pcan redundant */ static PCAN_DEVICE_ATTR(type0, type, show_pcan_type0); #ifdef NETDEV_SUPPORT static PCAN_DEVICE_ATTR(ndev0, ndev, show_pcan_ndev0); #endif static PCAN_DEVICE_ATTR(serial_number0, serial_number, show_pcan_serial_number0); static PCAN_DEVICE_ATTR(btr0btr10, btr0btr1, show_pcan_btr0btr10); static PCAN_DEVICE_ATTR(read0, read, show_pcan_read0); static PCAN_DEVICE_ATTR(write0, write, show_pcan_write0); static PCAN_DEVICE_ATTR(irqs0, irqs, show_pcan_irqs0); static PCAN_DEVICE_ATTR(errors0, errors, show_pcan_errors0); static PCAN_DEVICE_ATTR(status0, status, show_pcan_status0); /* Channel #1 attributes declaration */ static PCAN_DEVICE_ATTR(devid1, devid, show_pcan_devid1); static PCAN_DEVICE_ATTR(hwtype1, hwtype, show_pcan_hwtype1); static PCAN_DEVICE_ATTR(minor1, minor, show_pcan_minor1); static PCAN_DEVICE_ATTR(ctrl_number1, ctrl_number, show_pcan_ctrl_number1); static PCAN_DEVICE_ATTR(bitrate1, bitrate, show_pcan_bitrate1); static PCAN_DEVICE_ATTR(clock1, clock, show_pcan_clock1); static 
PCAN_DEVICE_ATTR(bus_state1, bus_state, show_pcan_bus_state1); /* /proc/pcan redundant */ static PCAN_DEVICE_ATTR(type1, type, show_pcan_type1); #ifdef NETDEV_SUPPORT static PCAN_DEVICE_ATTR(ndev1, ndev, show_pcan_ndev1); #endif static PCAN_DEVICE_ATTR(serial_number1, serial_number, show_pcan_serial_number1); static PCAN_DEVICE_ATTR(btr0btr11, btr0btr1, show_pcan_btr0btr11); static PCAN_DEVICE_ATTR(read1, read, show_pcan_read1); static PCAN_DEVICE_ATTR(write1, write, show_pcan_write1); static PCAN_DEVICE_ATTR(irqs1, irqs, show_pcan_irqs1); static PCAN_DEVICE_ATTR(errors1, errors, show_pcan_errors1); static PCAN_DEVICE_ATTR(status1, status, show_pcan_status1); #ifdef NETDEV_SUPPORT #define PCAN_DEV_ATTRS_COUNT 20 #else #define PCAN_DEV_ATTRS_COUNT 19 #endif static struct attribute *pcan_usb_sysfs_attrs[][PCAN_DEV_ATTRS_COUNT] = { { &pcan_dev_attr_devid0.attr, &pcan_dev_attr_hwtype0.attr, &pcan_dev_attr_minor0.attr, &pcan_dev_attr_ctrl_number0.attr, &pcan_dev_attr_bitrate0.attr, &pcan_dev_attr_clock0.attr, &pcan_dev_attr_bus_state0.attr, /* /proc/pcan redundant */ &pcan_dev_attr_type0.attr, #ifdef NETDEV_SUPPORT &pcan_dev_attr_ndev0.attr, #endif &pcan_dev_attr_serial_number0.attr, &pcan_dev_attr_btr0btr10.attr, &pcan_dev_attr_read0.attr, &pcan_dev_attr_write0.attr, &pcan_dev_attr_irqs0.attr, &pcan_dev_attr_errors0.attr, &pcan_dev_attr_status0.attr, NULL }, { &pcan_dev_attr_devid1.attr, &pcan_dev_attr_hwtype1.attr, &pcan_dev_attr_minor1.attr, &pcan_dev_attr_ctrl_number1.attr, &pcan_dev_attr_bitrate1.attr, &pcan_dev_attr_clock1.attr, &pcan_dev_attr_bus_state1.attr, /* /proc/pcan redundant */ &pcan_dev_attr_type1.attr, #ifdef NETDEV_SUPPORT &pcan_dev_attr_ndev1.attr, #endif &pcan_dev_attr_serial_number1.attr, &pcan_dev_attr_btr0btr11.attr, &pcan_dev_attr_read1.attr, &pcan_dev_attr_write1.attr, &pcan_dev_attr_irqs1.attr, &pcan_dev_attr_errors1.attr, &pcan_dev_attr_status1.attr, NULL }, }; static PCAN_DEVICE_ATTR(dbitrate0, dbitrate, show_pcan_dbitrate0); static 
PCAN_DEVICE_ATTR(dbitrate1, dbitrate, show_pcan_dbitrate1); static struct attribute *pcanfd_usb_sysfs_attrs[][2] = { { &pcan_dev_attr_dbitrate0.attr, NULL }, { &pcan_dev_attr_dbitrate1.attr, NULL }, }; static PCAN_DEVICE_ATTR(bus_load0, bus_load, show_pcan_bus_load0); static PCAN_DEVICE_ATTR(bus_load1, bus_load, show_pcan_bus_load1); static struct attribute *pcan_usb_sysfs_bus_load_attrs[][2] = { { &pcan_dev_attr_bus_load0.attr, NULL }, { &pcan_dev_attr_bus_load1.attr, NULL }, }; static PCAN_DEVICE_ATTR(rx_err_cnt0, rx_error_counter, show_pcan_rx_err_cnt0); static PCAN_DEVICE_ATTR(rx_err_cnt1, rx_error_counter, show_pcan_rx_err_cnt1); static PCAN_DEVICE_ATTR(tx_err_cnt0, tx_error_counter, show_pcan_tx_err_cnt0); static PCAN_DEVICE_ATTR(tx_err_cnt1, tx_error_counter, show_pcan_tx_err_cnt1); static struct attribute *pcan_usb_sysfs_err_cnt_attrs[][3] = { { &pcan_dev_attr_rx_err_cnt0.attr, &pcan_dev_attr_tx_err_cnt0.attr, NULL }, { &pcan_dev_attr_rx_err_cnt1.attr, &pcan_dev_attr_tx_err_cnt1.attr, NULL }, }; #endif /* PCAN_USB_PCAN_SYSFS */ static int pcan_usb_get_devid(struct pcandev *dev, struct pcanfd_option *opt, void *c) { struct pcan_usb_interface *usb_if = pcan_usb_get_if(dev); u32 dev_id; int err = usb_if->device_ctrl_get_dnr(dev, &dev_id); if (err) { pr_err(DEVICE_NAME ": %s() err %d getting dev number from %s CAN%d\n", __func__, err, dev->adapter->name, dev->nChannel+1); return err; } opt->size = sizeof(dev_id); if (pcan_copy_to_user(opt->value, &dev_id, opt->size, c)) { pr_err(DEVICE_NAME ": %s(): copy_to_user() failure\n", __func__); return -EFAULT; } return 0; } static int pcan_usb_set_devid(struct pcandev *dev, struct pcanfd_option *opt, void *c) { struct pcan_usb_interface *usb_if = pcan_usb_get_if(dev); USB_PORT *u = &dev->port.usb; u32 dev_id; int err = pcan_copy_from_user(&dev_id, opt->value, sizeof(u32), c); if (err) { pr_err(DEVICE_NAME ": %s(): copy_from_user() failure\n", __func__); return -EFAULT; } err = usb_if->device_ctrl_set_dnr(dev, dev_id); 
if (err) { pr_err(DEVICE_NAME ": %s() err %d setting dev number to %s CAN%d\n", __func__, err, dev->adapter->name, dev->nChannel+1); return err; } /* Should update dev object cache with new value * (see /dev/pcan display) */ u->ucHardcodedDevNr = (u8 )dev_id; dev->device_alt_num = dev_id; //u->ucHardcodedDevNr; return 0; } /* USB device specific options */ static struct pcanfd_options pcan_usb_opts[PCANFD_OPT_MAX] = { [PCANFD_OPT_DEVICE_ID] = { .req_size = sizeof(u32), .get = pcan_usb_get_devid, .set = pcan_usb_set_devid, }, }; static int pcan_usb_create_dev(struct pcan_usb_interface *usb_if, int ctrl_index) { struct pcandev *dev = (struct pcandev *)&usb_if->dev[ctrl_index]; struct usb_device *usb_dev = usb_if->usb_dev; USB_PORT *u = &dev->port.usb; int err, retry; #ifndef PCAN_USB_DONT_REGISTER_DEV void *h; #endif switch (le16_to_cpu(usb_dev->descriptor.idProduct)) { case PCAN_USBFD_PRODUCT_ID: case PCAN_USBCHIP_PRODUCT_ID: /* init structure elements to defaults */ ucan_soft_init(dev, "usbfd", HW_USB_FD, usb_if->adapter); break; case PCAN_USBPROFD_PRODUCT_ID: /* init structure elements to defaults */ ucan_soft_init(dev, "usbfd", HW_USB_PRO_FD, usb_if->adapter); break; #ifdef PCAN_USBX6_PRODUCT_ID case PCAN_USBX6_PRODUCT_ID: /* init structure elements to defaults */ ucan_soft_init(dev, "usbfd", HW_USB_X6, usb_if->adapter); break; #endif case PCAN_USBPRO_PRODUCT_ID: /* init structure elements to defaults */ pcan_soft_init_ex(dev, "usb", HW_USB_PRO, (const struct pcanfd_available_clocks *)&sja2010_clocks, &sja2010_caps, PCAN_DEV_BUSLOAD_RDY); break; case PCAN_USB_PRODUCT_ID: default: /* init structure elements to defaults */ pcan_soft_init(dev, "usb", HW_USB); /* Device Id. 
is a single-octet value in these old adapters, * thus, the 'default' value is 0xff (instead of 0xffffffff) */ dev->device_alt_num = 0xff; break; } dev->nChannel = ctrl_index; dev->adapter = usb_if->adapter; /* overrride with USB devices specific options callbacks */ dev->option = pcan_usb_opts; dev->device_open = pcan_usb_device_open; /* override standard device access functions: * if device is CANFD capable, set the CANFD open function. Otehrwise, * set the deafult CAN 2.0 open function */ if (usb_if->device_ctrl_open_fd) dev->device_open_fd = pcan_usb_device_open_fd; dev->device_write = pcan_usb_write; dev->device_release = pcan_usb_device_release; /* set this before any instructions, fill struct pcandev, part 1 */ dev->readreg = NULL; dev->writereg = NULL; dev->cleanup = pcan_usb_cleanup; dev->free_irq = pcan_usb_free_irq; dev->open = pcan_usb_open; dev->release = pcan_usb_release; dev->filter = pcan_create_filter_chain(); dev->device_params = pcan_usb_device_params; #if 0//def DEBUG printk(KERN_DEBUG "%s: usb hardware revision = %d\n", DEVICE_NAME, usb_if->ucRevision); #endif dev->wInitStep = 1; /* assign the device as plugged in */ dev->ucPhysicallyInstalled = 1; /* add this device to the list */ pcan_add_device_in_list_ex(dev, PCAN_DEV_STATIC); dev->wInitStep = 2; usb_devices++; dev->wInitStep = 3; /* MUST do that before any attempt to write something... */ dev->port.usb.usb_if = usb_if; /* get serial number as soon as possible */ usb_if->device_get_snr(usb_if, &usb_if->dwSerialNumber); /* Get device number early too (sometimes, need to retry...) 
*/ for (retry = 3; retry; retry--) { u32 device_nr32; err = usb_if->device_ctrl_get_dnr(dev, &device_nr32); if (!err) { u->ucHardcodedDevNr = (u8 )device_nr32; #ifdef DEBUG pr_info(DEVICE_NAME "%s(): CAN%u devid=%xh (%u)\n", __func__, ctrl_index, device_nr32, device_nr32); #endif break; } } #ifndef PCAN_USB_DONT_REGISTER_DEV /* Handle controller list per interface */ h = usb_get_intfdata(usb_if->usb_intf); dev->nMajor = USB_MAJOR; dev->nMinor = -1; /* must tell that this interface is not in use for all controllers, * especially for controllers > 0 (kernel>2.6.26) */ usb_if->usb_intf->minor = -1; err = usb_register_dev(usb_if->usb_intf, &pcan_class); if (err < 0) { pr_err(DEVICE_NAME ": unable to register usb device\n"); usb_set_intfdata(usb_if->usb_intf, h); goto reject; } dev->nMinor = usb_if->usb_intf->minor; #else dev->nMajor = pcan_drv.nMajor; dev->nMinor = pcan_find_free_minor(dev, PCAN_USB_MINOR_BASE, PCAN_USB_MINOR_END); if (dev->nMinor < 0) { err = dev->nMinor; pr_err(DEVICE_NAME ": not enough minors\n"); goto reject; } #endif #ifdef PCAN_USB_PCAN_SYSFS /* do register pcan dev under sysfs */ pcan_sysfs_dev_node_create(dev); #else dev->sysfs_dev = usb_if->usb_intf->usb_dev; pcan_sysfs_add_attrs(dev->sysfs_dev, pcan_usb_sysfs_attrs[ctrl_index]); if (usb_if->device_ctrl_open_fd) pcan_sysfs_add_attrs(dev->sysfs_dev, pcanfd_usb_sysfs_attrs[ctrl_index]); if (dev->flags & PCAN_DEV_BUSLOAD_RDY) pcan_sysfs_add_attrs(dev->sysfs_dev, pcan_usb_sysfs_bus_load_attrs[ctrl_index]); if (dev->flags & PCAN_DEV_ERRCNT_RDY) pcan_sysfs_add_attrs(dev->sysfs_dev, pcan_usb_sysfs_err_cnt_attrs[ctrl_index]); #endif /* PCAN_USB_PCAN_SYSFS */ /* set device in inactive state to prevent violating the bus */ usb_if->device_ctrl_set_bus_off(dev); /* Call hardware supplied own callback to do some private init */ if (usb_if->device_ctrl_init) { err = usb_if->device_ctrl_init(dev); if (err) { pr_err(DEVICE_NAME ": CAN%u initialization not complete\n", ctrl_index+1); goto reject; } } #ifdef 
NETDEV_SUPPORT pcan_netdev_register(dev); #endif dev->wInitStep = 4; printk(KERN_INFO "%s: usb device minor %d found\n", DEVICE_NAME, dev->nMinor); return 0; reject: pcan_usb_cleanup(dev); pr_err(DEVICE_NAME ": failed to register %s CAN%u as a new USB CAN channel " "err %d\n", dev->adapter->name, ctrl_index+1, err); return err; } #ifdef NETDEV_SUPPORT static void pcan_usb_plugout_netdev(struct pcandev *dev) { struct net_device *ndev = dev->netdev; if (ndev) { netif_stop_queue(ndev); pcan_netdev_unregister(dev); } } #endif static int pcan_usb_plugin(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *usb_dev = interface_to_usbdev(interface); struct usb_endpoint_descriptor *endpoint; struct usb_host_interface *iface_desc; struct pcan_usb_interface *usb_if; int (*device_init)(struct pcan_usb_interface *); int err, i, dev_ctrl_count, sizeof_if; DPRINTK(KERN_DEBUG "%s: %s(0x%04x, 0x%04x, 0x%04x)\n", DEVICE_NAME, __func__, usb_dev->descriptor.idVendor, usb_dev->descriptor.idProduct, usb_dev->descriptor.bcdDevice); /* check endpoint addresses (numbers) and associated max data length * (only from setting 0) * Since USB-PRO defines also a LIN interface, should reject it when * adapter plugged: make use of endpoint addresses (no other way...) */ iface_desc = &interface->altsetting[0]; DPRINTK(KERN_DEBUG "%s: %s(): bNumEndpoints=%d\n", DEVICE_NAME, __func__, iface_desc->desc.bNumEndpoints); for (i=0; i < iface_desc->desc.bNumEndpoints; i++) { struct usb_endpoint_descriptor *endpoint = &iface_desc->endpoint[i].desc; /* Below is the list of valid ep addreses. 
Any other ep address * is considered as not-CAN interface address => no dev created */ switch (endpoint->bEndpointAddress) { case 0x01: case 0x81: case 0x02: case 0x82: case 0x03: case 0x83: break; default: #ifdef DEBUG printk(KERN_INFO "%s: %s(): EP address %02x not in CAN range.\n", DEVICE_NAME, __func__, endpoint->bEndpointAddress); printk(KERN_INFO "%s: %s(): ignoring the whole USB interface\n", DEVICE_NAME, __func__); #endif return -ENODEV; } } #if 0 /* Does not work with PCAN-USB FD (and 3.14-rc2 ...) * * TODO: check if we could remove this call, because according to * drivers/usb/core/message.c: * * "Instead, the driver [..] may use usb_set_interface() on the * interface it claims" */ if (le16_to_cpu(usb_dev->descriptor.idProduct) != PCAN_USBFD_PRODUCT_ID) { /* take the 1st configuration (it's default) */ err = usb_reset_configuration(usb_dev); if (err < 0) { pr_err(DEVICE_NAME ": usb_reset_configuration() failed err %d\n", err); return err; } } #endif /* only 1 interface is supported * Note: HW_USB_PRO: interface#0 for CAN, #1 for LIN */ err = usb_set_interface(usb_dev, 0, 0); if (err < 0) { printk(KERN_ERR "%s: usb_set_interface() failed! 
(err %d)\n", DEVICE_NAME, err); return err; } /* Now, according to device id, create as many device as CAN * controllers */ switch (le16_to_cpu(usb_dev->descriptor.idProduct)) { case PCAN_USBFD_PRODUCT_ID: case PCAN_USBCHIP_PRODUCT_ID: dev_ctrl_count = 1; device_init = pcan_usbfd_init; break; case PCAN_USBPROFD_PRODUCT_ID: dev_ctrl_count = 2; device_init = pcan_usbfd_init; break; #ifdef PCAN_USBX6_PRODUCT_ID case PCAN_USBX6_PRODUCT_ID: dev_ctrl_count = 2; device_init = pcan_usbfd_init; break; #endif case PCAN_USBPRO_PRODUCT_ID: dev_ctrl_count = 2; device_init = pcan_usbpro_init; break; case PCAN_USB_PRODUCT_ID: default: dev_ctrl_count = 1; device_init = pcan_usb_init; break; } /* create our interface object for the USB device */ sizeof_if = sizeof(struct pcan_usb_interface) + sizeof(struct pcandev) * dev_ctrl_count; #ifdef DEBUG printk(KERN_INFO "%s: new ", DEVICE_NAME); if (usb_dev->speed == USB_SPEED_HIGH) printk("high speed "); printk("usb adapter with %u CAN controller(s) detected\n", dev_ctrl_count); #endif usb_if = pcan_malloc(sizeof_if, GFP_KERNEL); if (!usb_if) { pr_err(DEVICE_NAME ": pcan_malloc(%d) failed!\n", sizeof_if); return err; } memset(usb_if, '\0', sizeof_if); /* store pointer to kernel supplied usb_dev */ usb_if->usb_dev = usb_dev; usb_if->usb_intf = interface; /* if usb interface is not a root port, then setup a zero based index * so that the exported sysfs channel number will take it into account */ if (usb_dev->route) { /* note: usb port are 1-based numbers * (see drivers/usb/core/usb.c#L472) */ usb_if->index = usb_dev->portnum - 1; } #ifndef PCAN_USB_CMD_PER_DEV /* preset finish flags */ atomic_set(&usb_if->cmd_sync_complete, 0); atomic_set(&usb_if->cmd_async_complete, 1); #endif /* preset active URB counter */ atomic_set(&usb_if->active_urbs, 0); /* get endpoint addresses (numbers) and associated max data length * (only from setting 0) */ /* * USB-Pro * Function Interface Endpoints DeviceId * --------- --------- 
----------------------------------------- * Control 0 * CAN 0 "CAN-Device", * USB\VID_0c72&PID_000d&MI_00 * 1=Command, bidi for both CAN_Ctrller * 2=CAN-Controller 0, rcv (IN) both CAN-Ctrller, * transmit (OUT) CAN-Ctrl#0, * 3=CAN-Controller 1 transmit (OUT) CAN-Ctrl#1 * LIN 1 "LIN-Device", * USB\VID_0c72&PID_000d&MI_01 * 4=Command, * 5=Controller 0, * 6=Controller 1 */ for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) { PCAN_ENDPOINT *pipe_addr = NULL; endpoint = &iface_desc->endpoint[i].desc; DPRINTK(KERN_DEBUG "%s: %s(): EP[%d]={addr=%d max=%d}\n", DEVICE_NAME, __func__, i, endpoint->bEndpointAddress, endpoint->wMaxPacketSize); switch (endpoint->bEndpointAddress) { case 0x01: pipe_addr = &usb_if->pipe_cmd_out; break; case 0x81: pipe_addr = &usb_if->pipe_cmd_in; break; case 0x02: switch (le16_to_cpu(usb_dev->descriptor.idProduct)) { case PCAN_USBFD_PRODUCT_ID: case PCAN_USBCHIP_PRODUCT_ID: case PCAN_USBPROFD_PRODUCT_ID: #ifdef PCAN_USBX6_PRODUCT_ID case PCAN_USBX6_PRODUCT_ID: #endif case PCAN_USBPRO_PRODUCT_ID: case PCAN_USB_PRODUCT_ID: default: pipe_addr = &usb_if->dev[0].port.usb.pipe_write; break; } break; case 0x82: pipe_addr = &usb_if->pipe_read; break; case 0x03: switch (le16_to_cpu(usb_dev->descriptor.idProduct)) { #if 0//def HW_USB_FD case PCAN_USBFD_PRODUCT_ID: #warning TODO: check if 0x03 can be enum with PCANFD (dev[1] below is estonishing...) #endif case PCAN_USBPROFD_PRODUCT_ID: #ifdef PCAN_USBX6_PRODUCT_ID case PCAN_USBX6_PRODUCT_ID: #endif case PCAN_USBPRO_PRODUCT_ID: pipe_addr = &usb_if->dev[1].port.usb.pipe_write; break; } case 0x83: /* Unused pipe for PCAN-USB-PRO * But seems that need to be reset too... 
*/ /* TBD */ break; default: continue; } if (pipe_addr) { pipe_addr->ucNumber = endpoint->bEndpointAddress; pipe_addr->wDataSz = le16_to_cpu(endpoint->wMaxPacketSize); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30) usb_reset_endpoint(usb_dev, endpoint->bEndpointAddress); #endif } /* ucRevision needs to be defined before allocating resources * (PCAN-USB) */ #if defined(__LITTLE_ENDIAN) usb_if->ucHardcodedDevNr = (u8)(usb_if->usb_dev->descriptor.bcdDevice & 0xff); usb_if->ucRevision = (u8)(usb_if->usb_dev->descriptor.bcdDevice >> 8); #elif defined(__BIG_ENDIAN) usb_if->ucHardcodedDevNr = (u8)(usb_if->usb_dev->descriptor.bcdDevice >> 8); usb_if->ucRevision = (u8)(usb_if->usb_dev->descriptor.bcdDevice & 0xff); #else #error "Please fix the endianness defines in <asm/byteorder.h>" #endif DPRINTK(KERN_DEBUG "%s(): ucHardcodedDevNr=0x%02x ucRevision=0x%02X\n", __func__, usb_if->ucHardcodedDevNr, usb_if->ucRevision); /* MUST do this BEFORE calling pcan_usb_alloc_resources() */ usb_if->can_count = dev_ctrl_count; /* resources MUST be allocated before calling device_init() */ err = pcan_usb_alloc_resources(usb_if); if (err) goto reject; /* call initialisation callback for entire device */ err = device_init(usb_if); if (err) { pr_err(DEVICE_NAME ": device_init() failure err %d\n", err); goto reject_free; } #if 1 /* install the reception part for the interface */ if (!atomic_read(&usb_if->read_data.use_count)) { FILL_BULK_URB(&usb_if->read_data, usb_if->usb_dev, usb_rcvbulkpipe(usb_if->usb_dev, usb_if->pipe_read.ucNumber), usb_if->read_buffer_addr[0], usb_if->read_buffer_size, pcan_usb_read_notify, usb_if); /* submit urb */ err = __usb_submit_urb(&usb_if->read_data); if (err) { pr_err(DEVICE_NAME ": %s() can't submit! 
(%d)\n", __func__, err);
			goto reject_free;
		}

		atomic_inc(&usb_if->active_urbs);
	}
#endif

	/* should be set BEFORE pcan_usb_create_dev() */
	usb_set_intfdata(interface, usb_if);

	/* next, initialize each controller */
	for (i = 0; i < dev_ctrl_count; i++) {
#ifdef PCAN_USB_CMD_PER_DEV
		/* preset finish flags */
		atomic_set(&usb_if->dev[i].port.usb.cmd_sync_complete, 0);
		atomic_set(&usb_if->dev[i].port.usb.cmd_async_complete, 1);
#endif
		err = pcan_usb_create_dev(usb_if, i);
		if (err)
			goto reject_free_all_dev;
	}

#if 0
	/* install the reception part for the interface */
	if (!atomic_read(&usb_if->read_data.use_count)) {
		FILL_BULK_URB(&usb_if->read_data, usb_if->usb_dev,
			usb_rcvbulkpipe(usb_if->usb_dev,
					usb_if->pipe_read.ucNumber),
			usb_if->read_buffer_addr[0], usb_if->read_buffer_size,
			pcan_usb_read_notify, usb_if);

		/* submit urb */
		if ((err = __usb_submit_urb(&usb_if->read_data))) {
			printk(KERN_ERR "%s: %s() can't submit! (%d)\n",
				DEVICE_NAME, __func__, err);
			goto reject;
		}

		atomic_inc(&usb_if->active_urbs);
	}
#endif

	return 0;

reject_free_all_dev:
	/* error unwind: remove ALL devs previously created for this same USB
	 * interface (i holds the count of successfully created devs) */
	while (i--) {
		struct pcandev *dev = usb_if->dev + i;
		const int m = dev->nMinor;

#ifdef NETDEV_SUPPORT
		pcan_usb_plugout_netdev(dev);
#endif
#ifdef PCAN_USB_PCAN_SYSFS
		pcan_sysfs_dev_node_destroy(dev);
#endif
		pcan_usb_cleanup(dev);

		pr_info(DEVICE_NAME ": usb device minor %d removed\n", m);
	}

	pcan_kill_sync_urb(&usb_if->read_data);

reject_free:
	pcan_usb_free_resources(usb_if);

reject:
	pcan_free(usb_if);

	return err;
}

/* Called by the USB core when the adapter is unplugged: tears down sysfs
 * nodes, netdev bindings, pending URBs and per-controller resources, then
 * frees the interface object itself. */
static void pcan_usb_plugout(struct usb_interface *interface)
{
	struct pcan_usb_interface *usb_if = usb_get_intfdata(interface);
	struct pcandev *dev;
	int c;

	DPRINTK(KERN_DEBUG "%s: %s(): usb_if=%p\n",
		DEVICE_NAME, __func__, usb_if);

	if (!usb_if) {
		pr_info(DEVICE_NAME "%s(%u): usb_if=NULL\n",
			__func__, __LINE__);
		return;
	}

	/* clear the interface data right away, in case of re-entrance
	 * somewhere... */
	usb_set_intfdata(interface, NULL);

	dev = usb_if->dev;
	for (c = 0; c < usb_if->can_count; c++, dev++) {
		DPRINTK(KERN_DEBUG "%s: %s(%d)\n",
			DEVICE_NAME, __func__, dev->nMinor);

#ifdef PCAN_USB_PCAN_SYSFS
		pcan_sysfs_dev_node_destroy(dev);
#else
		/* remove only the attribute groups that were actually
		 * registered for this controller */
		if (dev->flags & PCAN_DEV_ERRCNT_RDY)
			pcan_sysfs_del_attrs(dev->sysfs_dev,
					pcan_usb_sysfs_err_cnt_attrs[c]);
		if (dev->flags & PCAN_DEV_BUSLOAD_RDY)
			pcan_sysfs_del_attrs(dev->sysfs_dev,
					pcan_usb_sysfs_bus_load_attrs[c]);
		if (usb_if->device_ctrl_open_fd)
			pcan_sysfs_del_attrs(dev->sysfs_dev,
					pcanfd_usb_sysfs_attrs[c]);

		pcan_sysfs_del_attrs(dev->sysfs_dev,
					pcan_usb_sysfs_attrs[c]);
#endif
#ifdef NETDEV_SUPPORT
		pcan_usb_plugout_netdev(dev);
#endif
		/* mark this device as plugged out */
		dev->ucPhysicallyInstalled = 0;

#if 0
		/* do not remove resources if the device is still in use */
		if (!dev->nOpenPaths)
			pcan_usb_cleanup(dev);
#else
		/* Should close all dev resources EVEN if the device is in use,
		 * otherwise the application may not notice that the device was
		 * removed: CAN_Open(); while (1) CAN_Read(h); */
		pcan_usb_cleanup(dev);
#endif

#ifdef PCAN_USB_CMD_PER_DEV
		pcan_kill_sync_urb(&dev->port.usb.urb_cmd_sync);
		pcan_kill_sync_urb(&dev->port.usb.urb_cmd_async);
#endif
		pcan_kill_sync_urb(&dev->port.usb.write_data);

		pcan_free(dev->port.usb.cout_baddr);

#ifndef PCAN_USB_DONT_REGISTER_DEV
		interface->minor = dev->nMinor;
		usb_deregister_dev(interface, &pcan_class);
#endif
	}

#ifndef PCAN_USB_CMD_PER_DEV
	pcan_kill_sync_urb(&usb_if->urb_cmd_async);
#endif
	pcan_kill_sync_urb(&usb_if->urb_cmd_sync);
	pcan_kill_sync_urb(&usb_if->read_data);

	pcan_free(usb_if->read_buffer_addr[0]);

	if (usb_if->device_free)
		usb_if->device_free(usb_if);

	usb_reset_device(usb_if->usb_dev);

	pcan_free(usb_if);
}

/* small interface to rest of driver, only init and deinit */
/* Fill in and register the usb_driver structure with the USB core. */
static int pcan_usb_core_init(void)
{
	DPRINTK(KERN_DEBUG
		"%s: %s() -------------------------------------------\n",
		DEVICE_NAME, __func__);

	memset(&pcan_drv.usbdrv, 0, sizeof(pcan_drv.usbdrv));

	/* inherit default options */
	pcan_inherit_options(pcan_usb_opts);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,24) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
	pcan_drv.usbdrv.owner = THIS_MODULE;
#endif

	pcan_drv.usbdrv.probe = pcan_usb_plugin;
	pcan_drv.usbdrv.disconnect = pcan_usb_plugout;
	pcan_drv.usbdrv.name = DEVICE_NAME;
	pcan_drv.usbdrv.id_table = pcan_usb_ids;

	return usb_register(&pcan_drv.usbdrv);
}

/* driver_for_each_device() callback: give each still-plugged controller a
 * last chance to submit cleanup URBs before the driver is unregistered. */
static int pcan_usb_do_cleanup(struct device *dev, void *arg)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct pcan_usb_interface *usb_if = \
		(struct pcan_usb_interface *)usb_get_intfdata(intf);
	struct pcandev *pdev;
	int c;

	DPRINTK(KERN_DEBUG "%s: %s()\n", DEVICE_NAME, __func__);

	if (!usb_if) {
		DPRINTK(KERN_DEBUG "%s: %s(): NULL usb_if\n",
			DEVICE_NAME, __func__);
		return 0;
	}

	/* Browse controllers list */
	pdev = usb_if->dev;
	for (c = 0; c < usb_if->can_count; c++, pdev++)
		if (pdev->ucPhysicallyInstalled)
			/* Last chance for URB submitting */
			if (usb_if->device_ctrl_cleanup)
				usb_if->device_ctrl_cleanup(pdev);

	return 0;
}

/* Unregister the USB driver (which plugs out every registered device),
 * after running a last cleanup pass over all bound devices. */
void pcan_usb_deinit(void)
{
	DPRINTK(KERN_DEBUG "%s: %s()\n", DEVICE_NAME, __func__);

	if (pcan_drv.usbdrv.probe == pcan_usb_plugin) {

		/* Added this since it is the last chance for URB submitting */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
		int err = driver_for_each_device(
				&pcan_drv.usbdrv.drvwrap.driver,
				NULL, NULL, pcan_usb_do_cleanup);
#else
		int err = driver_for_each_device(&pcan_drv.usbdrv.driver,
				NULL, NULL, pcan_usb_do_cleanup);
#endif
		/* driver_for_each_device() is declared with "must_check"
		 * attribute so check err here, knowing that drv is not NULL
		 * (1st arg) and that pcan_usb_do_cleanup() always return 0 */
		if (err)
			err = 0;

		/* then it was registered:
		 * unregister usb parts, makes a plugout of registered devices */
		usb_deregister(&pcan_drv.usbdrv);
	}
}

/* init for usb based devices from peak */
int pcan_usb_register_devices(void)
{
	int err;

	DPRINTK(KERN_DEBUG "%s: %s()\n", DEVICE_NAME, __func__);

	if (!(err = pcan_usb_core_init())) {
		DPRINTK(KERN_DEBUG "%s: %s() is OK\n",
			DEVICE_NAME, __func__);
	}

	return err;
}
#endif /* USB_SUPPORT */
import 'package:flutter/material.dart';
import 'package:flutter/widgets.dart';

import 'package:flutterapp/common/main.dart';

/// Demo page: a [TabBar] in the AppBar and a [TabBarView] body, both driven
/// by one explicitly-created [TabController].
class TabBarPage extends StatefulWidget {
  @override
  State<StatefulWidget> createState() => TabBarPageState();
}

class TabBarPageState extends State<TabBarPage>
    with SingleTickerProviderStateMixin {
  TabController _tabController;
  List<String> items = ['News', 'Image', 'Video'];
  List<Widget> icons = [HYIcon.news, HYIcon.image, HYIcon.video];

  @override
  void initState() {
    super.initState();
    _tabController = TabController(length: items.length, vsync: this);
  }

  // FIX: the controller was never disposed, leaking its ticker.
  @override
  void dispose() {
    _tabController.dispose();
    super.dispose();
  }

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(
        title: HYText(title: 'Hello World'),
        centerTitle: true,
        bottom: TabBar(
          controller: _tabController,
          tabs: items
              .asMap()
              .keys
              .map((index) => Tab(text: items[index], icon: icons[index]))
              .toList(),
        ),
      ),
      body: TabBarView(
          controller: _tabController,
          children: items
              .map((item) => Center(child: HYText.bigAndBold(title: item)))
              .toList()),
    );
  }
}

/// Simple centered-text body used as a page by [BottomAppBarPage].
class ContentView extends StatelessWidget {
  final String title;

  const ContentView({Key key, this.title}) : super(key: key);

  @override
  Widget build(BuildContext context) {
    return Center(
      child: HYText.bigAndBold(title: title),
    );
  }
}

/// Demo page: [BottomAppBar] with a docked FAB switching between pages.
class BottomAppBarPage extends StatefulWidget {
  @override
  State<StatefulWidget> createState() => BottomAppBarPageState();
}

class BottomAppBarPageState extends State<BottomAppBarPage> {
  int selectedIndex = 0;
  List<Widget> items;

  @override
  void initState() {
    super.initState();
    items = ['Home', 'Add', 'Setting']
        .map((item) => ContentView(title: item))
        .toList();
  }

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: HYAppBar(title: 'BottomAppBar'),
      floatingActionButton: FloatingActionButton(
        onPressed: () => setState(() => selectedIndex = 1),
        child: HYIcon.add,
      ),
      floatingActionButtonLocation: FloatingActionButtonLocation.centerDocked,
      bottomNavigationBar: BottomAppBar(
        color: Colors.white,
        shape: CircularNotchedRectangle(),
        child: Row(
          mainAxisAlignment: MainAxisAlignment.spaceAround,
          children: <Widget>[
            HYIconButton.home(
                onPressed: () => setState(() => selectedIndex = 0)),
            // Spacer keeping the notch area clear for the docked FAB.
            SizedBox(),
            HYIconButton.setting(
                onPressed: () => setState(() => selectedIndex = 2)),
          ],
        ),
      ),
      body: items[selectedIndex],
    );
  }
}

/// Demo page: a swipeable [PageView].
class PageViewPage extends StatefulWidget {
  @override
  State<StatefulWidget> createState() => PageViewPageState();
}

// FIX: _pageController was declared but never initialised, so PageView
// received a null controller. It is now created in initState() and disposed.
// Also dropped the unused SingleTickerProviderStateMixin and `icons` list.
class PageViewPageState extends State<PageViewPage> {
  PageController _pageController;
  List<String> items = ['News', 'Image', 'Video'];

  @override
  void initState() {
    super.initState();
    _pageController = PageController();
  }

  @override
  void dispose() {
    _pageController.dispose();
    super.dispose();
  }

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(
        title: HYText(title: 'Hello World'),
        centerTitle: true,
      ),
      body: PageView(
          controller: _pageController,
          children: items
              .map((item) => Center(child: HYText.bigAndBold(title: item)))
              .toList()),
    );
  }
}
#include "pch.h"
#include <stdexcept>
#include "CppUnitTest.h"

using namespace Microsoft::VisualStudio::CppUnitTestFramework;

import boring32.winsock;

namespace WinSock
{
	// Unit tests for Boring32's WinSock startup wrapper (WSAStartup RAII).
	TEST_CLASS(WinSockInit)
	{
		public:
			// Requesting WinSock version 2.2 is expected to succeed.
			TEST_METHOD(TestInit)
			{
				Boring32::WinSock::WinSockInit init(2,2);
			}

			// Version 0.0 is invalid; the wrapper is expected to throw
			// a WinSockError rather than construct silently.
			TEST_METHOD(TestBadInit)
			{
				Assert::ExpectException<Boring32::WinSock::WinSockError>(
					[]()
					{
						Boring32::WinSock::WinSockInit init(0, 0);
					}
				);
			}
	};
}
// Validation helpers for user-entered form fields.
// Each returns true when the input matches its pattern.

// Usernames may contain any characters except the special set )!@#$%^&*( .
// NOTE: the pattern uses `*`, so an empty string also passes (unchanged
// from the original behavior).
export const validateUserName = (username) => {
    const re = /^[^)!@#$%^&*(]*$/;
    return re.test(username);
};

// Basic email shape: local part, "@", then a domain ending in a letter.
export const validateEmail = (email) => {
    const re = /^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+[a-zA-Z]$/;
    return re.test(email);
};

// Phone numbers must start with the prefix 0, 84 or +84, followed by digits.
// FIX: the original pattern /^[(0|84|\+84)]+[0-9]*$/ wrapped the alternation
// in a character class, so it matched the literal characters "(", "|", "\"
// and ")" (e.g. "((123" passed). A group with alternation is used instead.
export const validatePhoneNumber = (phoneNumber) => {
    const re = /^(\+84|84|0)[0-9]*$/;
    return re.test(phoneNumber);
};

// Addresses share the same character restrictions as usernames.
export const validateAddress = (address) => {
    const re = /^[^)!@#$%^&*(]*$/;
    return re.test(address);
};
import { Component, Input, Output, EventEmitter } from "@angular/core";
import GridColumnSettings from "../../../shared/danphe-grid/grid-column-settings.constant";
import { AccountingSettingsBLService } from "../../settings/shared/accounting-settings.bl.service";
import { MessageboxService } from "../../../shared/messagebox/messagebox.service";

// Shared component that displays the fiscal-year activity log in a grid.
// The parent toggles the `loadDetail` input to force a reload of the list.
@Component({
  selector: "fiscal-yearlog-shared",
  templateUrl: "./fiscal-yearlog-shared.component.html"
})
export class fiscalyearlogSharedComponent {
  // Rows bound to the grid; null until the first fetch completes.
  public fsyearactivityList: Array<any> = null;
  // Column definitions taken from the shared grid settings constant.
  public fsyearactivityGridColumns: Array<any> = null;
  public loadDetail: boolean = false;

  // Setter-style input: any truthy value triggers a fresh fetch.
  @Input('loadDetail')
  public set reloadfsDetails(_reloadDetails) {
    this.loadDetail = _reloadDetails;
    if (this.loadDetail) {
      this.getfsyearactivitydetail();
    }
  }

  constructor(public accountingSettingsBLService: AccountingSettingsBLService,
    public msgBox: MessageboxService,) {
    this.fsyearactivityGridColumns = GridColumnSettings.fsyearactivity;
    // Initial load on construction, in addition to input-driven reloads.
    this.getfsyearactivitydetail();
  }

  // Fetches the fiscal-year activity rows from the BL service.
  // NOTE(review): errors are reported via window.alert() although a
  // MessageboxService is injected (and otherwise unused) — consider
  // routing the failure message through msgBox instead; verify its API.
  public getfsyearactivitydetail() {
    this.accountingSettingsBLService.getfsyearactivitydetail()
      .subscribe(res => {
        if (res.Status == "OK") {
          this.fsyearactivityList = res.Results;
        }
        else {
          alert("Failed ! " + res.ErrorMessage);
        }
      });
  }
}
## AutoreleasePool 和 Runloop 的关系? 每一个线程,包括主线程,都会拥有一个专属的 `RunLoop` 对象,并且会在有需要的时候自动创建。子线程的`runloop` 需要自己手动创建,如果子线程的 `runloop` 没有任何事件,`runloop`会马上退出。 另外每一个线程都会维护自己的 `autoreleasepool` 堆栈,当 `runloop` 迭代结束时会向 `autoreleasepool` 发送 `release` 消息。 ### Reference https://blog.sunnyxx.com/2014/10/15/behind-autorelease/
# Safarie the Simple AI !

Safarie is a simple AI designed by me for ACICTS Bits'21. 👾

### How to use?

Please download both Python files and run main.py!

### Let's contribute.

If you would like to contribute to this project, please make a fork and, after you have made your changes, open a pull request! 💣

### Don't copy and paste!

***Source Code Protected 🚀***
package ay2021s1_cs2103_w16_3.finesse.logic.parser.bookmarkparsers;

import static ay2021s1_cs2103_w16_3.finesse.commons.core.Messages.MESSAGE_INVALID_COMMAND_FORMAT;

import ay2021s1_cs2103_w16_3.finesse.commons.core.index.Index;
import ay2021s1_cs2103_w16_3.finesse.logic.commands.bookmark.DeleteBookmarkCommand;
import ay2021s1_cs2103_w16_3.finesse.logic.parser.Parser;
import ay2021s1_cs2103_w16_3.finesse.logic.parser.ParserUtil;
import ay2021s1_cs2103_w16_3.finesse.logic.parser.exceptions.ParseException;

/**
 * Parses input arguments and creates a new DeleteBookmarkCommand object.
 */
public class DeleteBookmarkCommandParser implements Parser<DeleteBookmarkCommand> {

    /**
     * Parses the given {@code String} of arguments in the context of the DeleteBookmarkCommand
     * and returns a DeleteBookmarkCommand object for execution.
     *
     * @param args Raw user input expected to contain a single one-based index.
     * @throws ParseException if the user input does not conform to the expected format
     */
    public DeleteBookmarkCommand parse(String args) throws ParseException {
        try {
            Index index = ParserUtil.parseIndex(args);
            return new DeleteBookmarkCommand(index);
        } catch (ParseException pe) {
            // Re-wrap with the command's usage message so the user sees
            // the correct format, while preserving the original cause.
            throw new ParseException(
                    String.format(MESSAGE_INVALID_COMMAND_FORMAT, DeleteBookmarkCommand.MESSAGE_USAGE), pe);
        }
    }

}
<?hh

// Opens this very source file and echoes its current (first) line,
// i.e. the "<?hh" marker line.
<<__EntryPoint>>
function main(): void {
  //line 3
  //line 4
  //line 5
  $s = new SplFileObject(__FILE__);
  // current() without a prior seek/next returns the first line.
  echo $s->current();
}
# slack-clone-client

Using graphql, react, express

## Getting started:

- Run ```yarn``` and ```yarn start```
- Visit http://localhost:3000.
- uri:
 - http://localhost:3000/login
 - http://localhost:3000/register
 - http://localhost:3000/view-team
import 'package:mvc_pattern/mvc_pattern.dart';
import 'package:admin/models/NavigationModel.dart';
import 'package:flutter/cupertino.dart';

/// Singleton controller exposing the application's navigation targets.
class NavigationController extends ControllerMVC {
  // The one shared instance, created eagerly at class load.
  static final NavigationController _instance =
      NavigationController._internal();

  /// Always hands back the same shared instance.
  factory NavigationController() => _instance;

  NavigationController._internal();

  /// First registered screen: the login page.
  static StatefulWidget get loginScreen => NavigationModel.screens[0];

  /// Second registered screen: the main page.
  static StatefulWidget get mainScreen => NavigationModel.screens[1];
}
use std::{convert::From, error, fmt, io, num::ParseIntError};

/// Enum of all possible errors during manipulation of asar archives.
#[derive(Debug)]
pub enum Error {
    /// Underlying I/O failure while reading or writing archive data.
    IoError(io::Error),
    /// Failure parsing an integer (e.g. an offset/size field).
    ParseIntError(ParseIntError),
    /// Failure parsing the archive's JSON header.
    JsonError(serde_json::Error),
    /// Failure while expanding a filesystem glob pattern.
    GlobError(glob::GlobError),
}

impl fmt::Display for Error {
    // Human-readable message prefixed with the error category.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Error::IoError(ref err) => write!(f, "IO Error: {}", err),
            Error::ParseIntError(ref err) => write!(f, "Error parsing int: {}", err),
            Error::JsonError(ref err) => write!(f, "Error parsing JSON: {}", err),
            Error::GlobError(ref err) => write!(f, "Error parsing glob: {}", err),
        }
    }
}

impl error::Error for Error {
    // Expose the wrapped error as the source, preserving the error chain.
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match self {
            Error::IoError(ref err) => Some(err),
            Error::ParseIntError(ref err) => Some(err),
            Error::JsonError(ref err) => Some(err),
            Error::GlobError(ref err) => Some(err),
        }
    }
}

// The From impls below let callers use the `?` operator with any of the
// wrapped error types.

impl From<io::Error> for Error {
    fn from(err: io::Error) -> Self {
        Error::IoError(err)
    }
}

impl From<serde_json::Error> for Error {
    fn from(err: serde_json::Error) -> Self {
        Error::JsonError(err)
    }
}

impl From<glob::GlobError> for Error {
    fn from(err: glob::GlobError) -> Self {
        Error::GlobError(err)
    }
}

impl From<ParseIntError> for Error {
    fn from(err: ParseIntError) -> Self {
        Error::ParseIntError(err)
    }
}
using UnityEngine;
using UnityEngine.UI;

namespace Elka.UI.Controller
{
    /// <summary>
    /// Full-screen overlay rendered one sorting step below the currently
    /// open dialog. Clicking it closes the dialog when the dialog allows
    /// escaping.
    /// </summary>
    public class UIOverlay : MonoBehaviour
    {
        private Image overlay;
        private Button overlayButton;
        private Canvas mCanvas;

        private void Awake()
        {
            overlay = GetComponent<Image>();
            // Reuse an existing Button component, or attach one on the fly.
            overlayButton = GetComponent<Button>() ?? gameObject.AddComponent<Button>();
            mCanvas = GetComponent<Canvas>();
        }

        private void Start()
        {
            // Ensure exactly one click handler: close the active dialog.
            overlayButton.onClick.RemoveAllListeners();
            overlayButton.onClick.AddListener(UIController.CloseCurrentDialog);
        }

        private void OnDialogOpened(IUserInterface obj)
        {
            overlay.enabled = true;

            // Click-to-dismiss is only available when the dialog can be escaped.
            overlayButton.interactable = obj.EnableEscape;

            // Mirror the dialog's canvas settings, sitting one order below it.
            var dialogCanvas = obj.GetCanvas();
            mCanvas.sortingLayerID = dialogCanvas.sortingLayerID;
            mCanvas.sortingLayerName = dialogCanvas.sortingLayerName;
            mCanvas.sortingOrder = dialogCanvas.sortingOrder - 1;
        }

        private void OnDialogClosed()
        {
            overlay.enabled = false;
        }

        private void OnEnable()
        {
            UIController.onDialogOpen += OnDialogOpened;
            UIController.OnScreenClear += OnDialogClosed;
        }

        private void OnDisable()
        {
            UIController.onDialogOpen -= OnDialogOpened;
            UIController.OnScreenClear -= OnDialogClosed;
        }
    }
}
uaa-keystone
============

A place to collaborate on Cloud Foundry UAA and OpenStack Keystone.

The code in this repo is likely to be quickly merged into the UAA repo, so this is likely to be a short-lived repository.
# Example 05: Logging

Here, we see an example of how to use the AlephZero logger.

It's a standalone docker image that is configured to save all messages on topics matching `from/*` to `/tmp/logs`.

See prior examples for how to start and stop the processes.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Classes and functions pertaining to the loading and batching of data.
"""

# =============================================================================
# IMPORTS AND DEPENDENCIES
# =============================================================================
import numpy as np
import torch


# =============================================================================
# CLASSES
# =============================================================================

class DataLoader:
    """DataLoader class: a batching cache + queue.

    Features (X) and labels (y) can be added to the cache either via the
    constructor or by calling add_to_cache(X, y). Whenever data enters the
    cache, as many full batches of length batch_size as possible are removed
    from the cache and appended to the queue; any remainder stays in the
    cache and is batched first when more data arrives.

    The class is an iterator: iterating over it pops (X, y) batches from the
    queue until the queue is empty. If shuffle is True, datapoints are
    shuffled before being cached.
    """

    def __init__(self, X=None, y=None, batch_size=1, shuffle=False, seed=69):
        """Initialise the DataLoader.

        Args:
            X (np.ndarray or torch.Tensor) : features (first dimension = N),
                or None to start with an empty cache/queue
            y (np.ndarray or torch.Tensor) : labels (first dimension = N)
            batch_size (int) : size of the batches to generate
            shuffle (bool) : whether to shuffle datapoints before batching
            seed (int) : seed for the random number generator
        """
        self.batch_size = batch_size
        self.shuffle = shuffle
        self._cache = []
        self._queue = []

        # RNG is only needed (and only created) when shuffling is requested.
        self._rng = np.random.default_rng(seed) if shuffle else None

        # Optionally seed the cache/queue with an initial dataset.
        if X is not None and y is not None:
            self.add_to_cache(X, y)

    def __iter__(self):
        """Return the iterator object (implicitly called before loops)."""
        return self

    def __next__(self):
        """Pop and return the next (X, y) batch from the queue.

        Raises StopIteration when the queue is empty, which looping
        constructs capture implicitly.
        """
        try:
            X, y = self._queue.pop(0)
        except IndexError:
            raise StopIteration
        return X, y

    def __str__(self):
        """Represent the class instance as a string."""
        return f"DataLoader object with batch size {self.batch_size}"

    def __len__(self):
        """Return the number of batches currently queued."""
        return len(self._queue)

    def add_to_cache(self, X, y):
        """Add features (X) and labels (y) to the cache, then batch them.

        If shuffle was requested in the constructor, the datapoints are
        shuffled before caching. Full batches are immediately moved from
        the cache to the queue.

        Args:
            X (np.ndarray or torch.Tensor) : features (first dimension = N)
            y (np.ndarray or torch.Tensor) : labels (first dimension = N)
        """
        assert X.shape[0] == y.shape[0], "First dim. of X & y must be aligned!"

        if self.shuffle:
            shuffler = self._rng.permutation(X.shape[0])
            X, y = X[shuffler], y[shuffler]

        # zip() pairs each feature row with its label row.
        for datapoint in zip(X, y):
            self._cache.append(datapoint)

        self._cache_to_queue()

    def _cache_to_queue(self):
        """Batch cached datapoints, clear them from cache, add to queue.

        Points that do not fill a complete batch remain in the cache and
        are batched first when new data is added.

        Raises:
            TypeError : if the features/labels are neither np.ndarray nor
                torch.Tensor
        """
        while len(self._cache) >= self.batch_size:
            # Extract one batch from the front of the cache.
            batch = self._cache[:self.batch_size]
            del self._cache[:self.batch_size]

            first = batch[0][0]
            # BUG FIX: the original tested `type(batch[0][0] == torch.Tensor)`
            # (the type of a bool -- always truthy), so the TypeError branch
            # was unreachable. isinstance() also accepts subclasses, which
            # `type(...) == np.ndarray` rejected.
            if isinstance(first, np.ndarray):
                X = np.stack([dp[0] for dp in batch], axis=0)
                y = np.stack([dp[1] for dp in batch], axis=0)
            elif isinstance(first, torch.Tensor):
                X = torch.stack([dp[0] for dp in batch], dim=0)
                y = torch.stack([dp[1] for dp in batch], dim=0)
            else:
                raise TypeError("Data must be np.ndarray or torch.tensor")

            self._queue.append((X, y))


# =============================================================================
# MAIN ENTRY POINT
# =============================================================================
if __name__ == "__main__":
    pass
""" Copyright (c) 2022 Huawei Technologies Co.,Ltd. openGauss is licensed under Mulan PSL v2. You can use this software according to the terms and conditions of the Mulan PSL v2. You may obtain a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. """ """ Case Type : 功能测试-表空间 Case Name : 表空间删除-删除用户与owner存在成员关系 Description : 0.创建系统用户及普通用户A; 1.创建tablespace1,指定普通用户A为所有者; 2.创建普通用户B; 3.修改A为B的直接成员; 4.B用户删除表空间; 5.修改B为A的直接成员; 6.B用户删除表空间; 7.删除不存在的表空间; Expect : 0.创建系统用户及普通用户A; 创建成功 1.创建tablespace1,指定普通用户A为所有者; 创建成功 2.创建普通用户B; 创建成功 3.修改A为B的直接成员; 修改成功 4.B用户删除表空间; 删除失败 5.修改B为A的直接成员; 修改成功 6.B用户删除表空间; 删除成功 7.删除不存在的表空间; 删除成功 History : """ import os import unittest from testcase.utils.CommonSH import CommonSH from testcase.utils.Constant import Constant from testcase.utils.Logger import Logger from yat.test import macro class Tablespace(unittest.TestCase): def setUp(self): self.log = Logger() self.log.info(f'-----{os.path.basename(__file__)} start-----') self.pri_sh = CommonSH('PrimaryDbUser') self.constant = Constant() self.tbspc_name = 'tsp_tbspc0033' self.tbspc_location = 'tbspc0033' self.pwd = macro.PASSWD_INITIAL self.sys_user = 'u_tbspc0033_sys' self.com_user1 = 'u_tbspc0033_com1' self.com_user2 = 'u_tbspc0033_com2' self.connect_sys = f'-U {self.sys_user} -W {self.pwd}' self.connect_com1 = f'-U {self.com_user1} -W {self.pwd}' self.connect_com2 = f'-U {self.com_user2} -W {self.pwd}' self.grant_success = 'GRANT ROLE' self.err_flag1 = 'ERROR: permission denied for tablespace' self.err_flag2 = f'ERROR: Tablespace "{self.tbspc_name}" ' \ f'does not exist.' 
def test_main(self): step_txt = '----step0:创建系统用户及普通用户A; expect:创建成功----' self.log.info(step_txt) create_sql = f"drop tablespace if exists {self.tbspc_name}; " \ f"drop user if exists {self.com_user1} cascade; " \ f"drop user if exists {self.sys_user} cascade; " \ f"create user {self.com_user1} password '{self.pwd}';" \ f"create user {self.sys_user} sysadmin password '{self.pwd}';" self.log.info(create_sql) create_result = self.pri_sh.execut_db_sql(create_sql) self.log.info(create_result) assert_flag = create_result.splitlines().count( self.constant.CREATE_ROLE_SUCCESS_MSG) self.assertEqual(assert_flag, 2, "执行失败" + step_txt) step_txt = '----step1:创建tablespace1,指定普通用户A为所有者; expect:创建成功----' self.log.info(step_txt) create_sql = f"create tablespace {self.tbspc_name} " \ f"owner {self.com_user1} " \ f"relative location '{self.tbspc_location}';" create_result = self.pri_sh.execut_db_sql(create_sql, sql_type=self.connect_sys) self.log.info(create_result) self.assertIn(self.constant.TABLESPCE_CREATE_SUCCESS, create_result, "执行失败" + step_txt) self.log.info('--查询tablespace所有者--') check_owner = f"select spcowner from pg_tablespace where " \ f"spcname = '{self.tbspc_name}';" owner1 = self.pri_sh.execut_db_sql(check_owner).splitlines()[-2] self.log.info(owner1) select_sql = f"select usesysid from pg_user where " \ f"usename= '{self.com_user1}';" owner2 = self.pri_sh.execut_db_sql(select_sql).splitlines()[-2] self.log.info(owner2) self.assertEqual(owner1, owner2, "执行失败" + step_txt) step_txt = '----step2:创建普通用户B; expect:创建成功----' self.log.info(step_txt) create_sql = f"drop user if exists {self.com_user2} cascade; " \ f"create user {self.com_user2} password '{self.pwd}';" self.log.info(create_sql) create_result = self.pri_sh.execut_db_sql(create_sql) self.log.info(create_result) self.assertIn(self.constant.CREATE_ROLE_SUCCESS_MSG, create_result, "执行失败" + step_txt) step_txt = '----step3:修改A为B的直接成员; expect:修改成功----' self.log.info(step_txt) grant_sql = f"revoke {self.com_user2} from 
{self.com_user1};" \ f"grant {self.com_user2} to {self.com_user1};\du;" grant_result = self.pri_sh.execut_db_sql(grant_sql, sql_type=self.connect_sys) self.log.info(grant_result) self.assertIn(self.grant_success, grant_result, "执行失败" + step_txt) step_txt = '----step4:B用户删除表空间; expect:删除失败----' self.log.info(step_txt) drop_sql = f"drop tablespace {self.tbspc_name} ;" drop_result = self.pri_sh.execut_db_sql(drop_sql, sql_type=self.connect_com2) self.log.info(drop_result) self.assertIn(self.err_flag1, drop_result, "执行失败" + step_txt) step_txt = '----step5:修改B为A的直接成员; expect:修改成功----' self.log.info(step_txt) grant_sql = f"revoke {self.com_user2} from {self.com_user1};" \ f"grant {self.com_user1} to {self.com_user2};\du;" grant_result = self.pri_sh.execut_db_sql(grant_sql, sql_type=self.connect_sys) self.log.info(grant_result) self.assertIn(self.grant_success, grant_result, "执行失败" + step_txt) step_txt = '----step6:B用户删除表空间; expect:删除成功----' self.log.info(step_txt) drop_sql = f"drop tablespace {self.tbspc_name} ;" drop_result = self.pri_sh.execut_db_sql(drop_sql, sql_type=self.connect_com2) self.log.info(drop_result) self.assertIn(self.constant.TABLESPCE_DROP_SUCCESS, drop_result, "执行失败" + step_txt) step_txt = '----step7:删除不存在的表空间; expect:删除成功----' self.log.info(step_txt) drop_result = self.pri_sh.execut_db_sql(drop_sql, sql_type=self.connect_com1) self.log.info(drop_result) self.assertIn(self.err_flag2, drop_result, "执行失败" + step_txt) def tearDown(self): self.log.info('----this is teardown----') step1_txt = '----清理表空间及用户; expect:成功----' self.log.info(step1_txt) clean_sql = f"drop tablespace if exists {self.tbspc_name}; " \ f"drop user if exists {self.com_user1} cascade; " \ f"drop user if exists {self.com_user2} cascade; " \ f"drop user if exists {self.sys_user} cascade; " clean_result = self.pri_sh.execut_db_sql(clean_sql) self.log.info(clean_result) self.log.info(f'-----{os.path.basename(__file__)} end-----') drop_user = 
clean_result.count(self.constant.DROP_ROLE_SUCCESS_MSG) drop_tbspc = clean_result.count(self.constant.TABLESPCE_DROP_SUCCESS) self.assertEqual(3, drop_user, "执行失败" + step1_txt) self.assertEqual(1, drop_tbspc, "执行失败" + step1_txt)
extern crate db_service;
extern crate message_handler;
extern crate schema;
extern crate utils;

use super::aura_message_sender::AuraMessageSender;
use super::aura_messages::{AuraMessageTypes, AuthorBlock, BlockAcceptance, RoundOwner};
use super::config::initialize_config;
use db_service::db_fork_ref::SchemaFork;
use db_service::db_layer::{fork_db, patch_db, snapshot_db};
use db_service::db_snapshot_ref::SchemaSnap;
use exonum_merkledb::{ObjectHash, Snapshot};
use futures::{channel::mpsc::*, executor::*, future, prelude::*, task::*};
use message_handler::messages::MessageTypes;
use schema::block::SignedBlock;
use schema::transaction_pool::{TxnPool, POOL};
use std::collections::{HashMap, HashSet};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;
use std::time::SystemTime;
use utils::configreader::Configuration;
use utils::keypair::KeypairType;
use utils::serializer::{deserialize, serialize, Deserialize, Serialize};

/// Per-peer state for the AURA (Authority Round) consensus engine.
pub struct Aura {
    // peer identity keypair, used to sign blocks and consensus messages
    keypair: KeypairType,
    // hex-encoded public key of this peer
    pk: String,
    // validator public key -> author-ordering id
    validator_mapping: HashMap<String, u64>,
    // 10th part of step_time (in millis)
    leader_epoch: u64,
    // empty block acceptance: when true, empty blocks are sealed as well
    // NOTE(review): exact sealing semantics depend on propose_block — confirm
    force_sealing: bool,
    // last start time of consensus (seconds since UNIX epoch)
    start_time: u64,
}

/// WaitingBLocksQueue stores the queue of not-yet-finalised blocks and the
/// acceptance state of the block currently being proposed.
/// It is used both when creating new blocks and when verifying incoming ones.
/// Queued blocks are pushed to permanent state after "b" blocks get majority.
pub struct WaitingBLocksQueue { // waiting blocks queue pub queue: Vec<SignedBlock>, // ongoing block proposal acceptance pub last_block_acceptance: HashSet<String>, // ongoing block proposal hash pub last_block_hash: String, } impl WaitingBLocksQueue { pub fn new() -> WaitingBLocksQueue { WaitingBLocksQueue { queue: Vec::new(), last_block_acceptance: HashSet::new(), last_block_hash: String::from("temp_hash"), } } } /// AURA consensus key-details pub struct MetaData { // validator's pool size validator_pool_size: u64, // validator's public_key mapping with auther_ordering number validator_mapping: HashMap<String, u64>, // peer keypair kp: KeypairType, // data sender in P2P system sender: Sender<Option<MessageTypes>>, // last start time of consensus (in seconds) start_time: u64, // round number at the time of last restart round_number: u64, // step time of round (in seconds) step_time: u64, // waiting blocks queue size block_queue_size: usize, // peer public key public_key: String, } // AURA consensus custom headers for the signed block #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct CustomHeaders { timestamp: u64, round_number: u64, } impl Aura { // init_state will create genesis block if predefined storage is empty // or if storage is not empty it will start from previous state // read genesis block details from config file (future work) fn init_state(&mut self, _db_path: &String, _sender: &mut Sender<Option<MessageTypes>>) { let fork = fork_db(); { let mut schema = SchemaFork::new(&fork); if schema.blockchain_length() == 0 { let custom_headers: CustomHeaders = CustomHeaders { timestamp: self.start_time, round_number: 0, }; let custom_headers: Vec<u8> = match serialize(&custom_headers) { Ok(value) => value, Err(_) => Vec::new(), }; let genesis_signed_block = schema.initialize_db(custom_headers, self.start_time as u128); info!( "genesis block created with hash {:?}", genesis_signed_block.get_hash() ); } else { info!( "started from previous 
state {} {}", schema.blockchain_length(), schema.state_trie_merkle_hash() ) } } patch_db(fork); } // fn will compute what is the round number at present time fn calculate_round_number(meta_data: &MetaData) -> u64 { let current_epoch: u64 = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .unwrap() .as_secs() - meta_data.start_time; let round_count: u64 = (current_epoch / meta_data.step_time) + meta_data.round_number; round_count } // fn will compute what is the round leader at present time fn primary_leader(meta_data: &MetaData) -> String { let round_count = Aura::calculate_round_number(meta_data); let leader_id: u64 = round_count - (round_count / meta_data.validator_pool_size) * meta_data.validator_pool_size; for (pk, id) in meta_data.validator_mapping.iter() { if id.clone() == leader_id { return pk.clone(); } } panic!("Should match current identity with anyone validator identity"); } // fn will process incoming AuthorBlockEnum data fn handle_author_block_enum( author_block: AuthorBlock, waiting_blocks_queue: &mut WaitingBLocksQueue, meta_data_obj: &mut MetaData, ) { let current_leader: String = Aura::primary_leader(&meta_data_obj); // only rightful auther should propose block if current_leader != author_block.block.block.peer_id.clone() { info!("malicious author proposing block!"); return; } // validate block signature if !author_block.verify() { warn!( "malicious block proposed by author {:?}!", author_block.block.block.peer_id ); return; } // validate block height & previous block hash match waiting_blocks_queue.queue.last() { Some(last_waiting_block) => { let last_waiting_block: &SignedBlock = last_waiting_block; // if last_waiting_block.block.id + 1 != author_block.block.block.id { // warn!( // "malicious block proposed by author {:?}!", // author_block.block.block.peer_id // ); // warn!("malicious block proposed, invalid height compare to waiting block!"); // warn!( // "block should proposed on height {:?}, but got block on height {:?}", // 
last_waiting_block.block.id + 1, // author_block.block.block.id // ); // return; // } // if last_waiting_block.get_hash() != author_block.block.block.prev_hash { // warn!( // "malicious block proposed by author {:?}!", // author_block.block.block.peer_id // ); // warn!("malicious block proposed, invalid previous block hash compare to waiting block!"); // warn!( // "previous_hash shuold be {:?}, but previous hash is {:?}", // last_waiting_block.get_hash(), // author_block.block.get_hash() // ); // return; // } let custom_header: CustomHeaders = match deserialize(&author_block.block.block.custom_headers) { Ok(value) => value, Err(_) => { warn!("block custom headers couldn't deserialized"); return; } }; let last_custom_header: CustomHeaders = match deserialize(&last_waiting_block.block.custom_headers) { Ok(value) => value, Err(_) => { if last_waiting_block.block.id == 0 { CustomHeaders { timestamp: 0, round_number: 0, } } else { return; } } }; if custom_header.round_number <= last_custom_header.round_number { warn!( "malicious block proposed by author {:?}!", author_block.block.block.peer_id ); warn!( "malicious block proposed, invalid round number compare to waiting block!" 
); warn!( "block should proposed higher round number then {:?}, but got block on round number {:?}", last_custom_header.round_number, custom_header.round_number ); return; } if custom_header.timestamp <= last_custom_header.timestamp { warn!( "malicious block proposed by author {:?}!", author_block.block.block.peer_id ); warn!("malicious block proposed, invalid timestamp compare to waiting block!"); warn!( "block should proposed higher timestamp then {:?}, but got block on timestamp {:?}", last_custom_header.timestamp, custom_header.timestamp ); return; } } None => { let snapshot: Box<dyn Snapshot> = snapshot_db(); { let schema = SchemaSnap::new(&snapshot); // if schema.get_blockchain_length() != author_block.block.block.id { // warn!( // "malicious block proposed by author {:?}!", // author_block.block.block.peer_id // ); // warn!("malicious block proposed, invalid height from snapshot!"); // warn!( // "block should proposed on height {:?}, but got block on height {:?}", // schema.get_blockchain_length(), // author_block.block.block.id // ); // return; // } // if schema.get_root_block_hash() != author_block.block.block.prev_hash { // warn!( // "malicious block proposed by author {:?}!", // author_block.block.block.peer_id // ); // warn!("malicious block proposed, invalid previous block hash compare to snapshot!"); // warn!( // "previous_hash shuold be {:?}, but previous hash is {:?}", // schema.get_root_block_hash(), // author_block.block.get_hash() // ); // return; // } let custom_header: CustomHeaders = match deserialize(&author_block.block.block.custom_headers) { Ok(value) => value, Err(_) => { warn!("block custom headers couldn't deserialized"); return; } }; let root_block: SignedBlock = match schema.get_root_block() { Some(block) => block, None => { warn!("previous block couldn't found"); return; } }; let last_custom_header: CustomHeaders = match deserialize(&root_block.block.custom_headers) { Ok(value) => value, Err(_) => { if root_block.block.id == 0 { 
CustomHeaders { timestamp: 0, round_number: 0, } } else { warn!("block custom headers couldn't deserialized"); return; } } }; if custom_header.round_number <= last_custom_header.round_number { warn!( "malicious block proposed by author {:?}!", author_block.block.block.peer_id ); warn!( "malicious block proposed, invalid round number compare to snapshot!" ); warn!( "block should proposed higher round number then {:?}, but got block on round number {:?}", last_custom_header.round_number, custom_header.round_number ); return; } if custom_header.timestamp <= last_custom_header.timestamp { warn!( "malicious block proposed by author {:?}!", author_block.block.block.peer_id ); warn!("malicious block proposed, invalid timestamp compare to snapshot!"); warn!( "block should proposed higher timestamp then {:?}, but got block on timestamp {:?}", last_custom_header.timestamp, custom_header.timestamp ); return; } } } } // we cannot validate block state // let author_block: AuthorBlock = AuthorBlock::create(signed_block.clone()); // AuraMessageSender::send_author_block_msg(sender, author_block); let block_acceptance: BlockAcceptance = BlockAcceptance::create(&meta_data_obj.kp, author_block.block.get_hash()); AuraMessageSender::send_block_acceptance_msg(&mut meta_data_obj.sender, block_acceptance); info!( "block accepted, created by {:?} with id {:?}, & hash {:?}", author_block.block.block.peer_id, author_block.block.block.id, author_block.block.get_hash().to_hex() ); waiting_blocks_queue.last_block_hash = author_block.block.get_hash().to_hex(); waiting_blocks_queue.last_block_acceptance.clear(); waiting_blocks_queue .last_block_acceptance .insert(meta_data_obj.public_key.clone()); waiting_blocks_queue .last_block_acceptance .insert(author_block.block.block.peer_id.clone()); waiting_blocks_queue.queue.push(author_block.block); } // fn will process incoming BlockAcceptenceEnum data fn handle_block_acceptence_enum( block_acceptance: BlockAcceptance, waiting_blocks_queue: &mut 
WaitingBLocksQueue, meta_data_obj: &MetaData, ) { // data coming from verifed validator if !meta_data_obj .validator_mapping .contains_key(&block_acceptance.public_key) { warn!( "Data coming from untrusted source {:?}", block_acceptance.public_key ); return; } // block acceptance for the correct_block if waiting_blocks_queue.last_block_hash != block_acceptance.block_hash.to_hex() { warn!("Data coming for different block"); warn!( "current waiting block hash {:?} & data came for {:?}", waiting_blocks_queue.last_block_hash, block_acceptance.block_hash.to_hex() ); return; } // verify data signature using sender's public key if !block_acceptance.verify() { warn!( "malicious aceeptance came from {:?}", block_acceptance.public_key ); return; } info!( "valid block acceptance came from-> {:?}", block_acceptance.public_key ); waiting_blocks_queue .last_block_acceptance .insert(block_acceptance.public_key); } // fn will handle incoming RoundOwnerEnum data fn handle_round_owner_enum( round_owner: RoundOwner, waiting_blocks_queue: &mut WaitingBLocksQueue, meta_data_obj: &MetaData, ) { if waiting_blocks_queue.queue.len() == 0 { info!("no waiting block to check aceeptance"); return; } let current_owner: String = Aura::primary_leader(&meta_data_obj); if current_owner != round_owner.public_key { warn!( "malicious round owner claim created by {:?}", round_owner.public_key ); return; } if round_owner.verify(meta_data_obj.step_time) { if String::from("temp_hash") != waiting_blocks_queue.last_block_hash.clone() { info!( "block accepted by {:?}", waiting_blocks_queue.last_block_acceptance ); let got_votes = waiting_blocks_queue.last_block_acceptance.len() as u64; let minimum_votes: u64 = (meta_data_obj.validator_pool_size * 2) / 3; if minimum_votes <= got_votes { waiting_blocks_queue.last_block_acceptance.clear(); waiting_blocks_queue.last_block_hash = String::from("temp_hash"); // let signed_block: &SignedBlock = waiting_blocks_queue.queue.last().unwrap(); // 
POOL.sync_pool(&signed_block.block.txn_pool); } else { let length = waiting_blocks_queue.queue.len(); waiting_blocks_queue.last_block_hash = String::from("temp_hash"); warn!( "last block got votes {:?} and required {:?}", got_votes, minimum_votes ); waiting_blocks_queue.queue.remove(length - 1); error!("last block couldn't got majority either delete the block or restart the consensus"); } } else { info!("last block hash is NULL, can't initiate block acceptance process"); } Aura::finalise_waiting_blocks(waiting_blocks_queue, &meta_data_obj) } else { warn!("data is either tempered or delayed/replayed"); } } // fn will update waiting blocks in sequence to local db fn process_blocks(blocks_count: usize, waiting_blocks_queue: &mut WaitingBLocksQueue) { let fork = fork_db(); { let mut schema = SchemaFork::new(&fork); for each in waiting_blocks_queue.queue.iter() { if !schema.update_block(each) { return; } } } let mut blocks_count = blocks_count; let fork = fork_db(); { let mut schema = SchemaFork::new(&fork); while blocks_count > 0 { let signed_block: SignedBlock = waiting_blocks_queue.queue.remove(0); if schema.update_block(&signed_block) { POOL.sync_pool(&signed_block.block.txn_pool); info!( "block with id {} & hash {} added in database", signed_block.block.id, signed_block.object_hash() ); } else { error!( "block with id {} & hash {} couldn't added in database", signed_block.block.id, signed_block.object_hash() ); // // remove all increasing index order blocks // let mut current_block_index: u64 = signed_block.block.id; // loop { // match waiting_blocks_queue.queue.get(0) { // Some(waiting_block) => { // let waiting_block: &SignedBlock = waiting_block; // if waiting_block.block.id > current_block_index { // current_block_index = waiting_block.block.id; // waiting_blocks_queue.queue.remove(0); // } // else { // break; // } // } // None => { // waiting_blocks_queue.last_block_acceptance.clear(); // waiting_blocks_queue.last_block_hash = String::from("temp_hash"); // 
break; // } // } // } break; } blocks_count = blocks_count - 1; } } if blocks_count == 0 { patch_db(fork); info!("Blocks are updated in the database"); } } // fn will finalise blocks periodically in permanent db fn finalise_waiting_blocks( waiting_blocks_queue: &mut WaitingBLocksQueue, meta_data_obj: &MetaData, ) { let queue_length: usize = waiting_blocks_queue.queue.len(); if queue_length > meta_data_obj.block_queue_size + 1 { info!("queue length {:?}", queue_length); let blocks_to_be_confirmed: usize = queue_length / 3 * 2; Aura::process_blocks(blocks_to_be_confirmed, waiting_blocks_queue); info!( "after processing queue length {:?}", waiting_blocks_queue.queue.len() ); } } // fn will listen incoming data from other peers via P2P system fn aura_msg_receiver( waiting_blocks_queue: Arc<Mutex<WaitingBLocksQueue>>, meta_data: Arc<Mutex<MetaData>>, rx: Arc<Mutex<Receiver<Option<Vec<u8>>>>>, ) { thread::spawn(move || { block_on(future::poll_fn(move |cx: &mut Context| { loop { match rx.lock().unwrap().poll_next_unpin(cx) { Poll::Ready(Some(msg)) => match msg { None => info!("Empty msg received !"), Some(msgtype) => { if let Ok(msgtype) = deserialize::<AuraMessageTypes>(msgtype.as_slice()) { match msgtype { AuraMessageTypes::AuthorBlockEnum(data) => { let author_block: AuthorBlock = data; info!("AuthorBlock data received"); let mut waiting_blocks_queue_obj = waiting_blocks_queue.lock().unwrap(); let mut meta_data_obj = meta_data.lock().unwrap(); Aura::handle_author_block_enum( author_block, &mut waiting_blocks_queue_obj, &mut meta_data_obj, ); } AuraMessageTypes::BlockAcceptanceEnum(data) => { let block_acceptance: BlockAcceptance = data; info!("BlockAcceptance data received"); let mut waiting_blocks_queue_obj = waiting_blocks_queue.lock().unwrap(); let meta_data_obj = meta_data.lock().unwrap(); Aura::handle_block_acceptence_enum( block_acceptance, &mut waiting_blocks_queue_obj, &meta_data_obj, ); } AuraMessageTypes::RoundOwnerEnum(data) => { let round_config: RoundOwner 
= data; info!("RoundOwner data received"); let mut waiting_blocks_queue_obj = waiting_blocks_queue.lock().unwrap(); let meta_data_obj = meta_data.lock().unwrap(); Aura::handle_round_owner_enum( round_config, &mut waiting_blocks_queue_obj, &meta_data_obj, ); } } } } }, Poll::Ready(None) => { info!("channel closed !"); return Poll::Ready(1); } Poll::Pending => break, } } Poll::Pending })); }); } // fn will create new block to propose after processing waiting blocks fn propose_block( &self, waiting_blocks_queue: &mut WaitingBLocksQueue, meta_data: &MetaData, ) -> SignedBlock { loop { let fork = fork_db(); let mut schema = SchemaFork::new(&fork); let mut update_success_flag: bool = true; for each_block in waiting_blocks_queue.queue.iter() { debug!("blocks order {:?}", each_block.block.id); if !schema.update_block(each_block) { update_success_flag = false; break; } } if update_success_flag { let timestamp: u64 = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .unwrap() .as_secs(); let round_number: u64 = Aura::calculate_round_number(meta_data); let custom_headers: CustomHeaders = CustomHeaders { timestamp, round_number, }; let custom_headers: Vec<u8> = match serialize(&custom_headers) { Ok(value) => value, Err(_) => Vec::new(), }; if self.force_sealing { return schema.create_block(&self.keypair, custom_headers); } else { let (_fork_instance, signed_block) = schema.forge_new_block(&self.keypair, custom_headers); return signed_block; } } else { // init sync_state_flag with false #[allow(unused_assignments)] let mut sync_state_flag: bool = false; let fork_to_sync = fork_db(); { let mut sync_schema = SchemaFork::new(&fork_to_sync); sync_state_flag = sync_schema.sync_state(); } patch_db(fork_to_sync); // flush the waiting blocks queue if sync_state_flag { if waiting_blocks_queue.queue.len() > 0 { waiting_blocks_queue.queue.remove(0); } else { waiting_blocks_queue.last_block_acceptance.clear(); waiting_blocks_queue.last_block_hash = String::from("temp_hash"); } } } 
} } // fn will create new blocks periodically after checking round ownership fn state_machine( &mut self, waiting_blocks_queue: Arc<Mutex<WaitingBLocksQueue>>, meta_data: Arc<Mutex<MetaData>>, sender: &mut Sender<Option<MessageTypes>>, ) { let mut wait_till_one_round: u64 = (self.validator_mapping.len() * 10) as u64; wait_till_one_round = wait_till_one_round * self.leader_epoch; thread::sleep(Duration::from_millis(wait_till_one_round)); let fork = fork_db(); { let mut schema = SchemaFork::new(&fork); schema.sync_state(); } patch_db(fork); #[allow(unused_assignments)] let mut leader_flag = false; loop { /* calculate round number and find out who is the leader need to check continuously if node is the leader, propose block on top of waiting block queue */ { let mut am_i_leader: bool = false; { let meta_data_obj = meta_data.lock().unwrap(); let current_leader: String = Aura::primary_leader(&meta_data_obj); if current_leader == self.pk.clone() { am_i_leader = true; } } if am_i_leader { info!("I'm the leader NOW!!"); let round_owner: RoundOwner = RoundOwner::create(&self.keypair); AuraMessageSender::send_round_owner_msg(sender, round_owner.clone()); thread::sleep(Duration::from_millis(self.leader_epoch)); { let meta_data_obj = meta_data.lock().unwrap(); let mut waiting_blocks_queue_obj = waiting_blocks_queue.lock().unwrap(); Aura::handle_round_owner_enum( round_owner, &mut waiting_blocks_queue_obj, &meta_data_obj, ); let signed_block: SignedBlock = self.propose_block(&mut waiting_blocks_queue_obj, &meta_data_obj); info!( "new block created.. 
id {},hash {}", signed_block.block.id, signed_block.object_hash() ); let author_block: AuthorBlock = AuthorBlock::create(signed_block.clone()); AuraMessageSender::send_author_block_msg(sender, author_block); waiting_blocks_queue_obj.last_block_hash = signed_block.get_hash().to_hex(); waiting_blocks_queue_obj.queue.push(signed_block); waiting_blocks_queue_obj.last_block_acceptance.clear(); waiting_blocks_queue_obj .last_block_acceptance .insert(meta_data_obj.public_key.clone()); } leader_flag = true; } } if leader_flag { leader_flag = false; thread::sleep(Duration::from_millis(self.leader_epoch * 10)); } else { thread::sleep(Duration::from_millis(self.leader_epoch)); } } } pub fn init_aura_consensus( config: &Configuration, consensus_file_path: &str, sender: &mut Sender<Option<MessageTypes>>, msg_receiver: Arc<Mutex<Receiver<Option<Vec<u8>>>>>, ) { initialize_config(consensus_file_path); let aura_config: &crate::config::Configuration = &crate::config::AURA_CONFIG; let mut validator_mapping: HashMap<String, u64> = HashMap::new(); for i in 0..aura_config.validator_set.len() { validator_mapping.insert( aura_config.validator_set[i].clone(), aura_config.validator_ids[i].clone(), ); } let mut aura_obj = Aura { keypair: config.node.keypair.clone(), pk: hex::encode(config.node.keypair.public().encode()), validator_mapping: validator_mapping.clone(), leader_epoch: 100 * aura_config.step_time, force_sealing: aura_config.force_sealing, start_time: aura_config.start_time, }; let consensus_meta_data = MetaData { validator_pool_size: aura_config.validator_set.len() as u64, validator_mapping, kp: config.node.keypair.clone(), public_key: hex::encode(config.node.keypair.public().encode()), sender: sender.clone(), start_time: aura_config.start_time, round_number: aura_config.round_number, step_time: aura_config.step_time, block_queue_size: aura_config.block_list_size, }; let meta_data = Arc::new(Mutex::new(consensus_meta_data)); let waiting_blocks_queue: 
Arc<Mutex<WaitingBLocksQueue>> = Arc::new(Mutex::new(WaitingBLocksQueue::new())); Aura::aura_msg_receiver( waiting_blocks_queue.clone(), meta_data.clone(), msg_receiver, ); if config.node.genesis_block { aura_obj.init_state(&config.db.dbpath, &mut sender.clone()); } else { let fork = fork_db(); { let mut schema = SchemaFork::new(&fork); schema.sync_state(); } patch_db(fork); } aura_obj.state_machine(waiting_blocks_queue.clone(), meta_data, sender); } }
-- Users: one row per account; surrogate auto-increment key.
CREATE TABLE users (
  id bigint not null AUTO_INCREMENT,
  name varchar(255),
  primary key (id)
);

-- Addresses: each row optionally belongs to one user
-- (user_id is nullable; FK enforces referential integrity when set).
CREATE TABLE addresses (
  id bigint not null AUTO_INCREMENT,
  user_id bigint null,
  primary key (id),
  FOREIGN KEY (user_id) REFERENCES users(id)
);
#!/usr/bin/env python3.8
"""Download daily weather history CSVs from Environment Canada and store
them xz-compressed under stations/<id//1000>/<id>/<year>.csv.xz, tracking
last-refresh times in a small sqlite key/value store."""
import argparse
import csv
import dataclasses
import datetime as dt
import lzma
import os
import requests
import sqlitedict
import threading
import time as timelib
from concurrent.futures import ThreadPoolExecutor


@dataclasses.dataclass
class InventoryStation:
    """One row of Environment Canada's "Station Inventory EN.csv"."""
    name: str
    province: str
    climateId: str
    stationId: int
    wmoId: int
    tcId: str
    latitudeDecimalDegrees: float
    longitudeDecimalDegrees: float
    latitude: int
    longitude: int
    elevation: float
    firstYear: int
    lastYear: int
    hlyFirstYear: int
    hlyLastYear: int
    dlyFirstYear: int
    dlyLastYear: int
    mlyFirstYear: int
    mlyLastYear: int

    def dailyYearsIter(self):
        """Yield every year (inclusive) for which daily data exists."""
        if self.dlyFirstYear is not None:
            yield from range(self.dlyFirstYear, self.dlyLastYear + 1)


pool = ThreadPoolExecutor(max_workers=8)
futures = []


class LocalSession(threading.local):
    """One requests.Session per worker thread (Session is not thread-safe)."""

    def __init__(self):
        super().__init__()
        self.session = requests.Session()


threadLocal = LocalSession()
# Maps local file path -> unix time of last successful download.
stationRefresh = sqlitedict.SqliteDict('StationRefresh.db', autocommit=True)


def getOneFile(url, dirname, localPath):
    """Fetch one CSV from `url` and store it xz-compressed at `localPath`.

    Records the download time in stationRefresh on success.
    """
    print(url)
    os.makedirs(dirname, exist_ok=True)
    response = threadLocal.session.get(url, timeout=10)
    # NOTE(review): HTTP error responses are stored as if they were data;
    # consider response.raise_for_status() — confirm desired behaviour first,
    # since an exception here would abort the whole run via future.result().
    # Context manager ensures the xz stream is flushed and closed even if
    # the write raises (previously the handle leaked on error).
    with lzma.open(localPath, 'wb') as f:
        f.write(response.content)
    stationRefresh[localPath] = timelib.time()
    # print('done')


def calcRefresh(year, lastRefresh):
    """Decide whether the file for `year` is stale given its last refresh.

    Policy: current (and year-boundary) data hourly, last year's data
    monthly, everything else at least yearly.
    """
    today = dt.date.today()
    threeDaysAgo = today - dt.timedelta(days=3)
    if year == today.year or year == threeDaysAgo.year:
        if timelib.time() - lastRefresh > 3600:
            # It's the last 3 days, refresh once per hour
            return True
    elif year == today.year - 1:
        if timelib.time() - lastRefresh > 3600 * 24 * 30:
            # It's last year, refresh once per month
            return True
    elif timelib.time() - lastRefresh > 3600 * 24 * 365:
        # Refresh at least once per year
        return True
    return False


def readCsvData(args):
    """Return a csv.reader over the inventory file, skipping the preamble
    lines that precede the `"Name"` header row."""
    csvData = (
        open(args.station_inventory)
        .read()
        .split('\n')
    )
    while not csvData[0].startswith('"Name"'):
        csvData.pop(0)
    return csv.reader(csvData)


def getStation(tokens):
    """Build an InventoryStation from one CSV row.

    Empty cells become None; non-empty cells are coerced using each
    dataclass field's declared type. Mutates `tokens` in place.
    """
    fields = dataclasses.fields(InventoryStation)
    for i, field in enumerate(fields):
        if len(tokens[i]) == 0:
            tokens[i] = None
        else:
            tokens[i] = field.type(tokens[i])
    station = InventoryStation(*tokens)
    return station


def update(args):
    """Scan the inventory and enqueue downloads for every stale year file."""
    for rowIndex, tokens in enumerate(readCsvData(args)):
        if rowIndex == 0:
            # Guard against upstream format changes: the column layout must
            # match what InventoryStation expects, in order.
            expectedHeader = [
                "Name", "Province", "Climate ID", "Station ID", "WMO ID",
                "TC ID", "Latitude (Decimal Degrees)",
                "Longitude (Decimal Degrees)", "Latitude", "Longitude",
                "Elevation (m)", "First Year", "Last Year",
                "HLY First Year", "HLY Last Year",
                "DLY First Year", "DLY Last Year",
                "MLY First Year", "MLY Last Year"
            ]
            assert tokens == expectedHeader
            continue
        if len(tokens) == 0:
            continue
        station = getStation(tokens)
        # Shard station directories by thousands to keep them small.
        dirname = f'stations/{station.stationId//1000}/{station.stationId}'
        # print(f'{station.name.title()}: {dirname}: {station.dlyFirstYear}-{station.dlyLastYear}')
        for year in station.dailyYearsIter():
            fname = f'{dirname}/{year}.csv.xz'
            if args.force is False:
                lastRefresh = stationRefresh.get(fname, 0)
                if calcRefresh(year, lastRefresh) is False:
                    continue
            url = (
                f'https://climate.weather.gc.ca/climate_data/bulk_data_e.html'
                f'?format=csv&stationID={station.stationId}&Year={year}'
                f'&Month=1&Day=1&timeframe=2'
            )
            futures.append(pool.submit(getOneFile, url, dirname, fname))
    # Drain in submission order so exceptions surface promptly.
    while len(futures):
        futures.pop(0).result()


def main():
    parser = argparse.ArgumentParser(
        description='Download weather history from Environment Canada.')
    parser.add_argument('--force', action='store_true',
                        help='Redownload all data, regardless of age.')
    parser.add_argument('--station-inventory',
                        default='Station Inventory EN.csv',
                        help='Where to read station data from.')
    args = parser.parse_args()
    update(args)


if __name__ == '__main__':
    main()
import React from 'react'; import { StyleSheet, View, ScrollView } from 'react-native'; import { Header } from '../../components/header'; import LoginScreen2 from './screen2'; import LoginScreen3 from './screen3'; type LoginComponentProps = {}; const Login: React.FunctionComponent<LoginComponentProps> = () => { return ( <> <Header title="Login Example" /> <View style={styles.container}> <ScrollView> <LoginScreen2 /> <LoginScreen3 /> </ScrollView> </View> </> ); }; const styles = StyleSheet.create({ container: { flex: 1, backgroundColor: 'black', position: 'relative', }, }); export default Login;
use super::file_location::*;

///
/// Indicates an error encountered while parsing a SAFAS file
///
#[derive(Debug, Clone, PartialEq)]
pub enum ParseError {
    /// Found an unimplemented feature
    Unimplemented,

    /// An internal error occurred inside the parser
    InternalError(FileLocation, String),

    /// A value is not valid as a character
    InvalidCharacter(FileLocation, String),

    /// Invalid character in a bit number
    NotABitNumber(FileLocation, String),

    /// The bit count/size of a number is not set to a valid value
    InvalidBitCount(FileLocation, String),

    /// Invalid character in a hex number
    NotAHexNumber(FileLocation, String),

    /// Invalid character in an integer number
    NotAnIntegerNumber(FileLocation, String),

    /// A close parenthesis was found when one was not expected
    UnexpectedCloseParen(FileLocation),

    /// An expected close parenthesis could not be found
    MissingCloseParen(FileLocation)
}
package com.pawanjeswani.superrvadapter

import android.view.View

/**
 * Contract for adapters that interleave "real" items with extra views placed
 * between elements.
 *
 * @param RH view-holder type for real item views
 * @param BH view-holder type for the views placed between elements
 */
internal interface BinderAbstract<RH, BH> {

    /**
     * Creates the view holder for a view placed between elements.
     *
     * @param view the inflated between-elements view
     * @return the between-elements view holder
     */
    fun onCreateViewHolderBetweenElements(view: View): BH

    /**
     * Creates the view holder for a real item view.
     *
     * @param view the inflated real item view
     * @return the real-item view holder
     */
    fun onCreateRealViewHolder(view: View): RH

    /**
     * Creates the list of other items using DummyObject entries.
     */
    fun createOtherItemList()
}
require_relative 'response'

# Maps an incoming request onto a handler class defined under ./routes and
# invokes the requested action on it.
module Router
  BASE_ROUTES_FOLDER = './routes'
  DEFAULT_INDEX_ACTION = :index

  module_function

  # Entry point: resolve the request to a handler and action, and build a
  # response from whatever the action returns. Unknown paths or actions
  # fall through to a 404.
  def dispatch(request)
    route = lookup(request)
    # public_send (was: send): action names derived from the URL must not be
    # able to invoke private methods on the handler. A private name now
    # raises NoMethodError and is rescued into a 404 below.
    response = route.public_send(action(request))
    Response.build(response)
  rescue NotFound, NoMethodError
    Response.not_found
  end

  # TODO: Hide this method from outside the module
  # Loads ./routes/<first-segment>.rb and instantiates the class named after
  # the capitalized first path segment, passing it the request.
  def lookup(request)
    primary_path = request.path_segments.first
    target_class = primary_path.capitalize
    file_path = File.join(BASE_ROUTES_FOLDER, "#{primary_path}.rb")

    # File.exist? (was: File.exists?, a deprecated alias removed in Ruby 3.2).
    raise NotFound.new(request) unless File.exist?(file_path)

    require file_path
    Object.const_get(target_class).new(request)
  rescue LoadError
    # NOTE(review): when the file exists but fails to load, this rescue
    # returns nil rather than re-raising — confirm that is intended.
    raise NotFound.new(request) unless File.exist?(file_path)
  end

  # The second path segment selects the action; default to :index.
  def action(request)
    request.path_segments[1] || DEFAULT_INDEX_ACTION
  end

  class NotFound < StandardError; end
end
; RUN: opt %s -scalarizer -scalarize-load-store -S | FileCheck %s
; Verifies that the scalarizer pass splits a <4 x i32> load/add/store into
; four scalar i32 operations, while preserving the !dbg locations and !tbaa
; metadata of the vector instructions and assigning per-element alignments
; (16/4/8/4) derived from the original 16-byte vector alignment.
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

; Function Attrs: nounwind uwtable
define void @f1(<4 x i32>* nocapture %a, <4 x i32>* nocapture readonly %b, <4 x i32>* nocapture readonly %c) #0 {
; CHECK: @f1(
; CHECK: %a.i0 = bitcast <4 x i32>* %a to i32*
; CHECK: %a.i1 = getelementptr i32* %a.i0, i32 1
; CHECK: %a.i2 = getelementptr i32* %a.i0, i32 2
; CHECK: %a.i3 = getelementptr i32* %a.i0, i32 3
; CHECK: %c.i0 = bitcast <4 x i32>* %c to i32*
; CHECK: %c.i1 = getelementptr i32* %c.i0, i32 1
; CHECK: %c.i2 = getelementptr i32* %c.i0, i32 2
; CHECK: %c.i3 = getelementptr i32* %c.i0, i32 3
; CHECK: %b.i0 = bitcast <4 x i32>* %b to i32*
; CHECK: %b.i1 = getelementptr i32* %b.i0, i32 1
; CHECK: %b.i2 = getelementptr i32* %b.i0, i32 2
; CHECK: %b.i3 = getelementptr i32* %b.i0, i32 3
; CHECK: tail call void @llvm.dbg.value(metadata <4 x i32>* %a, i64 0, metadata !{{[0-9]+}}, metadata {{.*}}), !dbg !{{[0-9]+}}
; CHECK: tail call void @llvm.dbg.value(metadata <4 x i32>* %b, i64 0, metadata !{{[0-9]+}}, metadata {{.*}}), !dbg !{{[0-9]+}}
; CHECK: tail call void @llvm.dbg.value(metadata <4 x i32>* %c, i64 0, metadata !{{[0-9]+}}, metadata {{.*}}), !dbg !{{[0-9]+}}
; CHECK: %bval.i0 = load i32* %b.i0, align 16, !dbg ![[TAG1:[0-9]+]], !tbaa ![[TAG2:[0-9]+]]
; CHECK: %bval.i1 = load i32* %b.i1, align 4, !dbg ![[TAG1]], !tbaa ![[TAG2]]
; CHECK: %bval.i2 = load i32* %b.i2, align 8, !dbg ![[TAG1]], !tbaa ![[TAG2]]
; CHECK: %bval.i3 = load i32* %b.i3, align 4, !dbg ![[TAG1]], !tbaa ![[TAG2]]
; CHECK: %cval.i0 = load i32* %c.i0, align 16, !dbg ![[TAG1]], !tbaa ![[TAG2]]
; CHECK: %cval.i1 = load i32* %c.i1, align 4, !dbg ![[TAG1]], !tbaa ![[TAG2]]
; CHECK: %cval.i2 = load i32* %c.i2, align 8, !dbg ![[TAG1]], !tbaa ![[TAG2]]
; CHECK: %cval.i3 = load i32* %c.i3, align 4, !dbg ![[TAG1]], !tbaa ![[TAG2]]
; CHECK: %add.i0 = add i32 %bval.i0, %cval.i0, !dbg ![[TAG1]]
; CHECK: %add.i1 = add i32 %bval.i1, %cval.i1, !dbg ![[TAG1]]
; CHECK: %add.i2 = add i32 %bval.i2, %cval.i2, !dbg ![[TAG1]]
; CHECK: %add.i3 = add i32 %bval.i3, %cval.i3, !dbg ![[TAG1]]
; CHECK: store i32 %add.i0, i32* %a.i0, align 16, !dbg ![[TAG1]], !tbaa ![[TAG2]]
; CHECK: store i32 %add.i1, i32* %a.i1, align 4, !dbg ![[TAG1]], !tbaa ![[TAG2]]
; CHECK: store i32 %add.i2, i32* %a.i2, align 8, !dbg ![[TAG1]], !tbaa ![[TAG2]]
; CHECK: store i32 %add.i3, i32* %a.i3, align 4, !dbg ![[TAG1]], !tbaa ![[TAG2]]
; CHECK: ret void
entry:
  tail call void @llvm.dbg.value(metadata <4 x i32>* %a, i64 0, metadata !15, metadata !{}), !dbg !20
  tail call void @llvm.dbg.value(metadata <4 x i32>* %b, i64 0, metadata !16, metadata !{}), !dbg !20
  tail call void @llvm.dbg.value(metadata <4 x i32>* %c, i64 0, metadata !17, metadata !{}), !dbg !20
  %bval = load <4 x i32>* %b, align 16, !dbg !21, !tbaa !22
  %cval = load <4 x i32>* %c, align 16, !dbg !21, !tbaa !22
  %add = add <4 x i32> %bval, %cval, !dbg !21
  store <4 x i32> %add, <4 x i32>* %a, align 16, !dbg !21, !tbaa !22
  ret void, !dbg !25
}

; Function Attrs: nounwind readnone
declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1

attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }

; Debug metadata below uses the pre-3.6 string-encoded ("0x…") node format.
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!18, !26}
!llvm.ident = !{!19}

!0 = !{!"0x11\0012\00clang version 3.4 (trunk 194134) (llvm/trunk 194126)\001\00\000\00\000", !1, !2, !2, !3, !2, !2} ; [ DW_TAG_compile_unit ] [/home/richards/llvm/build//tmp/add.c] [DW_LANG_C99]
!1 = !{!"/tmp/add.c", !"/home/richards/llvm/build"}
!2 = !{i32 0}
!3 = !{!4}
!4 = !{!"0x2e\00f1\00f1\00\003\000\001\000\006\00256\001\004", !1, !5, !6, null, void (<4 x i32>*, <4 x i32>*, <4 x i32>*)* @f1, null, null, !14} ; [ DW_TAG_subprogram ] [line 3] [def] [scope 4] [f]
!5 = !{!"0x29", !1} ; [ DW_TAG_file_type ] [/home/richards/llvm/build//tmp/add.c]
!6 = !{!"0x15\00\000\000\000\000\000\000", i32 0, null, null, !7, null, null, null} ; [ DW_TAG_subroutine_type ] [line 0, size 0, align 0, offset 0] [from ]
!7 = !{null, !8, !8, !8}
!8 = !{!"0xf\00\000\0064\0064\000\000", null, null, !9} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from V4SI]
!9 = !{!"0x16\00V4SI\001\000\000\000\000", !1, null, !10} ; [ DW_TAG_typedef ] [V4SI] [line 1, size 0, align 0, offset 0] [from ]
!10 = !{!"0x1\00\000\00128\00128\000\002048", null, null, !11, !12, i32 0, null, null, null} ; [ DW_TAG_array_type ] [line 0, size 128, align 128, offset 0] [vector] [from int]
!11 = !{!"0x24\00int\000\0032\0032\000\000\005", null, null} ; [ DW_TAG_base_type ] [int] [line 0, size 32, align 32, offset 0, enc DW_ATE_signed]
!12 = !{!13}
!13 = !{!"0x21\000\004"} ; [ DW_TAG_subrange_type ] [0, 3]
!14 = !{!15, !16, !17}
!15 = !{!"0x101\00a\0016777219\000", !4, !5, !8} ; [ DW_TAG_arg_variable ] [a] [line 3]
!16 = !{!"0x101\00b\0033554435\000", !4, !5, !8} ; [ DW_TAG_arg_variable ] [b] [line 3]
!17 = !{!"0x101\00c\0050331651\000", !4, !5, !8} ; [ DW_TAG_arg_variable ] [c] [line 3]
!18 = !{i32 2, !"Dwarf Version", i32 4}
!19 = !{!"clang version 3.4 (trunk 194134) (llvm/trunk 194126)"}
!20 = !MDLocation(line: 3, scope: !4)
!21 = !MDLocation(line: 5, scope: !4)
!22 = !{!23, !23, i64 0}
!23 = !{!"omnipotent char", !24, i64 0}
!24 = !{!"Simple C/C++ TBAA"}
!25 = !MDLocation(line: 6, scope: !4)
!26 = !{i32 1, !"Debug Info Version", i32 2}
(in-package :graph)

;; Sort the two endpoints of an edge ascending so undirected edges compare
;; EQUAL regardless of direction. NOTE(review): SORT is destructive on e.
(defun -sort-edge (e)
  (sort e #'<))

;; Convert a cycle (vertex list) into a list of normalized (a b) edges over
;; consecutive vertex pairs.
(defun cycle->edge-set (cycle)
  (declare (list cycle))
  (loop for a in cycle and b in (cdr cycle)
        collect (-sort-edge (list a b))))

;; Build a fresh graph containing exactly the edges in es.
(defun edge-set->graph (es)
  (declare (list es))
  (loop with grph = (make)
        for (a b) in es
        do (add grph a b)
        finally (return grph)))

;; One step of the cycle walk: from the last vertex appended to cycle, move
;; to its unvisited incident neighbour. Returns nil when the current vertex
;; does not have exactly two incident edges (not a simple cycle).
(defun -do-cycle-walk (grph visited cycle)
  (declare #.*opt-settings* (graph grph) (hash-table visited) (vector cycle))
  (let ((edges (get-incident-edges grph (vector-last cycle))))
    (when (not (= (length (the list edges)) 2))
      (return-from -do-cycle-walk nil))
    (loop named lp
          for v of-type pos-int being the hash-keys of
            (hset:make :init (alexandria:flatten edges))
          if (not (hset:mem visited v))
          do (vextend v cycle)
             (hset:add visited v)
             (return-from lp t))))

;; Return an arbitrary key of ht (iteration order dependent).
(defun -hash-table-first (ht)
  (declare (hash-table ht))
  (loop for k of-type pos-int being the hash-keys of ht
        repeat 1
        return k))

;; Reconstruct a closed cycle (vertex path) from an edge set, or nil when
;; the edges do not form a single simple cycle covering every vertex.
(defun edge-set->cycle (es &aux (grph (edge-set->graph es)))
  (declare (list es) (graph grph))
  (with-struct (graph- adj) grph
    (loop with s = (-hash-table-first adj)
          with cycle = (make-adjustable-vector :init (list s) :type 'pos-int)
          with visited = (hset:make :init (list s))
          with n of-type pos-int = (hash-table-count adj)
          until (= (length cycle) n)
          if (not (-do-cycle-walk grph visited cycle))
          do (return-from edge-set->cycle nil)
          finally (return
            (if (not (= (hash-table-count visited) (length cycle)))
                ; TODO: don't print this
                (progn (print "edge-set->cycle warning: disjoint cycle.") nil)
                (math:close-path (to-list cycle)))))))

;; Symmetric difference of two edge sets: edges in exactly one of esa/esb.
(defun edge-set-symdiff (esa esb)
  (declare (list esa esb))
  (remove-if (lambda (e) (and (member e esa :test #'equal)
                              (member e esb :test #'equal)))
             (union esa esb :test #'equal)))

(defun cycle-basis->edge-sets (basis)
  (declare (list basis))
  (loop for c of-type list in basis collect (cycle->edge-set c)))

(defun edge-sets->cycle-basis (es)
  (declare (list es))
  (loop for e of-type list in es collect (edge-set->cycle e)))

;; Total weight of an edge set under weightfx, called as (weightfx a b).
;; NOTE: "sumMing" reads as the LOOP keyword SUMMING — CL symbols are
;; case-insensitive under the default readtable.
(defun -edge-set-weight (es weightfx)
  (declare (list es) (function weightfx))
  (loop for e of-type list in es sumMing (apply weightfx e)))

;; Sort edge sets by descending total weight.
(defun -sort-edge-sets (edge-sets weightfx)
  (declare (list edge-sets) (function weightfx))
  (mapcar #'second
          (sort (loop for es of-type list in edge-sets
                      collect (list (-edge-set-weight es weightfx) es))
                #'> :key #'first)))
# Search for a key in a sorted array that has been rotated an unknown number
# of positions (e.g. [4, 5, 6, 1, 2, 3]). Returns the index, or -1 if absent.
class Problem
  def self.solution(arr, key)
    binary_search_rotated(arr, key)
  end

  # Modified binary search: in a rotated sorted array at least one half of
  # any range is properly sorted, so each step can decide which half may
  # contain the key. O(log n).
  def self.binary_search_modified_rec(arr, from, to, key)
    # assuming all the keys are unique.
    if (from > to)
      return -1
    end

    mid = from + ((to - from) / 2).floor

    if (arr[mid] == key)
      return mid
    end

    # Left half sorted and key lies within its bounds.
    if (arr[from] < arr[mid] && key < arr[mid] && key >= arr[from])
      return binary_search_modified_rec(arr, from, mid - 1, key)
    # Right half sorted and key lies within its bounds.
    elsif (arr[mid] < arr[to] && key > arr[mid] && key <= arr[to])
      return binary_search_modified_rec(arr, mid + 1, to, key)
    # Rotation point is in the left half; key was not in the sorted right
    # half (previous branch failed), so it can only be on the left.
    elsif (arr[from] > arr[mid])
      return binary_search_modified_rec(arr, from, mid - 1, key)
    # Rotation point is in the right half; search right.
    elsif (arr[to] < arr[mid])
      return binary_search_modified_rec(arr, mid + 1, to, key)
    end

    return -1
  end

  def self.binary_search_rotated(arr, key)
    return binary_search_modified_rec(arr, 0, arr.length - 1, key)
  end
end

require "./3.test"
#encoding: utf-8 require "model-base" class WeixinerInfo include DataMapper::Resource include Utils::DataMapper::Model extend Utils::DataMapper::Model include Utils::ActionLogger property :id , Serial property :subscribe, Boolean property :openid , String property :nickname , String # 用户的性别,值为 1 时是男性,值为 2 时是女性,值为 0 时是未知 property :sex , Integer property :language , String property :city , String property :province , String property :country , String property :headimgurl , Text property :subscribe_time, Integer property :unionid , String property :remark , Text belongs_to :weixiner # instance methods def human_name "微信用户信息" end end
<?php

namespace FondOfSpryker\Zed\PriceProductPriceList\Dependency\Facade;

use Generated\Shared\Transfer\PriceProductTransfer;
use Spryker\Zed\PriceProduct\Business\PriceProductFacadeInterface;

/**
 * Bridge that narrows the Spryker PriceProduct facade down to the methods
 * this module depends on.
 */
class PriceProductPriceListToPriceProductFacadeBridge implements PriceProductPriceListToPriceProductFacadeInterface
{
    /**
     * @var \Spryker\Zed\PriceProduct\Business\PriceProductFacadeInterface
     */
    protected $priceProductFacade;

    /**
     * @param \Spryker\Zed\PriceProduct\Business\PriceProductFacadeInterface $priceProductFacade
     */
    public function __construct(PriceProductFacadeInterface $priceProductFacade)
    {
        $this->priceProductFacade = $priceProductFacade;
    }

    /**
     * Delegates persistence of a price product store entry to the wrapped facade.
     *
     * @param \Generated\Shared\Transfer\PriceProductTransfer $priceProductTransfer
     *
     * @return \Generated\Shared\Transfer\PriceProductTransfer
     */
    public function persistPriceProductStore(PriceProductTransfer $priceProductTransfer): PriceProductTransfer
    {
        return $this->priceProductFacade->persistPriceProductStore($priceProductTransfer);
    }
}
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests BaseNotificationPipeline."""

import mock
import tempfile
import unittest
from datetime import datetime

import MySQLdb

from google.cloud.security.notifier.pipelines import base_notification_pipeline as bnp
from tests.unittest_utils import ForsetiTestCase


class FakePipeline(bnp.BaseNotificationPipeline):
    """Minimal concrete pipeline so the abstract base can be instantiated."""

    def run(self):
        # Fixed: the stub was declared as `def run():` without `self`, which
        # would raise TypeError if the method were ever invoked.
        pass


class BaseNotificationPipelineTest(ForsetiTestCase):
    """Tests for base_notification_pipeline."""

    @mock.patch(
        'google.cloud.security.common.data_access._db_connector.DbConnector',
        autospec=True)
    def setUp(self, mock_conn):
        """Set up a FakePipeline with stub configs; DB connector is mocked."""
        fake_global_conf = {
            'db_host': 'x',
            'db_name': 'y',
            'db_user': 'z',
        }
        fake_pipeline_conf = {
            'gcs_path': 'gs://blah'
        }
        self.fake_pipeline = FakePipeline(
            'abc', '123', None, fake_global_conf, {}, fake_pipeline_conf)

    @mock.patch(
        'google.cloud.security.common.data_access.violation_dao.ViolationDao',
        autospec=True)
    def test_get_violation_dao(self, mock_violation_dao):
        """Test _get_violation_dao()."""
        self.fake_pipeline._get_violation_dao()
        mock_violation_dao.assert_called_once_with(
            self.fake_pipeline.global_configs)

    @mock.patch.object(bnp.BaseNotificationPipeline, '_get_violation_dao')
    def test_get_violations(self, mock_violation_dao):
        """Test _get_violations() groups both violation kinds by key."""
        fake_timestamp = '1111'
        got_violations = ['a', 'b', 'c']
        got_bucket_acl_violations = ['x', 'y', 'z']
        # First DAO call returns the generic violations, second the
        # bucket-ACL violations.
        mock_get_all_violations = mock.MagicMock(
            side_effect=[got_violations, got_bucket_acl_violations])
        mock_violation_dao.return_value.get_all_violations = mock_get_all_violations

        expected = {
            'violations': got_violations,
            'bucket_acl_violations': got_bucket_acl_violations
        }
        actual = self.fake_pipeline._get_violations(fake_timestamp)

        # Fixed: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(expected, actual)


if __name__ == '__main__':
    unittest.main()
#pragma once #include "../FilaInt.h" /** * Questão 05 * Função info * Retorna por referência o maior, * o menor e a média */ void info(Fila *fila, int *maior, int *menor, float *media);
--==============================================================================
-- GPI - Gunther Pippèrr
-- Desc: get the rights on a DB role
-- Date: November 2013
--==============================================================================
set verify off
set linesize 130 pagesize 300

define ROLENAME = '&1'

prompt
prompt Parameter 1 = Role Name => &&ROLENAME.
prompt

column role format a32
column grantee format a20
column GRANTOR format a20
column PRIVILEGE format a20
column cnt format 9999
column TABLE_NAME format a20

ttitle left "Role Info" skip 2

-- Walk the role hierarchy: start from the requested role and recursively
-- expand granted sub-roles and system privileges, indenting by depth.
select lpad(' ', 2 * level) || granted_role "Role, his roles and privileges"
  from (
        /* THE USERS */
        select null grantee
              ,role granted_role
          from dba_roles
         where upper(role) like upper('&&ROLENAME.')
        /* THE ROLES TO ROLES RELATIONS */
        union
        select grantee
              ,granted_role
          from dba_role_privs
        /* THE ROLES TO PRIVILEGE RELATIONS */
        union
        select grantee
              ,privilege
          from dba_sys_privs)
 start with grantee is null
connect by grantee = prior granted_role
/

ttitle left "Object rights on this Role &&ROLENAME." skip 2

-- Object grants held by the role, aggregated per grantor/privilege/table.
select GRANTOR
      ,grantee
      ,PRIVILEGE
      ,table_name
      ,count(*) as cnt
  from DBA_TAB_PRIVS
 where grantee like upper('&&ROLENAME.')
 group by owner
         ,grantee
         ,GRANTOR
         ,PRIVILEGE
         ,table_name
 order by owner,table_name, PRIVILEGE
/

ttitle off
# Public-facing piece that lists documents belonging to selected categories,
# scoped to an organization group page.
class Organization::Public::Piece::CategorizedDocsController < Organization::Public::PieceController
  # Resolves the current piece and item before dispatch. Only group items
  # may render this piece; anything else gets an empty response.
  def pre_dispatch
    @piece = Organization::Piece::CategorizedDoc.find(Page.current_piece.id)
    @item = Page.current_item
    render plain: '' unless @item.is_a?(Organization::Group)
  end

  def index
    # 'self' restricts docs to this group's own sys group; any other filter
    # value includes all public descendant groups as well.
    sys_group_ids = case @piece.page_filter
                    when 'self'
                      [@item.sys_group.id]
                    else
                      @item.public_descendants.map { |g| g.sys_group.id }
                    end

    # Docs limited to the resolved groups and the piece's categories,
    # ordered and capped as configured on the piece.
    @docs = @piece.content.docs
                  .organized_into(sys_group_ids)
                  .categorized_into(@piece.category_ids)
                  .order(@piece.docs_order_as_hash)
                  .limit(@piece.list_count)
    @docs = GpArticle::DocsPreloader.new(@docs).preload(:public_node_ancestors)
  end
end
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from enum import IntEnum
from xdrlib import Packer, Unpacker

from ..__version__ import __issues__
# NOTE(review): this intentionally shadows the builtin ValueError with the
# SDK's own exception type — confirm against ..exceptions.
from ..exceptions import ValueError

__all__ = ["TransactionResultCode"]


class TransactionResultCode(IntEnum):
    """
    XDR Source Code
    ----------------------------------------------------------------
    enum TransactionResultCode
    {
        txFEE_BUMP_INNER_SUCCESS = 1, // fee bump inner transaction succeeded
        txSUCCESS = 0,                // all operations succeeded

        txFAILED = -1, // one of the operations failed (none were applied)

        txTOO_EARLY = -2,         // ledger closeTime before minTime
        txTOO_LATE = -3,          // ledger closeTime after maxTime
        txMISSING_OPERATION = -4, // no operation was specified
        txBAD_SEQ = -5,           // sequence number does not match source account

        txBAD_AUTH = -6,             // too few valid signatures / wrong network
        txINSUFFICIENT_BALANCE = -7, // fee would bring account below reserve
        txNO_ACCOUNT = -8,           // source account not found
        txINSUFFICIENT_FEE = -9,     // fee is too small
        txBAD_AUTH_EXTRA = -10,      // unused signatures attached to transaction
        txINTERNAL_ERROR = -11,      // an unknown error occurred

        txNOT_SUPPORTED = -12,         // transaction type not supported
        txFEE_BUMP_INNER_FAILED = -13, // fee bump inner transaction failed
        txBAD_SPONSORSHIP = -14        // sponsorship not confirmed
    };
    ----------------------------------------------------------------
    """

    txFEE_BUMP_INNER_SUCCESS = 1
    txSUCCESS = 0
    txFAILED = -1
    txTOO_EARLY = -2
    txTOO_LATE = -3
    txMISSING_OPERATION = -4
    txBAD_SEQ = -5
    txBAD_AUTH = -6
    txINSUFFICIENT_BALANCE = -7
    txNO_ACCOUNT = -8
    txINSUFFICIENT_FEE = -9
    txBAD_AUTH_EXTRA = -10
    txINTERNAL_ERROR = -11
    txNOT_SUPPORTED = -12
    txFEE_BUMP_INNER_FAILED = -13
    txBAD_SPONSORSHIP = -14

    def pack(self, packer: Packer) -> None:
        """Packs this code as an XDR int into ``packer``."""
        packer.pack_int(self.value)

    @classmethod
    def unpack(cls, unpacker: Unpacker) -> "TransactionResultCode":
        """Reads one XDR int from ``unpacker`` and maps it to a member."""
        value = unpacker.unpack_int()
        return cls(value)

    def to_xdr_bytes(self) -> bytes:
        """Serializes this code to raw XDR bytes."""
        packer = Packer()
        self.pack(packer)
        return packer.get_buffer()

    @classmethod
    def from_xdr_bytes(cls, xdr: bytes) -> "TransactionResultCode":
        """Deserializes a code from raw XDR bytes."""
        unpacker = Unpacker(xdr)
        return cls.unpack(unpacker)

    def to_xdr(self) -> str:
        """Serializes this code to a base64-encoded XDR string."""
        xdr_bytes = self.to_xdr_bytes()
        return base64.b64encode(xdr_bytes).decode()

    @classmethod
    def from_xdr(cls, xdr: str) -> "TransactionResultCode":
        """Deserializes a code from a base64-encoded XDR string."""
        xdr_bytes = base64.b64decode(xdr.encode())
        return cls.from_xdr_bytes(xdr_bytes)

    @classmethod
    def _missing_(cls, value):
        # Unknown codes indicate a protocol newer than this SDK; point the
        # user at the upgrade path instead of the default ValueError text.
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please upgrade the SDK or submit an issue here: {__issues__}."
        )
package org.jetbrains.ngramgenerator.io

import com.fasterxml.jackson.databind.ObjectMapper
import java.io.File

/**
 * Serializes arbitrary content to JSON files, mirroring the layout of an
 * input directory tree under a target directory.
 */
object FileWriter {
    // Reused across calls: ObjectMapper construction is expensive, and the
    // instance is thread-safe once configured (Jackson documentation).
    // Previously a new mapper was created on every write.
    private val mapper = ObjectMapper()

    /**
     * Writes [content] as JSON to the path of [file] relative to [dirPath],
     * re-rooted under [targetDirPath], creating parent directories as needed.
     */
    fun write(file: File, dirPath: String, targetDirPath: String, content: Any) {
        val relativePath = file.relativeTo(File(dirPath))
        val outputPath = File("$targetDirPath/$relativePath")
        File("$targetDirPath/${relativePath.parent ?: ""}").mkdirs()
        outputPath.writeText(mapper.writeValueAsString(content))
    }

    /** Convenience overload taking the source file as a path string. */
    fun write(filename: String, dirPath: String, targetDirPath: String, content: Any) {
        this.write(File(filename), dirPath, targetDirPath, content)
    }

    /** Writes [content] as JSON directly to [filename]. */
    fun write(filename: String, content: Any) {
        File(filename).writeText(ObjectMapper().writeValueAsString(content))
    }
}
# AGH-schedule-optimizer
Make those lengthy calendar entries go away

![Image of before and after this script](http://i.imgur.com/ljf1Sae.png)

## Installation
1. Install [Node.js](https://nodejs.org/)
2. `npm install -g shelljs`

## Usage
1. Download `plan_zajec.ics` from https://dziekanat.agh.edu.pl/ into this repo
2. Optionally edit the mappings in `optimize.js` (don't worry, it's JSON)
3. Run `shjs optimize`
4. Import `plan_zajec_plus.ics` into Google Calendar, for example

## FAQ
**Why did you write this file in English?**

I dunno
require File.expand_path('../../../spec_helper', __FILE__)
require File.expand_path('../fixtures/common', __FILE__)
require File.expand_path('../shared/glob', __FILE__)

# Dir.[] shares its globbing semantics with Dir.glob, so the bulk of the
# behaviour is exercised through the shared example groups.
describe "Dir.[]" do
  it_behaves_like :dir_glob, :[]
end

describe "Dir.[]" do
  it_behaves_like :dir_glob_recursive, :[]
end

describe "Dir.[]" do
  # Run inside the mock directory tree so relative patterns resolve there;
  # restore the original working directory afterwards.
  before :all do
    DirSpecs.create_mock_dirs
    @cwd = Dir.pwd
    Dir.chdir DirSpecs.mock_dir
  end

  after :all do
    Dir.chdir @cwd
    DirSpecs.delete_mock_dirs
  end

  it "calls #to_path to convert multiple patterns" do
    pat1 = mock('file_one.ext')
    pat1.should_receive(:to_path).and_return('file_one.ext')
    pat2 = mock('file_two.ext')
    pat2.should_receive(:to_path).and_return('file_two.ext')

    Dir[pat1, pat2].should == %w[file_one.ext file_two.ext]
  end
end
using UnityEngine;
using UnityEngine.UI;

namespace i5.Toolkit.Core.Utilities.UnityAdapters
{
    /// <summary>
    /// Adapter exposing Unity's <see cref="ScrollRect"/> through the
    /// engine-agnostic <see cref="IScrollView"/> interface.
    /// </summary>
    public class ScrollRectAdapter : IScrollView
    {
        /// <summary>The wrapped Unity ScrollRect.</summary>
        public ScrollRect Adaptee { get; private set; }

        /// <summary>
        /// Normalized scroll position, forwarded directly to
        /// <see cref="ScrollRect.normalizedPosition"/>.
        /// </summary>
        public Vector2 NormalizedPosition
        {
            get { return Adaptee.normalizedPosition; }
            set { Adaptee.normalizedPosition = value; }
        }

        /// <summary>Creates an adapter wrapping the given ScrollRect.</summary>
        /// <param name="adaptee">The ScrollRect to wrap.</param>
        public ScrollRectAdapter(ScrollRect adaptee)
        {
            Adaptee = adaptee;
        }
    }
}
use std::process;

extern crate split_gpg_user;
use split_gpg_user::spawn_similarly;

/// Name of the companion server binary spawned by this client.
const SERVER_BIN_NAME: &'static str = "split-gpg-user-server";

/// Spawns the server process and exits with its exit status.
/// Falls back to exit code 1 when no code is available (e.g. the
/// server was terminated by a signal).
fn main() {
    let server = spawn_similarly(SERVER_BIN_NAME);
    let status = server.expect("Error running the server").wait().expect("Server errored");
    process::exit(status.code().unwrap_or(1));
}
import 'package:geofence_service/models/geofence_radius_sort_type.dart';

/// Options for [GeofenceService].
///
/// All setters are null-tolerant: assigning `null` keeps the current value.
class GeofenceServiceOptions {
  /// The time interval in milliseconds to check the geofence status.
  /// The default is `5000`.
  int _interval = 5000;

  /// Geo-fencing error range in meters.
  /// The default is `100`.
  int _accuracy = 100;

  /// Sets the delay between [GeofenceStatus.ENTER] and [GeofenceStatus.DWELL] in milliseconds.
  /// The default is `300000`.
  int _loiteringDelayMs = 300000;

  /// Sets the status change delay in milliseconds.
  /// [GeofenceStatus.ENTER] and [GeofenceStatus.EXIT] events may be called frequently
  /// when the location is near the boundary of the geofence. Use this option to minimize event calls at this time.
  /// If the option value is too large, realtime geo-fencing is not possible, so use it carefully.
  /// The default is `10000`.
  int _statusChangeDelayMs = 10000;

  /// Whether to use the activity recognition API.
  /// The default is `true`.
  bool _useActivityRecognition = true;

  /// Whether to allow mock locations.
  /// The default is `false`.
  bool _allowMockLocations = false;

  /// Whether to show the developer log.
  /// If this value is set to true, logs for geofence service activities (start, stop, etc.) can be viewed.
  /// It does not work in release mode.
  /// The default is `false`.
  bool _printDevLog = false;

  /// Sets the sort type of the geofence radius.
  /// The default is `GeofenceRadiusSortType.DESC`.
  GeofenceRadiusSortType _geofenceRadiusSortType = GeofenceRadiusSortType.DESC;

  int get interval => _interval;
  set interval(int? value) => _interval = value ?? _interval;

  int get accuracy => _accuracy;
  set accuracy(int? value) => _accuracy = value ?? _accuracy;

  int get loiteringDelayMs => _loiteringDelayMs;
  set loiteringDelayMs(int? value) =>
      _loiteringDelayMs = value ?? _loiteringDelayMs;

  int get statusChangeDelayMs => _statusChangeDelayMs;
  set statusChangeDelayMs(int? value) =>
      _statusChangeDelayMs = value ?? _statusChangeDelayMs;

  bool get useActivityRecognition => _useActivityRecognition;
  set useActivityRecognition(bool? value) =>
      _useActivityRecognition = value ?? _useActivityRecognition;

  bool get allowMockLocations => _allowMockLocations;
  set allowMockLocations(bool? value) =>
      _allowMockLocations = value ?? _allowMockLocations;

  bool get printDevLog => _printDevLog;
  set printDevLog(bool? value) => _printDevLog = value ?? _printDevLog;

  GeofenceRadiusSortType get geofenceRadiusSortType => _geofenceRadiusSortType;
  set geofenceRadiusSortType(GeofenceRadiusSortType? value) =>
      _geofenceRadiusSortType = value ?? _geofenceRadiusSortType;
}
import { pickBy } from 'lodash';
import { Using } from 'src/types/formulas';
import { Action } from '../actions';
import { ShapesAction, ShapesState, ShapeType } from '../types/shapes';
import makeDefaultShape from '../util/makeDefaultShape';

// Initial slice state: a single default rectangle.
export const shapesInitialState: ShapesState = {
  rect: makeDefaultShape(ShapeType.Rect, 'Rectangle')
};

/**
 * Reducer for the shapes slice of the store.
 *
 * NOTE(review): `{ ...state }` is a shallow copy; the cases below mutate the
 * nested shape/formula objects in place, so nested references are shared
 * with the previous state — confirm no consumer relies on nested
 * immutability.
 */
export default (
  state: ShapesState = shapesInitialState,
  action: Action
): ShapesState => {
  const newState = { ...state };

  switch (action.type) {
    case ShapesAction.UpdateUsing:
      // Switch a formula between constant and function mode.
      newState[action.shapeID].formulas[action.prop].using = action.using;
      return newState;

    case ShapesAction.UpdateFormula:
      // Store the edited formula in the slot matching its current mode.
      switch (newState[action.shapeID].formulas[action.prop].using) {
        case Using.Constant:
          newState[action.shapeID].formulas[action.prop].const = action.formula;
          break;
        case Using.Function:
          newState[action.shapeID].formulas[action.prop].fn = action.formula;
          break;
      }
      return newState;

    case ShapesAction.NewShape:
      return { ...state, [action.shapeID]: action.shape };

    case ShapesAction.DeleteShape:
      return pickBy(state, (_, key: string): boolean => key !== action.shapeID);

    case ShapesAction.SetValues:
      // Bulk-assign computed values for every own property in the payload.
      for (const prop in action.values) {
        if (!action.values.hasOwnProperty(prop)) {
          continue;
        }
        newState[action.shapeID].formulas[prop].values = action.values[prop];
      }
      return newState;

    case ShapesAction.UpdateValues:
      newState[action.shapeID].formulas[action.prop].values = action.values;
      return newState;

    case ShapesAction.ToggleVisible:
      newState[action.shapeID].visible = !state[action.shapeID].visible;
      // Fixed: this case previously fell through into `default` and only
      // worked because `default` happened to return newState.
      return newState;

    default:
      // Fixed: unknown actions now return the previous state object
      // unchanged, so reference-equality checks (memoization, react-redux
      // subscriptions) do not see a spurious new object.
      return state;
  }
};
//! Curves. #[cfg(test)] extern crate assert; extern crate num_traits as num; use num::Float; use std::marker::PhantomData; /// A curve. pub trait Curve<T: Float> { /// Evalute the curve at a point in `[0, 1]`. fn evaluate(&self, T) -> T; } /// A trace of a curve. #[derive(Clone, Copy, Debug)] pub struct Trace<'l, T: Float, C: 'l + Curve<T>> { curve: &'l C, steps: usize, position: usize, phantom: PhantomData<T>, } impl<'l, T: Float, C: Curve<T>> Trace<'l, T, C> { #[inline] fn new(curve: &'l C, steps: usize) -> Self { Trace { curve: curve, steps: steps, position: 0, phantom: PhantomData } } } macro_rules! implement { ($($float:ty),*) => ($( impl<'l, T: Curve<$float>> Iterator for Trace<'l, $float, T> { type Item = $float; fn next(&mut self) -> Option<Self::Item> { let position = self.position; if position < self.steps { self.position += 1; Some(self.curve.evaluate(position as $float / (self.steps - 1) as $float)) } else { None } } } )*); } implement!(f32, f64); pub mod bezier;
// Analyse and manipulate an image using OpenCV by processing // a list of actions described in a yaml file. // Created by Dilpesh Patel on 2019/08/02 #include <iostream> #include <string> #include "image_processor.hpp" static const char *const USAGE = "usage: ./play_image <yaml_path>\n"; static const std::string OUTPUT_FOLDER = "output"; int main(int argc, const char *argv[]) { // one parameter must be supplied bool is_ready; if (argc != 2) { std::cout << USAGE; return -1; } // TODO: add missing try/catch block std::string file_path = argv[1]; ImageProcessor::ImageProcessor ip(file_path); ip.init(); ip.run(); std::cout << "Run Successful" << std::endl; return 0; }
using System;
using System.Collections.Generic;
using System.Collections.ObjectModel;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using Uno.Extensions;
using Windows.Foundation;
using Windows.Foundation.Collections;
using Windows.UI.Xaml;
using Windows.UI.Xaml.Controls;
using Windows.UI.Xaml.Controls.Primitives;
using Windows.UI.Xaml.Data;

namespace Uno.UI.Tests.ItemsControlTests_CustomContainer
{
	/// <summary>
	/// Tests for ListView behavior when the item container type is supplied
	/// by the control itself (IsItemItsOwnContainerOverride) and the template
	/// root is itself a container instance.
	/// </summary>
	[TestClass]
	public class Given_ListViewBase_CustomContainer
	{
		/// <summary>
		/// An ItemTemplate whose root is a container type must still be
		/// wrapped in a generated container, not used as the container itself.
		/// </summary>
		[TestMethod]
		public void When_TemplateRootIsOwnContainer()
		{
			var count = 0;
			var panel = new StackPanel();

			var SUT = new MyItemsControl()
			{
				ItemsPanelRoot = panel,
				ItemTemplate = new DataTemplate(() =>
				{
					count++;
					return new MyCustomItemContainer() { MyValue = 42 };
				}),
				Template = new ControlTemplate(() => new ItemsPresenter()),
			};

			SUT.ApplyTemplate();

			SUT.ItemsSource = new object[] { 42 };

			var container = SUT.ContainerFromIndex(0) as MyCustomItemContainer;
			Assert.IsNotNull(container);
			Assert.IsTrue(container.IsGeneratedContainer);
			Assert.IsFalse(container.ContentTemplateRoot is MyCustomItemContainer);
			Assert.AreEqual(42, container.DataContext);
		}

		/// <summary>
		/// Same as above, but the template is provided through an
		/// ItemTemplateSelector rather than ItemTemplate.
		/// </summary>
		[TestMethod]
		public void When_TemplateSelector_RootIsOwnContainer()
		{
			var count = 0;
			var panel = new StackPanel();
			// Fixed: removed an unused `itemsPresenter` local.

			var itemTemplate = new DataTemplate(() =>
			{
				count++;
				return new MyCustomItemContainer() { MyValue = 42 };
			});

			var SUT = new MyItemsControl()
			{
				ItemsPanelRoot = panel,
				ItemTemplateSelector = new MyDataTemplateSelector(i => itemTemplate),
				Template = new ControlTemplate(() => new ItemsPresenter()),
			};

			SUT.ApplyTemplate();

			SUT.ItemsSource = new object[] { 42 };

			var container = SUT.ContainerFromIndex(0) as MyCustomItemContainer;
			Assert.IsTrue(container is MyCustomItemContainer);
			Assert.IsTrue(container.IsGeneratedContainer);
			Assert.IsFalse(container.ContentTemplateRoot is MyCustomItemContainer);
			Assert.AreEqual(42, container.DataContext);
		}

		/// <summary>
		/// Template materialization count tracks ObservableCollection changes:
		/// adds and replacements materialize, removals do not.
		/// </summary>
		[TestMethod]
		public void When_ObservableCollection()
		{
			var count = 0;
			var panel = new StackPanel();
			// Fixed: removed an unused `itemsPresenter` local.

			var itemTemplate = new DataTemplate(() =>
			{
				count++;
				return new Border();
			});

			var SUT = new MyItemsControl()
			{
				ItemsPanelRoot = panel,
				ItemTemplate = itemTemplate,
				Template = new ControlTemplate(() => new ItemsPresenter()),
			};

			SUT.ApplyTemplate();

			Assert.AreEqual(0, count);

			var collection = new ObservableCollection<int>();

			SUT.ItemsSource = collection;

			Assert.AreEqual(0, count);

			collection.Add(1);
			Assert.AreEqual(1, count);

			collection.Add(42);
			Assert.AreEqual(2, count);

			collection.Add(43);
			Assert.AreEqual(3, count);

			// Removal must not re-materialize templates.
			collection.RemoveAt(0);
			Assert.AreEqual(3, count);

			// Replacement materializes a new template root.
			collection[0] = 44;
			Assert.AreEqual(4, count);
		}
	}

	/// <summary>Selector that delegates to a provided function.</summary>
	public class MyDataTemplateSelector : DataTemplateSelector
	{
		private Func<object, DataTemplate> _selector;

		public MyDataTemplateSelector(Func<object, DataTemplate> selector)
			=> _selector = selector;

		protected override DataTemplate SelectTemplateCore(object item)
			=> _selector.Invoke(item);
	}

	/// <summary>ListView that always generates MyCustomItemContainer containers.</summary>
	public class MyItemsControl : ListView
	{
		protected override DependencyObject GetContainerForItemOverride()
			=> new MyCustomItemContainer();

		protected override bool IsItemItsOwnContainerOverride(object item)
			=> item is MyCustomItemContainer;
	}

	public class MyCustomItemContainer : SelectorItem
	{
		public int MyValue
		{
			get { return (int)GetValue(MyValueProperty); }
			set { SetValue(MyValueProperty, value); }
		}

		// Using a DependencyProperty as the backing store for MyValue.  This enables animation, styling, binding, etc...
		public static readonly DependencyProperty MyValueProperty =
			DependencyProperty.Register("MyValue", typeof(int), typeof(MyCustomItemContainer), new PropertyMetadata(0));
	}
}
import CognitoAuth from '../src/CognitoAuth'; import CognitoConstants from '../src/CognitoConstants'; const authData: any = { ClientId: "ClientId", AppWebDomain: "localhost:3000", TokenScopesArray: ['email', 'profile', 'openid'], RedirectUriSignIn: "http://localhost:3000", RedirectUriSignOut: "http://localhost:3000", IdentityProvider: "facebook", UserPoolId: "UserPoolId", }; const cognitoAuth: CognitoAuth = new CognitoAuth(authData); (global as any).open = jest.fn(); const payload = { username: 'prova', exp: (Date.now() + 100) }; const jwtToken = btoa(JSON.stringify({ "kid": "kid", "alg": "alg" })) + "." + btoa(JSON.stringify(payload)) const response = { id_token: jwtToken, access_token: jwtToken, refresh_token: jwtToken, } const mockXHR = { open: jest.fn(), send: jest.fn(), onreadystatechange: jest.fn(), readyState: 4, responseText: JSON.stringify(response), statusText: "OK", status: 200, setRequestHeader: jest.fn(), [CognitoConstants.WITHCREDENTIALS]: CognitoConstants.WITHCREDENTIALS }; const oldXMLHttpRequest = (window as any).XMLHttpRequest; (window as any).XMLHttpRequest = jest.fn(() => mockXHR); //(window as any).open = jest.fn(); //const open = jest.fn() Object.defineProperty(window, 'open', jest.fn()); it('test getSession login', function (done) { cognitoAuth.getSession(); (mockXHR as any).onreadystatechange(); expect((global as any).open).toBeCalled(); done(); }); it('test parseCognitoWebResponse token', function (done) { let str = Object.entries(response).map(([key, val]) => `${key}=${val}`).join('&'); const urlParse = "http://localhost:3000#state=state&" + str; const result = cognitoAuth.parseCognitoWebResponse(urlParse); (mockXHR as any).onreadystatechange(); return result.then((data) => { expect(data.accessToken).toEqual({ payload: payload, jwtToken: jwtToken, }) done(); } ); }); it('test getSession exist', function (done) { const result = cognitoAuth.getSession(); (mockXHR as any).onreadystatechange(); return result.then((data) => { 
expect(data.accessToken).toEqual({ payload: payload, jwtToken: jwtToken, }) done(); } ); }); it('test refresh error', function (done) { const result = cognitoAuth.refreshSession("token"); (mockXHR as any).onreadystatechange(); return result.catch((e) => { expect(e).not.toBeNull(); done(); } ); }); it('test signedin', function (done) { expect(cognitoAuth.isUserSignedIn()).toBeTruthy(); done(); }); it('test signout', function (done) { cognitoAuth.signOut(); expect(cognitoAuth.getSignInUserSession()).toBeNull(); expect((global as any).open).toBeCalled(); done(); });
# Widens maven_jnlp_versioned_jnlp_urls.date_str from a 15-character string to
# an unbounded one; rolling back reinstates the original 15-character limit.
class IncreaseMavenJnlpVersionSize < ActiveRecord::Migration[5.1]
  TABLE  = :maven_jnlp_versioned_jnlp_urls
  COLUMN = :date_str

  def up
    # limit: nil drops the length constraint entirely.
    change_column TABLE, COLUMN, :string, limit: nil
  end

  def down
    change_column TABLE, COLUMN, :string, limit: 15
  end
end
// Terminal task-tree renderer: on a TTY it repaints a spinner-animated tree of
// task states in place; in CI (non-TTY) mode it emits append-only log lines.
import readline from 'readline'
import c from 'picocolors'

// Per-task spinner closures, keyed weakly so finished tasks can be GC'd.
const spinnerMap = new WeakMap()
const spinnerFrames = ['-', '\\', '|', '/']

// Returns a stateful function cycling through spinnerFrames.
// Note: the index is pre-incremented, so the first call yields frames[1].
function getSpinner() {
  let index = 0
  return () => {
    index = ++index % spinnerFrames.length
    return spinnerFrames[index]
  }
}

// Counts how many terminal rows `str` occupies once ANSI color sequences are
// stripped and each line is wrapped at `width` columns (minimum 1 row/line).
function getLines(str = '', width = 80) {
  return str
    .replace(/\u001b[^m]*?m/g, '')
    .split('\n')
    .reduce((col, l) => (col += Math.max(1, Math.ceil(l.length / width))), 0)
}

// Maps a task state to its colored status glyph; the 'run' state gets an
// animated spinner that is lazily created and cached per task.
function getStateSymbol(task) {
  if (task.state === 'done') {
    return c.green('√')
  } else if (task.state === 'fail') {
    return c.red('×')
  } else if (task.state === 'warn') {
    return c.yellow('↓')
  } else if (task.state === 'run') {
    let spinner = spinnerMap.get(task)
    if (!spinner) {
      spinner = getSpinner()
      spinnerMap.set(task, spinner)
    }
    return c.yellow(spinner())
  } else {
    // Unknown / pending states render as a neutral placeholder.
    return c.gray('*')
  }
}

// Collects the chain of ancestor titles (root first) down to `task` itself,
// skipping untitled ancestors.
function getTitles(task) {
  const titles = [task.title]
  let current = task
  while (current.parent) {
    current = current.parent
    if (current.title) titles.unshift(current.title)
  }
  return titles
}

// Renders the task tree as space-indented lines for interactive (TTY) display.
// Children of a 'done' task are collapsed (not rendered).
function renderTree(tasks, level = 0) {
  let output = []
  for (const task of tasks) {
    const title = task.title
    const prefix = `${getStateSymbol(task)} `
    output.push(' '.repeat(level) + prefix + title)
    if (task.tasks && task.tasks.length > 0) {
      if (task.state !== 'done') {
        output = output.concat(renderTree(task.tasks, level + 1))
      }
    }
  }
  return output.join('\n')
}

// Append-only rendering for CI logs: each leaf task is printed exactly once
// when it reaches a settled state (the state is then overwritten with 'end'
// so the task is skipped on later passes). Titles include the ancestor path.
function renderCI(tasks) {
  let output = ''
  for (const task of tasks) {
    if (task.state && task.state !== 'end' && task.state !== 'run' && !task.tasks) {
      const title = getTitles(task).join(c.yellow(' ≫ '))
      const prefix = `${getStateSymbol(task)} `
      output += prefix + title + '\n'
      task.state = 'end'
    }
    if (task.tasks && task.tasks.length > 0) {
      output += renderCI(task.tasks)
    }
  }
  return output
}

// Creates a renderer bound to an output stream. In TTY mode the tree is
// repainted in place (cursor hidden while running); otherwise CI lines are
// streamed as tasks settle.
export function createRenderer(stream, { isTTY = true } = {}) {
  let tasks = [] // root-level tasks to render
  let lines = 0 // terminal rows used by the previous frame (for repaint)
  let timer // animation-loop handle

  return {
    // Erases the previously painted frame by clearing `lines` rows upward.
    clear() {
      for (let i = 0; i < lines; i++) {
        i > 0 && readline.moveCursor(stream, 0, -1)
        readline.cursorTo(stream, 0)
        readline.clearLine(stream, 0)
      }
      lines = 0
    },
    write(str, clear = false) {
      if (clear) {
        this.clear()
      }
      stream.write(str)
    },
    // Paints one frame; in TTY mode the repaint replaces the prior frame and
    // the new row count is recorded for the next clear().
    render() {
      const output = isTTY ? renderTree(tasks) : renderCI(tasks)
      if (isTTY) {
        this.write(output, true)
        lines = getLines(output, stream.columns)
      } else {
        this.write(output)
      }
      return this
    },
    // Optionally registers a task, then renders the current frame.
    spin(task) {
      task && tasks.push(task)
      return this.render()
    },
    // Animation loop: re-render roughly every 130 ms.
    loop() {
      timer = setTimeout(() => this.loop(), 130)
      return this.spin()
    },
    // Registers a root task and starts the loop (no-op if already running).
    start(task) {
      tasks.push(task)
      if (timer) return this
      if (isTTY) stream.write(`\x1b[?25l`) // hide cursor while animating
      return this.loop()
    },
    // Stops the loop and paints the final frame; restores the cursor on TTYs.
    stop() {
      if (timer) timer = clearTimeout(timer)
      if (isTTY) {
        this.write(`${renderTree(tasks)}\n`, true)
        this.write(`\x1b[?25h`) // show cursor again
      } else {
        this.write(renderCI(tasks))
      }
      return this
    },
  }
}
import { combineReducers } from 'redux';

/// import cachedRequests from './cachedRequests';
/// import expanded from './expanded';
import ids from './ids';
import loadedOnce from './loadedOnce';
import params from './params';
/// import selectedIds from './selectedIds';
import total from './total';

/**
 * ts-jest does some aggressive module mocking when unit testing reducers
 * individually, which can leave an imported reducer undefined and trigger
 * redux's 'No reducer provided for key "..."' warning. Substituting a no-op
 * reducer keeps combineReducers quiet in that scenario.
 *
 * @see https://stackoverflow.com/questions/43375079/redux-warning-only-appearing-in-tests
 */
const noopReducer = () => null;
const orNoop = (reducer) => reducer || noopReducer;

export default combineReducers({
	/// cachedRequests: orNoop(cachedRequests),
	/// expanded: orNoop(expanded),
	ids: orNoop(ids),
	loadedOnce: orNoop(loadedOnce),
	params: orNoop(params),
	/// selectedIds: orNoop(selectedIds),
	total: orNoop(total),
});
# A single imported bank transaction, tied back to the request that fetched it
# and to its Plaid category via the category's external plaid_id.
class Transaction < ActiveRecord::Base
  belongs_to :transaction_request
  belongs_to :charge
  belongs_to :merchant
  # Joined on the external Plaid identifier rather than the local primary key.
  belongs_to :plaid_category, primary_key: :plaid_id, foreign_key: :category_id

  # Convenience accessor: the account this transaction was fetched for.
  def linked_account
    transaction_request.linked_account
  end

  # Convenience accessor: the institution behind that linked account.
  def financial_institution
    linked_account.financial_institution
  end
end
## API

### 属性

| 参数 | 说明 | 类型 | 默认值 |
| --- | --- | --- | --- |
| `v-model` | 绑定的值 | _any_ | **false** |
| `checked-value` | 选中状态的值 | _any_ | **true** |
| `unchecked-value` | 未选中状态的值 | _any_ | **false** |
| `label` | 标签名 | _string \| number_ | **-** |
| `size` | 复选框尺寸,可选值为 `normal` `small` `mini` | _string_ | **normal** |
| `disabled` | 是否禁用 | _boolean_ | **false** |
| `readonly` | 是否只读 | _boolean_ | **false** |
| `ripple` | 是否开启水波纹 | _boolean_ | **true** |
| `validate-trigger` | 触发验证的时机,可选值为 `onChange` `onClick` | _CheckboxValidateTrigger[]_ | **['onChange']** |
| `rules` | 验证规则,返回 `true` 表示验证通过,其余的值则转换为文本作为用户提示 | _Array<(value: any) => any>_ | **-** |

### 事件

| 事件名 | 说明 | 参数 |
| --- | --- | --- |
| `input` | 绑定值变化时触发 | **value: any** |
| `click` | 点击时触发 | **e: Event** |
| `change` | 状态变更时触发 | **value: any** |

### 插槽

| 插槽名 | 说明 | 参数 |
| --- | --- | --- |
| `default` | 显示的文本 | **-** |
use strict;
use warnings;

use CGI;
use FormValidator::Lite qw/Email Date/;

# Micro-benchmark: populate a fixed CGI query once, then run the same
# FormValidator::Lite rule set 1001 times to exercise validation overhead.

my $query = CGI->new;
$query->param( param1 => 'ABCD' );
$query->param( param2 => 12345 );
$query->param( mail1  => 'lyo.kato@gmail.com' );
$query->param( mail2  => 'lyo.kato@gmail.com' );
$query->param( year   => 2005 );
$query->param( month  => 11 );
$query->param( day    => 27 );

for my $iteration ( 0 .. 1000 ) {
    my $result = FormValidator::Lite->new($query)->check(
        param1 => [ 'NOT_BLANK', 'ASCII', [ 'LENGTH', 2, 5 ] ],
        param2 => [ 'NOT_BLANK', 'INT' ],
        mail1  => [ 'NOT_BLANK', 'EMAIL_LOOSE' ],
        mail2  => [ 'NOT_BLANK', 'EMAIL_LOOSE' ],
        # Cross-field rules: both mail fields must match, and the three
        # date parts must form a valid calendar date.
        { mails => [ 'mail1', 'mail2' ] } => ['DUPLICATION'],
        { date => [ 'year', 'month', 'day' ] } => ['DATE'],
    );
    $result->has_error;
}