hexsha stringlengths 40 40 | size int64 5 1.05M | ext stringclasses 98 values | lang stringclasses 21 values | max_stars_repo_path stringlengths 3 945 | max_stars_repo_name stringlengths 4 118 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 945 | max_issues_repo_name stringlengths 4 118 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 134k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 945 | max_forks_repo_name stringlengths 4 135 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 1.05M | avg_line_length float64 1 1.03M | max_line_length int64 2 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
be378eea5163bc414d71195ef66bb4e79b336476 | 2,243 | rs | Rust | writing_automated_tests/src/how_to_write_tests.rs | learn-frame/learn-rust | 22c471ccbfc4a3555af0838b5b45b5d82ab0e616 | [
"MIT"
] | null | null | null | writing_automated_tests/src/how_to_write_tests.rs | learn-frame/learn-rust | 22c471ccbfc4a3555af0838b5b45b5d82ab0e616 | [
"MIT"
] | null | null | null | writing_automated_tests/src/how_to_write_tests.rs | learn-frame/learn-rust | 22c471ccbfc4a3555af0838b5b45b5d82ab0e616 | [
"MIT"
] | null | null | null | /// 1. 设置任何所需的数据或状态
/// 2. 运行需要测试的代码
/// 3. 断言其结果是我们所期望的
///
/// 复习: 属性(attribute) 是关于 Rust 代码片段的元数据, 常见的如
/// #[derive], #[test], #[allow]
///
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
#[allow(unused)]
fn can_hold(&self, other: &Rectangle) -> bool {
self.width > other.width && self.height > other.height
}
}
#[allow(unused)]
pub fn add_two(a: i32) -> i32 {
a + 2
}
#[allow(unused)]
pub fn greeting(name: &str) -> String {
String::from("Hello!")
}
#[allow(unused)]
pub fn make_error() {
panic!("I'm error!");
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn it_works() {
assert_eq!(add_two(2), 4);
assert_ne!(add_two(2), 5);
}
#[test]
fn larger_can_hold_smaller() {
let larger = Rectangle {
width: 8,
height: 7,
};
let smaller = Rectangle {
width: 5,
height: 1,
};
assert!(larger.can_hold(&smaller));
assert!(!smaller.can_hold(&larger));
}
#[test]
fn greeting_contains_name() {
let result = greeting("Carol");
assert!(
result.contains("Carol"),
// 自定义错误信息
"Greeting did not contain name, value was `{}`",
result
);
}
// #[should_panic] 用来验证那些应该出错的函数
#[test]
#[should_panic(expected = "出错就对咯!")]
fn need_error() {
make_error();
}
// 下面这个函数因为不出错, 所以测试会失败
#[test]
#[should_panic]
fn need_error_1() {
add_two(3);
}
// 但吊诡的是, 如果你执行多个函数
// 只要有一个出错, 就能通过
// 因此一个 should_panic 宏执行一个函数比较好
#[test]
#[should_panic]
fn need_error_2() {
add_two(3);
make_error();
}
// 也可以是用 Result<T, E>
// 不能对这些使用 Result<T, E> 的测试使用 #[should_panic] 注解
#[test]
fn use_result() -> Result<(), String> {
if add_two(2) == 4 {
Ok(())
} else {
Err(String::from("two plus two does not equal four"))
}
}
}
// 执行 cargo test
//
// test how_to_write_tests::tests::it_works ... ok
// running 1 test
// test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s
| 19.675439 | 96 | 0.523852 |
20f6525358bc00d4fd0b1f84cbbacce982cb9df3 | 1,097 | css | CSS | frontend/src/styles/custom.css | theaidem/githubble | cd400340c735c8f4ef0006c90a92ce41afdf4b79 | [
"MIT"
] | 31 | 2016-04-01T13:41:03.000Z | 2021-09-29T02:22:24.000Z | frontend/src/styles/custom.css | theaidem/githubble | cd400340c735c8f4ef0006c90a92ce41afdf4b79 | [
"MIT"
] | 6 | 2016-09-24T09:11:39.000Z | 2022-02-26T17:17:15.000Z | frontend/src/styles/custom.css | theaidem/githubble | cd400340c735c8f4ef0006c90a92ce41afdf4b79 | [
"MIT"
] | 2 | 2016-04-02T09:51:37.000Z | 2016-04-02T17:31:00.000Z | /* Contains your custom styles */
#app {
height: 100%;
}
#app .ui.menu .container {
display: block;
}
#app .ui.menu .column {
padding-left: 0;
padding-right: 0;
}
#app .ui.menu .column .item:before {
width: 0px;
}
#app .ui.menu .column .item .ui.grid{
width: 100%;
margin-left: 0;
margin-right: 0;
}
#app .ui.menu .item img.logo {
margin-right: 0.5em;
width: 2em;
}
#app .ui.menu .item.header {
color: #4385fb;
padding: 18px;
}
#app .ui.menu .item.stat {
background: #DCF5FF;
}
#app .ui.menu .item.stat .column {
width: 100%;
font-size: 12px;
}
#app .board .content.container {
padding-top: 80px;
}
.event-cart {
text-align: center;
position: fixed;
width: 260px;
}
.event-cart.empty.octicon {
font-size: 5em;
}
#app .event.item .content.actor-picture {
width: 35px;
height: 35px;
}
#app .event.item.viewing {
background: #98D5FF;
}
.stats .right {
float: right;
}
.stats .most-forked .item .octicon,
.stats .most-starred .item .octicon {
float: left;
}
.stats .most-forked .item .content,
.stats .most-starred .item .content {
padding-left: 18px;
} | 13.7125 | 41 | 0.644485 |
b8745455df8b38965a8279236be184f2ea7df5f0 | 396 | rs | Rust | crates/metron_macro/src/geninmod.rs | bbinber/metron | 0de07740f3b7564dd2c354dd629f0241a094f674 | [
"MIT"
] | null | null | null | crates/metron_macro/src/geninmod.rs | bbinber/metron | 0de07740f3b7564dd2c354dd629f0241a094f674 | [
"MIT"
] | null | null | null | crates/metron_macro/src/geninmod.rs | bbinber/metron | 0de07740f3b7564dd2c354dd629f0241a094f674 | [
"MIT"
] | null | null | null | use proc_macro::TokenStream;
use proc_macro2::Span;
use quote::quote;
use syn::parse::{Parse, ParseStream, Result};
use syn::punctuated::Punctuated;
use syn::{parse_macro_input, Ident, ItemStruct, LitInt, LitStr};
pub fn geninmod(item: TokenStream) -> TokenStream {
// let attr = dbg!(attr);
let item = dbg!(item);
let expanded = quote! {
// item
};
expanded.into()
} | 26.4 | 64 | 0.664141 |
1f68cd36ed5ee94835efdc9f6bf6ab97de8f46b9 | 2,896 | css | CSS | JS Applications/XS/style.css | GeorgiGarnenkov/JavaScriptCore | f1ec73a7cc45d7fbca0c9df417b240ce68ed6900 | [
"MIT"
] | null | null | null | JS Applications/XS/style.css | GeorgiGarnenkov/JavaScriptCore | f1ec73a7cc45d7fbca0c9df417b240ce68ed6900 | [
"MIT"
] | null | null | null | JS Applications/XS/style.css | GeorgiGarnenkov/JavaScriptCore | f1ec73a7cc45d7fbca0c9df417b240ce68ed6900 | [
"MIT"
] | null | null | null | @import url(https://fonts.googleapis.com/css?family=Open+Sans);
div.jumbotron, html{
font-family: 'Open Sans', serif;
background-image: url(./background.jpg);
background-size: cover;
}
main{
text-align: center;
}
header {
margin-top: 15px;
text-align: center;
}
button.btn.btn-light{
margin-left: 5px;
margin-right: 5px;
border: 0px;
background-color: white;
font-size: 100%;
font-weight: bolder;
width: 150px;
border-radius: 12px;
}
button.btn.btn-light:hover{
background-color: white;
color: #D01291;
box-shadow: 0.5px 0.5px 10px 0px lightgray;
}
#name, #club{
color: rgb(247, 9, 167);
}
.btn.btn-light:hover,
.btn.btn-light:focus {
box-shadow: 0 0.5em 0.5em -0.4em var(--hover);
}
#reload{
margin-top: 80px;
margin-bottom: 80px;
text-align: center;
margin-left: 40%;
}
#timer{
float: right;
margin-right: 60%;
visibility: hidden;
border-radius: 15px;
padding-right: 100px;
}
p{
color: #D01291;
font-weight: bold;
}
progress {
border: none;
height: 25px;
}
progress {
border-radius: 15px;
color: lightblue;
}
progress::-webkit-progress-value {
background: lightblue;
}
progress::-moz-progress-bar {
background: lightcolor;
}
progress::-webkit-progress-value {
background: #ECD2E4;
}
progress::-webkit-progress-bar {
background: #EDE3EB;
}
#btnHideTable{
background-color: #EDE3EB;
color: black;
font-weight: bolder;
border: 0px;
width: 52%;
text-align: left;
}
#btnHideTable:hover{
background-color: #EDE3EB;
color: black;
}
#btnHideTable:focus,
#btnHideTable:active {
box-shadow: none;
}
i.fa.fa-envelope{
color: rgb(218, 200, 214);
transform: scale(1.6,1.3);
font-weight: lighter;
}
#myTable{
margin-top: 15px;
margin-left: auto;
margin-right: auto;
width: 52%;
}
thead{
text-align: left;
font-size: medium;
}
tbody>tr.tr{
background-color: white;
text-align: left;
box-shadow: 0.5px 0.5px 20px 0px lightgray;
border-radius: 50px;
font-weight: bolder;
font-size: medium;
}
tbody::before
{
content: '';
display: table-row;
height: 10px;
}
tr::before{
content: '';
display: table-row;
height: 5px;
}
#btnReload{
border-radius: 50px;
width: 200px;
height: 50px;
font-size: larger;
color: white;
font-weight: bolder;
background-color: #FF2295;
border: 0px;
}
.image{
margin-top: 5px;
width: 50px;
height: 50px;
margin-bottom: 5px;
}
.notification {
padding-top: 20px;
margin: 1em;
display: none;
position: absolute;;
top: 47%;
left: 37%;
text-align: center;
height: 120px;
width: 420px;
border-radius: 5px;
}
#errorBox{
background: #FF235B;
color: white
}
| 15.404255 | 63 | 0.606008 |
7153b5624b085aa1e0fc2c88e34eea47ceafa586 | 1,570 | ts | TypeScript | src/app/auth/training/past-training/past-training.component.ts | harshyadav9/maxAangularMaterial | c4c89dfbcbcb26d541654b2201de717abaa89182 | [
"MIT"
] | null | null | null | src/app/auth/training/past-training/past-training.component.ts | harshyadav9/maxAangularMaterial | c4c89dfbcbcb26d541654b2201de717abaa89182 | [
"MIT"
] | null | null | null | src/app/auth/training/past-training/past-training.component.ts | harshyadav9/maxAangularMaterial | c4c89dfbcbcb26d541654b2201de717abaa89182 | [
"MIT"
] | null | null | null | import { Component, OnInit , ViewChild, AfterViewInit, OnDestroy } from '@angular/core';
import { TrainingService } from '../training.service';
import { Excercise } from '../excercise.model';
import { MatTableDataSource , MatSort , PageEvent, MatPaginator} from '@angular/material';
import { Subscription } from 'rxjs';
@Component({
selector: 'app-past-training',
templateUrl: './past-training.component.html',
styleUrls: ['./past-training.component.css']
})
export class PastTrainingComponent implements OnInit, AfterViewInit , OnDestroy {
excercises:Excercise[] = [];
subsc:Subscription;
pageEvent: PageEvent;
displayedColumns:string[] = ["name","calories","duration","date","state"];
dataSource = new MatTableDataSource<Excercise>();
// MatSort gives access to underlying property of matsort and mat-sort-header
@ViewChild(MatSort) sort:MatSort
@ViewChild(MatPaginator) paginator:MatPaginator
constructor(private trainingService:TrainingService) { }
ngOnInit() {
this.subsc = this.trainingService.completeOrCancelExc.subscribe((data:Excercise[])=>{
this.dataSource.data = data;
console.log("this.dataSource.data",this.dataSource.data);
});
this.trainingService.fetchCancelOrCompleteExcercise();
};
ngAfterViewInit(){
this.dataSource.sort = this.sort;
this.dataSource.paginator = this.paginator;
}
doFilter(filterValue:string){
this.dataSource.filter = filterValue.trim().toLowerCase();
}
ngOnDestroy(){
if(this.subsc)
this.subsc.unsubscribe();
}
}
| 29.622642 | 90 | 0.711465 |
cf862275bd19fedffc2f7b122ec4c16ee4ed2df4 | 73 | css | CSS | styles/specific_job.css | almarionoah/endeavor | e2d4c22798412b6c9b86b775c7bca7bf8f38aec6 | [
"MIT"
] | null | null | null | styles/specific_job.css | almarionoah/endeavor | e2d4c22798412b6c9b86b775c7bca7bf8f38aec6 | [
"MIT"
] | 1 | 2019-06-12T19:24:39.000Z | 2019-06-12T19:24:39.000Z | styles/specific_job.css | almarionoah/endeavor | e2d4c22798412b6c9b86b775c7bca7bf8f38aec6 | [
"MIT"
] | null | null | null | #company_logo{
width: 200px;
height: 200px;
margin-left: 40%;
} | 14.6 | 20 | 0.616438 |
1fc4e7bce79a8d74b682fa9194236e8a99e614eb | 187 | css | CSS | customestyle.css | engammar111/bootstarp | 116e0292b085a370cba6d584d5cee1d68cb23d78 | [
"MIT"
] | null | null | null | customestyle.css | engammar111/bootstarp | 116e0292b085a370cba6d584d5cee1d68cb23d78 | [
"MIT"
] | null | null | null | customestyle.css | engammar111/bootstarp | 116e0292b085a370cba6d584d5cee1d68cb23d78 | [
"MIT"
] | null | null | null | .btn-success {
color: orange;
background-color: red;
}
.btn-success:hover {
background-color: yellow;
color: black;
}
.bg-primary {
background-color: yellow;
color: black;
}
| 13.357143 | 27 | 0.668449 |
857621577f099fbe301996f513e60d4f3c9af9ad | 6,072 | js | JavaScript | uiclient/js/services.js | JanezSedeljsak/tcp-socket-demo | 9dbb43c091b181f1a4989a86232e33403b9c6467 | [
"CC0-1.0"
] | 1 | 2021-07-06T05:00:54.000Z | 2021-07-06T05:00:54.000Z | uiclient/js/services.js | JanezSedeljsak/tcp-socket-demo | 9dbb43c091b181f1a4989a86232e33403b9c6467 | [
"CC0-1.0"
] | null | null | null | uiclient/js/services.js | JanezSedeljsak/tcp-socket-demo | 9dbb43c091b181f1a4989a86232e33403b9c6467 | [
"CC0-1.0"
] | null | null | null | const pystruct = require('python-struct');
const { ipcRenderer } = require('electron');
app.service('$drag', function () {
this.for = function (elmnt) {
let pos1 = 0, pos2 = 0, pos3 = 0, pos4 = 0;
if (document.getElementById(elmnt.id + "-header")) {
document.getElementById(elmnt.id + "-header").onmousedown = dragMouseDown;
} else elmnt.onmousedown = dragMouseDown;
function dragMouseDown(e) {
var containers = document.getElementsByClassName("chat-container");
for (var i = 0; i < containers.length; i++) containers.item(i).style.zIndex = 0;
e = e || window.event;
e.preventDefault();
e.target.parentElement.style.zIndex = 5;
pos3 = e.clientX;
pos4 = e.clientY;
document.onmouseup = closeDragElement;
document.onmousemove = elementDrag;
}
function elementDrag(e) {
e = e || window.event;
e.preventDefault();
pos1 = pos3 - e.clientX;
pos2 = pos4 - e.clientY;
pos3 = e.clientX;
pos4 = e.clientY;
elmnt.style.top = (elmnt.offsetTop - pos2) + "px";
elmnt.style.left = (elmnt.offsetLeft - pos1) + "px";
}
function closeDragElement() {
document.onmouseup = null;
document.onmousemove = null;
}
}
});
app.service('$parser', function () {
this.capFirstLetter = str => str.split(" ").map((word) => word[0].toUpperCase() + word.substring(1).toLowerCase()).join(" ");
this.sendData = (socketClient, object) => {
let jsonString = JSON.stringify(object);
// tmp fix remove non utf-8 characters
jsonString = jsonString.replace(/[^\x20-\x7E]/g, '');
const byteHeader = pystruct.pack("!H", jsonString.length);
socketClient.write(byteHeader + jsonString);
};
this.decodeData = byteArr => {
const enc = new TextDecoder("utf-8");
const len = pystruct.unpack("!H", byteArr)[0];
const dec = enc.decode(byteArr);
return JSON.parse(dec.substr(dec.length - len));
};
});
app.service('$appWindow', function ($window) {
this.exit = () => $window.close();
this.minimize = (winName="") => ipcRenderer.send(winName == 'admin' ? 'request-minimize-admin' : 'request-minimize');
});
app.service('$notification', function () {
this.defaultSettingsByType = {
normal: {
position: 'top-end',
showConfirmButton: false,
timer: 2000
},
form: {
input: 'text',
inputAttributes: { autocapitalize: 'off' },
showCancelButton: true
}
};
this.show = function(type, settings, callback=() => {}) {
const def = type in this.defaultSettingsByType ? this.defaultSettingsByType[type] : {};
Swal.fire({ ...def, ...settings }).then(function(input){
if (input.isConfirmed) {
if (!callback) return;
callback(input);
}
}, () => {});
}
});
app.service('$certService', function($notification) {
this.myHash = s => {
let a = 0, c = 0, o;
for (let h = s.length - 1; h >= 0; h--) {
o = s.charCodeAt(h);
a = (a << 6 & 268435455) + o + (o << 14);
c = a & 266338304;
a = c !== 0 ? a ^ c >> 21 : a;
}
return `__${String(a).split("").reverse().join("")}__`;
};
this.openAdminApp = () => {
$notification.show('form', { title: 'Enter admin code', confirmButtonText: 'Open app', input: 'password' }, (input) => {
if (this.myHash(input.value) == '__433063862__') {
ipcRenderer.send('draw-admin');
}
});
};
this.getAllCertRequests = () => {
const data = ipcRenderer.sendSync('call-certificate-service', { action: 'get-requested-certificates' });
if ('certificates' in data && Array.isArray(data['certificates'])) {
return data['certificates'];
}
$notification.show('normal', { icon: 'error', title: `Error occured!` });
return [];
}
this.confirmCertificate = (certName) => {
const data = ipcRenderer.sendSync('call-certificate-service', { certName, action: 'confirm-certificate' });
if ('success' in data && data['success']) return;
$notification.show('normal', { icon: 'error', title: `Error occured!` });
};
this.sendCertificateRequest = (certName) => {
const data = ipcRenderer.sendSync('call-certificate-service', { certName, action: 'generate-certificate' });
if ('success' in data && data['success']) {
$notification.show('normal', { icon: 'success', title: `Your certificate was succesfully created!` });
return;
}
$notification.show('normal', { icon: 'error', title: `Error occured while creating your certificate!` });
};
this.getUserCertificate = (certName, allowAdmin=false) => {
const data = ipcRenderer.sendSync('call-certificate-service', { certName, action: 'get-certificate', allowAdmin });
if ('success' in data && data['success'] && 'certData' in data) {
const tcpSocketConfig = [3333, '127.0.0.1'];
return {
host: tcpSocketConfig[1],
port: tcpSocketConfig[0],
secureProtocol: 'TLSv1_2_method',
rejectUnauthorized: false,
...data['certData']
};
}
$notification.show('normal', { icon: 'error', title: data['message'] });
return undefined;
}
this.getAllCertificates = () => {
const data = ipcRenderer.sendSync('call-certificate-service', { action: 'get-all-certificates' });
if ('members' in data && Array.isArray(data['members'])) {
return data['members'];
}
$notification.show('normal', { icon: 'error', title: `Error occured!` });
return [];
}
}); | 36.8 | 129 | 0.54776 |
4a949726cddfa178f06c43458834316a3840ea22 | 3,655 | kt | Kotlin | app/src/main/java/com/halcyonmobile/multiplatformplayground/ui/Applications.kt | AlexGabor/MultiplatformPlayground | 9cc32907a67fb892e7e4a0e1c0b94ae821aaebd5 | [
"Apache-2.0"
] | null | null | null | app/src/main/java/com/halcyonmobile/multiplatformplayground/ui/Applications.kt | AlexGabor/MultiplatformPlayground | 9cc32907a67fb892e7e4a0e1c0b94ae821aaebd5 | [
"Apache-2.0"
] | null | null | null | app/src/main/java/com/halcyonmobile/multiplatformplayground/ui/Applications.kt | AlexGabor/MultiplatformPlayground | 9cc32907a67fb892e7e4a0e1c0b94ae821aaebd5 | [
"Apache-2.0"
] | null | null | null | package com.halcyonmobile.multiplatformplayground.ui
import androidx.compose.foundation.Image
import androidx.compose.foundation.clickable
import androidx.compose.foundation.layout.Box
import androidx.compose.foundation.layout.Column
import androidx.compose.foundation.layout.PaddingValues
import androidx.compose.foundation.layout.Row
import androidx.compose.foundation.layout.fillMaxSize
import androidx.compose.foundation.layout.fillMaxWidth
import androidx.compose.foundation.layout.padding
import androidx.compose.foundation.layout.size
import androidx.compose.foundation.layout.wrapContentSize
import androidx.compose.foundation.lazy.LazyColumn
import androidx.compose.foundation.shape.RoundedCornerShape
import androidx.compose.material.CircularProgressIndicator
import androidx.compose.material.Surface
import androidx.compose.material.Text
import androidx.compose.runtime.Composable
import androidx.compose.runtime.onActive
import androidx.compose.ui.Alignment
import androidx.compose.ui.Modifier
import androidx.compose.ui.graphics.ColorFilter
import androidx.compose.ui.res.vectorResource
import androidx.compose.ui.unit.dp
import com.halcyonmobile.multiplatformplayground.R
import com.halcyonmobile.multiplatformplayground.model.ui.ApplicationUiModel
import com.halcyonmobile.multiplatformplayground.ui.theme.AppTheme
import dev.chrisbanes.accompanist.coil.CoilImage
@Composable
fun Applications(
items: List<ApplicationUiModel>,
onApplicationClicked: (ApplicationUiModel.App) -> Unit,
contentPadding: PaddingValues = PaddingValues(),
onBottomReached: () -> Unit = {}
) {
LazyColumn(
modifier = Modifier.fillMaxSize(),
contentPadding = contentPadding
) {
itemsIndexed(items = items,
itemContent = { index, item ->
if (items.lastIndex == index) {
onActive {
onBottomReached()
}
}
when (item) {
is ApplicationUiModel.App -> Application(uiModel = item, onApplicationClicked)
ApplicationUiModel.Loading -> Box(
Modifier.fillParentMaxWidth(),
Alignment.Center
) {
CircularProgressIndicator(Modifier.padding(16.dp))
}
}
})
}
}
/**
 * Single application row: icon, name, developer and rating with a star icon.
 *
 * @param uiModel data rendered in the row.
 * @param onClick invoked with [uiModel] when the row is tapped.
 */
@Composable
private fun Application(
    uiModel: ApplicationUiModel.App,
    onClick: (ApplicationUiModel.App) -> Unit
) {
    Row(
        modifier = Modifier.fillMaxWidth().clickable(onClick = { onClick(uiModel) })
            .padding(horizontal = 16.dp, vertical = 8.dp)
    ) {
        // Rounded surface clips the remote icon loaded via Coil.
        Surface(shape = RoundedCornerShape(8.dp)) {
            CoilImage(data = uiModel.icon, modifier = Modifier.size(64.dp))
        }
        Column(modifier = Modifier.fillMaxSize().align(Alignment.Top).padding(start = 16.dp)) {
            Text(text = uiModel.name, style = AppTheme.typography.body1)
            Text(text = uiModel.developer, style = AppTheme.typography.caption)
            Row(Modifier.wrapContentSize(Alignment.TopStart)) {
                Text(
                    text = uiModel.rating.toString(),
                    modifier = Modifier.align(Alignment.CenterVertically),
                    style = AppTheme.typography.caption
                )
                // Star icon tinted with the theme's secondary color.
                Image(
                    imageVector = vectorResource(id = R.drawable.ic_rating),
                    colorFilter = ColorFilter.tint(AppTheme.colors.secondary),
                    modifier = Modifier.size(16.dp).padding(start = 4.dp)
                )
            }
        }
    }
}
| 39.728261 | 98 | 0.665116 |
aecd1bc023f081d1662f8fe3083c3f742c37fa6c | 14,830 | sql | SQL | testall.sql | palak795/akounting | 2b8609e1812fe56f55082f203e36f573c90e3fb0 | [
"MIT"
] | null | null | null | testall.sql | palak795/akounting | 2b8609e1812fe56f55082f203e36f573c90e3fb0 | [
"MIT"
] | null | null | null | testall.sql | palak795/akounting | 2b8609e1812fe56f55082f203e36f573c90e3fb0 | [
"MIT"
] | null | null | null | -- phpMyAdmin SQL Dump
-- version 4.9.5deb2
-- https://www.phpmyadmin.net/
--
-- Host: localhost:3306
-- Generation Time: Apr 29, 2021 at 10:23 AM
-- Server version: 10.3.25-MariaDB-0ubuntu0.20.04.1
-- PHP Version: 7.4.3
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
SET AUTOCOMMIT = 0;
START TRANSACTION;
SET time_zone = "+00:00";
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8mb4 */;
--
-- Database: `testall`
--
-- --------------------------------------------------------
--
-- Table structure for table `failed_jobs`
--
CREATE TABLE `failed_jobs` (
`id` bigint(20) UNSIGNED NOT NULL,
`uuid` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`connection` text COLLATE utf8mb4_unicode_ci NOT NULL,
`queue` text COLLATE utf8mb4_unicode_ci NOT NULL,
`payload` longtext COLLATE utf8mb4_unicode_ci NOT NULL,
`exception` longtext COLLATE utf8mb4_unicode_ci NOT NULL,
`failed_at` timestamp NOT NULL DEFAULT current_timestamp()
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- --------------------------------------------------------
--
-- Table structure for table `migrations`
--
CREATE TABLE `migrations` (
`id` int(10) UNSIGNED NOT NULL,
`migration` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`batch` int(11) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
--
-- Dumping data for table `migrations`
--
INSERT INTO `migrations` (`id`, `migration`, `batch`) VALUES
(1, '2014_10_12_000000_create_users_table', 1),
(2, '2014_10_12_100000_create_password_resets_table', 1),
(3, '2019_08_19_000000_create_failed_jobs_table', 1),
(4, '2021_04_22_062358_create_students_table', 1),
(5, '2021_04_22_074627_alter_students_table', 2);
-- --------------------------------------------------------
--
-- Table structure for table `password_resets`
--
CREATE TABLE `password_resets` (
`email` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`token` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`created_at` timestamp NULL DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- --------------------------------------------------------
--
-- Table structure for table `students`
--
CREATE TABLE `students` (
`id` bigint(20) UNSIGNED NOT NULL,
`studentname` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`fathername` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`mothername` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`email` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`date` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`education` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`country` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`image` text COLLATE utf8mb4_unicode_ci NOT NULL,
`address` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`created_at` timestamp NULL DEFAULT NULL,
`updated_at` timestamp NULL DEFAULT NULL,
`deleted_at` timestamp NULL DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
--
-- Dumping data for table `students`
--
INSERT INTO `students` (`id`, `studentname`, `fathername`, `mothername`, `email`, `date`, `education`, `country`, `image`, `address`, `created_at`, `updated_at`, `deleted_at`) VALUES
(1, 'palvi dhadwal', 'salinder', 'anita devi', 'anita123@gmail.com', '2021-04-01', 'BCA,MCA,BBA', 'Pakistan', '1619075436.png', 'dasuya', '2021-04-22 01:40:36', '2021-04-22 06:31:50', NULL),
(3, 'Gaganpreet', 'Lakhi', 'Satwinder', 'gaganpreet123@gmail.com', '2021-03-31', 'BCA,MCA,BBA', 'Pakistan', '1619078115.png', 'dasuya', '2021-04-22 02:25:15', '2021-04-22 04:30:07', NULL),
(4, 'Nayra', 'aman kumar', 'Shivani kaushal', 'nayra123@gmail.com', '2017-08-01', 'BCA,MCA,BBA', 'Pakistan', '1619089219.png', 'fatehpur', '2021-04-22 05:30:19', '2021-04-22 05:33:56', NULL),
(5, 'palvi', 'salinder singh', 'anita devi', 'superadmin@edufirm.com', '2021-04-22', 'BCA,MCA', 'Canada', '1619092684.png', 'fathpur', '2021-04-22 06:28:04', '2021-04-22 06:28:04', NULL),
(6, 'palvi', 'salinder singh', 'anita devi', 'kwcmeerut@gmail.com', '2021-04-01', 'BCA,MCA', 'Pakistan', '1619095823.png', 'fatehpur', '2021-04-22 07:20:23', '2021-04-25 23:01:08', NULL),
(7, 'nayra', 'amankumar', 'shivanikaushal', 'nayra123@gmail.com', '2017-8-01', 'MCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:01:34', '2021-04-23 00:01:34', NULL),
(8, 'nayra', 'amankumar', 'shivanikaushal', 'nayra123@gmail.com', '2017-08-01', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:03:00', '2021-04-23 00:03:00', NULL),
(9, 'shivan', 'amankumar', 'shivanikaushal', 'shivan123@gmail.com', '2021-04-15', 'BCA,MCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:15:15', '2021-04-23 00:28:50', NULL),
(10, 'shivan', 'amankumar', 'shivanikaushal', 'shivan123@gmail.com', '2021-04-13', 'BCA,MCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:16:11', '2021-04-23 00:19:46', NULL),
(11, 'shivan1', 'amankumar1', 'shivanikaushal1', '1shivan123@gmail.com', '1998-08-25', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:26:30', '2021-04-23 00:26:30', NULL),
(12, 'shivan2', 'amankumar2', 'shivanikaushal2', '2shivan123@gmail.com', '1998-08-26', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:26:30', '2021-04-23 00:26:30', NULL),
(13, 'shivan3', 'amankumar3', 'shivanikaushal3', '3shivan123@gmail.com', '1998-08-27', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:26:30', '2021-04-23 00:26:30', NULL),
(14, 'shivan4', 'amankumar4', 'shivanikaushal4', '4shivan123@gmail.com', '1998-08-28', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:26:30', '2021-04-23 00:26:30', NULL),
(15, 'shivan5', 'amankumar5', 'shivanikaushal5', '5shivan123@gmail.com', '1998-08-29', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:26:30', '2021-04-23 00:26:30', NULL),
(16, 'shivan6', 'amankumar6', 'shivanikaushal6', '6shivan123@gmail.com', '1998-08-30', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:26:30', '2021-04-23 00:26:30', NULL),
(17, 'shivan7', 'amankumar7', 'shivanikaushal7', '7shivan123@gmail.com', '1998-08-31', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:26:30', '2021-04-23 00:26:30', NULL),
(18, 'shivan8', 'amankumar8', 'shivanikaushal8', '8shivan123@gmail.com', '1998-09-01', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:26:30', '2021-04-23 00:26:30', NULL),
(19, 'shivan9', 'amankumar9', 'shivanikaushal9', '9shivan123@gmail.com', '1998-09-02', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:26:30', '2021-04-23 00:26:30', NULL),
(20, 'shivan10', 'amankumar10', 'shivanikaushal10', '10shivan123@gmail.com', '1998-09-03', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:26:30', '2021-04-23 00:26:30', NULL),
(21, 'shivan11', 'amankumar11', 'shivanikaushal11', '11shivan123@gmail.com', '1998-09-04', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:26:30', '2021-04-23 00:26:30', NULL),
(22, 'shivan12', 'amankumar12', 'shivanikaushal12', '12shivan123@gmail.com', '1998-09-05', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:26:30', '2021-04-23 00:26:30', NULL),
(23, 'shivan13', 'amankumar13', 'shivanikaushal13', '13shivan123@gmail.com', '1998-09-06', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:26:30', '2021-04-23 00:26:30', NULL),
(24, 'shivan14', 'amankumar14', 'shivanikaushal14', '14shivan123@gmail.com', '1998-09-07', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:26:30', '2021-04-23 00:26:30', NULL),
(25, 'shivan15', 'amankumar15', 'shivanikaushal15', '15shivan123@gmail.com', '1998-09-08', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:26:30', '2021-04-23 00:26:30', NULL),
(26, 'shivan16', 'amankumar16', 'shivanikaushal16', '16shivan123@gmail.com', '1998-09-09', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:26:30', '2021-04-23 00:26:30', NULL),
(27, 'shivan17', 'amankumar17', 'shivanikaushal17', '17shivan123@gmail.com', '1998-09-10', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:26:30', '2021-04-23 00:26:30', NULL),
(28, 'shivan18', 'amankumar18', 'shivanikaushal18', '18shivan123@gmail.com', '1998-09-11', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:26:30', '2021-04-23 00:26:30', NULL),
(29, 'shivan19', 'amankumar19', 'shivanikaushal19', '19shivan123@gmail.com', '1998-09-12', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:26:30', '2021-04-23 00:26:30', NULL),
(30, 'shivan20', 'amankumar20', 'shivanikaushal20', '20shivan123@gmail.com', '1998-09-13', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 00:26:30', '2021-04-23 00:26:30', NULL),
(31, 'shivan1', 'amankumar1', 'shivanikaushal1', '1shivan123@gmail.com', '1998-08-25', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 01:05:19', '2021-04-23 01:05:19', NULL),
(32, 'shivan2', 'amankumar2', 'shivanikaushal2', '2shivan123@gmail.com', '1998-08-26', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 01:05:19', '2021-04-23 01:05:19', NULL),
(33, 'shivan3', 'amankumar3', 'shivanikaushal3', '3shivan123@gmail.com', '1998-08-27', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 01:05:19', '2021-04-23 01:05:19', NULL),
(34, 'shivan4', 'amankumar4', 'shivanikaushal4', '4shivan123@gmail.com', '1998-08-28', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 01:05:19', '2021-04-23 01:05:19', NULL),
(35, 'shivan5', 'amankumar5', 'shivanikaushal5', '5shivan123@gmail.com', '1998-08-29', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 01:05:19', '2021-04-23 01:05:19', NULL),
(36, 'shivan6', 'amankumar6', 'shivanikaushal6', '6shivan123@gmail.com', '1998-08-30', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 01:05:19', '2021-04-23 01:05:19', NULL),
(37, 'shivan7', 'amankumar7', 'shivanikaushal7', '7shivan123@gmail.com', '1998-08-31', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 01:05:19', '2021-04-23 01:05:19', NULL),
(38, 'shivan8', 'amankumar8', 'shivanikaushal8', '8shivan123@gmail.com', '1998-09-01', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 01:05:19', '2021-04-23 01:05:19', NULL),
(39, 'shivan9', 'amankumar9', 'shivanikaushal9', '9shivan123@gmail.com', '1998-09-02', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 01:05:19', '2021-04-23 01:05:19', NULL),
(40, 'shivan10', 'amankumar10', 'shivanikaushal10', '10shivan123@gmail.com', '1998-09-03', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 01:05:19', '2021-04-23 01:05:19', NULL),
(41, 'shivan11', 'amankumar11', 'shivanikaushal11', '11shivan123@gmail.com', '1998-09-04', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 01:05:19', '2021-04-23 01:05:19', NULL),
(42, 'shivan12', 'amankumar12', 'shivanikaushal12', '12shivan123@gmail.com', '1998-09-05', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 01:05:19', '2021-04-23 01:05:19', NULL),
(43, 'shivan13', 'amankumar13', 'shivanikaushal13', '13shivan123@gmail.com', '1998-09-06', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 01:05:19', '2021-04-23 01:05:19', NULL),
(44, 'shivan14', 'amankumar14', 'shivanikaushal14', '14shivan123@gmail.com', '1998-09-07', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 01:05:19', '2021-04-23 01:05:19', NULL),
(45, 'shivan15', 'amankumar15', 'shivanikaushal15', '15shivan123@gmail.com', '1998-09-08', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 01:05:19', '2021-04-23 01:05:19', NULL),
(46, 'shivan16', 'amankumar16', 'shivanikaushal16', '16shivan123@gmail.com', '1998-09-09', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 01:05:19', '2021-04-23 01:05:19', NULL),
(47, 'shivan17', 'amankumar17', 'shivanikaushal17', '17shivan123@gmail.com', '1998-09-10', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 01:05:19', '2021-04-23 01:05:19', NULL),
(48, 'shivan18', 'amankumar18', 'shivanikaushal18', '18shivan123@gmail.com', '1998-09-11', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 01:05:19', '2021-04-23 01:05:19', NULL),
(49, 'shivan19', 'amankumar19', 'shivanikaushal19', '19shivan123@gmail.com', '1998-09-12', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 01:05:19', '2021-04-23 01:05:19', NULL),
(50, 'shivan20', 'amankumar20', 'shivanikaushal20', '20shivan123@gmail.com', '1998-09-13', 'MCA,BCA', 'India', '1619078115.png', 'fatehpur', '2021-04-23 01:05:19', '2021-04-23 01:05:19', NULL);
-- --------------------------------------------------------
--
-- Table structure for table `users`
--
CREATE TABLE `users` (
`id` bigint(20) UNSIGNED NOT NULL,
`name` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`email` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`email_verified_at` timestamp NULL DEFAULT NULL,
`password` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`remember_token` varchar(100) COLLATE utf8mb4_unicode_ci DEFAULT NULL,
`created_at` timestamp NULL DEFAULT NULL,
`updated_at` timestamp NULL DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
--
-- Indexes for dumped tables
--
--
-- Indexes for table `failed_jobs`
--
ALTER TABLE `failed_jobs`
ADD PRIMARY KEY (`id`),
ADD UNIQUE KEY `failed_jobs_uuid_unique` (`uuid`);
--
-- Indexes for table `migrations`
--
ALTER TABLE `migrations`
ADD PRIMARY KEY (`id`);
--
-- Indexes for table `password_resets`
--
ALTER TABLE `password_resets`
ADD KEY `password_resets_email_index` (`email`);
--
-- Indexes for table `students`
--
ALTER TABLE `students`
ADD PRIMARY KEY (`id`);
--
-- Indexes for table `users`
--
ALTER TABLE `users`
ADD PRIMARY KEY (`id`),
ADD UNIQUE KEY `users_email_unique` (`email`);
--
-- AUTO_INCREMENT for dumped tables
--
--
-- AUTO_INCREMENT for table `failed_jobs`
--
ALTER TABLE `failed_jobs`
MODIFY `id` bigint(20) UNSIGNED NOT NULL AUTO_INCREMENT;
--
-- AUTO_INCREMENT for table `migrations`
--
ALTER TABLE `migrations`
MODIFY `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=6;
--
-- AUTO_INCREMENT for table `students`
--
ALTER TABLE `students`
MODIFY `id` bigint(20) UNSIGNED NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=51;
--
-- AUTO_INCREMENT for table `users`
--
ALTER TABLE `users`
MODIFY `id` bigint(20) UNSIGNED NOT NULL AUTO_INCREMENT;
COMMIT;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
| 62.310924 | 193 | 0.672353 |
f169e6df4ad1c789f786bc112f41f87edd79bc0c | 1,662 | sql | SQL | Wiley_task/5_Table_of_records.sql | Alexgta/SQL-PLSQL-Examples | f810a3cb42d7450c055458b10f28a359e4e8d6f5 | [
"Apache-2.0"
] | null | null | null | Wiley_task/5_Table_of_records.sql | Alexgta/SQL-PLSQL-Examples | f810a3cb42d7450c055458b10f28a359e4e8d6f5 | [
"Apache-2.0"
] | null | null | null | Wiley_task/5_Table_of_records.sql | Alexgta/SQL-PLSQL-Examples | f810a3cb42d7450c055458b10f28a359e4e8d6f5 | [
"Apache-2.0"
] | null | null | null | CREATE TABLE "ORDMMAPP"."TEST_EMPLOY"
(EMPLOY_ID NUMBER,
FIRST_NAME VARCHAR2(100),
LAST_NAME VARCHAR2(100),
SALARY NUMBER
);
-- Seed rows used when exercising test_records_of_tables manually.
Insert into ORDMMAPP.TEST_EMPLOY (EMPLOY_ID,FIRST_NAME,LAST_NAME,SALARY) values (1,'Jhon','Smith',100);
Insert into ORDMMAPP.TEST_EMPLOY (EMPLOY_ID,FIRST_NAME,LAST_NAME,SALARY) values (2,'Igor','Doe',200);
Insert into ORDMMAPP.TEST_EMPLOY (EMPLOY_ID,FIRST_NAME,LAST_NAME,SALARY) values (3,'Peter ','Smith',300);
Insert into ORDMMAPP.TEST_EMPLOY (EMPLOY_ID,FIRST_NAME,LAST_NAME,SALARY) values (4,'Scott','Lee',400);
Insert into ORDMMAPP.TEST_EMPLOY (EMPLOY_ID,FIRST_NAME,LAST_NAME,SALARY) values (5,'Jones','Ivanov',500);
-- Look up an employee's salary by concatenated first/last name using a
-- PL/SQL associative array (table of records) keyed by VARCHAR2.
-- Returns the salary, or -1 when no matching employee exists.
create or replace function test_records_of_tables (p_first_name varchar2 default null, p_last_name varchar2 default null)
RETURN NUMBER
AS
  TYPE employ_rec IS RECORD (
    employ_id  NUMBER,
    first_name VARCHAR2(100),
    last_name  VARCHAR2(100),
    salary     NUMBER
  );
  -- Associative array keyed by first_name || last_name.
  -- NOTE(review): key collisions are possible for names that concatenate
  -- to the same string; later rows silently overwrite earlier ones.
  TYPE employ_tbl_type IS TABLE OF employ_rec INDEX BY VARCHAR2(200);
  employ_tbl employ_tbl_type;

  CURSOR cur_employ IS
    SELECT t.employ_id, t.first_name, t.last_name, t.salary
      FROM test_employ t;

  v_key1   varchar2(200);
  v_result NUMBER := -1;  -- -1 signals "not found" (was redundantly set to 0 first)
BEGIN
  -- Load every employee row into the associative array.
  FOR rc IN cur_employ LOOP
    v_key1 := rc.first_name || rc.last_name;
    employ_tbl(v_key1).employ_id  := rc.employ_id;
    employ_tbl(v_key1).first_name := rc.first_name;
    employ_tbl(v_key1).last_name  := rc.last_name;
    employ_tbl(v_key1).salary     := rc.salary;
  END LOOP;

  BEGIN
    v_result := employ_tbl(p_first_name || p_last_name).salary;
  EXCEPTION
    -- Referencing a nonexistent associative-array element raises
    -- NO_DATA_FOUND; catch only that instead of swallowing every error
    -- with WHEN OTHERS.
    WHEN NO_DATA_FOUND THEN
      v_result := -1;
  END;

  RETURN v_result;
END test_records_of_tables;
| 29.157895 | 121 | 0.749699 |
297cc4ab05910de161521c595144deb567d5e558 | 212 | asm | Assembly | libsrc/_DEVELOPMENT/math/float/math16/lm16/c/sccz80/exp10.asm | witchcraft2001/z88dk | 11adca337a4125aff611ddfdf3fc2401e8dda5b2 | [
"ClArtistic"
] | 640 | 2017-01-14T23:33:45.000Z | 2022-03-30T11:28:42.000Z | libsrc/_DEVELOPMENT/math/float/math16/lm16/c/sccz80/exp10.asm | C-Chads/z88dk | a4141a8e51205c6414b4ae3263b633c4265778e6 | [
"ClArtistic"
] | 1,600 | 2017-01-15T16:12:02.000Z | 2022-03-31T12:11:12.000Z | libsrc/_DEVELOPMENT/math/float/math16/lm16/c/sccz80/exp10.asm | C-Chads/z88dk | a4141a8e51205c6414b4ae3263b633c4265778e6 | [
"ClArtistic"
] | 215 | 2017-01-17T10:43:03.000Z | 2022-03-23T17:25:02.000Z |
; exp10f16 — base-10 exponential entry point for the 16-bit float math16 library.
SECTION code_fp_math16
PUBLIC exp10f16
EXTERN _m16_exp10f
; Alias the public symbol directly onto the library implementation.
defc exp10f16 = _m16_exp10f
; SDCC bridge for Classic
IF __CLASSIC
; Underscore-prefixed alias exposed for the SDCC calling convention.
PUBLIC _exp10f16
EXTERN cm16_sdcc_exp10
defc _exp10f16 = cm16_sdcc_exp10
ENDIF
| 14.133333 | 32 | 0.825472 |
7105dfbf2e6132a59f7624d301bddb837e890414 | 223 | ts | TypeScript | flex-webchat-interactive/node_modules/@mui/lab/internal/pickers/wrappers/DesktopTooltipWrapper.d.ts | mark-marshall/twilio-flex-interactive-webchat | ea049dd8509ce2ea34d487024ad7e96c0e0ffd16 | [
"MIT"
] | null | null | null | flex-webchat-interactive/node_modules/@mui/lab/internal/pickers/wrappers/DesktopTooltipWrapper.d.ts | mark-marshall/twilio-flex-interactive-webchat | ea049dd8509ce2ea34d487024ad7e96c0e0ffd16 | [
"MIT"
] | null | null | null | flex-webchat-interactive/node_modules/@mui/lab/internal/pickers/wrappers/DesktopTooltipWrapper.d.ts | mark-marshall/twilio-flex-interactive-webchat | ea049dd8509ce2ea34d487024ad7e96c0e0ffd16 | [
"MIT"
] | null | null | null | /// <reference types="react" />
import { InternalDesktopWrapperProps } from './DesktopWrapper';
declare function DesktopTooltipWrapper(props: InternalDesktopWrapperProps): JSX.Element;
export default DesktopTooltipWrapper;
| 44.6 | 88 | 0.816143 |
9bf8117562e863ebb18cda227b1332b1675705f7 | 1,139 | js | JavaScript | AbiokaRdn.Host/src/app/components/resendVerification/resendVerificationController.js | tugrulelmas/AbiokarRdn | 3c8ed28ed3c29514a52e973bf30b2f5643205555 | [
"MIT"
] | null | null | null | AbiokaRdn.Host/src/app/components/resendVerification/resendVerificationController.js | tugrulelmas/AbiokarRdn | 3c8ed28ed3c29514a52e973bf30b2f5643205555 | [
"MIT"
] | null | null | null | AbiokaRdn.Host/src/app/components/resendVerification/resendVerificationController.js | tugrulelmas/AbiokarRdn | 3c8ed28ed3c29514a52e973bf30b2f5643205555 | [
"MIT"
] | null | null | null | (function () {
'use strict';
var resendVerification = {
templateUrl: '/app/components/resendVerification/resendVerification.html',
controller: ResendVerificationController,
controllerAs: 'vm'
};
/* @ngInject */
function ResendVerificationController($http) {
var vm = this;
vm.model = {};
vm.resend = resend;
vm.resent = false;
vm.loading = false;
function resend() {
vm.loading = true;
$http.post("./user/" + vm.model.Email + "/ResendVerification").then(function (response) {
vm.resent = true;
}, function () {
vm.loading = false;
});
}
}
angular.module('abioka')
.component('resendVerification', resendVerification)
.config(config);
/* @ngInject */
function config($stateProvider) {
$stateProvider
.state('resendVerification', {
url: '/resendVerification',
template: '<resend-verification></resend-verification>',
isPublic: true
});
}
})();
| 27.119048 | 101 | 0.532046 |
717ed315e5fb9bc279822a47f6a783c97b9e88c5 | 222 | ts | TypeScript | types/geometries/CylinderGeometry.d.ts | ukonpower/glpower | 86138d7b38e9a239a85e603e56c3d3ac4adfa742 | [
"MIT"
] | 2 | 2020-05-10T15:19:35.000Z | 2021-09-03T23:06:38.000Z | types/geometries/CylinderGeometry.d.ts | ukonpower/glPower | 86138d7b38e9a239a85e603e56c3d3ac4adfa742 | [
"MIT"
] | 4 | 2021-03-09T19:48:55.000Z | 2022-02-18T12:19:36.000Z | types/geometries/CylinderGeometry.d.ts | ukonpower/glPower | 86138d7b38e9a239a85e603e56c3d3ac4adfa742 | [
"MIT"
] | 2 | 2020-01-28T08:43:14.000Z | 2021-09-03T23:06:44.000Z | import { Geometry } from './Geometry';
export declare class CylinderGeometry extends Geometry {
constructor(radiusTop?: number, radiusBottom?: number, height?: number, radSegments?: number, heightSegments?: number);
}
| 44.4 | 123 | 0.756757 |
05c66eecbf82a7b3cd071f7db869f23fa39fc45e | 411 | rb | Ruby | spec/factories/produced_item_warehousex_items.rb | emclab/produced_item_warehousex | 4872aa34f7081de1922f19e9177d5398bc781036 | [
"MIT"
] | null | null | null | spec/factories/produced_item_warehousex_items.rb | emclab/produced_item_warehousex | 4872aa34f7081de1922f19e9177d5398bc781036 | [
"MIT"
] | null | null | null | spec/factories/produced_item_warehousex_items.rb | emclab/produced_item_warehousex | 4872aa34f7081de1922f19e9177d5398bc781036 | [
"MIT"
] | null | null | null | # Read about factories at https://github.com/thoughtbot/factory_girl
FactoryGirl.define do
factory :produced_item_warehousex_item, :class => 'ProducedItemWarehousex::Item' do
batch_id 1
name 'a product 123'
in_date "2014-03-27"
in_qty 1
stock_qty 1
last_updated_by_id 1
storage_location "MyString"
brief_note "MyText"
packaging_desp "MyText"
checkin_by_id 1
end
end
| 24.176471 | 85 | 0.727494 |
6bac4420ba2a3eaddadc37a312cd0e5ecec1f93d | 101,311 | c | C | hihope_neptune-oh_hid/00_src/v0.1/third_party/cryptsetup/lib/luks2/luks2_reencrypt.c | dawmlight/vendor_oh_fun | bc9fb50920f06cd4c27399f60076f5793043c77d | [
"Apache-2.0"
] | 1 | 2022-02-15T08:51:55.000Z | 2022-02-15T08:51:55.000Z | hihope_neptune-oh_hid/00_src/v0.3/third_party/cryptsetup/lib/luks2/luks2_reencrypt.c | dawmlight/vendor_oh_fun | bc9fb50920f06cd4c27399f60076f5793043c77d | [
"Apache-2.0"
] | null | null | null | hihope_neptune-oh_hid/00_src/v0.3/third_party/cryptsetup/lib/luks2/luks2_reencrypt.c | dawmlight/vendor_oh_fun | bc9fb50920f06cd4c27399f60076f5793043c77d | [
"Apache-2.0"
] | null | null | null | /*
* LUKS - Linux Unified Key Setup v2, reencryption helpers
*
* Copyright (C) 2015-2019, Red Hat, Inc. All rights reserved.
* Copyright (C) 2015-2019, Ondrej Kozina
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "luks2_internal.h"
#include "utils_device_locking.h"
/* Fetch the backup segment describing either the resulting (new != 0)
 * or the original (new == 0) data layout of the reencryption. */
static json_object *reencrypt_segment(struct luks2_hdr *hdr, unsigned new)
{
	const char *flag;

	if (new)
		flag = "backup-final";
	else
		flag = "backup-previous";

	return LUKS2_get_segment_by_flag(hdr, flag);
}
/* Backup segment describing the layout after reencryption finishes. */
static json_object *reencrypt_segment_new(struct luks2_hdr *hdr)
{
	return reencrypt_segment(hdr, 1);
}
/* Backup segment describing the layout before reencryption started. */
static json_object *reencrypt_segment_old(struct luks2_hdr *hdr)
{
	return reencrypt_segment(hdr, 0);
}
/* Cipher specification string of the resulting (new) segment. */
static const char *reencrypt_segment_cipher_new(struct luks2_hdr *hdr)
{
	return json_segment_get_cipher(reencrypt_segment(hdr, 1));
}
/* Cipher specification string of the original (old) segment. */
static const char *reencrypt_segment_cipher_old(struct luks2_hdr *hdr)
{
	return json_segment_get_cipher(reencrypt_segment(hdr, 0));
}
/* Encryption sector size of the resulting (new) segment. */
static int reencrypt_get_sector_size_new(struct luks2_hdr *hdr)
{
	return json_segment_get_sector_size(reencrypt_segment(hdr, 1));
}
/* Encryption sector size of the original (old) segment. */
static int reencrypt_get_sector_size_old(struct luks2_hdr *hdr)
{
	return json_segment_get_sector_size(reencrypt_segment(hdr, 0));
}
/* Byte offset of the data area in the new or the old layout. Falls back
 * to the binary header data offset when no backup segment exists. */
static uint64_t reencrypt_data_offset(struct luks2_hdr *hdr, unsigned new)
{
	json_object *jobj_segment = reencrypt_segment(hdr, new);

	if (!jobj_segment)
		return LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;

	return json_segment_get_offset(jobj_segment, 0);
}
/* Offset of the backup segment flagged "backup-moved-segment", or 0 when
 * no such segment is present in the metadata. */
static uint64_t LUKS2_reencrypt_get_data_offset_moved(struct luks2_hdr *hdr)
{
	json_object *jobj = LUKS2_get_segment_by_flag(hdr, "backup-moved-segment");

	return jobj ? json_segment_get_offset(jobj, 0) : 0;
}
/* Data offset (bytes) of the resulting (new) layout. */
static uint64_t reencrypt_get_data_offset_new(struct luks2_hdr *hdr)
{
	return reencrypt_data_offset(hdr, 1);
}
/* Data offset (bytes) of the original (old) layout. */
static uint64_t reencrypt_get_data_offset_old(struct luks2_hdr *hdr)
{
	return reencrypt_data_offset(hdr, 0);
}
/* Digest id assigned to the new (new != 0) or old backup segment;
 * negative value when the backup segment cannot be found. */
static int reencrypt_digest(struct luks2_hdr *hdr, unsigned new)
{
	const char *flag = new ? "backup-final" : "backup-previous";
	int segment = LUKS2_get_segment_id_by_flag(hdr, flag);

	return segment < 0 ? segment : LUKS2_digest_by_segment(hdr, segment);
}
/* Digest id protecting the new (resulting) volume key. */
int LUKS2_reencrypt_digest_new(struct luks2_hdr *hdr)
{
	return reencrypt_digest(hdr, 1);
}
/* Digest id protecting the old (original) volume key. */
int LUKS2_reencrypt_digest_old(struct luks2_hdr *hdr)
{
	return reencrypt_digest(hdr, 0);
}
/* Resilience type stored in the reencrypt keyslot area:
 * none, checksums, journal or shift. NULL when unavailable. */
static const char *reencrypt_resilience_type(struct luks2_hdr *hdr)
{
	json_object *jobj_area, *jobj_type;
	int keyslot = LUKS2_find_keyslot(hdr, "reencrypt");

	if (keyslot < 0)
		return NULL;

	json_object_object_get_ex(LUKS2_get_keyslot_jobj(hdr, keyslot), "area", &jobj_area);

	return json_object_object_get_ex(jobj_area, "type", &jobj_type) ?
	       json_object_get_string(jobj_type) : NULL;
}
/* Hash algorithm used by the "checksum" resilience mode, or NULL when
 * the reencrypt keyslot is missing or uses another resilience type. */
static const char *reencrypt_resilience_hash(struct luks2_hdr *hdr)
{
	json_object *jobj_area, *jobj, *jobj_hash;
	int keyslot = LUKS2_find_keyslot(hdr, "reencrypt");

	if (keyslot < 0)
		return NULL;

	json_object_object_get_ex(LUKS2_get_keyslot_jobj(hdr, keyslot), "area", &jobj_area);

	if (!json_object_object_get_ex(jobj_area, "type", &jobj))
		return NULL;
	if (strcmp(json_object_get_string(jobj), "checksum"))
		return NULL;
	if (!json_object_object_get_ex(jobj_area, "hash", &jobj_hash))
		return NULL;

	return json_object_get_string(jobj_hash);
}
/* Sector size declared by the "checksum" resilience area, used as the
 * reencryption alignment; 0 when unavailable or a different resilience
 * type is configured. */
static uint32_t reencrypt_alignment(struct luks2_hdr *hdr)
{
	json_object *jobj_area, *jobj, *jobj_sector_size;
	int keyslot = LUKS2_find_keyslot(hdr, "reencrypt");

	if (keyslot < 0)
		return 0;

	json_object_object_get_ex(LUKS2_get_keyslot_jobj(hdr, keyslot), "area", &jobj_area);

	if (!json_object_object_get_ex(jobj_area, "type", &jobj))
		return 0;
	if (strcmp(json_object_get_string(jobj), "checksum"))
		return 0;
	/* checksum areas are expected to carry a hash; bail out otherwise */
	if (!json_object_object_get_ex(jobj_area, "hash", &jobj))
		return 0;
	if (!json_object_object_get_ex(jobj_area, "sector_size", &jobj_sector_size))
		return 0;

	return json_object_get_uint32(jobj_sector_size);
}
/*
 * Build the post-step segment layout for the data-shift case (per the
 * _enc_ prefix, presumably encryption with data shift — confirm against
 * callers): segments preceding the one in reencryption are copied
 * verbatim, then the in-reencryption segment is merged with the segment
 * following it (if any) into a single segment describing the new layout.
 *
 * Returns a new json object owned by the caller, or NULL on error.
 */
static json_object *_enc_create_segments_shift_after(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reenc_context *rh,
	uint64_t data_offset)
{
	int reenc_seg, i = 0;
	json_object *jobj_copy, *jobj_seg_new = NULL, *jobj_segs_post = json_object_new_object();
	uint64_t tmp;

	if (!rh->jobj_segs_hot || !jobj_segs_post)
		goto err;

	/* no hot segments: nothing to merge, return the empty object */
	if (json_segments_count(rh->jobj_segs_hot) == 0)
		return jobj_segs_post;

	reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
	if (reenc_seg < 0)
		goto err;

	/* segments before the reencryption window are taken over unchanged;
	 * json_object_get bumps the refcount for the shared reference */
	while (i < reenc_seg) {
		jobj_copy = json_segments_get_segment(rh->jobj_segs_hot, i);
		if (!jobj_copy)
			goto err;
		json_object_object_add_by_uint(jobj_segs_post, i++, json_object_get(jobj_copy));
	}

	if (json_object_copy(json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1), &jobj_seg_new)) {
		/* no segment follows the reencrypted one: reuse the
		 * reencrypted segment itself minus its transient flag */
		if (json_object_copy(json_segments_get_segment(rh->jobj_segs_hot, reenc_seg), &jobj_seg_new))
			goto err;
		json_segment_remove_flag(jobj_seg_new, "in-reencryption");
		tmp = rh->length;
	} else {
		/* extend the following segment backwards over the area
		 * processed in this step */
		json_object_object_add(jobj_seg_new, "offset", json_object_new_uint64(rh->offset + data_offset));
		json_object_object_add(jobj_seg_new, "iv_tweak", json_object_new_uint64(rh->offset >> SECTOR_SHIFT));
		tmp = json_segment_get_size(jobj_seg_new, 0) + rh->length;
	}

	/* alter size of new segment, reenc_seg == 0 we're finished */
	json_object_object_add(jobj_seg_new, "size", reenc_seg > 0 ? json_object_new_uint64(tmp) : json_object_new_string("dynamic"));
	json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_seg_new);
	return jobj_segs_post;
err:
	json_object_put(jobj_segs_post);
	return NULL;
}
/*
 * Build the hot segment layout for encryption with data shift: keep the
 * segments preceding the last linear segment, shrink that linear segment
 * by the current reencryption window, append the in-reencryption crypt
 * segment and, if one exists, the already encrypted crypt segment.
 *
 * Returns a new json object owned by the caller, or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_encrypt_shift(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reenc_context *rh,
	uint64_t data_offset)
{
	int sg, crypt_seg, i = 0;
	uint64_t segment_size;
	json_object *jobj_seg_shrunk, *jobj_seg_new, *jobj_copy, *jobj_enc_seg = NULL,
		    *jobj_segs_hot = json_object_new_object();

	if (!jobj_segs_hot)
		return NULL;

	crypt_seg = LUKS2_segment_by_type(hdr, "crypt");

	/* FIXME: This is hack. Find proper way to fix it. */
	sg = LUKS2_last_segment_by_type(hdr, "linear");
	if (rh->offset && sg < 0)
		goto err;
	if (sg < 0)
		return jobj_segs_hot;

	/* segment currently being encrypted (flagged in-reencryption) */
	jobj_enc_seg = json_segment_create_crypt(data_offset + rh->offset,
						  rh->offset >> SECTOR_SHIFT,
						  &rh->length,
						  reencrypt_segment_cipher_new(hdr),
						  reencrypt_get_sector_size_new(hdr),
						  1);

	/* copy every segment preceding the last linear one unchanged */
	while (i < sg) {
		jobj_copy = LUKS2_get_segment_jobj(hdr, i);
		if (!jobj_copy)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, i++, json_object_get(jobj_copy));
	}

	segment_size = LUKS2_segment_size(hdr, sg, 0);
	if (segment_size > rh->length) {
		/* linear segment is larger than the reencryption window:
		 * keep its not-yet-processed part, shrunk by rh->length */
		jobj_seg_shrunk = NULL;
		if (json_object_copy(LUKS2_get_segment_jobj(hdr, sg), &jobj_seg_shrunk))
			goto err;
		json_object_object_add(jobj_seg_shrunk, "size", json_object_new_uint64(segment_size - rh->length));
		json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_seg_shrunk);
	}

	json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_enc_seg);
	jobj_enc_seg = NULL; /* see err: label */

	/* first crypt segment after encryption ? */
	if (crypt_seg >= 0) {
		jobj_seg_new = LUKS2_get_segment_jobj(hdr, crypt_seg);
		if (!jobj_seg_new)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg, json_object_get(jobj_seg_new));
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_enc_seg);
	json_object_put(jobj_segs_hot);
	return NULL;
}
/*
 * Create the json segment describing data already converted to the new key
 * (or plaintext for decryption). Returns NULL for unknown mode or on error.
 */
static json_object *reencrypt_make_segment_new(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	const struct luks2_reenc_context *rh,
	uint64_t data_offset,
	uint64_t segment_offset,
	uint64_t iv_offset,
	const uint64_t *segment_length)
{
	if (rh->mode == CRYPT_REENCRYPT_REENCRYPT || rh->mode == CRYPT_REENCRYPT_ENCRYPT)
		return json_segment_create_crypt(data_offset + segment_offset,
						 crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
						 segment_length,
						 reencrypt_segment_cipher_new(hdr),
						 reencrypt_get_sector_size_new(hdr), 0);

	if (rh->mode == CRYPT_REENCRYPT_DECRYPT)
		return json_segment_create_linear(data_offset + segment_offset, segment_length, 0);

	return NULL;
}
/*
 * Build "post" segment layout (state after the hotzone was fully written) for
 * forward direction: [new segment covering everything converted | optional
 * remaining old segment]. Returns new json object (caller owns the reference)
 * or NULL on error.
 */
static json_object *reencrypt_make_post_segments_forward(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reenc_context *rh,
	uint64_t data_offset)
{
	int reenc_seg;
	json_object *jobj_new_seg_after, *jobj_old_seg, *jobj_old_seg_copy = NULL,
		    *jobj_segs_post = json_object_new_object();
	uint64_t fixed_length = rh->offset + rh->length;

	if (!rh->jobj_segs_hot || !jobj_segs_post)
		goto err;

	reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
	if (reenc_seg < 0)
		goto err; /* FIX: was 'return NULL', leaking jobj_segs_post */

	jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg + 1);

	/*
	 * if there's no old segment after reencryption, we're done.
	 * Set size to 'dynamic' again.
	 */
	jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, jobj_old_seg ? &fixed_length : NULL);
	if (!jobj_new_seg_after)
		goto err;
	json_object_object_add_by_uint(jobj_segs_post, 0, jobj_new_seg_after);

	if (jobj_old_seg) {
		if (rh->fixed_length) {
			/* copy so the stored size can be shrunk without touching the hot json */
			if (json_object_copy(jobj_old_seg, &jobj_old_seg_copy))
				goto err;
			jobj_old_seg = jobj_old_seg_copy;
			fixed_length = rh->device_size - fixed_length;
			json_object_object_add(jobj_old_seg, "size", json_object_new_uint64(fixed_length));
		} else
			json_object_get(jobj_old_seg); /* take a reference for the post json */
		json_object_object_add_by_uint(jobj_segs_post, 1, jobj_old_seg);
	}

	return jobj_segs_post;
err:
	json_object_put(jobj_segs_post);
	return NULL;
}
/*
 * Build "post" segment layout for backward direction: [optional remaining old
 * segment | new segment covering the converted tail]. Returns new json object
 * (caller owns the reference) or NULL on error.
 */
static json_object *reencrypt_make_post_segments_backward(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reenc_context *rh,
	uint64_t data_offset)
{
	int reenc_seg;
	uint64_t fixed_length;
	json_object *jobj_new_seg_after, *jobj_old_seg,
		    *jobj_segs_post = json_object_new_object();

	if (!rh->jobj_segs_hot || !jobj_segs_post)
		goto err;

	reenc_seg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
	if (reenc_seg < 0)
		goto err; /* FIX: was 'return NULL', leaking jobj_segs_post */

	/* old segment before the hotzone (if any) is carried over unchanged */
	jobj_old_seg = json_segments_get_segment(rh->jobj_segs_hot, reenc_seg - 1);
	if (jobj_old_seg)
		json_object_object_add_by_uint(jobj_segs_post, reenc_seg - 1, json_object_get(jobj_old_seg));

	if (rh->fixed_length && rh->offset) {
		fixed_length = rh->device_size - rh->offset;
		jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, &fixed_length);
	} else
		jobj_new_seg_after = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset, rh->offset, NULL);
	if (!jobj_new_seg_after)
		goto err;
	json_object_object_add_by_uint(jobj_segs_post, reenc_seg, jobj_new_seg_after);

	return jobj_segs_post;
err:
	json_object_put(jobj_segs_post);
	return NULL;
}
/*
 * Create the json segment describing the hotzone currently in reencryption
 * (last argument 1 marks it "in-reencryption"). NULL for unknown mode/error.
 */
static json_object *reencrypt_make_segment_reencrypt(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	const struct luks2_reenc_context *rh,
	uint64_t data_offset,
	uint64_t segment_offset,
	uint64_t iv_offset,
	const uint64_t *segment_length)
{
	if (rh->mode == CRYPT_REENCRYPT_REENCRYPT || rh->mode == CRYPT_REENCRYPT_ENCRYPT)
		return json_segment_create_crypt(data_offset + segment_offset,
						 crypt_get_iv_offset(cd) + (iv_offset >> SECTOR_SHIFT),
						 segment_length,
						 reencrypt_segment_cipher_new(hdr),
						 reencrypt_get_sector_size_new(hdr), 1);

	if (rh->mode == CRYPT_REENCRYPT_DECRYPT)
		return json_segment_create_linear(data_offset + segment_offset, segment_length, 1);

	return NULL;
}
/*
 * Create the json segment describing data still holding the old state:
 * encrypted with the old key (reencrypt/decrypt) or plaintext (encrypt).
 * Returns NULL for unknown mode or on error.
 */
static json_object *reencrypt_make_segment_old(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	const struct luks2_reenc_context *rh,
	uint64_t data_offset,
	uint64_t segment_offset,
	const uint64_t *segment_length)
{
	if (rh->mode == CRYPT_REENCRYPT_REENCRYPT || rh->mode == CRYPT_REENCRYPT_DECRYPT)
		return json_segment_create_crypt(data_offset + segment_offset,
						 crypt_get_iv_offset(cd) + (segment_offset >> SECTOR_SHIFT),
						 segment_length,
						 reencrypt_segment_cipher_old(hdr),
						 reencrypt_get_sector_size_old(hdr),
						 0);

	if (rh->mode == CRYPT_REENCRYPT_ENCRYPT)
		return json_segment_create_linear(data_offset + segment_offset, segment_length, 0);

	return NULL;
}
/*
 * Build "hot" segment layout for forward reencryption:
 * [new (already converted) | in-reencryption hotzone | old (not yet converted)].
 * Returns a new json object (caller owns the reference) or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_forward(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reenc_context *rh,
	uint64_t device_size,
	uint64_t data_offset)
{
	json_object *jobj_segs_hot, *jobj_reenc_seg, *jobj_old_seg, *jobj_new_seg;
	uint64_t fixed_length, tmp = rh->offset + rh->length;
	unsigned int sg = 0;

	jobj_segs_hot = json_object_new_object();
	if (!jobj_segs_hot)
		return NULL;

	/* head of the device was already re-encrypted with the new key */
	if (rh->offset) {
		jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, 0, 0, &rh->offset);
		if (!jobj_new_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_new_seg);
	}

	/* the hotzone currently being re-encrypted */
	jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
	if (!jobj_reenc_seg)
		goto err;
	json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);

	/* tail still holding old data; old data is shifted by data_shift if set */
	if (tmp < device_size) {
		fixed_length = device_size - tmp;
		jobj_old_seg = reencrypt_make_segment_old(cd, hdr, rh, data_offset + rh->data_shift, rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
		if (!jobj_old_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_old_seg);
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_segs_hot);
	return NULL;
}
/*
 * Build "hot" segment layout for backward reencryption:
 * [old (shrunk copy of segment 0) | in-reencryption hotzone | new tail].
 * Returns a new json object (caller owns the reference) or NULL on error.
 */
static json_object *reencrypt_make_hot_segments_backward(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reenc_context *rh,
	uint64_t device_size,
	uint64_t data_offset)
{
	json_object *jobj_reenc_seg, *jobj_new_seg, *jobj_old_seg = NULL,
		    *jobj_segs_hot = json_object_new_object();
	int sg = 0;
	uint64_t fixed_length, tmp = rh->offset + rh->length;

	if (!jobj_segs_hot)
		return NULL;

	/* head of the device still holds old data: copy segment 0 and shrink it */
	if (rh->offset) {
		if (json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_old_seg))
			goto err;
		json_object_object_add(jobj_old_seg, "size", json_object_new_uint64(rh->offset));

		json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_old_seg);
	}

	/* the hotzone currently being re-encrypted */
	jobj_reenc_seg = reencrypt_make_segment_reencrypt(cd, hdr, rh, data_offset, rh->offset, rh->offset, &rh->length);
	if (!jobj_reenc_seg)
		goto err;
	json_object_object_add_by_uint(jobj_segs_hot, sg++, jobj_reenc_seg);

	/* tail behind the hotzone is already converted to the new key */
	if (tmp < device_size) {
		fixed_length = device_size - tmp;
		jobj_new_seg = reencrypt_make_segment_new(cd, hdr, rh, data_offset, rh->offset + rh->length, rh->offset + rh->length, rh->fixed_length ? &fixed_length : NULL);
		if (!jobj_new_seg)
			goto err;
		json_object_object_add_by_uint(jobj_segs_hot, sg, jobj_new_seg);
	}

	return jobj_segs_hot;
err:
	json_object_put(jobj_segs_hot);
	return NULL;
}
/*
 * Calculate the "hot" (in-progress) segment layout for the current hotzone,
 * dispatching by mode/direction. Stores result in rh->jobj_segs_hot.
 * Returns 0 on success, -EINVAL otherwise.
 */
static int reencrypt_make_hot_segments(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reenc_context *rh,
	uint64_t device_size,
	uint64_t data_offset)
{
	bool encrypt_shift = rh->mode == CRYPT_REENCRYPT_ENCRYPT &&
			     rh->direction == CRYPT_REENCRYPT_BACKWARD &&
			     rh->data_shift && rh->jobj_segment_moved;

	rh->jobj_segs_hot = NULL;

	if (encrypt_shift) {
		log_dbg(cd, "Calculating hot segments for encryption with data move.");
		rh->jobj_segs_hot = reencrypt_make_hot_segments_encrypt_shift(cd, hdr, rh, data_offset);
	} else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
		log_dbg(cd, "Calculating hot segments (forward direction).");
		rh->jobj_segs_hot = reencrypt_make_hot_segments_forward(cd, hdr, rh, device_size, data_offset);
	} else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
		log_dbg(cd, "Calculating hot segments (backward direction).");
		rh->jobj_segs_hot = reencrypt_make_hot_segments_backward(cd, hdr, rh, device_size, data_offset);
	}

	return rh->jobj_segs_hot ? 0 : -EINVAL;
}
/*
 * Calculate the "post" segment layout (state after the current hotzone is
 * fully written), dispatching by mode/direction. Stores result in
 * rh->jobj_segs_post. Returns 0 on success, -EINVAL otherwise.
 */
static int reencrypt_make_post_segments(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reenc_context *rh,
	uint64_t data_offset)
{
	rh->jobj_segs_post = NULL;

	if (rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->direction == CRYPT_REENCRYPT_BACKWARD &&
	    rh->data_shift && rh->jobj_segment_moved) {
		log_dbg(cd, "Calculating post segments for encryption with data move.");
		rh->jobj_segs_post = _enc_create_segments_shift_after(cd, hdr, rh, data_offset);
	} else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
		log_dbg(cd, "Calculating post segments (forward direction).");
		rh->jobj_segs_post = reencrypt_make_post_segments_forward(cd, hdr, rh, data_offset);
	} else if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
		/* FIX: message said just "Calculating segments", inconsistent with forward branch */
		log_dbg(cd, "Calculating post segments (backward direction).");
		rh->jobj_segs_post = reencrypt_make_post_segments_backward(cd, hdr, rh, data_offset);
	}

	return rh->jobj_segs_post ? 0 : -EINVAL;
}
/* Read 'shift_size' (bytes) from the reencrypt keyslot area; 0 when absent. */
static uint64_t reencrypt_data_shift(struct luks2_hdr *hdr)
{
	json_object *jobj_keyslot, *jobj_area, *jobj_data_shift;
	int ks = LUKS2_find_keyslot(hdr, "reencrypt");

	if (ks < 0)
		return 0;

	jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, ks);
	/* 'area' object presence presumably guaranteed by keyslot validation — TODO confirm */
	json_object_object_get_ex(jobj_keyslot, "area", &jobj_area);
	if (!json_object_object_get_ex(jobj_area, "shift_size", &jobj_data_shift))
		return 0;

	return json_object_get_uint64(jobj_data_shift);
}
/*
 * Read the reencryption mode ("encrypt"/"decrypt"/reencrypt) stored in the
 * reencrypt keyslot; defaults to CRYPT_REENCRYPT_REENCRYPT.
 */
static crypt_reencrypt_mode_info reencrypt_mode(struct luks2_hdr *hdr)
{
	json_object *jobj_keyslot, *jobj_mode;
	const char *mode_str;

	jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
	if (!jobj_keyslot)
		return CRYPT_REENCRYPT_REENCRYPT;

	json_object_object_get_ex(jobj_keyslot, "mode", &jobj_mode);
	mode_str = json_object_get_string(jobj_mode);

	/* validation enforces allowed values */
	if (!strcmp(mode_str, "encrypt"))
		return CRYPT_REENCRYPT_ENCRYPT;
	if (!strcmp(mode_str, "decrypt"))
		return CRYPT_REENCRYPT_DECRYPT;

	return CRYPT_REENCRYPT_REENCRYPT;
}
/*
 * Read the reencryption direction stored in the reencrypt keyslot;
 * defaults to CRYPT_REENCRYPT_FORWARD.
 */
static crypt_reencrypt_direction_info reencrypt_direction(struct luks2_hdr *hdr)
{
	json_object *jobj_keyslot, *jobj_dir;
	const char *dir_str;

	jobj_keyslot = LUKS2_get_keyslot_jobj(hdr, LUKS2_find_keyslot(hdr, "reencrypt"));
	if (!jobj_keyslot)
		return CRYPT_REENCRYPT_FORWARD;

	json_object_object_get_ex(jobj_keyslot, "direction", &jobj_dir);
	dir_str = json_object_get_string(jobj_dir);

	/* validation enforces allowed values */
	return strcmp(dir_str, "forward") ? CRYPT_REENCRYPT_BACKWARD : CRYPT_REENCRYPT_FORWARD;
}
/* Result codes for reencryption step handlers (ok / error / rollback / fatal). */
typedef enum { REENC_OK = 0, REENC_ERR, REENC_ROLLBACK, REENC_FATAL } reenc_status_t;
/*
 * Release all resources held by the reencryption context: checksum protection
 * state, cached segment json objects, transfer buffer, storage wrappers, dm
 * names, volume keys, device exclusive lock and the context itself.
 * Safe to call with rh == NULL.
 */
void LUKS2_reenc_context_free(struct crypt_device *cd, struct luks2_reenc_context *rh)
{
	if (!rh)
		return;

	if (rh->rp.type == REENC_PROTECTION_CHECKSUM) {
		if (rh->rp.p.csum.ch) {
			crypt_hash_destroy(rh->rp.p.csum.ch);
			rh->rp.p.csum.ch = NULL;
		}
		if (rh->rp.p.csum.checksums) {
			/* zeroize checksum buffer before freeing it */
			memset(rh->rp.p.csum.checksums, 0, rh->rp.p.csum.checksums_len);
			free(rh->rp.p.csum.checksums);
			rh->rp.p.csum.checksums = NULL;
		}
	}

	/* json_object_put() handles NULL; pointers cleared to avoid reuse */
	json_object_put(rh->jobj_segs_hot);
	rh->jobj_segs_hot = NULL;
	json_object_put(rh->jobj_segs_post);
	rh->jobj_segs_post = NULL;
	json_object_put(rh->jobj_segment_old);
	rh->jobj_segment_old = NULL;
	json_object_put(rh->jobj_segment_new);
	rh->jobj_segment_new = NULL;
	json_object_put(rh->jobj_segment_moved);
	rh->jobj_segment_moved = NULL;

	free(rh->reenc_buffer);
	rh->reenc_buffer = NULL;

	crypt_storage_wrapper_destroy(rh->cw1);
	rh->cw1 = NULL;
	crypt_storage_wrapper_destroy(rh->cw2);
	rh->cw2 = NULL;

	free(rh->device_name);
	free(rh->overlay_name);
	free(rh->hotzone_name);

	/* drop keys from kernel keyring and wipe them from memory */
	crypt_drop_keyring_key(cd, rh->vks);
	crypt_free_volume_key(rh->vks);

	device_release_excl(cd, crypt_data_device(cd));
	crypt_unlock_internal(cd, rh->reenc_lock);
	free(rh);
}
static size_t reencrypt_get_alignment(struct crypt_device *cd,
struct luks2_hdr *hdr)
{
int ss;
size_t alignment = device_block_size(cd, crypt_data_device(cd));
ss = reencrypt_get_sector_size_old(hdr);
if (ss > 0 && (size_t)ss > alignment)
alignment = ss;
ss = reencrypt_get_sector_size_new(hdr);
if (ss > 0 && (size_t)ss > alignment)
alignment = (size_t)ss;
return alignment;
}
/* returns void because it must not fail on valid LUKS2 header */
static void _load_backup_segments(struct luks2_hdr *hdr,
		struct luks2_reenc_context *rh)
{
	int s;

	/* segment describing the final (new) data state */
	s = LUKS2_get_segment_id_by_flag(hdr, "backup-final");
	rh->jobj_segment_new = (s >= 0) ? json_object_get(LUKS2_get_segment_jobj(hdr, s)) : NULL;
	rh->digest_new = (s >= 0) ? LUKS2_digest_by_segment(hdr, s) : -ENOENT;

	/* segment describing the previous (old) data state */
	s = LUKS2_get_segment_id_by_flag(hdr, "backup-previous");
	rh->jobj_segment_old = (s >= 0) ? json_object_get(LUKS2_get_segment_jobj(hdr, s)) : NULL;
	rh->digest_old = (s >= 0) ? LUKS2_digest_by_segment(hdr, s) : -ENOENT;

	/* optional moved segment (encryption with data shift) */
	s = LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment");
	rh->jobj_segment_moved = (s >= 0) ? json_object_get(LUKS2_get_segment_jobj(hdr, s)) : NULL;
}
/*
 * Compute hotzone offset for backward encryption with data shift: the zone
 * covers the last data_shift bytes of all remaining linear (not yet
 * encrypted) segments. May shrink *reencrypt_length for the final chunk.
 * Returns 0 on success, -EINVAL otherwise.
 */
static int reencrypt_offset_backward_moved(struct luks2_hdr *hdr, json_object *jobj_segments, uint64_t *reencrypt_length, uint64_t data_shift, uint64_t *offset)
{
	uint64_t tmp, linear_length = 0;
	int sg, segs = json_segments_count(jobj_segments);

	/* find reencrypt offset with data shift */
	for (sg = 0; sg < segs; sg++)
		if (LUKS2_segment_is_type(hdr, sg, "linear"))
			linear_length += LUKS2_segment_size(hdr, sg, 0);

	/* all active linear segments length */
	if (linear_length) {
		if (linear_length < data_shift)
			return -EINVAL;
		tmp = linear_length - data_shift;
		/* remaining chunk smaller than data_shift: shrink hotzone length */
		if (tmp && tmp < data_shift) {
			*offset = data_shift;
			*reencrypt_length = tmp;
		} else
			*offset = tmp;
		return 0;
	}

	/* no linear segment left; single segment layout starts at 0 */
	if (segs == 1) {
		*offset = 0;
		return 0;
	}

	/* should be unreachable */

	return -EINVAL;
}
/*
 * Forward direction: hotzone starts right after the first (already
 * converted) segment, or at 0 when only one segment exists.
 */
static int _offset_forward(struct luks2_hdr *hdr, json_object *jobj_segments, uint64_t *offset)
{
	switch (json_segments_count(jobj_segments)) {
	case 1:
		*offset = 0;
		return 0;
	case 2:
		*offset = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
		return *offset ? 0 : -EINVAL;
	default:
		return -EINVAL;
	}
}
/*
 * Backward direction: hotzone ends at the device end (single segment) or at
 * the end of the first, not-yet-converted segment. *length may be shrunk to
 * fit the available range.
 */
static int _offset_backward(struct luks2_hdr *hdr, json_object *jobj_segments, uint64_t device_size, uint64_t *length, uint64_t *offset)
{
	int segs = json_segments_count(jobj_segments);
	uint64_t first_size;

	if (segs == 1) {
		if (device_size < *length)
			*length = device_size;
		*offset = device_size - *length;
		return 0;
	}

	if (segs == 2) {
		first_size = json_segment_get_size(json_segments_get_segment(jobj_segments, 0), 0);
		if (first_size < *length)
			*length = first_size;
		*offset = first_size - *length;
		return 0;
	}

	return -EINVAL;
}
/* must be always relative to data offset */
/* the LUKS2 header MUST be valid */
/*
 * Compute the hotzone offset for the next reencryption step. If a segment is
 * already marked in-reencryption, resume at its offset; otherwise derive the
 * offset from the direction and current segment layout.
 */
static int reencrypt_offset(struct luks2_hdr *hdr,
		crypt_reencrypt_direction_info di,
		uint64_t device_size,
		uint64_t *reencrypt_length,
		uint64_t *offset)
{
	int sg;
	json_object *jobj_segments;
	uint64_t data_shift = reencrypt_data_shift(hdr);

	if (!offset)
		return -EINVAL;

	/* if there's segment in reencryption return directly offset of it */
	json_object_object_get_ex(hdr->jobj, "segments", &jobj_segments);
	sg = json_segments_segment_in_reencrypt(jobj_segments);
	if (sg >= 0) {
		*offset = LUKS2_segment_offset(hdr, sg, 0) - (reencrypt_get_data_offset_new(hdr));
		return 0;
	}

	if (di == CRYPT_REENCRYPT_FORWARD)
		return _offset_forward(hdr, jobj_segments, offset);
	else if (di == CRYPT_REENCRYPT_BACKWARD) {
		/* encryption with moved segment uses dedicated offset calculation */
		if (reencrypt_mode(hdr) == CRYPT_REENCRYPT_ENCRYPT &&
		    LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
			return reencrypt_offset_backward_moved(hdr, jobj_segments, reencrypt_length, data_shift, offset);
		return _offset_backward(hdr, jobj_segments, device_size, reencrypt_length, offset);
	}

	return -EINVAL;
}
/*
 * Compute hotzone length (bytes) for one reencryption step, limited by the
 * resilience type, keyslot area size, a hard maximum, available memory and
 * the caller-supplied maximum; result is aligned to rh->alignment.
 * Returns 0 on error (caller reports it).
 */
static uint64_t reencrypt_length(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reenc_context *rh,
		uint64_t keyslot_area_length,
		uint64_t length_max)
{
	unsigned long dummy, optimal_alignment;
	uint64_t length, soft_mem_limit;

	if (rh->rp.type == REENC_PROTECTION_NONE)
		length = length_max ?: LUKS2_DEFAULT_NONE_REENCRYPTION_LENGTH;
	else if (rh->rp.type == REENC_PROTECTION_CHECKSUM)
		/* one checksum per alignment-sized block must fit in the keyslot area */
		length = (keyslot_area_length / rh->rp.p.csum.hash_size) * rh->alignment;
	else if (rh->rp.type == REENC_PROTECTION_DATASHIFT)
		return reencrypt_data_shift(hdr);
	else
		length = keyslot_area_length;

	/* hard limit */
	if (length > LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH)
		length = LUKS2_REENCRYPT_MAX_HOTZONE_LENGTH;

	/* soft limit is 1/4 of system memory */
	soft_mem_limit = crypt_getphysmemory_kb() << 8; /* multiply by (1024/4) */

	if (soft_mem_limit && length > soft_mem_limit)
		length = soft_mem_limit;

	if (length_max && length > length_max)
		length = length_max;

	length -= (length % rh->alignment);

	/* Emits error later */
	if (!length)
		return length;

	device_topology_alignment(cd, crypt_data_device(cd), &optimal_alignment, &dummy, length);

	/* we have to stick with encryption sector size alignment */
	if (optimal_alignment % rh->alignment)
		return length;

	/* align to opt-io size only if remaining size allows it */
	if (length > optimal_alignment)
		length -= (length % optimal_alignment);

	return length;
}
/*
 * Initialize the reencryption context from the LUKS2 header and caller
 * parameters: locate the reencrypt keyslot, set mode/direction/alignment,
 * resilience protection, hotzone length and offset.
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_context_init(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reenc_context *rh, uint64_t device_size, const struct crypt_params_reencrypt *params)
{
	int r;
	uint64_t dummy, area_length;

	rh->reenc_keyslot = LUKS2_find_keyslot(hdr, "reencrypt");
	if (rh->reenc_keyslot < 0)
		return -EINVAL;
	if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &dummy, &area_length) < 0)
		return -EINVAL;

	rh->mode = reencrypt_mode(hdr);

	rh->alignment = reencrypt_get_alignment(cd, hdr);
	if (!rh->alignment)
		return -EINVAL;

	log_dbg(cd, "Hotzone size: %" PRIu64 ", device size: %" PRIu64 ", alignment: %zu.",
		params->max_hotzone_size << SECTOR_SHIFT,
		params->device_size << SECTOR_SHIFT, rh->alignment);

	/* caller-supplied sizes must be multiples of the zone alignment */
	if ((params->max_hotzone_size << SECTOR_SHIFT) % rh->alignment) {
		log_err(cd, _("Hotzone size must be multiple of calculated zone alignment (%zu bytes)."), rh->alignment);
		return -EINVAL;
	}
	if ((params->device_size << SECTOR_SHIFT) % rh->alignment) {
		log_err(cd, _("Device size must be multiple of calculated zone alignment (%zu bytes)."), rh->alignment);
		return -EINVAL;
	}

	rh->direction = reencrypt_direction(hdr);

	/* select resilience protection (how an interrupted hotzone is recovered) */
	if (!strcmp(params->resilience, "datashift")) {
		log_dbg(cd, "Initializing reencryption context with data_shift resilience.");
		rh->rp.type = REENC_PROTECTION_DATASHIFT;
		rh->data_shift = reencrypt_data_shift(hdr);
	} else if (!strcmp(params->resilience, "journal")) {
		log_dbg(cd, "Initializing reencryption context with journal resilience.");
		rh->rp.type = REENC_PROTECTION_JOURNAL;
	} else if (!strcmp(params->resilience, "checksum")) {
		log_dbg(cd, "Initializing reencryption context with checksum resilience.");
		rh->rp.type = REENC_PROTECTION_CHECKSUM;

		r = snprintf(rh->rp.p.csum.hash,
			sizeof(rh->rp.p.csum.hash), "%s", params->hash);
		if (r < 0 || (size_t)r >= sizeof(rh->rp.p.csum.hash)) {
			log_dbg(cd, "Invalid hash parameter");
			return -EINVAL;
		}

		if (crypt_hash_init(&rh->rp.p.csum.ch, params->hash)) {
			log_dbg(cd, "Failed to initialize checksum resilience hash %s", params->hash);
			return -EINVAL;
		}

		r = crypt_hash_size(params->hash);
		if (r < 1) {
			log_dbg(cd, "Invalid hash size");
			return -EINVAL;
		}
		rh->rp.p.csum.hash_size = r;

		/* checksums buffer covers the whole keyslot area */
		rh->rp.p.csum.checksums_len = area_length;
		if (posix_memalign(&rh->rp.p.csum.checksums, device_alignment(crypt_metadata_device(cd)),
				   rh->rp.p.csum.checksums_len))
			return -ENOMEM;
	} else if (!strcmp(params->resilience, "none")) {
		log_dbg(cd, "Initializing reencryption context with none resilience.");
		rh->rp.type = REENC_PROTECTION_NONE;
	} else {
		log_err(cd, _("Unsupported resilience mode %s"), params->resilience);
		return -EINVAL;
	}

	if (params->device_size) {
		log_dbg(cd, "Switching reencryption to fixed size mode.");
		device_size = params->device_size << SECTOR_SHIFT;
		rh->fixed_length = true;
	} else
		rh->fixed_length = false;

	rh->length = reencrypt_length(cd, hdr, rh, area_length, params->max_hotzone_size << SECTOR_SHIFT);
	if (!rh->length) {
		log_dbg(cd, "Invalid reencryption length.");
		return -EINVAL;
	}

	if (reencrypt_offset(hdr, rh->direction, device_size, &rh->length, &rh->offset)) {
		log_dbg(cd, "Failed to get reencryption offset.");
		return -EINVAL;
	}

	/* clamp hotzone to the remaining device range */
	if (rh->offset > device_size)
		return -EINVAL;
	if (rh->length > device_size - rh->offset)
		rh->length = device_size - rh->offset;

	log_dbg(cd, "reencrypt-direction: %s", rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward");

	_load_backup_segments(hdr, rh);

	if (rh->direction == CRYPT_REENCRYPT_BACKWARD)
		rh->progress = device_size - rh->offset - rh->length;
	else
		rh->progress = rh->offset;

	log_dbg(cd, "backup-previous digest id: %d", rh->digest_old);
	log_dbg(cd, "backup-final digest id: %d", rh->digest_new);
	log_dbg(cd, "reencrypt length: %" PRIu64, rh->length);
	log_dbg(cd, "reencrypt offset: %" PRIu64, rh->offset);
	log_dbg(cd, "reencrypt shift: %s%" PRIu64, (rh->data_shift && rh->direction == CRYPT_REENCRYPT_BACKWARD ? "-" : ""), rh->data_shift);
	log_dbg(cd, "reencrypt alignment: %zu", rh->alignment);
	log_dbg(cd, "reencrypt progress: %" PRIu64, rh->progress);

	rh->device_size = device_size;

	/* a hotzone smaller than 512 bytes is rejected */
	return rh->length < 512 ? -EINVAL : 0;
}
/* Transfer buffer size: a full data-shift chunk when shifting, else hotzone length. */
static size_t reencrypt_buffer_length(struct luks2_reenc_context *rh)
{
	return rh->data_shift ? rh->data_shift : rh->length;
}
/*
 * Allocate and initialize the reencryption context for a regular (clean)
 * load. Resilience parameters default to the values stored in metadata
 * unless the caller overrides them; 'datashift' stored in header always
 * wins. On success *rh is set; on failure it is left untouched.
 */
static int reencrypt_load_clean(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	uint64_t device_size,
	struct luks2_reenc_context **rh,
	const struct crypt_params_reencrypt *params)
{
	int r;
	const struct crypt_params_reencrypt hdr_reenc_params = {
		.resilience = reencrypt_resilience_type(hdr),
		.hash = reencrypt_resilience_hash(hdr),
		.device_size = params ? params->device_size : 0
	};
	struct luks2_reenc_context *tmp = crypt_zalloc(sizeof (*tmp));

	if (!tmp)
		return -ENOMEM;

	r = -EINVAL;
	if (!hdr_reenc_params.resilience)
		goto err;

	/* skip context update if data shift is detected in header */
	if (!strcmp(hdr_reenc_params.resilience, "datashift"))
		params = NULL;

	log_dbg(cd, "Initializing reencryption context (%s).", params ? "update" : "load");

	if (!params || !params->resilience)
		params = &hdr_reenc_params;

	r = reencrypt_context_init(cd, hdr, tmp, device_size, params);
	if (r)
		goto err;

	/* transfer buffer aligned for direct I/O on the data device */
	if (posix_memalign(&tmp->reenc_buffer, device_alignment(crypt_data_device(cd)),
			   reencrypt_buffer_length(tmp))) {
		r = -ENOMEM;
		goto err;
	}

	*rh = tmp;

	return 0;
err:
	LUKS2_reenc_context_free(cd, tmp);
	return r;
}
/*
 * Calculate both 'hot' and 'post' segment layouts for the current hotzone.
 * On failure neither layout is kept. Returns 0 on success, negative errno.
 */
static int reencrypt_make_segments(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reenc_context *rh,
	uint64_t device_size)
{
	int r;
	uint64_t data_offset = reencrypt_get_data_offset_new(hdr);

	log_dbg(cd, "Calculating segments.");

	r = reencrypt_make_hot_segments(cd, hdr, rh, device_size, data_offset);
	if (!r) {
		r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
		if (r) {
			json_object_put(rh->jobj_segs_hot);
			/* FIX: clear the pointer after put — the stale reference
			 * caused a double free in LUKS2_reenc_context_free() */
			rh->jobj_segs_hot = NULL;
		}
	}

	if (r)
		log_dbg(cd, "Failed to make reencryption segments.");

	return r;
}
/*
 * Rebuild segment layouts after a crash: the on-disk header already holds the
 * 'hot' layout (minus backup segments), only 'post' has to be recomputed.
 */
static int reencrypt_make_segments_crashed(struct crypt_device *cd,
				struct luks2_hdr *hdr,
				struct luks2_reenc_context *rh)
{
	int r;
	uint64_t data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;

	if (!rh)
		return -EINVAL;

	rh->jobj_segs_hot = json_object_new_object();
	if (!rh->jobj_segs_hot)
		return -ENOMEM;

	/* take over all regular (non-backup) segments from the header */
	json_object_object_foreach(LUKS2_get_segments_jobj(hdr), key, val) {
		if (json_segment_is_backup(val))
			continue;
		json_object_object_add(rh->jobj_segs_hot, key, json_object_get(val));
	}

	r = reencrypt_make_post_segments(cd, hdr, rh, data_offset);
	if (r) {
		json_object_put(rh->jobj_segs_hot);
		rh->jobj_segs_hot = NULL;
	}
	return r;
}
/*
 * Load the reencryption context after a crash: reuse the clean load path,
 * then override hotzone length (and, for checksum resilience, alignment)
 * with the values stored in metadata, and rebuild segment layouts.
 */
static int reencrypt_load_crashed(struct crypt_device *cd,
	struct luks2_hdr *hdr, uint64_t device_size, struct luks2_reenc_context **rh)
{
	bool dynamic;
	uint64_t minimal_size;
	int r, reenc_seg;
	struct crypt_params_reencrypt params = {};

	if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic))
		return -EINVAL;

	if (!dynamic)
		params.device_size = minimal_size >> SECTOR_SHIFT;

	r = reencrypt_load_clean(cd, hdr, device_size, rh, &params);

	if (!r) {
		/* resume exactly with the hotzone size recorded in the header */
		reenc_seg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
		if (reenc_seg < 0)
			r = -EINVAL;
		else
			(*rh)->length = LUKS2_segment_size(hdr, reenc_seg, 0);
	}

	if (!r && ((*rh)->rp.type == REENC_PROTECTION_CHECKSUM)) {
		/* we have to override calculated alignment with value stored in mda */
		(*rh)->alignment = reencrypt_alignment(hdr);
		if (!(*rh)->alignment) {
			log_dbg(cd, "Failed to get read resilience sector_size from metadata.");
			r = -EINVAL;
		}
	}

	if (!r)
		r = reencrypt_make_segments_crashed(cd, hdr, *rh);

	if (r) {
		LUKS2_reenc_context_free(cd, *rh);
		*rh = NULL;
	}
	return r;
}
/*
 * Create the two storage wrappers used during reencryption: cw1 reads the
 * old segment (read-only, old key/cipher), cw2 writes the new segment.
 */
static int reencrypt_init_storage_wrappers(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		struct luks2_reenc_context *rh,
		struct volume_key *vks)
{
	int r;
	struct volume_key *vk;
	/* DISABLE_KCAPI only when both real and effective uid are 0
	 * — NOTE(review): rationale not visible in this file, confirm */
	uint32_t wrapper_flags = (getuid() || geteuid()) ? 0 : DISABLE_KCAPI;

	vk = crypt_volume_key_by_id(vks, rh->digest_old);
	r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
			reencrypt_get_data_offset_old(hdr),
			crypt_get_iv_offset(cd),
			reencrypt_get_sector_size_old(hdr),
			reencrypt_segment_cipher_old(hdr),
			vk, wrapper_flags | OPEN_READONLY);
	if (r) {
		log_err(cd, _("Failed to initialize old segment storage wrapper."));
		return r;
	}
	rh->wflags1 = wrapper_flags | OPEN_READONLY;
	log_dbg(cd, "Old cipher storage wrapper type: %d.", crypt_storage_wrapper_get_type(rh->cw1));

	vk = crypt_volume_key_by_id(vks, rh->digest_new);
	r = crypt_storage_wrapper_init(cd, &rh->cw2, crypt_data_device(cd),
			reencrypt_get_data_offset_new(hdr),
			crypt_get_iv_offset(cd),
			reencrypt_get_sector_size_new(hdr),
			reencrypt_segment_cipher_new(hdr),
			vk, wrapper_flags);
	if (r) {
		log_err(cd, _("Failed to initialize new segment storage wrapper."));
		return r;
	}
	rh->wflags2 = wrapper_flags;
	log_dbg(cd, "New cipher storage wrapper type: %d", crypt_storage_wrapper_get_type(rh->cw2));

	return 0;
}
/*
 * Derive device-mapper names (device, hotzone, overlay) used for online
 * reencryption from the caller-supplied name (dm name or device path).
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_context_set_names(struct luks2_reenc_context *rh, const char *name)
{
	if (!rh || !name) /* FIX: logical OR; original used bitwise '|' */
		return -EINVAL;

	if (*name == '/') {
		/* a path was given: resolve it to a dm device name */
		if (!(rh->device_name = dm_device_name(name)))
			return -EINVAL;
	} else if (!(rh->device_name = strdup(name)))
		return -ENOMEM;

	if (asprintf(&rh->hotzone_name, "%s-hotzone-%s", rh->device_name,
		     rh->direction == CRYPT_REENCRYPT_FORWARD ? "forward" : "backward") < 0) {
		rh->hotzone_name = NULL;
		return -ENOMEM;
	}
	if (asprintf(&rh->overlay_name, "%s-overlay", rh->device_name) < 0) {
		rh->overlay_name = NULL;
		return -ENOMEM;
	}

	rh->online = true;
	return 0;
}
/*
 * Shift *offset by data_shift according to direction. Forward shift fails
 * (-EINVAL) on underflow; unknown direction also fails.
 */
static int modify_offset(uint64_t *offset, uint64_t data_shift, crypt_reencrypt_direction_info di)
{
	if (!offset)
		return -EINVAL;

	if (di == CRYPT_REENCRYPT_FORWARD) {
		if (*offset < data_shift)
			return -EINVAL;
		*offset -= data_shift;
		return 0;
	}

	if (di == CRYPT_REENCRYPT_BACKWARD) {
		*offset += data_shift;
		return 0;
	}

	return -EINVAL;
}
/*
 * Set (enable != 0) or clear the LUKS2 online-reencryption requirement flag.
 * Returns -EINVAL if the flag is already in the requested state.
 */
static int reencrypt_update_flag(struct crypt_device *cd, int enable, bool commit)
{
	uint32_t reqs;
	struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);

	if (LUKS2_config_get_requirements(cd, hdr, &reqs))
		return -EINVAL;

	/* nothing to do */
	if (enable && (reqs & CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
		return -EINVAL;

	/* nothing to do */
	if (!enable && !(reqs & CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
		return -EINVAL;

	if (enable)
		reqs |= CRYPT_REQUIREMENT_ONLINE_REENCRYPT;
	else
		reqs &= ~CRYPT_REQUIREMENT_ONLINE_REENCRYPT;

	log_dbg(cd, "Going to %s reencryption requirement flag.", enable ? "store" : "wipe");

	return LUKS2_config_set_requirements(cd, hdr, reqs, commit);
}
/*
 * Recover (re-write) the interrupted hotzone after a crash, using the
 * resilience data stored in the reencrypt keyslot area:
 *
 *  checksum  - compare stored per-block checksums against current on-disk
 *              data; blocks that still match were not finished and are
 *              decrypted with the old key and re-written with the new one.
 *  journal   - the whole hotzone is journaled in the keyslot area; decrypt
 *              it from there and write it back encrypted with the new key.
 *  datashift - re-read original data from its pre-shift location.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int reencrypt_recover_segment(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reenc_context *rh,
	struct volume_key *vks)
{
	struct volume_key *vk_old, *vk_new;
	size_t count, s;
	ssize_t read, w;
	unsigned resilience;
	uint64_t area_offset, area_length, area_length_read, crash_iv_offset,
		 data_offset = crypt_get_data_offset(cd) << SECTOR_SHIFT;
	int devfd, r, new_sector_size, old_sector_size, rseg = json_segments_segment_in_reencrypt(rh->jobj_segs_hot);
	char *checksum_tmp = NULL, *data_buffer = NULL;
	struct crypt_storage_wrapper *cw1 = NULL, *cw2 = NULL;

	resilience = rh->rp.type;

	if (rseg < 0 || rh->length < 512)
		return -EINVAL;

	/* new key is mandatory unless decrypting; old key unless encrypting */
	vk_new = crypt_volume_key_by_id(vks, rh->digest_new);
	if (!vk_new && rh->mode != CRYPT_REENCRYPT_DECRYPT)
		return -EINVAL;
	vk_old = crypt_volume_key_by_id(vks, rh->digest_old);
	if (!vk_old && rh->mode != CRYPT_REENCRYPT_ENCRYPT)
		return -EINVAL;
	old_sector_size = json_segment_get_sector_size(reencrypt_segment_old(hdr));
	new_sector_size = json_segment_get_sector_size(reencrypt_segment_new(hdr));
	if (rh->mode == CRYPT_REENCRYPT_DECRYPT)
		crash_iv_offset = rh->offset >> SECTOR_SHIFT; /* TODO: + old iv_tweak */
	else
		crash_iv_offset = json_segment_get_iv_offset(json_segments_get_segment(rh->jobj_segs_hot, rseg));

	log_dbg(cd, "crash_offset: %" PRIu64 ", crash_length: %" PRIu64 ", crash_iv_offset: %" PRIu64, data_offset + rh->offset, rh->length, crash_iv_offset);

	r = crypt_storage_wrapper_init(cd, &cw2, crypt_data_device(cd),
			data_offset + rh->offset, crash_iv_offset, new_sector_size,
			reencrypt_segment_cipher_new(hdr), vk_new, 0);
	if (r) {
		log_err(cd, _("Failed to initialize new segment storage wrapper."));
		return r;
	}

	if (LUKS2_keyslot_area(hdr, rh->reenc_keyslot, &area_offset, &area_length)) {
		r = -EINVAL;
		goto out;
	}

	if (posix_memalign((void**)&data_buffer, device_alignment(crypt_data_device(cd)), rh->length)) {
		r = -ENOMEM;
		goto out;
	}

	switch (resilience) {
	case REENC_PROTECTION_CHECKSUM:
		log_dbg(cd, "Checksums based recovery.");

		r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
				data_offset + rh->offset, crash_iv_offset, old_sector_size,
				reencrypt_segment_cipher_old(hdr), vk_old, 0);
		if (r) {
			log_err(cd, _("Failed to initialize old segment storage wrapper."));
			goto out;
		}

		count = rh->length / rh->alignment;
		area_length_read = count * rh->rp.p.csum.hash_size;
		if (area_length_read > area_length) {
			log_dbg(cd, "Internal error in calculated area_length.");
			r = -EINVAL;
			goto out;
		}

		checksum_tmp = malloc(rh->rp.p.csum.hash_size);
		if (!checksum_tmp) {
			r = -ENOMEM;
			goto out;
		}

		/* TODO: lock for read */
		devfd = device_open(cd, crypt_metadata_device(cd), O_RDONLY);
		if (devfd < 0) {
			/* FIX: r could still be 0 here, reporting success on failed open */
			r = -EINVAL;
			goto out;
		}

		/* read old data checksums */
		read = read_lseek_blockwise(devfd, device_block_size(cd, crypt_metadata_device(cd)),
					device_alignment(crypt_metadata_device(cd)), rh->rp.p.csum.checksums, area_length_read, area_offset);
		if (read < 0 || (size_t)read != area_length_read) {
			log_err(cd, _("Failed to read checksums for current hotzone."));
			r = -EINVAL;
			goto out;
		}

		read = crypt_storage_wrapper_read(cw2, 0, data_buffer, rh->length);
		if (read < 0 || (size_t)read != rh->length) {
			log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset + data_offset);
			r = -EINVAL;
			goto out;
		}

		for (s = 0; s < count; s++) {
			if (crypt_hash_write(rh->rp.p.csum.ch, data_buffer + (s * rh->alignment), rh->alignment)) {
				log_dbg(cd, "Failed to write hash.");
				r = -EINVAL; /* FIX: was positive EINVAL */
				goto out;
			}
			if (crypt_hash_final(rh->rp.p.csum.ch, checksum_tmp, rh->rp.p.csum.hash_size)) {
				log_dbg(cd, "Failed to finalize hash.");
				r = -EINVAL; /* FIX: was positive EINVAL */
				goto out;
			}
			/* a matching stored checksum means this block was not finished */
			if (!memcmp(checksum_tmp, (char *)rh->rp.p.csum.checksums + (s * rh->rp.p.csum.hash_size), rh->rp.p.csum.hash_size)) {
				log_dbg(cd, "Sector %zu (size %zu, offset %zu) needs recovery", s, rh->alignment, s * rh->alignment);
				if (crypt_storage_wrapper_decrypt(cw1, s * rh->alignment, data_buffer + (s * rh->alignment), rh->alignment)) {
					log_err(cd, _("Failed to decrypt sector %zu."), s);
					r = -EINVAL;
					goto out;
				}
				w = crypt_storage_wrapper_encrypt_write(cw2, s * rh->alignment, data_buffer + (s * rh->alignment), rh->alignment);
				if (w < 0 || (size_t)w != rh->alignment) {
					log_err(cd, _("Failed to recover sector %zu."), s);
					r = -EINVAL;
					goto out;
				}
			}
		}

		r = 0;
		break;
	case REENC_PROTECTION_JOURNAL:
		log_dbg(cd, "Journal based recovery.");

		/* FIXME: validation candidate */
		if (rh->length > area_length) {
			r = -EINVAL;
			log_dbg(cd, "Invalid journal size.");
			goto out;
		}

		/* TODO locking */
		r = crypt_storage_wrapper_init(cd, &cw1, crypt_metadata_device(cd),
				area_offset, crash_iv_offset, old_sector_size,
				reencrypt_segment_cipher_old(hdr), vk_old, 0);
		if (r) {
			log_err(cd, _("Failed to initialize old segment storage wrapper."));
			goto out;
		}
		read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
		if (read < 0 || (size_t)read != rh->length) {
			log_dbg(cd, "Failed to read journaled data.");
			r = -EIO;
			/* may content plaintext */
			crypt_memzero(data_buffer, rh->length);
			goto out;
		}
		read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
		/* may content plaintext */
		crypt_memzero(data_buffer, rh->length);
		if (read < 0 || (size_t)read != rh->length) {
			log_dbg(cd, "recovery write failed.");
			r = -EINVAL;
			goto out;
		}

		r = 0;
		break;
	case REENC_PROTECTION_DATASHIFT:
		log_dbg(cd, "Data shift based recovery.");

		if (rseg == 0) {
			/* first hotzone: data comes from the moved segment location */
			r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
					json_segment_get_offset(rh->jobj_segment_moved, 0), 0, 0,
					reencrypt_segment_cipher_old(hdr), NULL, 0);
		} else
			r = crypt_storage_wrapper_init(cd, &cw1, crypt_data_device(cd),
					data_offset + rh->offset - rh->data_shift, 0, 0,
					reencrypt_segment_cipher_old(hdr), NULL, 0);
		if (r) {
			log_err(cd, _("Failed to initialize old segment storage wrapper."));
			goto out;
		}

		read = crypt_storage_wrapper_read_decrypt(cw1, 0, data_buffer, rh->length);
		if (read < 0 || (size_t)read != rh->length) {
			log_dbg(cd, "Failed to read data.");
			r = -EIO;
			/* may content plaintext */
			crypt_memzero(data_buffer, rh->length);
			goto out;
		}

		read = crypt_storage_wrapper_encrypt_write(cw2, 0, data_buffer, rh->length);
		/* may content plaintext */
		crypt_memzero(data_buffer, rh->length);
		if (read < 0 || (size_t)read != rh->length) {
			log_dbg(cd, "recovery write failed.");
			r = -EINVAL;
			goto out;
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
	}

	if (!r)
		rh->read = rh->length;
out:
	free(data_buffer);
	free(checksum_tmp);
	crypt_storage_wrapper_destroy(cw1);
	crypt_storage_wrapper_destroy(cw2);

	return r;
}
/*
 * Insert the backup copy of the relocated ("moved") first segment into the
 * LUKS2 metadata under the first free segment id.  No-op when the context
 * carries no moved segment.
 */
static int reencrypt_add_moved_segment(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reenc_context *rh)
{
	int segment_id;

	/* Nothing was moved during initialization: nothing to add. */
	if (!rh->jobj_segment_moved)
		return 0;

	segment_id = LUKS2_segment_first_unused_id(hdr);
	if (segment_id < 0)
		return segment_id;

	/* add_by_uint consumes the reference on success; drop ours on failure. */
	if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment_id,
					   json_object_get(rh->jobj_segment_moved))) {
		json_object_put(rh->jobj_segment_moved);
		return -EINVAL;
	}

	return 0;
}
/*
 * Add a backup segment ("backup-previous" when final == 0, "backup-final"
 * when final != 0) from the reencryption context into the header segment
 * list and, for "crypt" segments, link it to the matching digest.
 */
static int reencrypt_add_backup_segment(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reenc_context *rh,
	unsigned final)
{
	int digest, segment_id;
	json_object *jobj_seg;

	segment_id = LUKS2_segment_first_unused_id(hdr);
	if (segment_id < 0)
		return segment_id;

	if (final) {
		digest = rh->digest_new;
		jobj_seg = rh->jobj_segment_new;
	} else {
		digest = rh->digest_old;
		jobj_seg = rh->jobj_segment_old;
	}

	/* add_by_uint consumes the reference on success; drop ours on failure. */
	if (json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment_id,
					   json_object_get(jobj_seg))) {
		json_object_put(jobj_seg);
		return -EINVAL;
	}

	/* Only "crypt" segments carry a digest reference. */
	if (strcmp(json_segment_type(jobj_seg), "crypt"))
		return 0;

	return LUKS2_digest_segment_assign(cd, hdr, segment_id, digest, 1, 0);
}
/*
 * Segment assignment for the single-digest modes (encrypt or decrypt):
 * install the precomputed 'hot' or 'post' segment layout, add the backup
 * segments and the optional moved segment, then link every "crypt" segment
 * to the one digest valid for this mode.  Optionally commits the header.
 */
static int reencrypt_assign_segments_simple(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reenc_context *rh,
	unsigned hot,
	unsigned commit)
{
	int digest, r, sg;

	if (hot && json_segments_count(rh->jobj_segs_hot) > 0) {
		log_dbg(cd, "Setting 'hot' segments.");

		r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
		if (!r)
			rh->jobj_segs_hot = NULL; /* consumed by LUKS2_segments_set */
	} else if (!hot && json_segments_count(rh->jobj_segs_post) > 0) {
		log_dbg(cd, "Setting 'post' segments.");
		r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
		if (!r)
			rh->jobj_segs_post = NULL; /* consumed by LUKS2_segments_set */
	} else {
		log_dbg(cd, "No segments to set.");
		return -EINVAL;
	}

	if (r) {
		log_dbg(cd, "Failed to assign new enc segments.");
		return r;
	}

	r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
	if (r) {
		log_dbg(cd, "Failed to assign reencryption previous backup segment.");
		return r;
	}

	r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
	if (r) {
		log_dbg(cd, "Failed to assign reencryption final backup segment.");
		return r;
	}

	r = reencrypt_add_moved_segment(cd, hdr, rh);
	if (r) {
		log_dbg(cd, "Failed to assign reencryption moved backup segment.");
		return r;
	}

	/* Encrypt mode has only the new digest, decrypt mode only the old one. */
	digest = rh->mode == CRYPT_REENCRYPT_ENCRYPT ? rh->digest_new : rh->digest_old;

	for (sg = 0; sg < LUKS2_segments_count(hdr); sg++) {
		if (LUKS2_segment_is_type(hdr, sg, "crypt") &&
		    LUKS2_digest_segment_assign(cd, hdr, sg, digest, 1, 0)) {
			/* FIX: log the digest actually being assigned; the original
			 * unconditionally printed rh->digest_new even in decrypt mode
			 * where rh->digest_old was used. */
			log_dbg(cd, "Failed to assign digest %u to segment %u.", digest, sg);
			return -EINVAL;
		}
	}

	return commit ? LUKS2_hdr_write(cd, hdr) : 0;
}
/*
 * Assign header segments and digest links for one reencryption step.
 *
 * hot != 0: install the in-reencryption ("hot") layout, where the hotzone
 *           segment must reference both old and new digests.
 * hot == 0: install the layout valid after the hotzone is finished ("post").
 * commit:   write the header to disk after assignment.
 *
 * Encrypt/decrypt modes (single digest) are delegated to
 * reencrypt_assign_segments_simple().
 */
static int reencrypt_assign_segments(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reenc_context *rh,
	unsigned hot,
	unsigned commit)
{
	bool forward;
	int rseg, scount, r = -EINVAL;
	/* FIXME: validate in reencrypt context load */
	if (rh->digest_new < 0 && rh->mode != CRYPT_REENCRYPT_DECRYPT)
		return -EINVAL;
	/* Drop all existing digest <-> segment links before reassigning. */
	if (LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0))
		return -EINVAL;
	if (rh->mode == CRYPT_REENCRYPT_ENCRYPT || rh->mode == CRYPT_REENCRYPT_DECRYPT)
		return reencrypt_assign_segments_simple(cd, hdr, rh, hot, commit);
	if (hot && rh->jobj_segs_hot) {
		log_dbg(cd, "Setting 'hot' segments.");
		r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_hot, 0);
		if (!r)
			/* presumably consumed by LUKS2_segments_set; cleared to avoid double free */
			rh->jobj_segs_hot = NULL;
	} else if (!hot && rh->jobj_segs_post) {
		log_dbg(cd, "Setting 'post' segments.");
		r = LUKS2_segments_set(cd, hdr, rh->jobj_segs_post, 0);
		if (!r)
			rh->jobj_segs_post = NULL;
	}
	if (r)
		return r;
	scount = LUKS2_segments_count(hdr);
	/* segment in reencryption has to hold reference on both digests */
	rseg = json_segments_segment_in_reencrypt(LUKS2_get_segments_jobj(hdr));
	if (rseg < 0 && hot)
		return -EINVAL;
	if (rseg >= 0) {
		LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_new, 1, 0);
		LUKS2_digest_segment_assign(cd, hdr, rseg, rh->digest_old, 1, 0);
	}
	forward = (rh->direction == CRYPT_REENCRYPT_FORWARD);
	if (hot) {
		/* Segment before the hotzone holds already-processed data (new
		 * digest when going forward), the one after holds untouched data. */
		if (rseg > 0)
			LUKS2_digest_segment_assign(cd, hdr, 0, forward ? rh->digest_new : rh->digest_old, 1, 0);
		if (scount > rseg + 1)
			LUKS2_digest_segment_assign(cd, hdr, rseg + 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
	} else {
		/* 'post' layout: at most two segments remain after the hotzone. */
		LUKS2_digest_segment_assign(cd, hdr, 0, forward || scount == 1 ? rh->digest_new : rh->digest_old, 1, 0);
		if (scount > 1)
			LUKS2_digest_segment_assign(cd, hdr, 1, forward ? rh->digest_old : rh->digest_new, 1, 0);
	}
	r = reencrypt_add_backup_segment(cd, hdr, rh, 0);
	if (r) {
		log_dbg(cd, "Failed to assign hot reencryption backup segment.");
		return r;
	}
	r = reencrypt_add_backup_segment(cd, hdr, rh, 1);
	if (r) {
		log_dbg(cd, "Failed to assign post reencryption backup segment.");
		return r;
	}
	return commit ? LUKS2_hdr_write(cd, hdr) : 0;
}
/*
 * Build and install (in memory only) the initial linear segment layout for
 * encryption initialization.
 *
 * Without data shift a single dynamic-length linear segment is created at
 * data_offset.  With a moved first segment, two segments describe the future
 * layout (see inline diagram).
 *
 * FIX: the original leaked jobj_segments (and a created but not yet added
 * jobj_segment_first/second) on the error paths before the objects were
 * handed over.  References are now released at 'err'; pointers are cleared
 * as ownership transfers (json_object_put(NULL) is a no-op in json-c).
 */
static int reencrypt_set_encrypt_segments(struct crypt_device *cd, struct luks2_hdr *hdr, uint64_t dev_size, uint64_t data_shift, bool move_first_segment, crypt_reencrypt_direction_info di)
{
	int r;
	uint64_t first_segment_offset, first_segment_length,
		 second_segment_offset, second_segment_length,
		 data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
	json_object *jobj_segment_first = NULL, *jobj_segment_second = NULL, *jobj_segments = NULL;

	if (dev_size < data_shift)
		return -EINVAL;

	/* Data shift during encryption is only supported backward. */
	if (data_shift && (di == CRYPT_REENCRYPT_FORWARD))
		return -ENOTSUP;

	if (move_first_segment) {
		/*
		 * future data_device layout:
		 * [future LUKS2 header (data shift size)][second data segment][gap (data shift size)][first data segment (data shift size)]
		 */
		first_segment_offset = dev_size;
		first_segment_length = data_shift;
		second_segment_offset = data_shift;
		second_segment_length = dev_size - 2 * data_shift;
	} else if (data_shift) {
		first_segment_offset = data_offset;
		first_segment_length = dev_size;
	} else {
		/* future data_device layout with detached header: [first data segment] */
		first_segment_offset = data_offset;
		first_segment_length = 0; /* dynamic */
	}

	jobj_segments = json_object_new_object();
	if (!jobj_segments)
		return -ENOMEM;

	r = -EINVAL;
	if (move_first_segment) {
		jobj_segment_first = json_segment_create_linear(first_segment_offset, &first_segment_length, 0);
		if (second_segment_length &&
		    !(jobj_segment_second = json_segment_create_linear(second_segment_offset, &second_segment_length, 0))) {
			log_dbg(cd, "Failed generate 2nd segment.");
			goto err;
		}
	} else
		jobj_segment_first = json_segment_create_linear(first_segment_offset, first_segment_length ? &first_segment_length : NULL, 0);

	if (!jobj_segment_first) {
		log_dbg(cd, "Failed generate 1st segment.");
		goto err;
	}

	json_object_object_add(jobj_segments, "0", jobj_segment_first);
	jobj_segment_first = NULL; /* owned by jobj_segments now */
	if (jobj_segment_second) {
		json_object_object_add(jobj_segments, "1", jobj_segment_second);
		jobj_segment_second = NULL; /* owned by jobj_segments now */
	}

	r = LUKS2_digest_segment_assign(cd, hdr, CRYPT_ANY_SEGMENT, CRYPT_ANY_DIGEST, 0, 0);
	if (!r) {
		r = LUKS2_segments_set(cd, hdr, jobj_segments, 0);
		jobj_segments = NULL; /* consumed by LUKS2_segments_set */
	}
err:
	json_object_put(jobj_segment_first);
	json_object_put(jobj_segment_second);
	json_object_put(jobj_segments);
	return r;
}
/*
 * Fill the preallocated dm target chain 'result' with one target per LUKS2
 * segment.  The segment currently in reencryption is mapped to the hotzone
 * helper device 'hz_device'; all other segments map to the data device.
 *
 * Returns the number of targets built (>= 0) on success, negative errno
 * on failure.
 */
static int reencrypt_make_targets(struct crypt_device *cd,
				struct luks2_hdr *hdr,
				struct device *hz_device,
				struct volume_key *vks,
				struct dm_target *result,
				uint64_t size)
{
	bool reenc_seg;
	struct volume_key *vk;
	uint64_t segment_size, segment_offset, segment_start = 0;
	int r;
	int s = 0;
	json_object *jobj, *jobj_segments = LUKS2_get_segments_jobj(hdr);
	while (result) {
		jobj = json_segments_get_segment(jobj_segments, s);
		if (!jobj) {
			log_dbg(cd, "Internal error. Segment %u is null.", s);
			r = -EINVAL;
			goto out;
		}
		reenc_seg = (s == json_segments_segment_in_reencrypt(jobj_segments));
		/* second argument presumably selects sector units -- TODO confirm */
		segment_offset = json_segment_get_offset(jobj, 1);
		segment_size = json_segment_get_size(jobj, 1);
		/* 'dynamic' length allowed in last segment only */
		if (!segment_size && !result->next)
			segment_size = (size >> SECTOR_SHIFT) - segment_start;
		if (!segment_size) {
			log_dbg(cd, "Internal error. Wrong segment size %u", s);
			r = -EINVAL;
			goto out;
		}
		if (!strcmp(json_segment_type(jobj), "crypt")) {
			/* Hotzone segment encrypts with the new key; others use the
			 * key assigned to the segment's digest. */
			vk = crypt_volume_key_by_id(vks, reenc_seg ? LUKS2_reencrypt_digest_new(hdr) : LUKS2_digest_by_segment(hdr, s));
			if (!vk) {
				log_err(cd, _("Missing key for dm-crypt segment %u"), s);
				r = -EINVAL;
				goto out;
			}
			/* hotzone device already starts at the data offset */
			if (reenc_seg)
				segment_offset -= crypt_get_data_offset(cd);
			r = dm_crypt_target_set(result, segment_start, segment_size,
						reenc_seg ? hz_device : crypt_data_device(cd),
						vk,
						json_segment_get_cipher(jobj),
						json_segment_get_iv_offset(jobj),
						segment_offset,
						"none",
						0,
						json_segment_get_sector_size(jobj));
			if (r) {
				log_err(cd, _("Failed to set dm-crypt segment."));
				goto out;
			}
		} else if (!strcmp(json_segment_type(jobj), "linear")) {
			r = dm_linear_target_set(result, segment_start, segment_size, reenc_seg ? hz_device : crypt_data_device(cd), segment_offset);
			if (r) {
				log_err(cd, _("Failed to set dm-linear segment."));
				goto out;
			}
		} else {
			/* unknown segment type */
			r = -EINVAL;
			goto out;
		}
		segment_start += segment_size;
		s++;
		result = result->next;
	}
	return s;
out:
	return r;
}
/* GLOBAL FIXME: audit function names and parameters names */
/* FIXME:
* 1) audit log routines
* 2) can't we derive hotzone device name from crypt context? (unlocked name, device uuid, etc?)
*/
/*
 * Load a new dm table into the overlay device: one target per LUKS2 segment,
 * with the segment currently in reencryption redirected to the hotzone
 * helper device.  The table is only reloaded into the inactive slot; it
 * becomes live on a later dm resume.
 */
static int reencrypt_load_overlay_device(struct crypt_device *cd, struct luks2_hdr *hdr,
	const char *overlay, const char *hotzone, struct volume_key *vks, uint64_t size,
	uint32_t flags)
{
	char hz_path[PATH_MAX];
	int r;
	struct device *hz_dev = NULL;
	struct crypt_dm_active_device dmd = {
		.flags = flags,
	};
	log_dbg(cd, "Loading new table for overlay device %s.", overlay);
	/* Build "<dm_dir>/<hotzone>" path; reject truncation. */
	r = snprintf(hz_path, PATH_MAX, "%s/%s", dm_get_dir(), hotzone);
	if (r < 0 || r >= PATH_MAX) {
		r = -EINVAL;
		goto out;
	}
	r = device_alloc(cd, &hz_dev, hz_path);
	if (r)
		goto out;
	/* One dm target slot per LUKS2 segment. */
	r = dm_targets_allocate(&dmd.segment, LUKS2_segments_count(hdr));
	if (r)
		goto out;
	/* returns target count (> 0) on success; negative errno on failure */
	r = reencrypt_make_targets(cd, hdr, hz_dev, vks, &dmd.segment, size);
	if (r < 0)
		goto out;
	r = dm_reload_device(cd, overlay, &dmd, 0, 0);
	/* what else on error here ? */
out:
	dm_targets_free(cd, &dmd);
	device_free(cd, hz_dev);
	return r;
}
/*
 * Copy the live table of 'source' into device 'target' and resume it.
 * If 'target' does not exist yet, create it with the source table instead.
 * Used to hand the origin mapping over during reencryption stack setup.
 */
static int reencrypt_replace_device(struct crypt_device *cd, const char *target, const char *source, uint32_t flags)
{
	int r, exists = 1;
	struct crypt_dm_active_device dmd_source, dmd_target = {};
	uint32_t dmflags = DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH;
	log_dbg(cd, "Replacing table in device %s with table from device %s.", target, source);
	/* check only whether target device exists */
	r = dm_status_device(cd, target);
	if (r < 0) {
		if (r == -ENODEV)
			exists = 0;
		else
			return r;
	}
	r = dm_query_device(cd, source, DM_ACTIVE_DEVICE | DM_ACTIVE_CRYPT_CIPHER |
			    DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY, &dmd_source);
	if (r < 0)
		return r;
	/* size needed only to compare against source below */
	if (exists && ((r = dm_query_device(cd, target, 0, &dmd_target)) < 0))
		goto err;
	dmd_source.flags |= flags;
	dmd_source.uuid = crypt_get_uuid(cd);
	if (exists) {
		if (dmd_target.size != dmd_source.size) {
			log_err(cd, _("Source and target device sizes don't match. Source %" PRIu64 ", target: %" PRIu64 "."),
				dmd_source.size, dmd_target.size);
			r = -EINVAL;
			goto err;
		}
		/* reload inactive slot, then resume (no flush, skip lockfs) */
		r = dm_reload_device(cd, target, &dmd_source, 0, 0);
		if (!r) {
			log_dbg(cd, "Resuming device %s", target);
			r = dm_resume_device(cd, target, dmflags | act2dmflags(dmd_source.flags));
		}
	} else
		r = dm_create_device(cd, target, CRYPT_SUBDEV, &dmd_source);
err:
	dm_targets_free(cd, &dmd_source);
	dm_targets_free(cd, &dmd_target);
	return r;
}
/*
 * Redirect the live dm mapping 'name' to a 1:1 linear mapping over
 * 'new_backend_name' (the overlay device): reload the linear table into
 * the inactive slot and resume without flush.
 */
static int reencrypt_swap_backing_device(struct crypt_device *cd, const char *name,
			  const char *new_backend_name)
{
	int r;
	struct device *backing_dev = NULL;
	char backing_path[PATH_MAX] = { 0 };
	struct crypt_dm_active_device dmd = {};

	log_dbg(cd, "Redirecting %s mapping to new backing device: %s.", name, new_backend_name);

	/* Build "<dm_dir>/<new_backend_name>" path; reject truncation. */
	r = snprintf(backing_path, PATH_MAX, "%s/%s", dm_get_dir(), new_backend_name);
	if (r < 0 || r >= PATH_MAX) {
		r = -EINVAL;
		goto out;
	}

	r = device_alloc(cd, &backing_dev, backing_path);
	if (r)
		goto out;

	/* Pick up the backing device size into dmd.size. */
	r = device_block_adjust(cd, backing_dev, DEV_OK,
				0, &dmd.size, &dmd.flags);
	if (r)
		goto out;

	r = dm_linear_target_set(&dmd.segment, 0, dmd.size, backing_dev, 0);
	if (r)
		goto out;

	r = dm_reload_device(cd, name, &dmd, 0, 0);
	if (!r) {
		log_dbg(cd, "Resuming device %s", name);
		r = dm_resume_device(cd, name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
	}
out:
	dm_targets_free(cd, &dmd);
	device_free(cd, backing_dev);
	return r;
}
/*
 * Activate the hotzone helper device 'name' as a plain linear mapping onto
 * the data device, starting at the new data offset.  The device is created
 * with the caller-supplied activation flags (typically CRYPT_ACTIVATE_PRIVATE).
 */
static int reencrypt_activate_hotzone_device(struct crypt_device *cd, const char *name, uint64_t device_size, uint32_t flags)
{
	int r;
	uint64_t new_offset = reencrypt_get_data_offset_new(crypt_get_hdr(cd, CRYPT_LUKS2)) >> SECTOR_SHIFT;
	struct crypt_dm_active_device dmd = {
		.flags = flags,
		.uuid = crypt_get_uuid(cd),
		.size = device_size >> SECTOR_SHIFT
	};

	log_dbg(cd, "Activating hotzone device %s.", name);

	/* Clamp size/flags against the underlying data device, then build the
	 * single linear target and create the dm device. */
	r = device_block_adjust(cd, crypt_data_device(cd), DEV_OK,
				new_offset, &dmd.size, &dmd.flags);
	if (!r)
		r = dm_linear_target_set(&dmd.segment, 0, dmd.size, crypt_data_device(cd), new_offset);
	if (!r)
		r = dm_create_device(cd, name, CRYPT_SUBDEV, &dmd);

	dm_targets_free(cd, &dmd);
	return r;
}
/*
 * Build the online reencryption device stack:
 *
 *   name (unlocked LUKS device) --linear--> overlay --segments--> data device
 *   hotzone --linear--> data device
 *
 * On failure the helper devices are removed (best effort).
 */
static int reencrypt_init_device_stack(struct crypt_device *cd,
				     const struct luks2_reenc_context *rh)
{
	int r;
	/* Activate hotzone device 1:1 linear mapping to data_device */
	r = reencrypt_activate_hotzone_device(cd, rh->hotzone_name, rh->device_size, CRYPT_ACTIVATE_PRIVATE);
	if (r) {
		log_err(cd, _("Failed to activate hotzone device %s."), rh->hotzone_name);
		return r;
	}
	/*
	 * Activate overlay device with exactly same table as original 'name' mapping.
	 * Note that within this step the 'name' device may already include a table
	 * constructed from more than single dm-crypt segment. Therefore transfer
	 * mapping as is.
	 *
	 * If we're about to resume reencryption orig mapping has to be already validated for
	 * abrupt shutdown and rchunk_offset has to point on next chunk to reencrypt!
	 *
	 * TODO: in crypt_activate_by*
	 */
	r = reencrypt_replace_device(cd, rh->overlay_name, rh->device_name, CRYPT_ACTIVATE_PRIVATE);
	if (r) {
		log_err(cd, _("Failed to activate overlay device %s with actual origin table."), rh->overlay_name);
		goto err;
	}
	/* swap origin mapping to overlay device */
	r = reencrypt_swap_backing_device(cd, rh->device_name, rh->overlay_name);
	if (r) {
		log_err(cd, _("Failed to load new mapping for device %s."), rh->device_name);
		goto err;
	}
	/*
	 * Now the 'name' (unlocked luks) device is mapped via dm-linear to an overlay dev.
	 * The overlay device has a original live table of 'name' device in-before the swap.
	 */
	return 0;
err:
	/* TODO: force error helper devices on error path */
	dm_remove_device(cd, rh->overlay_name, 0);
	dm_remove_device(cd, rh->hotzone_name, 0);
	return r;
}
/* TODO:
* 1) audit error path. any error in this routine is fatal and should be unlikely.
* usually it would hint some collision with another userspace process touching
* dm devices directly.
*/
/*
 * Arm a freshly reloaded overlay table: suspend overlay, then hotzone
 * (top to bottom), then resume the overlay so its inactive table goes live.
 * The hotzone stays suspended; the caller resumes it later.
 */
static int reenc_refresh_helper_devices(struct crypt_device *cd, const char *overlay, const char *hotzone)
{
	int r;
	/*
	 * we have to explicitly suspend the overlay device before suspending
	 * the hotzone one. Resuming overlay device (aka switching tables) only
	 * after suspending the hotzone may lead to deadlock.
	 *
	 * In other words: always suspend the stack from top to bottom!
	 */
	r = dm_suspend_device(cd, overlay, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
	if (r) {
		log_err(cd, _("Failed to suspend device %s."), overlay);
		return r;
	}
	/* suspend HZ device */
	r = dm_suspend_device(cd, hotzone, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
	if (r) {
		log_err(cd, _("Failed to suspend device %s."), hotzone);
		return r;
	}
	/* resume overlay device: inactive table (with hotzone) -> live */
	r = dm_resume_device(cd, overlay, DM_RESUME_PRIVATE);
	if (r)
		log_err(cd, _("Failed to resume device %s."), overlay);
	return r;
}
/*
 * Reload the overlay table for the next hotzone and arm it on the helper
 * device stack.  Returns REENC_OK on success, REENC_ERR when the reload
 * failed (nothing changed) or REENC_ROLLBACK when the stack refresh failed
 * and the caller must roll back.
 */
static int reencrypt_refresh_overlay_devices(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		const char *overlay,
		const char *hotzone,
		struct volume_key *vks,
		uint64_t device_size,
		uint32_t flags)
{
	/* Push the refreshed segment table into the overlay's inactive slot. */
	if (reencrypt_load_overlay_device(cd, hdr, overlay, hotzone, vks, device_size, flags)) {
		log_err(cd, _("Failed to reload device %s."), overlay);
		return REENC_ERR;
	}

	/* Suspend/resume cycle over the helper stack activates the new table. */
	if (reenc_refresh_helper_devices(cd, overlay, hotzone)) {
		log_err(cd, _("Failed to refresh reencryption devices stack."));
		return REENC_ROLLBACK;
	}

	return REENC_OK;
}
/*
 * Move data from the head of the data device into the relocated first
 * segment area, making room for the LUKS2 header during encryption
 * initialization with data shift.
 *
 * Reads 'data_shift' bytes from offset 0 of the exclusively opened 'devfd'
 * and writes them to the offset stored in segment 0.  Returns 0 on success,
 * -EINVAL on inconsistent metadata, -ENOMEM or -EIO on failure.
 */
static int reencrypt_move_data(struct crypt_device *cd, int devfd, uint64_t data_shift)
{
	void *buffer;
	int r;
	ssize_t ret;
	uint64_t buffer_len, offset;
	struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);

	log_dbg(cd, "Going to move data from head of data device.");

	buffer_len = data_shift;
	if (!buffer_len)
		return -EINVAL;

	offset = json_segment_get_offset(LUKS2_get_segment_jobj(hdr, 0), 0);

	/* this is nonsense anyway */
	if (buffer_len != json_segment_get_size(LUKS2_get_segment_jobj(hdr, 0), 0)) {
		log_dbg(cd, "buffer_len %" PRIu64", segment size %" PRIu64, buffer_len, json_segment_get_size(LUKS2_get_segment_jobj(hdr, 0), 0));
		return -EINVAL;
	}

	/* Aligned buffer required for blockwise (O_DIRECT-capable) I/O. */
	if (posix_memalign(&buffer, device_alignment(crypt_data_device(cd)), buffer_len))
		return -ENOMEM;

	ret = read_lseek_blockwise(devfd,
			device_block_size(cd, crypt_data_device(cd)),
			device_alignment(crypt_data_device(cd)),
			buffer, buffer_len, 0);
	if (ret < 0 || (uint64_t)ret != buffer_len) {
		r = -EIO;
		goto err;
	}

	log_dbg(cd, "Going to write %" PRIu64 " bytes at offset %" PRIu64, buffer_len, offset);
	ret = write_lseek_blockwise(devfd,
			device_block_size(cd, crypt_data_device(cd)),
			device_alignment(crypt_data_device(cd)),
			buffer, buffer_len, offset);
	if (ret < 0 || (uint64_t)ret != buffer_len) {
		r = -EIO;
		goto err;
	}

	r = 0;
err:
	/* FIX: the buffer holds device data (may contain plaintext); wipe it
	 * with crypt_memzero as done elsewhere in this file -- a plain memset
	 * immediately before free() may be elided by the compiler. */
	crypt_memzero(buffer, buffer_len);
	free(buffer);
	return r;
}
/*
 * Create the backup segments describing the layouts before ("backup-previous")
 * and after ("backup-final") reencryption, plus the optional
 * "backup-moved-segment" copy when the first segment is relocated during
 * encryption.  Segments are appended at the first unused ids and linked to
 * the corresponding digests.  Changes are in-memory only; the caller commits.
 */
static int reencrypt_make_backup_segments(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		int keyslot_new,
		const char *cipher,
		uint64_t data_offset,
		const struct crypt_params_reencrypt *params)
{
	int r, segment, moved_segment = -1, digest_old = -1, digest_new = -1;
	json_object *jobj_segment_new = NULL, *jobj_segment_old = NULL, *jobj_segment_bcp = NULL;
	uint32_t sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
	uint64_t segment_offset, tmp, data_shift = params->data_shift << SECTOR_SHIFT;
	/* A new digest exists unless we are decrypting. */
	if (params->mode != CRYPT_REENCRYPT_DECRYPT) {
		digest_new = LUKS2_digest_by_keyslot(hdr, keyslot_new);
		if (digest_new < 0)
			return -EINVAL;
	}
	/* An old digest exists unless we are encrypting plain data. */
	if (params->mode != CRYPT_REENCRYPT_ENCRYPT) {
		digest_old = LUKS2_digest_by_segment(hdr, CRYPT_DEFAULT_SEGMENT);
		if (digest_old < 0)
			return -EINVAL;
	}
	segment = LUKS2_segment_first_unused_id(hdr);
	if (segment < 0)
		return -EINVAL;
	if (params->mode == CRYPT_REENCRYPT_ENCRYPT &&
	    (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT)) {
		json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_segment_bcp);
		r = LUKS2_segment_set_flag(jobj_segment_bcp, "backup-moved-segment");
		if (r)
			goto err;
		moved_segment = segment++;
		/* NOTE(review): add_by_uint return value unchecked here -- confirm intended */
		json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), moved_segment, jobj_segment_bcp);
	}
	/* FIXME: Add detection for case (digest old == digest new && old segment == new segment) */
	if (digest_old >= 0)
		json_object_copy(LUKS2_get_segment_jobj(hdr, CRYPT_DEFAULT_SEGMENT), &jobj_segment_old);
	else if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
		/* Encryption: "previous" layout is plain linear over current data. */
		r = LUKS2_get_data_size(hdr, &tmp, NULL);
		if (r)
			goto err;
		jobj_segment_old = json_segment_create_linear(0, tmp ? &tmp : NULL, 0);
	}
	if (!jobj_segment_old) {
		r = -EINVAL;
		goto err;
	}
	r = LUKS2_segment_set_flag(jobj_segment_old, "backup-previous");
	if (r)
		goto err;
	json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_old);
	/* ownership moved to header segments object */
	jobj_segment_old = NULL;
	if (digest_old >= 0)
		LUKS2_digest_segment_assign(cd, hdr, segment, digest_old, 1, 0);
	segment++;
	if (digest_new >= 0) {
		segment_offset = data_offset;
		/* data shift moves the final segment start (backward direction) */
		if (params->mode != CRYPT_REENCRYPT_ENCRYPT &&
		    modify_offset(&segment_offset, data_shift, params->direction)) {
			r = -EINVAL;
			goto err;
		}
		jobj_segment_new = json_segment_create_crypt(segment_offset,
							     crypt_get_iv_offset(cd),
							     NULL, cipher, sector_size, 0);
	} else if (params->mode == CRYPT_REENCRYPT_DECRYPT) {
		/* Decryption: "final" layout is plain linear. */
		segment_offset = data_offset;
		if (modify_offset(&segment_offset, data_shift, params->direction)) {
			r = -EINVAL;
			goto err;
		}
		jobj_segment_new = json_segment_create_linear(segment_offset, NULL, 0);
	}
	if (!jobj_segment_new) {
		r = -EINVAL;
		goto err;
	}
	r = LUKS2_segment_set_flag(jobj_segment_new, "backup-final");
	if (r)
		goto err;
	json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_new);
	/* ownership moved to header segments object */
	jobj_segment_new = NULL;
	if (digest_new >= 0)
		LUKS2_digest_segment_assign(cd, hdr, segment, digest_new, 1, 0);
	/* FIXME: also check occupied space by keyslot in shrunk area */
	if (params->direction == CRYPT_REENCRYPT_FORWARD && data_shift &&
	    crypt_metadata_device(cd) == crypt_data_device(cd) &&
	    LUKS2_set_keyslots_size(cd, hdr, json_segment_get_offset(reencrypt_segment_new(hdr), 0))) {
		log_err(cd, _("Failed to set new keyslots area size."));
		r = -EINVAL;
		goto err;
	}
	return 0;
err:
	json_object_put(jobj_segment_new);
	json_object_put(jobj_segment_old);
	return r;
}
/*
 * Verify that the volume keys for the old and new digests are present in
 * 'vks' and match their digests; when kernel keyring use is enabled, upload
 * them.  On a failure after the new key may already have been uploaded, the
 * keyring keys are dropped again.
 */
static int reencrypt_verify_and_upload_keys(struct crypt_device *cd, struct luks2_hdr *hdr, int digest_old, int digest_new, struct volume_key *vks)
{
	int r;
	struct volume_key *vk;

	if (digest_new >= 0) {
		vk = crypt_volume_key_by_id(vks, digest_new);
		if (!vk)
			return -ENOENT;

		if (LUKS2_digest_verify_by_digest(cd, hdr, digest_new, vk) != digest_new)
			return -EINVAL;

		if (crypt_use_keyring_for_vk(cd) &&
		    (r = LUKS2_volume_key_load_in_keyring_by_digest(cd, hdr, vk, crypt_volume_key_get_id(vk))))
			return r;
	}

	/* Old key is checked only when it differs from the new one. */
	if (digest_old >= 0 && digest_old != digest_new) {
		vk = crypt_volume_key_by_id(vks, digest_old);
		if (!vk) {
			r = -ENOENT;
			goto err;
		}

		if (LUKS2_digest_verify_by_digest(cd, hdr, digest_old, vk) != digest_old) {
			r = -EINVAL;
			goto err;
		}

		if (crypt_use_keyring_for_vk(cd) &&
		    (r = LUKS2_volume_key_load_in_keyring_by_digest(cd, hdr, vk, crypt_volume_key_get_id(vk))))
			goto err;
	}

	return 0;
err:
	/* Undo any keyring upload performed above. */
	crypt_drop_keyring_key(cd, vks);
	return r;
}
/* This function must be called with metadata lock held */
/*
 * Initialize LUKS2 reencryption metadata: validate parameters and device
 * geometry, create the reencryption keyslot and backup segments, unlock and
 * verify volume keys, optionally relocate the first data segment, and set
 * the online-reencryption requirement flag (the only on-disk write here).
 *
 * Returns the reencryption keyslot number on success, negative errno on
 * failure.  On failure the in-memory metadata is re-read from disk.
 */
static int reencrypt_init(struct crypt_device *cd,
		const char *name,
		struct luks2_hdr *hdr,
		const char *passphrase,
		size_t passphrase_size,
		int keyslot_old,
		int keyslot_new,
		const char *cipher,
		const char *cipher_mode,
		const struct crypt_params_reencrypt *params,
		struct volume_key **vks)
{
	bool move_first_segment;
	char _cipher[128];
	uint32_t sector_size;
	int r, reencrypt_keyslot, devfd = -1;
	uint64_t data_offset, dev_size = 0;
	struct crypt_dm_active_device dmd_target, dmd_source = {
		.uuid = crypt_get_uuid(cd),
		.flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
	};
	if (!params || params->mode > CRYPT_REENCRYPT_DECRYPT)
		return -EINVAL;
	/* cipher and a new keyslot are mandatory unless decrypting */
	if (params->mode != CRYPT_REENCRYPT_DECRYPT &&
	    (!params->luks2 || !(cipher && cipher_mode) || keyslot_new < 0))
		return -EINVAL;
	log_dbg(cd, "Initializing reencryption (mode: %s) in LUKS2 metadata.",
		crypt_reencrypt_mode_to_str(params->mode));
	move_first_segment = (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT);
	/* implicit sector size 512 for decryption */
	sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE;
	if (sector_size < SECTOR_SIZE || sector_size > MAX_SECTOR_SIZE ||
	    NOTPOW2(sector_size)) {
		log_err(cd, _("Unsupported encryption sector size."));
		return -EINVAL;
	}
	/* build "cipher" or "cipher-mode" spec string */
	if (!cipher_mode || *cipher_mode == '\0')
		snprintf(_cipher, sizeof(_cipher), "%s", cipher);
	else
		snprintf(_cipher, sizeof(_cipher), "%s-%s", cipher, cipher_mode);
	if (MISALIGNED(params->data_shift, sector_size >> SECTOR_SHIFT)) {
		log_err(cd, _("Data shift is not aligned to requested encryption sector size (%" PRIu32 " bytes)."), sector_size);
		return -EINVAL;
	}
	data_offset = LUKS2_get_data_offset(hdr) << SECTOR_SHIFT;
	r = device_check_access(cd, crypt_data_device(cd), DEV_OK);
	if (r)
		return r;
	r = device_check_size(cd, crypt_data_device(cd), data_offset, 1);
	if (r)
		return r;
	r = device_size(crypt_data_device(cd), &dev_size);
	if (r)
		return r;
	/* from here on dev_size is the usable data area size in bytes */
	dev_size -= data_offset;
	if (MISALIGNED(dev_size, sector_size)) {
		log_err(cd, _("Data device is not aligned to requested encryption sector size (%" PRIu32 " bytes)."), sector_size);
		return -EINVAL;
	}
	reencrypt_keyslot = LUKS2_keyslot_find_empty(hdr);
	if (reencrypt_keyslot < 0) {
		log_err(cd, _("All key slots full."));
		return -EINVAL;
	}
	/*
	 * We must perform data move with exclusive open data device
	 * to exclude another cryptsetup process to collide with
	 * encryption initialization (or mount)
	 */
	if (move_first_segment) {
		if (dev_size < 2 * (params->data_shift << SECTOR_SHIFT)) {
			log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
			return -EINVAL;
		}
		if (params->data_shift < LUKS2_get_data_offset(hdr)) {
			log_err(cd, _("Data shift (%" PRIu64 " sectors) is less than future data offset (%" PRIu64 " sectors)."), params->data_shift, LUKS2_get_data_offset(hdr));
			return -EINVAL;
		}
		devfd = device_open_excl(cd, crypt_data_device(cd), O_RDWR);
		if (devfd < 0) {
			if (devfd == -EBUSY)
				log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
			return -EINVAL;
		}
	}
	if (params->mode == CRYPT_REENCRYPT_ENCRYPT) {
		/* in-memory only */
		r = reencrypt_set_encrypt_segments(cd, hdr, dev_size, params->data_shift << SECTOR_SHIFT, move_first_segment, params->direction);
		if (r)
			goto err;
	}
	r = LUKS2_keyslot_reencrypt_create(cd, hdr, reencrypt_keyslot,
					   params);
	if (r < 0)
		goto err;
	r = reencrypt_make_backup_segments(cd, hdr, keyslot_new, _cipher, data_offset, params);
	if (r) {
		log_dbg(cd, "Failed to create reencryption backup device segments.");
		goto err;
	}
	r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, vks);
	if (r < 0)
		goto err;
	/* For an active device, verify the mapping matches the metadata. */
	if (name && params->mode != CRYPT_REENCRYPT_ENCRYPT) {
		r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
		if (r)
			goto err;
		r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
				    DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
				    DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
		if (r < 0)
			goto err;
		r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
		if (!r) {
			r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
			if (r)
				log_err(cd, _("Mismatching parameters on device %s."), name);
		}
		dm_targets_free(cd, &dmd_source);
		dm_targets_free(cd, &dmd_target);
		free(CONST_CAST(void*)dmd_target.uuid);
		if (r)
			goto err;
	}
	if (move_first_segment && reencrypt_move_data(cd, devfd, params->data_shift << SECTOR_SHIFT)) {
		r = -EIO;
		goto err;
	}
	/* This must be first and only write in LUKS2 metadata during _reencrypt_init */
	r = reencrypt_update_flag(cd, 1, true);
	if (r) {
		log_dbg(cd, "Failed to set online-reencryption requirement.");
		r = -EINVAL;
	} else
		r = reencrypt_keyslot;
err:
	device_release_excl(cd, crypt_data_device(cd));
	/* on failure, drop in-memory changes by re-reading metadata from disk */
	if (r < 0)
		crypt_load(cd, CRYPT_LUKS2, NULL);
	return r;
}
/*
 * Persist the resilience (crash protection) data for a fully processed
 * hotzone buffer: checksums or a full journal copy are stored in the
 * reencrypt keyslot area; data-shift mode just commits the header.
 */
static int reencrypt_hotzone_protect_final(struct crypt_device *cd,
	struct luks2_hdr *hdr, struct luks2_reenc_context *rh,
	const void *buffer, size_t buffer_len)
{
	const void *pbuffer;
	size_t data_offset, len;
	int r;

	switch (rh->rp.type) {
	case REENC_PROTECTION_NONE:
		return 0;
	case REENC_PROTECTION_CHECKSUM:
		log_dbg(cd, "Checksums hotzone resilience.");
		/* One hash per alignment-sized chunk of the hotzone buffer. */
		for (data_offset = 0, len = 0; data_offset < buffer_len; data_offset += rh->alignment, len += rh->rp.p.csum.hash_size) {
			if (crypt_hash_write(rh->rp.p.csum.ch, (const char *)buffer + data_offset, rh->alignment)) {
				log_dbg(cd, "Failed to hash sector at offset %zu.", data_offset);
				return -EINVAL;
			}
			if (crypt_hash_final(rh->rp.p.csum.ch, (char *)rh->rp.p.csum.checksums + len, rh->rp.p.csum.hash_size)) {
				log_dbg(cd, "Failed to finalize hash.");
				return -EINVAL;
			}
		}
		pbuffer = rh->rp.p.csum.checksums;
		break;
	case REENC_PROTECTION_JOURNAL:
		log_dbg(cd, "Journal hotzone resilience.");
		len = buffer_len;
		pbuffer = buffer;
		break;
	case REENC_PROTECTION_DATASHIFT:
		log_dbg(cd, "Data shift hotzone resilience.");
		return LUKS2_hdr_write(cd, hdr);
	default:
		return -EINVAL;
	}

	log_dbg(cd, "Going to store %zu bytes in reencrypt keyslot.", len);

	r = LUKS2_keyslot_reencrypt_store(cd, hdr, rh->reenc_keyslot, pbuffer, len);

	return r > 0 ? 0 : r;
}
/*
 * Advance the reencryption context after one hotzone was processed:
 * move 'offset' by the amount just transferred ('read'), clamp 'length'
 * at the device/data-shift boundaries and update the progress counter.
 */
static int reencrypt_context_update(struct crypt_device *cd,
	struct luks2_reenc_context *rh)
{
	if (rh->read < 0)
		return -EINVAL;
	if (rh->direction == CRYPT_REENCRYPT_BACKWARD) {
		if (rh->data_shift && rh->mode == CRYPT_REENCRYPT_ENCRYPT) {
			/* encryption with data shift: step backwards in data_shift chunks */
			if (rh->offset)
				rh->offset -= rh->data_shift;
			/* partial chunk left before the data_shift boundary */
			if (rh->offset && (rh->offset < rh->data_shift)) {
				rh->length = rh->offset;
				rh->offset = rh->data_shift;
			}
			if (!rh->offset)
				rh->length = rh->data_shift;
		} else {
			/* plain backward: shrink last chunk, then move offset down */
			if (rh->offset < rh->length)
				rh->length = rh->offset;
			rh->offset -= rh->length;
		}
	} else if (rh->direction == CRYPT_REENCRYPT_FORWARD) {
		rh->offset += (uint64_t)rh->read;
		/* it fails in-case of device_size < rh->offset later */
		if (rh->device_size - rh->offset < rh->length)
			rh->length = rh->device_size - rh->offset;
	} else
		return -EINVAL;
	if (rh->device_size < rh->offset) {
		log_dbg(cd, "Calculated reencryption offset %" PRIu64 " is beyond device size %" PRIu64 ".", rh->offset, rh->device_size);
		return -EINVAL;
	}
	rh->progress += (uint64_t)rh->read;
	return 0;
}
/*
 * Load a reencryption context according to the current metadata state:
 * a clean context for CRYPT_REENCRYPT_CLEAN, a recovery context for
 * CRYPT_REENCRYPT_CRASH.  Fails when no reencryption is in progress.
 */
static int reencrypt_load(struct crypt_device *cd, struct luks2_hdr *hdr,
		uint64_t device_size,
		const struct crypt_params_reencrypt *params,
		struct luks2_reenc_context **rh)
{
	int r;
	struct luks2_reenc_context *tmp = NULL;
	crypt_reencrypt_info ri = LUKS2_reenc_status(hdr);

	switch (ri) {
	case CRYPT_REENCRYPT_CLEAN:
		r = reencrypt_load_clean(cd, hdr, device_size, &tmp, params);
		break;
	case CRYPT_REENCRYPT_CRASH:
		r = reencrypt_load_crashed(cd, hdr, device_size, &tmp);
		break;
	case CRYPT_REENCRYPT_NONE:
		log_err(cd, _("No LUKS2 reencryption in progress."));
		return -EINVAL;
	default:
		r = -EINVAL;
	}

	if (r < 0 || !tmp) {
		log_err(cd, _("Failed to load LUKS2 reencryption context."));
		return r;
	}

	*rh = tmp;

	return 0;
}
/* internal only */
/*
 * Acquire the write lock serializing LUKS2 reencryption for this device.
 * The lock resource name is derived from the device UUID (from the context
 * or the explicit 'uuid' argument; when both are given they must match).
 * When metadata locking is disabled, *reencrypt_lock is set to NULL and
 * success is returned.
 */
int crypt_reencrypt_lock(struct crypt_device *cd, const char *uuid, struct crypt_lock_handle **reencrypt_lock)
{
	int r;
	char *lock_resource;
	const char *tmp = crypt_get_uuid(cd);
	/* at least one UUID source is required */
	if (!tmp && !uuid)
		return -EINVAL;
	if (!uuid)
		uuid = tmp;
	if (!tmp)
		tmp = uuid;
	/* if both were provided they must refer to the same device */
	if (strcmp(uuid, tmp))
		return -EINVAL;
	if (!crypt_metadata_locking_enabled()) {
		*reencrypt_lock = NULL;
		return 0;
	}
	r = asprintf(&lock_resource, "LUKS2-reencryption-%s", uuid);
	if (r < 0)
		return -ENOMEM;
	/* prefix "LUKS2-reencryption-" is 19 chars; r < 20 means empty uuid */
	if (r < 20) {
		r = -EINVAL;
		goto out;
	}
	r = crypt_write_lock(cd, lock_resource, false, reencrypt_lock);
out:
	free(lock_resource);
	return r;
}
/* internal only */
/*
 * Release a reencryption lock taken by crypt_reencrypt_lock().
 * NOTE(review): called with NULL handle in the locking-disabled case --
 * presumably crypt_unlock_internal tolerates NULL; confirm.
 */
void crypt_reencrypt_unlock(struct crypt_device *cd, struct crypt_lock_handle *reencrypt_lock)
{
	crypt_unlock_internal(cd, reencrypt_lock);
}
/*
 * Take the reencryption lock and verify, with the lock held and metadata
 * freshly reloaded, that the device is in a clean in-reencryption state.
 * On success the lock handle is passed out to the caller; on any failure
 * the lock is released again.
 */
static int reencrypt_lock_and_verify(struct crypt_device *cd, struct luks2_hdr *hdr,
		struct crypt_lock_handle **reencrypt_lock)
{
	int r;
	crypt_reencrypt_info ri;
	struct crypt_lock_handle *h;
	ri = LUKS2_reenc_status(hdr);
	if (ri == CRYPT_REENCRYPT_INVALID) {
		log_err(cd, _("Failed to get reencryption state."));
		return -EINVAL;
	}
	if (ri < CRYPT_REENCRYPT_CLEAN) {
		log_err(cd, _("Device is not in reencryption."));
		return -EINVAL;
	}
	r = crypt_reencrypt_lock(cd, NULL, &h);
	if (r < 0) {
		if (r == -EBUSY)
			log_err(cd, _("Reencryption process is already running."));
		else
			log_err(cd, _("Failed to acquire reencryption lock."));
		return r;
	}
	/* With reencryption lock held, reload device context and verify metadata state */
	r = crypt_load(cd, CRYPT_LUKS2, NULL);
	if (r) {
		crypt_reencrypt_unlock(cd, h);
		return r;
	}
	/* state may have changed between the first check and taking the lock */
	ri = LUKS2_reenc_status(hdr);
	if (ri == CRYPT_REENCRYPT_CLEAN) {
		*reencrypt_lock = h;
		return 0;
	}
	crypt_reencrypt_unlock(cd, h);
	log_err(cd, _("Cannot proceed with reencryption. Run reencryption recovery first."));
	return -EINVAL;
}
static int reencrypt_load_by_passphrase(struct crypt_device *cd,
const char *name,
const char *passphrase,
size_t passphrase_size,
int keyslot_old,
int keyslot_new,
struct volume_key **vks,
const struct crypt_params_reencrypt *params)
{
int r, old_ss, new_ss;
struct luks2_hdr *hdr;
struct crypt_lock_handle *reencrypt_lock;
struct luks2_reenc_context *rh;
struct crypt_dm_active_device dmd_target, dmd_source = {
.uuid = crypt_get_uuid(cd),
.flags = CRYPT_ACTIVATE_SHARED /* turn off exclusive open checks */
};
uint64_t minimal_size, device_size, mapping_size = 0, required_size = 0;
bool dynamic;
struct crypt_params_reencrypt rparams = {};
uint32_t flags = 0;
if (params) {
rparams = *params;
required_size = params->device_size;
}
log_dbg(cd, "Loading LUKS2 reencryption context.");
rh = crypt_get_reenc_context(cd);
if (rh) {
LUKS2_reenc_context_free(cd, rh);
crypt_set_reenc_context(cd, NULL);
rh = NULL;
}
hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
r = reencrypt_lock_and_verify(cd, hdr, &reencrypt_lock);
if (r)
return r;
/* From now on we hold reencryption lock */
if (LUKS2_get_data_size(hdr, &minimal_size, &dynamic))
return -EINVAL;
/* some configurations provides fixed device size */
r = luks2_check_device_size(cd, hdr, minimal_size, &device_size, false, dynamic);
if (r) {
r = -EINVAL;
goto err;
}
minimal_size >>= SECTOR_SHIFT;
old_ss = reencrypt_get_sector_size_old(hdr);
new_ss = reencrypt_get_sector_size_new(hdr);
r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
if (r == -ENOENT) {
log_dbg(cd, "Keys are not ready. Unlocking all volume keys.");
r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new, passphrase, passphrase_size, vks);
if (r < 0)
goto err;
r = reencrypt_verify_and_upload_keys(cd, hdr, LUKS2_reencrypt_digest_old(hdr), LUKS2_reencrypt_digest_new(hdr), *vks);
}
if (r < 0)
goto err;
if (name) {
r = dm_query_device(cd, name, DM_ACTIVE_UUID | DM_ACTIVE_DEVICE |
DM_ACTIVE_CRYPT_KEYSIZE | DM_ACTIVE_CRYPT_KEY |
DM_ACTIVE_CRYPT_CIPHER, &dmd_target);
if (r < 0)
goto err;
flags = dmd_target.flags;
r = LUKS2_assembly_multisegment_dmd(cd, hdr, *vks, LUKS2_get_segments_jobj(hdr), &dmd_source);
if (!r) {
r = crypt_compare_dm_devices(cd, &dmd_source, &dmd_target);
if (r)
log_err(cd, _("Mismatching parameters on device %s."), name);
}
dm_targets_free(cd, &dmd_source);
dm_targets_free(cd, &dmd_target);
free(CONST_CAST(void*)dmd_target.uuid);
if (r)
goto err;
mapping_size = dmd_target.size;
}
r = -EINVAL;
if (required_size && mapping_size && (required_size != mapping_size)) {
log_err(cd, _("Active device size and requested reencryption size don't match."));
goto err;
}
if (mapping_size)
required_size = mapping_size;
if (required_size) {
/* TODO: Add support for changing fixed minimal size in reencryption mda where possible */
if ((minimal_size && (required_size < minimal_size)) ||
(required_size > (device_size >> SECTOR_SHIFT)) ||
(!dynamic && (required_size != minimal_size)) ||
(old_ss > 0 && MISALIGNED(required_size, old_ss >> SECTOR_SHIFT)) ||
(new_ss > 0 && MISALIGNED(required_size, new_ss >> SECTOR_SHIFT))) {
log_err(cd, _("Illegal device size requested in reencryption parameters."));
goto err;
}
rparams.device_size = required_size;
}
r = reencrypt_load(cd, hdr, device_size, &rparams, &rh);
if (r < 0 || !rh)
goto err;
if (name && (r = reencrypt_context_set_names(rh, name)))
goto err;
/* Reassure device is not mounted and there's no dm mapping active */
if (!name && (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0)) {
log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
r = -EBUSY;
goto err;
}
device_release_excl(cd, crypt_data_device(cd));
/* FIXME: There's a race for dm device activation not managed by cryptsetup.
*
* 1) excl close
* 2) rogue dm device activation
* 3) one or more dm-crypt based wrapper activation
* 4) next excl open get's skipped due to 3) device from 2) remains undetected.
*/
r = reencrypt_init_storage_wrappers(cd, hdr, rh, *vks);
if (r)
goto err;
/* If one of wrappers is based on dmcrypt fallback it already blocked mount */
if (!name && crypt_storage_wrapper_get_type(rh->cw1) != DMCRYPT &&
crypt_storage_wrapper_get_type(rh->cw2) != DMCRYPT) {
if (device_open_excl(cd, crypt_data_device(cd), O_RDONLY) < 0) {
log_err(cd,_("Failed to open %s in exclusive mode (already mapped or mounted)."), device_path(crypt_data_device(cd)));
r = -EBUSY;
goto err;
}
}
rh->flags = flags;
MOVE_REF(rh->vks, *vks);
MOVE_REF(rh->reenc_lock, reencrypt_lock);
crypt_set_reenc_context(cd, rh);
return 0;
err:
crypt_reencrypt_unlock(cd, reencrypt_lock);
LUKS2_reenc_context_free(cd, rh);
return r;
}
/*
 * Run crash recovery for an interrupted LUKS2 reencryption, guarded by the
 * global reencryption lock. A no-op (returns 0) when metadata shows no crash.
 */
static int reencrypt_recovery_by_passphrase(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	int keyslot_old,
	int keyslot_new,
	const char *passphrase,
	size_t passphrase_size)
{
	crypt_reencrypt_info state;
	struct crypt_lock_handle *lock;
	int r;

	r = crypt_reencrypt_lock(cd, NULL, &lock);
	if (r) {
		if (r == -EBUSY)
			log_err(cd, _("Reencryption in-progress. Cannot perform recovery."));
		else
			log_err(cd, _("Failed to get reencryption lock."));
		return r;
	}

	/* Reload metadata under the lock before inspecting the state. */
	r = crypt_load(cd, CRYPT_LUKS2, NULL);
	if (r)
		goto out;

	state = LUKS2_reenc_status(hdr);
	if (state == CRYPT_REENCRYPT_INVALID) {
		r = -EINVAL;
		goto out;
	}

	if (state != CRYPT_REENCRYPT_CRASH) {
		log_dbg(cd, "No LUKS2 reencryption recovery needed.");
		r = 0;
		goto out;
	}

	r = LUKS2_reencrypt_locked_recovery_by_passphrase(cd, keyslot_old, keyslot_new,
			passphrase, passphrase_size, 0, NULL);
	if (r < 0)
		log_err(cd, _("LUKS2 reencryption recovery failed."));
out:
	crypt_reencrypt_unlock(cd, lock);
	return r;
}
/*
 * Common entry for initializing and/or resuming LUKS2 reencryption with a
 * plain passphrase.
 *
 * Behavior by params->flags:
 *  - CRYPT_REENCRYPT_RECOVERY: run crash recovery only and return.
 *  - CRYPT_REENCRYPT_INITIALIZE_ONLY: write initialization metadata, do not
 *    load the runtime context.
 *  - CRYPT_REENCRYPT_RESUME_ONLY: never initialize; only resume existing.
 *
 * Returns the "reencrypt" keyslot number on success, negative errno on error.
 */
static int reencrypt_init_by_passphrase(struct crypt_device *cd,
	const char *name,
	const char *passphrase,
	size_t passphrase_size,
	int keyslot_old,
	int keyslot_new,
	const char *cipher,
	const char *cipher_mode,
	const struct crypt_params_reencrypt *params)
{
	int r;
	crypt_reencrypt_info ri;
	struct volume_key *vks = NULL;
	uint32_t flags = params ? params->flags : 0;
	struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);

	/* short-circuit in recovery and finish immediately. */
	if (flags & CRYPT_REENCRYPT_RECOVERY)
		return reencrypt_recovery_by_passphrase(cd, hdr, keyslot_old, keyslot_new, passphrase, passphrase_size);

	/* Validate the requested cipher against the new keyslot's key size. */
	if (cipher) {
		r = crypt_keyslot_get_key_size(cd, keyslot_new);
		if (r < 0)
			return r;
		r = LUKS2_check_cipher(cd, r, cipher, cipher_mode);
		if (r < 0)
			return r;
	}

	r = LUKS2_device_write_lock(cd, hdr, crypt_metadata_device(cd));
	if (r)
		return r;

	ri = LUKS2_reenc_status(hdr);
	if (ri == CRYPT_REENCRYPT_INVALID) {
		device_write_unlock(cd, crypt_metadata_device(cd));
		return -EINVAL;
	}

	/* INITIALIZE_ONLY must not find an already-initialized reencryption. */
	if ((ri > CRYPT_REENCRYPT_NONE) && (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY)) {
		device_write_unlock(cd, crypt_metadata_device(cd));
		log_err(cd, _("LUKS2 reencryption already initialized in metadata."));
		return -EBUSY;
	}

	/*
	 * Initialize metadata when nothing exists yet (unless RESUME_ONLY).
	 * Note: ri == NONE with RESUME_ONLY leaves r == 0 and the subsequent
	 * load step reports "Device is not in reencryption.".
	 */
	if (ri == CRYPT_REENCRYPT_NONE && !(flags & CRYPT_REENCRYPT_RESUME_ONLY)) {
		r = reencrypt_init(cd, name, hdr, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params, &vks);
		if (r < 0)
			log_err(cd, _("Failed to initialize LUKS2 reencryption in metadata."));
	} else if (ri > CRYPT_REENCRYPT_NONE) {
		log_dbg(cd, "LUKS2 reencryption already initialized.");
		r = 0;
	}

	device_write_unlock(cd, crypt_metadata_device(cd));

	if (r < 0 || (flags & CRYPT_REENCRYPT_INITIALIZE_ONLY))
		goto out;

	r = reencrypt_load_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, &vks, params);
out:
	/* On failure drop keys from the kernel keyring; always free local list. */
	if (r < 0)
		crypt_drop_keyring_key(cd, vks);
	crypt_free_volume_key(vks);
	return r < 0 ? r : LUKS2_find_keyslot(hdr, "reencrypt");
}
/*
 * Public API: initialize/resume LUKS2 reencryption using a passphrase stored
 * in the kernel keyring under passphrase_description. The fetched passphrase
 * is wiped and freed before returning.
 */
int crypt_reencrypt_init_by_keyring(struct crypt_device *cd,
	const char *name,
	const char *passphrase_description,
	int keyslot_old,
	int keyslot_new,
	const char *cipher,
	const char *cipher_mode,
	const struct crypt_params_reencrypt *params)
{
	char *pass_buf;
	size_t pass_len;
	int r;

	if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT) || !passphrase_description)
		return -EINVAL;

	/* INITIALIZE_ONLY and RESUME_ONLY are mutually exclusive. */
	if (params && (params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) && (params->flags & CRYPT_REENCRYPT_RESUME_ONLY))
		return -EINVAL;

	r = keyring_get_passphrase(passphrase_description, &pass_buf, &pass_len);
	if (r < 0) {
		log_err(cd, _("Failed to read passphrase from keyring (error %d)."), r);
		return -EINVAL;
	}

	r = reencrypt_init_by_passphrase(cd, name, pass_buf, pass_len, keyslot_old, keyslot_new, cipher, cipher_mode, params);

	/* Never leave passphrase material in heap memory. */
	crypt_memzero(pass_buf, pass_len);
	free(pass_buf);

	return r;
}
/*
 * Public API: initialize/resume LUKS2 reencryption with an explicit
 * passphrase. Thin validation wrapper over reencrypt_init_by_passphrase().
 */
int crypt_reencrypt_init_by_passphrase(struct crypt_device *cd,
	const char *name,
	const char *passphrase,
	size_t passphrase_size,
	int keyslot_old,
	int keyslot_new,
	const char *cipher,
	const char *cipher_mode,
	const struct crypt_params_reencrypt *params)
{
	/* INITIALIZE_ONLY and RESUME_ONLY cannot be combined. */
	bool conflicting_flags = params &&
		(params->flags & CRYPT_REENCRYPT_INITIALIZE_ONLY) &&
		(params->flags & CRYPT_REENCRYPT_RESUME_ONLY);

	if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT) || !passphrase || conflicting_flags)
		return -EINVAL;

	return reencrypt_init_by_passphrase(cd, name, passphrase, passphrase_size, keyslot_old, keyslot_new, cipher, cipher_mode, params);
}
/*
 * Perform one reencryption step: reencrypt a single hotzone.
 *
 * Sequence: refresh keyslot protection params and segments (in memory),
 * commit segment assignment, (online) route I/O away via overlay/dm-error,
 * read hotzone ciphertext via the old wrapper, write resilience metadata
 * (commit point), decrypt and re-encrypt in place via the new wrapper, then
 * commit the completed-hotzone metadata and (online) resume the device.
 *
 * Return severity: REENC_ERR / REENC_ROLLBACK leave data recoverable;
 * REENC_FATAL means on-disk state may be damaged and needs crash recovery.
 */
static reenc_status_t reencrypt_step(struct crypt_device *cd,
	struct luks2_hdr *hdr,
	struct luks2_reenc_context *rh,
	uint64_t device_size,
	bool online)
{
	int r;

	/* update reencrypt keyslot protection parameters in memory only */
	r = reenc_keyslot_update(cd, rh);
	if (r < 0) {
		log_dbg(cd, "Keyslot update failed.");
		return REENC_ERR;
	}

	/* in memory only */
	r = reencrypt_make_segments(cd, hdr, rh, device_size);
	if (r)
		return REENC_ERR;

	r = reencrypt_assign_segments(cd, hdr, rh, 1, 0);
	if (r) {
		log_err(cd, _("Failed to set device segments for next reencryption hotzone."));
		return REENC_ERR;
	}

	if (online) {
		r = reencrypt_refresh_overlay_devices(cd, hdr, rh->overlay_name, rh->hotzone_name, rh->vks, rh->device_size, rh->flags);
		/* Teardown overlay devices with dm-error. None bio shall pass! */
		if (r != REENC_OK)
			return r;
	}

	log_dbg(cd, "Reencrypting chunk starting at offset: %" PRIu64 ", size :%" PRIu64 ".", rh->offset, rh->length);
	log_dbg(cd, "data_offset: %" PRIu64, crypt_get_data_offset(cd) << SECTOR_SHIFT);

	/*
	 * First chunk of forward encryption with a moved segment: rebuild the
	 * old-segment wrapper to read from the moved data location.
	 */
	if (!rh->offset && rh->mode == CRYPT_REENCRYPT_ENCRYPT && rh->data_shift &&
	    rh->jobj_segment_moved) {
		crypt_storage_wrapper_destroy(rh->cw1);
		log_dbg(cd, "Reinitializing old segment storage wrapper for moved segment.");
		r = crypt_storage_wrapper_init(cd, &rh->cw1, crypt_data_device(cd),
				LUKS2_reencrypt_get_data_offset_moved(hdr),
				crypt_get_iv_offset(cd),
				reencrypt_get_sector_size_old(hdr),
				reencrypt_segment_cipher_old(hdr),
				crypt_volume_key_by_id(rh->vks, rh->digest_old),
				rh->wflags1);
		if (r) {
			log_err(cd, _("Failed to initialize old segment storage wrapper."));
			return REENC_ROLLBACK;
		}
	}

	rh->read = crypt_storage_wrapper_read(rh->cw1, rh->offset, rh->reenc_buffer, rh->length);
	if (rh->read < 0) {
		/* severity normal */
		log_err(cd, _("Failed to read hotzone area starting at %" PRIu64 "."), rh->offset);
		return REENC_ROLLBACK;
	}

	/* metadata commit point */
	r = reencrypt_hotzone_protect_final(cd, hdr, rh, rh->reenc_buffer, rh->read);
	if (r < 0) {
		/* severity normal */
		log_err(cd, _("Failed to write reencryption resilience metadata."));
		return REENC_ROLLBACK;
	}

	r = crypt_storage_wrapper_decrypt(rh->cw1, rh->offset, rh->reenc_buffer, rh->read);
	if (r) {
		/* severity normal */
		log_err(cd, _("Decryption failed."));
		return REENC_ROLLBACK;
	}
	/* From here on, a failure can leave mixed old/new data: fatal. */
	if (rh->read != crypt_storage_wrapper_encrypt_write(rh->cw2, rh->offset, rh->reenc_buffer, rh->read)) {
		/* severity fatal */
		log_err(cd, _("Failed to write hotzone area starting at %" PRIu64 "."), rh->offset);
		return REENC_FATAL;
	}

	if (rh->rp.type != REENC_PROTECTION_NONE && crypt_storage_wrapper_datasync(rh->cw2)) {
		log_err(cd, _("Failed to sync data."));
		return REENC_FATAL;
	}

	/* metadata commit safe point */
	r = reencrypt_assign_segments(cd, hdr, rh, 0, rh->rp.type != REENC_PROTECTION_NONE);
	if (r) {
		/* severity fatal */
		log_err(cd, _("Failed to update metadata after current reencryption hotzone completed."));
		return REENC_FATAL;
	}

	if (online) {
		/* severity normal */
		log_dbg(cd, "Resuming device %s", rh->hotzone_name);
		r = dm_resume_device(cd, rh->hotzone_name, DM_RESUME_PRIVATE);
		if (r) {
			log_err(cd, _("Failed to resume device %s."), rh->hotzone_name);
			return REENC_ERR;
		}
	}

	return REENC_OK;
}
/*
 * Remove all temporary backup segments created for reencryption from the
 * metadata: unassign their digests and delete the segment JSON objects.
 *
 * Refactored: the original repeated the identical unassign+delete block three
 * times (once per flag); the flags now live in a table iterated by one loop.
 * Returns 0 on success, -EINVAL if a digest unassignment fails.
 */
static int reencrypt_erase_backup_segments(struct crypt_device *cd,
		struct luks2_hdr *hdr)
{
	/* All backup segment flags used during reencryption. */
	static const char *backup_flags[] = {
		"backup-previous",
		"backup-final",
		"backup-moved-segment"
	};
	size_t i;
	int segment;

	for (i = 0; i < sizeof(backup_flags) / sizeof(backup_flags[0]); i++) {
		segment = LUKS2_get_segment_id_by_flag(hdr, backup_flags[i]);
		if (segment < 0)
			continue; /* this backup segment does not exist */
		if (LUKS2_digest_segment_assign(cd, hdr, segment, CRYPT_ANY_DIGEST, 0, 0))
			return -EINVAL;
		json_object_object_del_by_uint(LUKS2_get_segments_jobj(hdr), segment);
	}

	return 0;
}
/*
 * Wipe (random overwrite) the on-disk data of the moved backup segment, if
 * one exists. Returns 0 when there is nothing to wipe or on success; passes
 * through crypt_wipe_device()'s error otherwise. hdr is unused but kept for
 * signature symmetry with the other teardown helpers.
 */
static int reencrypt_wipe_moved_segment(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reenc_context *rh)
{
	uint64_t start, len;

	if (!rh->jobj_segment_moved)
		return 0;

	start = json_segment_get_offset(rh->jobj_segment_moved, 0);
	len = json_segment_get_size(rh->jobj_segment_moved, 0);
	log_dbg(cd, "Wiping %" PRIu64 " bytes of backup segment data at offset %" PRIu64,
		len, start);

	return crypt_wipe_device(cd, crypt_data_device(cd), CRYPT_WIPE_RANDOM,
				 start, len, 1024 * 1024, NULL, NULL);
}
/*
 * Tear down after a successful (or cleanly interrupted) reencryption run.
 *
 * Flushes metadata when running without resilience protection, restores the
 * normal dm mapping in online mode, and — when the whole device is finished —
 * wipes the moved backup segment, destroys obsolete keyslots, erases backup
 * segments and clears the reencryption requirement flag.
 */
static int reencrypt_teardown_ok(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reenc_context *rh)
{
	int i, r;
	uint32_t dmt_flags;
	/* Whole device processed? (progress has reached device_size) */
	bool finished = !(rh->device_size > rh->progress);

	/* Without resilience protection the header is only written here. */
	if (rh->rp.type == REENC_PROTECTION_NONE &&
	    LUKS2_hdr_write(cd, hdr)) {
		log_err(cd, _("Failed to write LUKS2 metadata."));
		return -EINVAL;
	}

	if (rh->online) {
		/* Reload the top-level device table and resume it. */
		r = LUKS2_reload(cd, rh->device_name, rh->vks, rh->device_size, rh->flags);
		if (r)
			log_err(cd, _("Failed to reload device %s."), rh->device_name);
		if (!r) {
			r = dm_resume_device(cd, rh->device_name, DM_SUSPEND_SKIP_LOCKFS | DM_SUSPEND_NOFLUSH);
			if (r)
				log_err(cd, _("Failed to resume device %s."), rh->device_name);
		}
		/* Helper overlay/hotzone devices are no longer needed. */
		dm_remove_device(cd, rh->overlay_name, 0);
		dm_remove_device(cd, rh->hotzone_name, 0);

		/* Finished decryption: remove the mapping (deferred if supported). */
		if (!r && finished && rh->mode == CRYPT_REENCRYPT_DECRYPT &&
		    !dm_flags(cd, DM_LINEAR, &dmt_flags) && (dmt_flags & DM_DEFERRED_SUPPORTED))
			dm_remove_device(cd, rh->device_name, CRYPT_DEACTIVATE_DEFERRED);
	}

	if (finished) {
		if (reencrypt_wipe_moved_segment(cd, hdr, rh))
			log_err(cd, _("Failed to wipe backup segment data."));
		if (reencrypt_get_data_offset_new(hdr) && LUKS2_set_keyslots_size(cd, hdr, reencrypt_get_data_offset_new(hdr)))
			log_dbg(cd, "Failed to set new keyslots area size.");
		/* Destroy all keyslots still bound to the old volume key digest. */
		if (rh->digest_old >= 0 && rh->digest_new != rh->digest_old)
			for (i = 0; i < LUKS2_KEYSLOTS_MAX; i++)
				if (LUKS2_digest_by_keyslot(hdr, i) == rh->digest_old)
					crypt_keyslot_destroy(cd, i);
		crypt_keyslot_destroy(cd, rh->reenc_keyslot);
		if (reencrypt_erase_backup_segments(cd, hdr))
			log_dbg(cd, "Failed to erase backup segments");

		/* do we need atomic erase? */
		if (reencrypt_update_flag(cd, 0, true))
			log_err(cd, _("Failed to disable reencryption requirement flag."));
	}

	return 0;
}
/*
 * Report a fatal mid-chunk failure. For online reencryption with a suspended
 * hotzone device, replace it with a dm-error target so no further I/O can
 * reach the half-reencrypted area.
 */
static void reencrypt_teardown_fatal(struct crypt_device *cd, struct luks2_hdr *hdr, struct luks2_reenc_context *rh)
{
	log_err(cd, _("Fatal error while reencrypting chunk starting at %" PRIu64 ", %" PRIu64 " sectors long."),
		(rh->offset >> SECTOR_SHIFT) + crypt_get_data_offset(cd), rh->length >> SECTOR_SHIFT);

	if (!rh->online)
		return;

	log_err(cd, "Reencryption was run in online mode.");

	if (dm_status_suspended(cd, rh->hotzone_name) <= 0)
		return;

	log_dbg(cd, "Hotzone device %s suspended, replacing with dm-error.", rh->hotzone_name);
	if (dm_error_device(cd, rh->hotzone_name)) {
		log_err(cd, _("Failed to replace suspended device %s with dm-error target."), rh->hotzone_name);
		log_err(cd, _("Do not resume the device unless replaced with error target manually."));
	}
}
/*
 * Common teardown after a reencryption run. Dispatches on the step status:
 * REENC_OK reports final progress and runs the clean teardown; REENC_FATAL
 * additionally quarantines the hotzone; all non-OK statuses yield -EIO.
 * Always releases the context (which also frees the reencryption lock).
 */
static int reencrypt_teardown(struct crypt_device *cd, struct luks2_hdr *hdr,
		struct luks2_reenc_context *rh, reenc_status_t rs, bool interrupted,
		int (*progress)(uint64_t size, uint64_t offset, void *usrptr))
{
	int r;

	if (rs == REENC_OK) {
		if (progress && !interrupted)
			progress(rh->device_size, rh->progress, NULL);
		r = reencrypt_teardown_ok(cd, hdr, rh);
	} else {
		if (rs == REENC_FATAL)
			reencrypt_teardown_fatal(cd, hdr, rh);
		r = -EIO;
	}

	/* this frees reencryption lock */
	LUKS2_reenc_context_free(cd, rh);
	crypt_set_reenc_context(cd, NULL);

	return r;
}
/*
 * Public API: run (resume) the LUKS2 reencryption main loop.
 *
 * Requires a context previously loaded by crypt_reencrypt_init_by_*().
 * Iterates hotzone steps until the device is fully processed, an error
 * occurs, or the progress callback requests interruption (non-zero return).
 * Teardown is always performed and its result is returned.
 */
int crypt_reencrypt(struct crypt_device *cd,
	int (*progress)(uint64_t size, uint64_t offset, void *usrptr))
{
	int r;
	crypt_reencrypt_info ri;
	struct luks2_hdr *hdr;
	struct luks2_reenc_context *rh;
	reenc_status_t rs;
	bool quit = false;

	if (onlyLUKS2mask(cd, CRYPT_REQUIREMENT_ONLINE_REENCRYPT))
		return -EINVAL;

	hdr = crypt_get_hdr(cd, CRYPT_LUKS2);

	/* States above CLEAN (e.g. crash) must go through recovery first. */
	ri = LUKS2_reenc_status(hdr);
	if (ri > CRYPT_REENCRYPT_CLEAN) {
		log_err(cd, _("Cannot proceed with reencryption. Unexpected reencryption status."));
		return -EINVAL;
	}

	/* The context must exist and hold the reencryption lock (if locking on). */
	rh = crypt_get_reenc_context(cd);
	if (!rh || (!rh->reenc_lock && crypt_metadata_locking_enabled())) {
		log_err(cd, _("Missing or invalid reencrypt context."));
		return -EINVAL;
	}

	log_dbg(cd, "Resuming LUKS2 reencryption.");

	if (rh->online && reencrypt_init_device_stack(cd, rh)) {
		log_err(cd, _("Failed to initialize reencryption device stack."));
		return -EINVAL;
	}

	log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);

	rs = REENC_OK;

	/* One iteration == one hotzone reencrypted and committed. */
	while (!quit && (rh->device_size > rh->progress)) {
		rs = reencrypt_step(cd, hdr, rh, rh->device_size, rh->online);
		if (rs != REENC_OK)
			break;

		log_dbg(cd, "Progress %" PRIu64 ", device_size %" PRIu64, rh->progress, rh->device_size);
		/* Non-zero return from the callback requests clean interruption. */
		if (progress && progress(rh->device_size, rh->progress, NULL))
			quit = true;

		r = reencrypt_context_update(cd, rh);
		if (r) {
			log_err(cd, _("Failed to update reencryption context."));
			rs = REENC_ERR;
			break;
		}

		log_dbg(cd, "Next reencryption offset will be %" PRIu64 " sectors.", rh->offset);
		log_dbg(cd, "Next reencryption chunk size will be %" PRIu64 " sectors).", rh->length);
	}

	r = reencrypt_teardown(cd, hdr, rh, rs, quit, progress);
	return r;
}
/*
 * Recover a single interrupted hotzone: rebuild the context from metadata,
 * replay the protected segment, restore segment assignment and persist the
 * repaired state. Caller must hold the reencryption lock.
 */
static int reencrypt_recovery(struct crypt_device *cd,
		struct luks2_hdr *hdr,
		uint64_t device_size,
		struct volume_key *vks)
{
	struct luks2_reenc_context *rh = NULL;
	int r;

	r = reencrypt_load(cd, hdr, device_size, NULL, &rh);
	if (r < 0) {
		log_err(cd, _("Failed to load LUKS2 reencryption context."));
		return r;
	}

	r = reencrypt_recover_segment(cd, hdr, rh, vks);
	if (r < 0)
		goto out;

	r = reencrypt_assign_segments(cd, hdr, rh, 0, 0);
	if (r)
		goto out;

	r = reencrypt_context_update(cd, rh);
	if (r) {
		log_err(cd, _("Failed to update reencryption context."));
		goto out;
	}

	r = reencrypt_teardown_ok(cd, hdr, rh);
	if (!r)
		r = LUKS2_hdr_write(cd, hdr);
out:
	LUKS2_reenc_context_free(cd, rh);
	return r;
}
/*
 * use only for calculation of minimal data device size.
 * The real data offset is taken directly from segments!
 */
int LUKS2_reencrypt_data_offset(struct luks2_hdr *hdr, bool blockwise)
{
	uint64_t offset = LUKS2_get_data_offset(hdr);

	/* Forward reencryption in CLEAN state keeps a data-shift reserve in front. */
	if (LUKS2_reenc_status(hdr) == CRYPT_REENCRYPT_CLEAN &&
	    reencrypt_direction(hdr) == CRYPT_REENCRYPT_FORWARD)
		offset += reencrypt_data_shift(hdr) >> SECTOR_SHIFT;

	/* blockwise == true returns 512-byte sectors, otherwise bytes. */
	return blockwise ? offset : offset << SECTOR_SHIFT;
}
/* internal only */
/*
 * Validate the data device is large enough for the (re)encryption layout and
 * report its usable payload size.
 *
 * check_size/dev_size are in bytes; backward reencryption adds the data-shift
 * reserve to the required size. With activation set the device must also be
 * openable exclusively. On success stores (real size - data offset) into
 * *dev_size and returns 0.
 */
int luks2_check_device_size(struct crypt_device *cd, struct luks2_hdr *hdr, uint64_t check_size, uint64_t *dev_size, bool activation, bool dynamic)
{
	int r;
	uint64_t data_offset, real_size = 0;

	/* Backward direction needs extra room for the shifted data. */
	if (reencrypt_direction(hdr) == CRYPT_REENCRYPT_BACKWARD &&
	    (LUKS2_get_segment_by_flag(hdr, "backup-moved-segment") || dynamic))
		check_size += reencrypt_data_shift(hdr);

	r = device_check_access(cd, crypt_data_device(cd), activation ? DEV_EXCL : DEV_OK);
	if (r)
		return r;

	data_offset = LUKS2_reencrypt_data_offset(hdr, false);

	/* Device must at least cover the data offset itself. */
	r = device_check_size(cd, crypt_data_device(cd), data_offset, 1);
	if (r)
		return r;

	r = device_size(crypt_data_device(cd), &real_size);
	if (r)
		return r;

	log_dbg(cd, "Required minimal device size: %" PRIu64 " (%" PRIu64 " sectors)"
		", real device size: %" PRIu64 " (%" PRIu64 " sectors)\n"
		"calculated device size: %" PRIu64 " (%" PRIu64 " sectors)",
		check_size, check_size >> SECTOR_SHIFT, real_size, real_size >> SECTOR_SHIFT,
		real_size - data_offset, (real_size - data_offset) >> SECTOR_SHIFT);

	if (real_size < data_offset || (check_size && (real_size - data_offset) < check_size)) {
		log_err(cd, _("Device %s is too small."), device_path(crypt_data_device(cd)));
		return -EINVAL;
	}

	*dev_size = real_size - data_offset;

	return 0;
}
/* returns keyslot number on success (>= 0) or negative errno otherwise */
/*
 * Crash-recovery entry point; caller must hold the reencryption lock.
 *
 * Opens all keyslots with the passphrase, uploads keys to the kernel keyring
 * when in use, validates the device size and replays the interrupted hotzone.
 * On success the opened volume keys are optionally handed to the caller via
 * *vks; on failure keyring keys are dropped and the local list is freed.
 *
 * Fix vs. previous revision: when luks2_check_device_size() failed, r still
 * held the non-negative keyslot number from the keyslot-open step, so the
 * function reported success without running recovery. That path now sets
 * r = -EINVAL before jumping to cleanup.
 */
int LUKS2_reencrypt_locked_recovery_by_passphrase(struct crypt_device *cd,
	int keyslot_old,
	int keyslot_new,
	const char *passphrase,
	size_t passphrase_size,
	uint32_t flags,
	struct volume_key **vks)
{
	uint64_t minimal_size, device_size;
	int keyslot, r = -EINVAL;
	struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
	struct volume_key *vk = NULL, *_vks = NULL;

	log_dbg(cd, "Entering reencryption crash recovery.");

	if (LUKS2_get_data_size(hdr, &minimal_size, NULL))
		return r;

	r = LUKS2_keyslot_open_all_segments(cd, keyslot_old, keyslot_new,
			passphrase, passphrase_size, &_vks);
	if (r < 0)
		goto err;
	keyslot = r;

	/* Upload every unlocked key to the kernel keyring when enabled. */
	if (crypt_use_keyring_for_vk(cd))
		vk = _vks;

	while (vk) {
		r = LUKS2_volume_key_load_in_keyring_by_digest(cd, hdr, vk, crypt_volume_key_get_id(vk));
		if (r < 0)
			goto err;
		vk = vk->next;
	}

	if (luks2_check_device_size(cd, hdr, minimal_size, &device_size, true, false)) {
		r = -EINVAL; /* must not fall through with the keyslot number in r */
		goto err;
	}

	r = reencrypt_recovery(cd, hdr, device_size, _vks);

	if (!r && vks)
		MOVE_REF(*vks, _vks);
err:
	if (r < 0)
		crypt_drop_keyring_key(cd, _vks);
	crypt_free_volume_key(_vks);

	return r < 0 ? r : keyslot;
}
/*
 * Report the current reencryption state and, for an active reencryption,
 * fill caller-provided params with the persisted settings from metadata.
 */
crypt_reencrypt_info LUKS2_reencrypt_status(struct crypt_device *cd, struct crypt_params_reencrypt *params)
{
	struct luks2_hdr *hdr = crypt_get_hdr(cd, CRYPT_LUKS2);
	crypt_reencrypt_info info = LUKS2_reenc_status(hdr);

	/* Nothing to describe unless a valid reencryption is in progress. */
	if (info == CRYPT_REENCRYPT_NONE || info == CRYPT_REENCRYPT_INVALID || !params)
		return info;

	params->mode = reencrypt_mode(hdr);
	params->direction = reencrypt_direction(hdr);
	params->resilience = reencrypt_resilience_type(hdr);
	params->hash = reencrypt_resilience_hash(hdr);
	params->data_shift = reencrypt_data_shift(hdr) >> SECTOR_SHIFT;
	params->max_hotzone_size = 0;
	if (LUKS2_get_segment_id_by_flag(hdr, "backup-moved-segment") >= 0)
		params->flags |= CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT;

	return info;
}
| 29.562591 | 189 | 0.718668 |
d035b4bac8d23b4fa37f755a2305d69f1a9328c1 | 892 | asm | Assembly | oeis/253/A253431.asm | neoneye/loda-programs | 84790877f8e6c2e821b183d2e334d612045d29c0 | [
"Apache-2.0"
] | 11 | 2021-08-22T19:44:55.000Z | 2022-03-20T16:47:57.000Z | oeis/253/A253431.asm | neoneye/loda-programs | 84790877f8e6c2e821b183d2e334d612045d29c0 | [
"Apache-2.0"
] | 9 | 2021-08-29T13:15:54.000Z | 2022-03-09T19:52:31.000Z | oeis/253/A253431.asm | neoneye/loda-programs | 84790877f8e6c2e821b183d2e334d612045d29c0 | [
"Apache-2.0"
] | 3 | 2021-08-22T20:56:47.000Z | 2021-09-29T06:26:12.000Z | ; A253431: Number of (n+1) X (4+1) 0..1 arrays with every 2 X 2 subblock diagonal minus antidiagonal sum nondecreasing horizontally, vertically and ne-to-sw antidiagonally.
; Submitted by Jamie Morken(s1.)
; 109,102,120,156,228,372,660,1236,2388,4692,9300,18516,36948,73812,147540,294996,589908,1179732,2359380,4718676,9437268,18874452,37748820,75497556,150995028,301989972,603979860,1207959636,2415919188,4831838292,9663676500,19327352916,38654705748,77309411412,154618822740,309237645396,618475290708,1236950581332,2473901162580,4947802325076,9895604650068,19791209300052,39582418600020,79164837199956,158329674399828,316659348799572,633318697599060,1266637395198036,2533274790395988,5066549580791892
mov $3,$0
mov $4,$0
cmp $4,0
add $3,$4
mov $5,$0
div $5,$3
add $5,2
mov $2,$5
seq $3,89143 ; a(n) = 9*2^n - 6.
lpb $5
mov $5,4
add $5,$2
sub $3,$5
div $5,3
lpe
mov $0,$3
add $0,97
| 40.545455 | 496 | 0.780269 |
cf94bce84302c8b31c4ce8d6fa75bfb01fb8d2ab | 538 | css | CSS | src/main/resources/public/css/style.css | BochWafa/ci-spring-boot | f999a240d9fcc0da30d9e31d30554fe9d415c420 | [
"MIT"
] | 21 | 2016-06-24T09:43:30.000Z | 2020-11-08T05:54:35.000Z | src/main/resources/public/css/style.css | BochWafa/ci-spring-boot | f999a240d9fcc0da30d9e31d30554fe9d415c420 | [
"MIT"
] | null | null | null | src/main/resources/public/css/style.css | BochWafa/ci-spring-boot | f999a240d9fcc0da30d9e31d30554fe9d415c420 | [
"MIT"
] | 29 | 2016-06-22T12:01:31.000Z | 2021-09-18T15:58:30.000Z | body{
background-color:#EEEEEE;
}
.list{
background-color:#FFF;
padding:20px 20px 10px 20px;
margin-top:30px;
}
.list h1{
margin:0;
padding-bottom:20px;
text-align:center;
}
li.ui-state-default{
background:#fff;
border:none;
border-bottom:1px solid #ddd;
}
li.ui-state-default:last-child{
border-bottom:none;
}
.footer{
background-color:#F4FCE8;
margin:0 -20px -10px -20px;
padding: 10px 20px;
}
#btnDelete{
margin-top:10px;
}
.filed{
text-decoration: line-through;
} | 13.794872 | 34 | 0.639405 |
c67b1f16e2e34c0a25c2c03b250564557b06c60f | 662 | swift | Swift | MenuRxExample/Scenes/Preset/Extension/Array+Ext.swift | Vanyaslav/MenuRxExample | 6f6f5e7e3994c2b8aeb927eac3e2fde520b91523 | [
"Apache-2.0"
] | 1 | 2021-06-04T11:37:28.000Z | 2021-06-04T11:37:28.000Z | MenuRxExample/Scenes/Preset/Extension/Array+Ext.swift | Vanyaslav/MenuRxExample | 6f6f5e7e3994c2b8aeb927eac3e2fde520b91523 | [
"Apache-2.0"
] | null | null | null | MenuRxExample/Scenes/Preset/Extension/Array+Ext.swift | Vanyaslav/MenuRxExample | 6f6f5e7e3994c2b8aeb927eac3e2fde520b91523 | [
"Apache-2.0"
] | null | null | null | //
// Array+Ext.swift
// MenuRxExample
//
// Created by Tomas Baculák on 04/11/2020.
// Copyright © 2020 Tomas Baculák. All rights reserved.
//
import Foundation
extension Preset {
static let dataStringSeparator: String = ";"
}
extension Array where Element == String {
func parsePresets() -> [Preset_VM.PresetItem] {
return map{ $0.components(separatedBy: Preset.dataStringSeparator) }
.map{ Preset_VM.PresetItem(with: $0[0], date: $0[1]) }
}
func concatPresetData() -> String {
return compactMap{ $0 }
.filter{ !$0.isEmpty }
.joined(separator: Preset.dataStringSeparator)
}
}
| 24.518519 | 76 | 0.63142 |
85d433c5965c0a24d83639479b27b225ac7b87a3 | 568 | c | C | backend/gdemu_control.c | retrohead/openmenu | 565f472aef47e7116a0d8e77f7960c2dee0238b0 | [
"BSD-3-Clause"
] | 38 | 2021-07-04T19:03:43.000Z | 2022-03-23T03:31:59.000Z | backend/gdemu_control.c | retrohead/openmenu | 565f472aef47e7116a0d8e77f7960c2dee0238b0 | [
"BSD-3-Clause"
] | 31 | 2021-06-25T13:42:06.000Z | 2022-03-15T00:59:26.000Z | backend/gdemu_control.c | retrohead/openmenu | 565f472aef47e7116a0d8e77f7960c2dee0238b0 | [
"BSD-3-Clause"
] | 5 | 2021-07-16T06:03:37.000Z | 2022-03-23T14:05:09.000Z | #include <arch/arch.h>
#include <dc/sound/sound.h>
#include <kos.h>
#include <kos/thread.h>
#include "backend/gd_item.h"
#include "gdemu_sdk.h"
#include "gdmenu_loader.h"
#ifndef GDROM_FS
void gd_reset_handles(void) {
}
void run_game(const char *region, const char *product) {
(void)region;
(void)product;
void arch_menu(void) __attribute__((noreturn));
arch_menu();
__builtin_unreachable();
}
#endif
void dreamcast_launch_disc(gd_item *disc) {
gdemu_set_img_num((uint16_t)disc->slot_num);
thd_sleep(200);
run_game(disc->region, disc->product);
}
| 19.586207 | 56 | 0.727113 |
f0870a7d6b76e800799cff4ff4d49d050cac7d4e | 226 | js | JavaScript | index.ios.js | MarshalPaterson/ReactNativeBaseRouterFluxBoilerplate | 148502315db0bfaf749ec8364ecbce84a0adb264 | [
"MIT"
] | null | null | null | index.ios.js | MarshalPaterson/ReactNativeBaseRouterFluxBoilerplate | 148502315db0bfaf749ec8364ecbce84a0adb264 | [
"MIT"
] | null | null | null | index.ios.js | MarshalPaterson/ReactNativeBaseRouterFluxBoilerplate | 148502315db0bfaf749ec8364ecbce84a0adb264 | [
"MIT"
] | null | null | null | import { AppRegistry } from 'react-native';
import ReactNativeBaseRouterFluxBoilerplate from './src/index.js';
AppRegistry.registerComponent('ReactNativeBaseRouterFluxBoilerplate', () => ReactNativeBaseRouterFluxBoilerplate); | 56.5 | 114 | 0.831858 |
f058c8ff047b3d9464b8a867f927fcd03d622d8f | 31,894 | py | Python | concept_disc/pubmed_dump.py | nmonath/concept_discovery | 766905684e598159cc6fb58967ed411888b93ce5 | [
"MIT"
] | 3 | 2020-09-10T13:48:23.000Z | 2021-08-19T21:42:50.000Z | concept_disc/pubmed_dump.py | nmonath/concept_discovery | 766905684e598159cc6fb58967ed411888b93ce5 | [
"MIT"
] | null | null | null | concept_disc/pubmed_dump.py | nmonath/concept_discovery | 766905684e598159cc6fb58967ed411888b93ce5 | [
"MIT"
] | 3 | 2020-10-16T21:57:04.000Z | 2020-12-26T00:59:32.000Z | """
Parse PubMed Dump
Ref:
https://www.nlm.nih.gov/databases/download/pubmed_medline.html
https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html
https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html#medlinecitation
"""
from collections import defaultdict
from concurrent import futures
import glob
import gzip
import multiprocessing
import os
from pathlib import Path
import re
from threading import Thread
from typing import Dict, Generator, List, Optional, Sequence, Set, Union
# noinspection PyPep8Naming
import xml.etree.ElementTree as ET
from xml.sax.saxutils import escape
from .misc import PersistentObject
# -----------------------------------------------------------------------------
# Globals
# -----------------------------------------------------------------------------
# Root of the ConceptRecogn project checkout on the local machine.
BASE_DIR = os.path.expanduser('~/Home/Projects/ConceptRecogn')
# Location of the Ab3P abbreviation-detection tool and the command used to invoke it
# (AB3P_CMD is relative to AB3P_DIR — presumably run with cwd=AB3P_DIR; TODO confirm).
AB3P_DIR = os.path.join(BASE_DIR, 'Tools', 'Ab3P')
AB3P_CMD = './identify_abbr'
# Matches any run of whitespace (used for whitespace normalization).
SPACES_PATT = re.compile(r'\s+')
# NOTE(review): looks like an end-of-stream marker passed between producer/consumer
# threads or processes — confirm against the queue users later in this file.
SENTINEL = '_SENTINEL_'
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
class PubmedDocument:
    """An eagerly-parsed PubMed citation: PMID plus optional title/abstract.

    Either `title` or `abstract` (or both) may be None when the citation
    lacks that field.
    """

    def __init__(self, pmid: str, title: Optional[str] = None, abstract: Optional[str] = None,
                 is_english: bool = True):
        self.pmid = pmid
        self.title = title
        self.abstract = abstract
        # False when the original article was not in English
        # (PubMed wraps translated titles in square brackets).
        self.is_english = is_english
        return

    def get_text(self) -> Optional[str]:
        """Return title and abstract joined by a newline, or None if both are empty."""
        txt = "\n".join(s for s in (self.title, self.abstract) if s)
        if not txt:
            txt = None
        return txt

    def __str__(self):
        # Bug fix: the previous "{:s}" format specs raised TypeError whenever
        # title or abstract was None; plain "{}" renders None as "None".
        return "pmid = {}\ntitle = {}\nabstract = {}".format(self.pmid, self.title, self.abstract)

    @classmethod
    def from_xml(cls, pubmed_article: ET.Element):
        """Build a document from a `<PubmedArticle>` element of a PubMed dump file."""
        assert pubmed_article.tag == "PubmedArticle"
        pmid = pubmed_article.findtext("./MedlineCitation/PMID")
        is_english = True
        title = extract_subelem_text(pubmed_article.find("./MedlineCitation/Article/ArticleTitle"))
        if not title or title == "Not Available":
            # Some records use an all-caps tag name instead.
            title = extract_subelem_text(pubmed_article.find("./MedlineCitation/Article/ARTICLETITLE"))
        if title:
            title = title.strip()
            # "[...]" marks a translated (non-English) title.
            if title.startswith("[") and title.endswith("]"):
                title = title.strip("[]")
                is_english = False
            if title.endswith("(author's transl)"):
                title = title[:-len("(author's transl)")].strip()
            if title == "In Process Citation":
                title = None
        abstr = extract_subelem_text(pubmed_article.find("./MedlineCitation/Article/Abstract"))
        return cls(pmid, title, abstr, is_english)
# /
class LazyPubmedDocument:
    """A PubMed citation that keeps the raw `<PubmedArticle>` XML and parses
    the title/abstract lazily on first access.

    `source`, when given, names the dump file the article came from.
    """

    def __init__(self, pmid: str, article_xml: ET.Element, source: str = None):
        assert pmid is not None
        self.pmid = pmid.strip()
        self.article_xml = article_xml
        self.source = source
        # Lazily-populated cache; `_title_parsed` guards single parsing.
        self._title = None
        self._abstract = None
        self._is_english = None
        self._title_parsed = False
        return

    @property
    def title(self):
        self._parse_title_abstract()
        return self._title

    @property
    def abstract(self):
        self._parse_title_abstract()
        return self._abstract

    @property
    def is_english(self):
        self._parse_title_abstract()
        return self._is_english

    def get_text(self) -> Optional[str]:
        """Return title and abstract joined by a newline, or None if both are empty."""
        txt = "\n".join(s for s in (self.title, self.abstract) if s)
        if not txt:
            txt = None
        return txt

    def get_mesh_headings_xml(self) -> List[ET.Element]:
        return self.article_xml.findall("./MedlineCitation/MeshHeadingList")

    def get_supplemental_mesh_xml(self) -> List[ET.Element]:
        """
        This info includes Supplemental Records on: Protocols, Diseases, Organisms
        """
        return self.article_xml.findall("./MedlineCitation/SupplMeshList")

    def get_chemicals_xml(self) -> List[ET.Element]:
        return self.article_xml.findall("./MedlineCitation/ChemicalList")

    def get_keywords_xml(self) -> List[ET.Element]:
        return self.article_xml.findall("./MedlineCitation/KeywordList")

    def _parse_title_abstract(self):
        """Populate the title/abstract/is_english cache exactly once."""
        if self._title_parsed:
            return
        is_english = True
        title = extract_subelem_text(self.article_xml.find("./MedlineCitation/Article/ArticleTitle"))
        if not title or title == "Not Available":
            # Some records use an all-caps tag name instead.
            title = extract_subelem_text(self.article_xml.find("./MedlineCitation/Article/ARTICLETITLE"))
        if title:
            title = title.strip()
            # "[...]" marks a translated (non-English) title.
            if title.startswith("[") and title.endswith("]"):
                title = title.strip("[]")
                is_english = False
            if title.endswith("(author's transl)"):
                title = title[:-len("(author's transl)")].strip()
            if title == "In Process Citation":
                # NOTE(review): PubmedDocument.from_xml uses None here instead
                # of "" — confirm which convention downstream code expects.
                title = ""
        self._title = title
        self._is_english = is_english
        self._abstract = extract_subelem_text(self.article_xml.find("./MedlineCitation/Article/Abstract"))
        self._title_parsed = True
        return

    def to_xml(self) -> ET.Element:
        """
        Output format as parsed by `Article`
        """
        doc = ET.Element("Article", pmid=self.pmid)
        if self.source:
            doc.set("source", self.source)
        ET.SubElement(doc, "Title").text = self.title
        ET.SubElement(doc, "Abstract").text = self.abstract
        for children in [self.get_mesh_headings_xml(),
                         self.get_supplemental_mesh_xml(),
                         self.get_chemicals_xml(),
                         self.get_keywords_xml()]:
            if children:
                doc.extend(children)
        return doc

    def __str__(self):
        # Bug fix: "{:s}" raised TypeError when the parsed title or abstract
        # was None (e.g. abstract-less citations); "{}" renders None safely.
        return "pmid = {}\ntitle = {}\nabstract = {}".format(self.pmid, self.title, self.abstract)

    @classmethod
    def from_pubmed_xml(cls, pubmed_article: ET, source: str = None):
        """Wrap a `<PubmedArticle>` element without parsing its contents yet."""
        assert pubmed_article.tag == "PubmedArticle"
        pmid = pubmed_article.findtext("./MedlineCitation/PMID")
        return cls(pmid, pubmed_article, source=source)
# /
class PubmedDumpIndex(PersistentObject):
    """
    Index mapping PMID -> dump file name, over a directory of PubMed dump
    files. All indexed dump files are assumed to live in one base directory
    (dump_file_path = {base_dir}/{dump_file_name}).
    """

    def __init__(self):
        super().__init__()
        # Dir where all the dump files exist.
        # dump_file_path = {base_dir}/{dump_file_name}
        self.base_dir = None
        # dump_file_name(str) -> List[pmid(str)]
        self.dumpfile_index = dict()
        # pmid(str) -> dump_file_name(str); built lazily by _build_docid_index()
        self.docid_index = None
        return

    def get_dump_file(self, pmid: str) -> Optional[str]:
        """
        Returns absolute path (str) to file containing Doc with specified `pmid`,
        or None if not found.
        """
        if self.docid_index is None:
            self._build_docid_index()
        fname = self.docid_index.get(pmid)
        if fname is not None:
            return f"{self.base_dir}/{fname}"
        return

    def get_dump_files(self, pmids: Sequence[str]) -> Dict[str, str]:
        """Map each PMID to its dump-file path; value is None for unknown PMIDs."""
        if self.docid_index is None:
            self._build_docid_index()
        pmid_file_dict = {pmid_: self.get_dump_file(pmid_) for pmid_ in pmids}
        return pmid_file_dict

    def get_doc(self, pmid: str) -> Optional[LazyPubmedDocument]:
        """Scan the owning dump file for `pmid`; returns None if not indexed/found."""
        dump_file = self.get_dump_file(pmid)
        if dump_file is None:
            return
        for doc in lazy_parse_dump_file(dump_file):
            if doc.pmid == pmid:
                return doc
        return

    def get_docs(self, pmids: Sequence[str]) -> Generator[LazyPubmedDocument, None, None]:
        """
        Generator yields LazyPubmedDocument for docs found for PMID in pmids.
        Order may be different. Only found docs are returned.
        """
        pmid_file_dict = self.get_dump_files(pmids)
        # Group requested PMIDs by dump file so each file is scanned only once.
        file_pmids = defaultdict(set)
        for pmid, fpath in pmid_file_dict.items():
            # BUG FIX: PMIDs missing from the index map to fpath=None; they were
            # previously grouped under the key None, and the scan below then
            # crashed in lazy_parse_dump_file(None). Skip them instead.
            if fpath is None:
                continue
            file_pmids[fpath].add(pmid)
        for dump_fpath, pmid_set in file_pmids.items():
            n_pmids = len(pmid_set)
            for doc in lazy_parse_dump_file(dump_fpath):
                if doc.pmid in pmid_set:
                    yield doc
                    # Stop scanning this file once all requested docs were seen.
                    n_pmids -= 1
                    if n_pmids == 0:
                        break
        return

    def _build_docid_index(self):
        """Invert dumpfile_index into the pmid -> dump_file_name lookup."""
        self.docid_index = dict()
        for fpath, pmids in self.dumpfile_index.items():
            for pmid_ in pmids:
                self.docid_index[pmid_] = fpath
        return

    @staticmethod
    def build_save_index(pubmed_dump_files_or_patt: Union[str, List[str]],
                         output_file: str,
                         nprocs: int):
        """
        Run `nprocs` processes to build an index into `pubmed_dump_files`,
        and save it to `output_file`.
        :param pubmed_dump_files_or_patt: Glob pattern or list of paths containing Pubmed-Dump
            Assumes that all the files are in the same directory!
        :param output_file: Where each index will be saved, as a Pickle file (*.pkl)
        :param nprocs:
        """
        print("PubmedDumpIndex.build_save_index:")
        print("    pubmed_dump_files_or_patt =", pubmed_dump_files_or_patt)
        print("    output_file =", output_file)
        print("    nprocs =", nprocs)
        output_file = os.path.expanduser(output_file)
        output_dir = os.path.dirname(output_file)
        output_dir = os.path.expanduser(output_dir)
        if not Path(output_dir).exists():
            print("Creating dir:", output_dir)
            Path(output_dir).mkdir()
        print('Starting {} processes ...'.format(nprocs), flush=True)
        # A Manager queue can be passed to ProcessPoolExecutor workers
        # (a plain multiprocessing.Queue cannot).
        m = multiprocessing.Manager()
        res_queue = m.Queue()
        # Using a process pool to start the sub-processes. Allows gathering return values.
        with futures.ProcessPoolExecutor(max_workers=nprocs) as executor:
            results = executor.map(PubmedDumpIndex.build_index_procr,
                                   [pubmed_dump_files_or_patt] * nprocs,
                                   [res_queue] * nprocs,
                                   range(nprocs), [nprocs] * nprocs)
        pmindex = PubmedDumpIndex()
        # Put Queue consumer in a Thread
        t = Thread(target=pmindex._gather_file_docids, args=(nprocs, res_queue), daemon=False)
        t.start()
        # Join the consumer Thread until it is done
        t.join()
        # Get return values ... possible if processes started using ProcessPoolExecutor
        tot_docs_found = 0
        for (proc_nbr, docs_found) in results:
            print('... Sub-process {:d} found {:,d} docs'.format(proc_nbr, docs_found), flush=True)
            tot_docs_found += docs_found
        print('Total nbr docs written = {:,d}'.format(tot_docs_found))
        pmindex.save(output_file)
        return

    @staticmethod
    def build_index_procr(pubmed_dump_files_or_patt: Union[str, List[str]],
                          res_queue,
                          proc_nbr: int, nprocs: int):
        """
        Worker for build_save_index: indexes every `nprocs`-th dump file,
        reporting (file, pmids) tuples on `res_queue`, SENTINEL when done.
        :return: (proc_nbr, total nbr of docs found by this worker)
        """
        assert 0 <= proc_nbr < nprocs
        if isinstance(pubmed_dump_files_or_patt, List):
            pubmed_dump_files = [os.path.expanduser(f) for f in pubmed_dump_files_or_patt]
        else:
            pubmed_dump_files = glob.glob(os.path.expanduser(pubmed_dump_files_or_patt))
        # Ensure each process sees same ordering
        pubmed_dump_files = sorted(pubmed_dump_files)
        tot_docs_found = 0
        # Process every `nprocs`-th file starting at index `proc_nbr`
        for fi in range(proc_nbr, len(pubmed_dump_files), nprocs):
            file_pmids = []
            for doc in lazy_parse_dump_file(pubmed_dump_files[fi]):
                file_pmids.append(doc.pmid)
            res_queue.put(('add', proc_nbr, pubmed_dump_files[fi], file_pmids))
            tot_docs_found += len(file_pmids)
        res_queue.put((SENTINEL, proc_nbr))
        return proc_nbr, tot_docs_found

    def _gather_file_docids(self, nprocs: int, res_queue):
        """Queue consumer: collect (file, pmids) results until all workers signal SENTINEL."""
        n_dump_files_processed = 0
        while nprocs > 0:
            qry_data = res_queue.get()
            if qry_data[0] == SENTINEL:
                nprocs -= 1
                print('... Sub-process {} end recd.'.format(qry_data[1]), flush=True)
            else:
                n_dump_files_processed += 1
                _, proc_nbr, pubmed_dump_file, file_pmids = qry_data
                base_dir, file_name = os.path.split(pubmed_dump_file)
                # All dump files are assumed to share one base dir; keep the first seen.
                if self.base_dir is None:
                    self.base_dir = base_dir
                self.dumpfile_index[file_name] = file_pmids
        print("Nbr dump files processed = {:,d}".format(n_dump_files_processed), flush=True)
        return
# /
# -----------------------------------------------------------------------------
# Article - from PubMed or MeSH-Dump
# -----------------------------------------------------------------------------
class MeshHeading:
    """A single MeSH descriptor: UID, preferred name, and major-topic flag."""

    def __init__(self, uid: str, name: str, is_major: bool):
        self.uid = uid
        self.name = name
        self.is_major = is_major

    def __str__(self):
        # Major topics are marked with a trailing " *".
        major_mark = " *" if self.is_major else ""
        return "{:s}: {:s}{:s}".format(self.uid, self.name, major_mark)
# /
class SupplMeshName:
    """A Supplementary Concept Record (SCR): UID, name, and its SCR type."""

    def __init__(self, uid: str, name: str, suppl_type: str):
        self.uid = uid
        self.name = name
        self.suppl_type = suppl_type

    def __str__(self):
        # e.g. "C000656484: remdesivir [Chemical]"
        return "{:s}: {:s} [{:s}]".format(self.uid, self.name, self.suppl_type)
# /
class Qualifier(MeshHeading):
    """A MeSH qualifier (subheading); carries the same fields as a descriptor."""

    def __init__(self, uid: str, name: str, is_major: bool):
        super().__init__(uid, name, is_major)
# /
class MainHeading(MeshHeading):
    """A MeSH descriptor that may carry qualifiers (subheadings)."""

    def __init__(self, uid: str, name: str, is_major: bool):
        super().__init__(uid, name, is_major)
        # Becomes True once any attached Qualifier is marked major.
        self.is_qualified_major: bool = False
        self.qualifiers: Set[Qualifier] = set()

    def add_qualifier(self, qlfr: Qualifier):
        """Attach a qualifier; a major qualifier marks this heading qualified-major."""
        self.qualifiers.add(qlfr)
        if qlfr.is_major:
            self.is_qualified_major = True

    def __str__(self):
        text = super().__str__()
        if self.qualifiers:
            text += " / " + ", ".join(str(q) for q in self.qualifiers)
        return text
# /
class Keyword:
    """An author/curator keyword with its major-topic flag."""

    def __init__(self, name: str, is_major: bool):
        self.name = name
        self.is_major = is_major

    def __str__(self):
        # Major keywords are marked with a trailing " *".
        suffix = " *" if self.is_major else ""
        return "{:s}{:s}".format(self.name, suffix)
# /
class Article:
    """
    An article (title/abstract plus MeSH annotations) parsed from either the
    PubMed API XML format (<PubmedArticle>) or this project's compact
    <Article> format.
    """

    def __init__(self, pmid: str, title: str, abstract: Optional[str]):
        """
        :param pmid: PubMed ID.
        :param title: Raw title; bracketed titles mark non-English articles.
        :param abstract: Abstract text, or None.
        """
        self.pmid = pmid
        self.abstract = abstract
        self.is_english = True
        if title:
            title = title.strip()
            # A bracketed title marks a translated (non-English) article.
            if title.startswith("[") and title.endswith("]"):
                title = title.strip("[]")
                self.is_english = False
            if title.endswith("(author's transl)"):
                title = title[:-len("(author's transl)")].strip()
            # Placeholder used by PubMed for not-yet-processed citations.
            if title == "In Process Citation":
                title = None
        self.title = title or ""
        self.main_headings: List[MainHeading] = []
        self.suppl_concept_records: List[SupplMeshName] = []
        self.keywords: List[Keyword] = []
        return

    def to_xml(self, pubmed_format: bool = False) -> ET.Element:
        """
        Get this article as an XML element.
        :param pubmed_format: Use XML format as returned by PubMed API ... PubmedArticle/MedlineCitation
        """
        def format_title():
            # Non-English titles are re-bracketed on output.
            return escape(self.title if self.is_english else "[" + self.title + "]")

        def is_yn(flag: bool):
            return "Y" if flag else "N"

        if pubmed_format:
            root = ET.Element("PubmedArticle")
            medline = ET.SubElement(root, "MedlineCitation")
            ET.SubElement(medline, "PMID").text = self.pmid
            article = ET.SubElement(medline, "Article")
            ET.SubElement(article, "ArticleTitle").text = format_title()
            if self.abstract:
                ET.SubElement(article, "Abstract").text = escape(self.abstract)
            axml = medline
        else:
            root = ET.Element("Article", pmid=self.pmid)
            ET.SubElement(root, "Title").text = format_title()
            if self.abstract:
                ET.SubElement(root, "Abstract").text = escape(self.abstract)
            axml = root
        if self.main_headings:
            mhlist = ET.SubElement(axml, "MeshHeadingList")
            for mhdg in self.main_headings:
                mh_xml = ET.SubElement(mhlist, "MeshHeading")
                mh_descr = ET.SubElement(mh_xml, "DescriptorName", UI=mhdg.uid, MajorTopicYN=is_yn(mhdg.is_major))
                mh_descr.text = escape(mhdg.name)
                for qlfr in mhdg.qualifiers:
                    # BUG FIX: QualifierName elements previously copied the
                    # descriptor's UI / MajorTopicYN (mhdg.*) instead of the
                    # qualifier's own attributes.
                    q_xml = ET.SubElement(mh_xml, "QualifierName", UI=qlfr.uid, MajorTopicYN=is_yn(qlfr.is_major))
                    q_xml.text = escape(qlfr.name)
        if self.suppl_concept_records:
            scr_list = ET.SubElement(axml, "SupplMeshList")
            for scr in self.suppl_concept_records:
                scr_xml = ET.SubElement(scr_list, "SupplMeshName", UI=scr.uid, Type=escape(scr.suppl_type))
                scr_xml.text = escape(scr.name)
        if self.keywords:
            kwd_list = ET.SubElement(axml, "KeywordList")
            for kwd in self.keywords:
                kwd_xml = ET.SubElement(kwd_list, "Keyword", MajorTopicYN=is_yn(kwd.is_major))
                kwd_xml.text = escape(kwd.name)
        return root

    def get_major_headings(self):
        """Headings that are major topics themselves or via a major qualifier."""
        return [hdg for hdg in self.main_headings if hdg.is_major or hdg.is_qualified_major]

    @staticmethod
    def from_xml_file(article_xml_file: str):
        """Parse an XML file (either supported format) into an Article."""
        tree = ET.parse(article_xml_file)
        return Article.from_xml_root(tree.getroot())

    # noinspection PyTypeChecker
    @staticmethod
    def from_xml_root(root: ET.Element):
        """
        Build an Article from an <Article> or <PubmedArticle> root element.
        :raises NotImplementedError: for any other root tag.
        """
        if root.tag == "Article":
            pmid = root.get('pmid')
            title = extract_subelem_text(root.find("./Title"))
            abstr = extract_subelem_text(root.find("./Abstract"))
        elif root.tag == "PubmedArticle":
            # All the tags of interest are under './MedlineCitation'
            root = root.find("./MedlineCitation")
            pmid = root.findtext("./PMID")
            title = extract_subelem_text(root.find("./Article/ArticleTitle"))
            if not title or title == "Not Available":
                title = extract_subelem_text(root.find("./Article/ARTICLETITLE"))
            abstr = extract_subelem_text(root.find("./Article/Abstract"))
        else:
            raise NotImplementedError(f"Cannot parse root.tag = {root.tag}. Should be one of: Article, PubmedArticle")
        article = Article(pmid, title, abstr)
        for mh_elem in root.findall("./MeshHeadingList/MeshHeading"):
            d_elem = mh_elem.find("./DescriptorName")
            main_hdg = MainHeading(d_elem.get("UI"), d_elem.text, d_elem.get("MajorTopicYN", "N") == "Y")
            article.main_headings.append(main_hdg)
            for q_elem in mh_elem.findall("./QualifierName"):
                main_hdg.add_qualifier(Qualifier(q_elem.get("UI"), q_elem.text, q_elem.get("MajorTopicYN", "N") == "Y"))
        for sm_elem in root.findall("./SupplMeshList/SupplMeshName"):
            scr = SupplMeshName(sm_elem.get("UI"), sm_elem.text, sm_elem.get("Type"))
            article.suppl_concept_records.append(scr)
        for kw_elem in root.findall("./KeywordList/Keyword"):
            kwd = Keyword(kw_elem.text, kw_elem.get("MajorTopicYN", "N") == "Y")
            article.keywords.append(kwd)
        return article
# /
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
def extract_subelem_text(xelem):
    """
    Extracts and combines text from sub-elements of `xelem`.
    :param xml.etree.ElementTree.Element xelem: xml.etree.ElementTree.Element.
    :return: str
    Special Cases
    -------------
    <title>GeneReviews<sup>®</sup></title> => 'GeneReviews ®'
    R<sub>0</sub> => R0
    <i>text</i> => text
    <b>text</b> => text
    <u>text</u> => text
    will be extracted as 'GeneReviews ®'.
    This is not strictly correct, but when tokenizing, will generate separate token for 'GeneReviews',
    which is desirable.
    """
    txt = None
    if xelem is not None:
        txt = ''
        # Element.iter() yields `xelem` itself first, then every descendant
        # in document order; text is accumulated in that order.
        for subelem in xelem.iter():
            if subelem.tag in ('abstract', 'title', 'p', 'sup', 'list-item'):
                # Block-like elements (and <sup>): make sure their text is
                # separated from the preceding text by a single space.
                if txt and not txt.endswith(' '):
                    txt += ' '
            elif subelem.tag == 'AbstractText':
                # Each structured-abstract section starts on a new line,
                # prefixed by its Label (e.g. "METHODS:") unless unlabelled.
                if txt and not txt.endswith('\n'):
                    txt += '\n'
                label = subelem.get("Label")
                if label and label.upper() != "UNLABELLED":
                    txt += label + ":\n"
            elif subelem.tag == "CopyrightInformation":
                # Skip copyright blurbs entirely. NOTE(review): `continue`
                # also drops this element's tail text — confirm intended.
                continue
            if subelem.text:
                txt += subelem.text
            if subelem is not xelem and subelem.tag == 'title' and not txt.endswith(('. ', ': ')):
                # A nested section title acts as a heading; terminate it with ": ".
                txt += ': '
            if subelem.tail:
                # Remove "\n" from subelem.tail
                txt += re.sub(r"\s+", " ", subelem.tail)
    if not txt:
        txt = None
    return clean_text(txt)
def clean_text(txt):
    """Normalize whitespace and punctuation spacing; None passes through unchanged."""
    if txt is None:
        return txt
    # Collapse each run of non-newline whitespace to a single space.
    txt = re.sub(r'((?!\n)\s)+', ' ', txt.strip())
    # Drop spaces adjacent to newlines, then collapse newline runs.
    txt = re.sub(r' ?\n ?', '\n', txt)
    txt = re.sub(r'\n+', '\n', txt)
    # Remove the space before [,:.] when the punct is also followed by a space.
    return re.sub(r' ([,:.]) ', r'\1 ', txt)
def parse_dump_file(pubmed_dump_file: str) -> List[PubmedDocument]:
    """Eagerly parse every <PubmedArticle> in a (possibly gzipped) dump file."""
    gzipped = pubmed_dump_file.endswith(".gz")
    opener = gzip.open if gzipped else open
    with opener(pubmed_dump_file) as fh:
        contents = fh.read()
    if gzipped:
        # gzip.open defaults to binary mode; decode to text for fromstring().
        # noinspection PyUnresolvedReferences
        contents = contents.decode("UTF-8")
    root = ET.fromstring(contents)
    # Elements "PubmedBookArticle" are deliberately ignored.
    return [PubmedDocument.from_xml(elem) for elem in root.iterfind("./PubmedArticle")]
def lazy_parse_dump_file(pubmed_dump_file: str):
    """
    Generator for LazyPubmedDocument
    :param pubmed_dump_file: Path to a dump file; ".gz" files are decompressed.
    """
    gzipped = pubmed_dump_file.endswith(".gz")
    opener = gzip.open if gzipped else open
    with opener(pubmed_dump_file) as fh:
        contents = fh.read()
    if gzipped:
        # gzip.open defaults to binary mode; decode to text for fromstring().
        # noinspection PyUnresolvedReferences
        contents = contents.decode("UTF-8")
    root = ET.fromstring(contents)
    # Elements "PubmedBookArticle" are deliberately ignored.
    for doc_root in root.iterfind("./PubmedArticle"):
        yield LazyPubmedDocument.from_pubmed_xml(doc_root, source=pubmed_dump_file)
    return
def extract_from_pubmed_dump(pubmed_dump_file: str,
                             output_dir: str,
                             pmids_file: str = None,
                             max_docs: int = 0,
                             verbose=False):
    """
    Extracts Doc from PubMed dump, and writes it to `output_dir`.
    :param pubmed_dump_file: Path to one dump file (optionally gzipped).
    :param output_dir: Each doc is written as "{output_dir}/{pmid}.xml".
    :param pmids_file: Optional file with one PMID per line; restricts extraction.
    :param max_docs: Stop after this many docs have been written (0 = no limit).
    :param verbose: Print progress.
    :return: Number of docs written.
    """
    wanted_pmids = None
    if pmids_file is not None:
        with open(os.path.expanduser(pmids_file)) as fh:
            wanted_pmids = {line.strip() for line in fh}
    output_dir = os.path.expanduser(output_dir)
    if not Path(output_dir).exists():
        print("Creating dir:", output_dir)
        Path(output_dir).mkdir()
    if verbose:
        print("Extracting from pubmed dump:", pubmed_dump_file, flush=True)
    n_docs = 0
    for doc in lazy_parse_dump_file(pubmed_dump_file):
        if wanted_pmids and doc.pmid not in wanted_pmids:
            continue
        doc_path = f"{output_dir}/{doc.pmid}.xml"
        ET.ElementTree(doc.to_xml()).write(doc_path, encoding="unicode", xml_declaration=True)
        if verbose:
            print("    ", doc.pmid, flush=True)
        n_docs += 1
        if 0 < max_docs <= n_docs:
            break
    return n_docs
def extract_proc_one(pubmed_dump_files_or_patt: Union[str, List[str]],
                     output_dir: str,
                     pmids_file: str,
                     proc_nbr: int,
                     nprocs: int):
    """
    Called from `extract_from_pubmed_dump_mp`, does the tasks for one process (`proc_nbr`) out of `nprocs` processes.
    :param pubmed_dump_files_or_patt: Glob pattern or list of dump-file paths.
    :param output_dir: Where each doc is written.
    :param pmids_file: File with one PMID per line.
    :param proc_nbr: in range [0, nprocs - 1]
    :param nprocs: >= 1
    :return: proc_nbr, Nbr docs written
    """
    assert 0 <= proc_nbr < nprocs
    if isinstance(pubmed_dump_files_or_patt, List):
        dump_files = [os.path.expanduser(p) for p in pubmed_dump_files_or_patt]
    else:
        print(f"extract_proc_one[{proc_nbr}]: pubmed_dump_files_or_patt =", pubmed_dump_files_or_patt,
              flush=True)
        dump_files = glob.glob(os.path.expanduser(pubmed_dump_files_or_patt))
        print("extract_proc_one[{}]: nbr dump files = {:,d}".format(proc_nbr, len(dump_files)), flush=True)
    # Ensure each process sees same ordering
    dump_files = sorted(dump_files)
    n_written = 0
    # Round-robin partition: handle files proc_nbr, proc_nbr + nprocs, ...
    for idx in range(proc_nbr, len(dump_files), nprocs):
        n_written += extract_from_pubmed_dump(dump_files[idx], output_dir, pmids_file, verbose=False)
    return proc_nbr, n_written
def extract_from_pubmed_dump_mp(pubmed_dump_files_or_patt: Union[str, List[str]],
                                output_dir: str,
                                pmids_file: str,
                                nprocs: int):
    """
    Run `nprocs` processes to extract docs of specified PMID.
    :param pubmed_dump_files_or_patt: Glob pattern or list of paths containing Pubmed-Dump
    :param output_dir: Where each doc will be written as a file: "{output_dir}/{pmid}.xml"
    :param pmids_file: One PMID per line
    :param nprocs:
    """
    print("extract_from_pubmed_dump_mp:")
    print("    pubmed_dump_files_or_patt =", pubmed_dump_files_or_patt)
    print("    output_dir =", output_dir)
    print("    pmids_file =", pmids_file)
    output_dir = os.path.expanduser(output_dir)
    if not Path(output_dir).exists():
        print("Creating dir:", output_dir)
        Path(output_dir).mkdir()
    print('Starting {} processes ...'.format(nprocs), flush=True)
    # ProcessPoolExecutor lets us gather each worker's return value
    # (proc_nbr, docs_found) directly from executor.map.
    with futures.ProcessPoolExecutor(max_workers=nprocs) as executor:
        results = executor.map(extract_proc_one,
                               [pubmed_dump_files_or_patt] * nprocs,
                               [output_dir] * nprocs,
                               [pmids_file] * nprocs,
                               range(nprocs), [nprocs] * nprocs)
    total_written = 0
    for proc_nbr, docs_found in results:
        print('... Subprocess {:d} found {:,d} docs'.format(proc_nbr, docs_found))
        total_written += docs_found
    print('Total nbr docs written = {:,d}'.format(total_written))
    return
def build_index(pubmed_dump_files_or_patt: Union[str, List[str]],
                output_file: str,
                nprocs: int):
    """Build a PubmedDumpIndex over the dump files and pickle it to `output_file`.

    Thin wrapper around PubmedDumpIndex.build_save_index; see that method for
    parameter details.
    """
    # Import class here so that load from pickle does not report errors
    # (the pickled class then resolves under its canonical module path).
    # noinspection PyUnresolvedReferences
    from cr.pubmed.pubmed_dump import PubmedDumpIndex
    PubmedDumpIndex.build_save_index(pubmed_dump_files_or_patt, output_file, nprocs)
    return
# ======================================================================================================
# Main
# ======================================================================================================
# Invoke as: python -m pubmed_dump CMD ...
# Invoke as: python -m pubmed_dump CMD ...
if __name__ == '__main__':
    import argparse
    from datetime import datetime
    from .misc import print_cmd

    # Top-level parser with one required sub-command per operation.
    argparser = argparse.ArgumentParser(
        description='PubMed Dump Parser.',
    )
    subparsers = argparser.add_subparsers(dest='subcmd',
                                          title='Available commands',
                                          )
    # Make the sub-commands required
    subparsers.required = True

    # ... extract [-n NBR_PROCS] DUMP_PATH_PATTERN PMIDS_FILE OUTPUT_DIR
    extract_parser = subparsers.add_parser('extract', help="Extract articles for specific PMIDs.")
    extract_parser.add_argument('-n', '--nbr_procs', type=int, default=4,
                                help="Nbr of sub-processes.")
    extract_parser.add_argument('dump_path_pattern', type=str,
                                help="Pattern for path to PubMed Dump files")
    extract_parser.add_argument('pmids_file', type=str,
                                help="Path to file containing PMIDs")
    extract_parser.add_argument('output_dir', type=str,
                                help="Output dir")

    # ... build_index [-n NBR_PROCS] DUMP_PATH_PATTERN OUTPUT_FILE
    index_parser = subparsers.add_parser('build_index',
                                         help="Build and save PubmedDumpIndex.",
                                         description=("e.g.: " +
                                                      "python -m pubmed_dump build_index -n 10 " +
                                                      "'../../PubMed/Data/D20191215/*.xml.gz' " +
                                                      "../../PubMed/Data/D20191215/pubmed_dump_index.pkl"))
    index_parser.add_argument('-n', '--nbr_procs', type=int, default=4,
                              help="Nbr of sub-processes.")
    index_parser.add_argument('dump_path_pattern', type=str,
                              help="Pattern for path to PubMed Dump files")
    index_parser.add_argument('output_file', type=str,
                              help="Path to where PubmedDumpIndex will be written as a Pickle file")

    cli_args = argparser.parse_args()

    # .................................................................................................
    start_time = datetime.now()
    print()
    print_cmd()

    if cli_args.subcmd == 'extract':
        extract_from_pubmed_dump_mp(cli_args.dump_path_pattern, cli_args.output_dir,
                                    cli_args.pmids_file, cli_args.nbr_procs)
    elif cli_args.subcmd == 'build_index':
        build_index(cli_args.dump_path_pattern, cli_args.output_file, cli_args.nbr_procs)
    else:
        raise NotImplementedError(f"Command not implemented: {cli_args.subcmd}")
    # /
    print('\nTotal Run time =', datetime.now() - start_time)
| 34.331539 | 120 | 0.58199 |
5ff51207e107d743fa9ad285a8f25bbc3fba1ac2 | 787 | h | C | pariapse-networking/ManagedObject/RemoteEntityDelegate.h | JeremyVoisin/PeriapseNetworking | 482ae487ea1d652fdd82ece6b372caad537af889 | [
"MIT"
] | null | null | null | pariapse-networking/ManagedObject/RemoteEntityDelegate.h | JeremyVoisin/PeriapseNetworking | 482ae487ea1d652fdd82ece6b372caad537af889 | [
"MIT"
] | null | null | null | pariapse-networking/ManagedObject/RemoteEntityDelegate.h | JeremyVoisin/PeriapseNetworking | 482ae487ea1d652fdd82ece6b372caad537af889 | [
"MIT"
] | null | null | null | //
// RemoteEntityDelegate.h
//
// Created by Jérémy Voisin on 25/03/2016.
// Copyright © 2016 jeyz. All rights reserved.
//
#import <Foundation/Foundation.h>
@class RemoteEntity;
@protocol RemoteEntityDelegate <NSObject>
@optional
- (void)remoteEntitiesFinishedLoading:(NSArray*)entities;
- (void)remoteEntitiesOfflineLoaded:(NSArray*)entities;
- (void)remoteEntitiesWontBeLoadedMoreThan:(NSArray*)entities becauseOfErrorNumber:(NSUInteger)error;
- (void)remoteEntityLoaded: (RemoteEntity*)entity;
- (void)remoteEntityUpdated:(RemoteEntity*)entity;
- (void)remoteEntityCreated:(RemoteEntity*)entity;
- (void)remoteEntityDeleted:(RemoteEntity*)entity;
- (void)remoteEntityOnError:(RemoteEntity*)entity withErrorString: (NSString*) error andErrorCode:(NSInteger)errorCode;
@end
| 29.148148 | 119 | 0.789072 |
571fdfe65f4f298dde1e4d46fe02442c248d3eaf | 280 | asm | Assembly | src/features/misc macros.asm | FranchuFranchu/fran-os | 30e14b587dd66039511d08a54f516ad65c4375ff | [
"0BSD"
] | 1 | 2020-10-24T17:09:05.000Z | 2020-10-24T17:09:05.000Z | src/features/misc macros.asm | FranchuFranchu/fran-os | 30e14b587dd66039511d08a54f516ad65c4375ff | [
"0BSD"
] | null | null | null | src/features/misc macros.asm | FranchuFranchu/fran-os | 30e14b587dd66039511d08a54f516ad65c4375ff | [
"0BSD"
] | null | null | null | %macro DEBUG_PRINT 1
mov esi, %%string
call kernel_terminal_write_string
section .data
%%string: db %1, 0
section .text
%endmacro
%macro FATAL_ERROR 1
mov dl, VGA_COLOR_WHITE
mov dH, VGA_COLOR_RED
call kernel_terminal_set_color
DEBUG_PRINT %1
call kernel_halt
%endmacro | 18.666667 | 34 | 0.785714 |
f0348185cb88efdb34b5de39fe352d2ee65ecef9 | 13,977 | py | Python | nssrc/com/citrix/netscaler/nitro/resource/config/snmp/snmpmib.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/snmp/snmpmib.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/snmp/snmpmib.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class snmpmib(base_resource) :
""" Configuration for SNMP mib resource. """
def __init__(self) :
self._contact = None
self._name = None
self._location = None
self._customid = None
self._ownernode = None
self._sysdesc = None
self._sysuptime = None
self._sysservices = None
self._sysoid = None
self.___count = None
@property
def contact(self) :
r"""Name of the administrator for this Citrix ADC. Along with the name, you can include information on how to contact this person, such as a phone number or an email address. Can consist of 1 to 127 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters.
The following requirement applies only to the Citrix ADC CLI:
If the information includes one or more spaces, enclose it in double or single quotation marks (for example, "my contact" or 'my contact').<br/>Default value: "WebMaster (default)"<br/>Minimum length = 1.
"""
try :
return self._contact
except Exception as e:
raise e
@contact.setter
def contact(self, contact) :
r"""Name of the administrator for this Citrix ADC. Along with the name, you can include information on how to contact this person, such as a phone number or an email address. Can consist of 1 to 127 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters.
The following requirement applies only to the Citrix ADC CLI:
If the information includes one or more spaces, enclose it in double or single quotation marks (for example, "my contact" or 'my contact').<br/>Default value: "WebMaster (default)"<br/>Minimum length = 1
"""
try :
self._contact = contact
except Exception as e:
raise e
@property
def name(self) :
r"""Name for this Citrix ADC. Can consist of 1 to 127 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters. You should choose a name that helps identify the Citrix ADC appliance.
The following requirement applies only to the Citrix ADC CLI:
If the name includes one or more spaces, enclose it in double or single quotation marks (for example, "my name" or 'my name').<br/>Default value: "NetScaler"<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""Name for this Citrix ADC. Can consist of 1 to 127 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters. You should choose a name that helps identify the Citrix ADC appliance.
The following requirement applies only to the Citrix ADC CLI:
If the name includes one or more spaces, enclose it in double or single quotation marks (for example, "my name" or 'my name').<br/>Default value: "NetScaler"<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def location(self) :
r"""Physical location of the Citrix ADC. For example, you can specify building name, lab number, and rack number. Can consist of 1 to 127 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters.
The following requirement applies only to the Citrix ADC CLI:
If the location includes one or more spaces, enclose it in double or single quotation marks (for example, "my location" or 'my location').<br/>Default value: "POP (default)"<br/>Minimum length = 1.
"""
try :
return self._location
except Exception as e:
raise e
@location.setter
def location(self, location) :
r"""Physical location of the Citrix ADC. For example, you can specify building name, lab number, and rack number. Can consist of 1 to 127 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters.
The following requirement applies only to the Citrix ADC CLI:
If the location includes one or more spaces, enclose it in double or single quotation marks (for example, "my location" or 'my location').<br/>Default value: "POP (default)"<br/>Minimum length = 1
"""
try :
self._location = location
except Exception as e:
raise e
@property
def customid(self) :
r"""Custom identification number for the Citrix ADC. Can consist of 1 to 127 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters. You should choose a custom identification that helps identify the Citrix ADC appliance.
The following requirement applies only to the Citrix ADC CLI:
If the ID includes one or more spaces, enclose it in double or single quotation marks (for example, "my ID" or 'my ID').<br/>Default value: "Default"<br/>Minimum length = 1.
"""
try :
return self._customid
except Exception as e:
raise e
@customid.setter
def customid(self, customid) :
r"""Custom identification number for the Citrix ADC. Can consist of 1 to 127 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters. You should choose a custom identification that helps identify the Citrix ADC appliance.
The following requirement applies only to the Citrix ADC CLI:
If the ID includes one or more spaces, enclose it in double or single quotation marks (for example, "my ID" or 'my ID').<br/>Default value: "Default"<br/>Minimum length = 1
"""
try :
self._customid = customid
except Exception as e:
raise e
@property
def ownernode(self) :
r"""ID of the cluster node for which we are setting the mib. This is a mandatory argument to set snmp mib on CLIP.<br/>Default value: -1<br/>Maximum length = 31.
"""
try :
return self._ownernode
except Exception as e:
raise e
@ownernode.setter
def ownernode(self, ownernode):
    # Stores the node id verbatim (no range/length validation client-side).
    r"""ID of the cluster node for which we are setting the mib. This is a mandatory argument to set snmp mib on CLIP.<br/>Default value: -1<br/>Maximum length = 31
    """
    try:
        self._ownernode = ownernode
    except Exception as e:
        raise e
@property
def sysdesc(self):
    # Read-only: populated by the NITRO deserializer; there is no setter,
    # so this attribute only exists after a GET round-trip.
    r"""The description of the system.
    """
    try:
        return self._sysdesc
    except Exception as e:
        raise e
@property
def sysuptime(self):
    # Read-only, deserializer-populated. Units per the appliance contract:
    # hundredths of a second.
    r"""The UP time of the system in 100th of a second.
    """
    try:
        return self._sysuptime
    except Exception as e:
        raise e
@property
def sysservices(self):
    # Read-only, deserializer-populated.
    r"""The services offered by the system.
    """
    try:
        return self._sysservices
    except Exception as e:
        raise e
@property
def sysoid(self):
    # Read-only, deserializer-populated.
    r"""The OID of the system's management system.
    """
    try:
        return self._sysoid
    except Exception as e:
        raise e
def _get_nitro_response(self, service, response):
    r""" converts nitro response into object and returns the object array in case of get request.
    """
    try:
        # Deserialize the raw NITRO payload into a typed snmpmib_response.
        result = service.payload_formatter.string_to_resource(snmpmib_response, response, self.__class__.__name__)
        if(result.errorcode != 0):
            # Error code 444 means the session expired: clear it so the next
            # request re-authenticates.
            if (result.errorcode == 444):
                service.clear_session(self)
            if result.severity:
                if (result.severity == "ERROR"):
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else:
                # NOTE(review): a non-empty severity other than "ERROR" falls
                # through without raising, while an *empty* severity raises —
                # looks inverted, but it matches the generated SDK; confirm
                # before changing.
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.snmpmib
    except Exception as e:
        raise e
def _get_object_name(self):
    r""" Returns the value of object identifier argument
    """
    try:
        # The owner node id doubles as this resource's NITRO object name.
        node = self.ownernode
        return None if node is None else str(node)
    except Exception as e:
        raise e
@classmethod
def filter_update_parameters(cls, resource):
    r""" Use this function to create a resource with only update operation specific parameters.
    """
    # Copies only the fields the UPDATE operation accepts, so read-only
    # fields (sysdesc, sysuptime, ...) are never sent back to the appliance.
    updateresource = snmpmib()
    updateresource.contact = resource.contact
    updateresource.name = resource.name
    updateresource.location = resource.location
    updateresource.customid = resource.customid
    updateresource.ownernode = resource.ownernode
    return updateresource
@classmethod
def update(cls, client, resource):
    r""" Use this API to update snmpmib.

    `resource` may be a single snmpmib instance or a list of them; lists are
    sent as one bulk request.
    """
    try:
        if type(resource) is not list:
            # Single resource: strip to updatable fields, then issue the call.
            updateresource = cls.filter_update_parameters(resource)
            return updateresource.update_resource(client)
        else:
            if (resource and len(resource) > 0):
                updateresources = [ snmpmib() for _ in range(len(resource))]
                for i in range(len(resource)):
                    updateresources[i] = cls.filter_update_parameters(resource[i])
            # NOTE(review): if an empty list is passed, `updateresources` is
            # never bound and the bulk call below would raise — confirm callers
            # never pass [].
            result = cls.update_bulk_request(client, updateresources)
            return result
    except Exception as e:
        raise e
@classmethod
def unset(cls, client, resource, args):
    r""" Use this API to unset the properties of snmpmib resource.
    Properties that need to be unset are specified in args array.

    `resource` may be: a single ownernode id, a single snmpmib instance, a
    list of ownernode ids, or a list of snmpmib instances — the type checks
    below distinguish the id form from the instance form.
    """
    try:
        if type(resource) is not list:
            unsetresource = snmpmib()
            if type(resource) != type(unsetresource):
                # Caller passed a bare ownernode id.
                unsetresource.ownernode = resource
            else:
                # Caller passed a populated snmpmib instance.
                unsetresource.ownernode = resource.ownernode
            return unsetresource.unset_resource(client, args)
        else:
            if type(resource[0]) != cls:
                # List of bare ownernode ids.
                if (resource and len(resource) > 0):
                    unsetresources = [ snmpmib() for _ in range(len(resource))]
                    for i in range(len(resource)):
                        unsetresources[i].ownernode = resource[i]
            else:
                # List of snmpmib instances.
                if (resource and len(resource) > 0):
                    unsetresources = [ snmpmib() for _ in range(len(resource))]
                    for i in range(len(resource)):
                        unsetresources[i].ownernode = resource[i].ownernode
            # NOTE(review): an empty list reaches `resource[0]` above and
            # raises IndexError before this point — confirm callers never
            # pass [].
            result = cls.unset_bulk_request(client, unsetresources, args)
            return result
    except Exception as e:
        raise e
@classmethod
def get(cls, client, name="", option_=""):
    r""" Use this API to fetch all the snmpmib resources that are configured on netscaler (on ncore deployment).
    """
    try:
        if not name:
            obj = snmpmib()
            response = obj.get_resources(client, option_)
            return response
        # NOTE(review): when `name` is non-empty this method silently returns
        # None — there is no by-name branch (compare get_cluster below). Looks
        # intentional for ncore deployments, but confirm before relying on it.
    except Exception as e:
        raise e
@classmethod
def get_cluster(cls, client, name="", option_=""):
    r""" Use this API to fetch all the snmpmib resources that are configured on netscaler.

    `name` is interpreted as an ownernode id (or a list of them); passing
    snmpmib instances instead of ids is rejected.
    """
    try:
        if not name:
            # No selector: fetch everything.
            obj = snmpmib()
            response = obj.get_resources(client, option_)
        else:
            if type(name) is not list:
                if type(name) == cls:
                    raise Exception('Invalid parameter name:{0}'.format(type(name)))
                # Single ownernode id: fetch one resource.
                obj = snmpmib()
                obj.ownernode = name
                response = obj.get_resource(client, option_)
            else:
                if name and len(name) > 0:
                    if type(name[0]) == cls:
                        raise Exception('Invalid parameter name:{0}'.format(type(name[0])))
                    # List of ids: one GET per id, results collected in order.
                    response = [snmpmib() for _ in range(len(name))]
                    obj = [snmpmib() for _ in range(len(name))]
                    for i in range(len(name)):
                        obj[i] = snmpmib()
                        obj[i].ownernode = name[i]
                        response[i] = obj[i].get_resource(client, option_)
        return response
    except Exception as e:
        raise e
@classmethod
def get_filtered(cls, client, filter_):
    r""" Use this API to fetch filtered set of snmpmib resources.
    filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
    """
    try:
        # Wrap the caller's filter expression in a NITRO options object and
        # issue a filtered GET.
        opts = options()
        opts.filter = filter_
        return snmpmib().getfiltered(client, opts)
    except Exception as e:
        raise e
@classmethod
def count(cls, client):
    r""" Use this API to count the snmpmib resources configured on NetScaler.
    """
    try:
        # Ask NITRO for a count-only response instead of the full payload.
        opts = options()
        opts.count = True
        matches = snmpmib().get_resources(client, opts)
        if matches:
            # The count travels in a private '___count' attribute of the
            # first (synthetic) result object.
            return matches[0].__dict__['___count']
        return 0
    except Exception as e:
        raise e
@classmethod
def count_filtered(cls, client, filter_):
    r""" Use this API to count filtered the set of snmpmib resources.
    Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
    """
    try:
        # Count-only request restricted by the caller's filter expression.
        opts = options()
        opts.count = True
        opts.filter = filter_
        matches = snmpmib().getfiltered(client, opts)
        if matches:
            return matches[0].__dict__['___count']
        return 0
    except Exception as e:
        raise e
class snmpmib_response(base_response):
    """Typed NITRO response envelope for snmpmib GET requests.

    Holds the standard NITRO status fields plus a list of `length`
    pre-allocated snmpmib result objects that the payload formatter
    fills in.
    """
    def __init__(self, length=1):
        # Fix: the original assigned `self.snmpmib = []` and then immediately
        # overwrote it with the list comprehension below — the dead store is
        # removed.
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate one result slot per expected record.
        self.snmpmib = [snmpmib() for _ in range(length)]
| 37.980978 | 387 | 0.701867 |
f07bf2d0976fb300df894c953453e434a3e9d26f | 314 | js | JavaScript | documentation/html/search/functions_6.js | Italo1994/Laboratorio2-IMD0030 | ebe3df127ec78914d2feca77f92bf398e61c023c | [
"MIT"
] | null | null | null | documentation/html/search/functions_6.js | Italo1994/Laboratorio2-IMD0030 | ebe3df127ec78914d2feca77f92bf398e61c023c | [
"MIT"
] | null | null | null | documentation/html/search/functions_6.js | Italo1994/Laboratorio2-IMD0030 | ebe3df127ec78914d2feca77f92bf398e61c023c | [
"MIT"
] | null | null | null | var searchData=
[
['_7ecubo',['~Cubo',['../classCubo.html#ada9f424711b825757aaf553f82949522',1,'Cubo']]],
['_7eesfera',['~Esfera',['../classEsfera.html#abf33ddbf68a9d97d90ddb1ee83d5c994',1,'Esfera']]],
['_7epiramide',['~Piramide',['../classPiramide.html#a1fdd51b594ba1fe14e094c7f1a6e5e79',1,'Piramide']]]
];
| 44.857143 | 104 | 0.710191 |
9c7c38ef52bdd39fc9c2d43a42f56df688d9e3e0 | 3,206 | js | JavaScript | addons/mail/static/src/models/notification_group_manager/notification_group_manager.js | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/mail/static/src/models/notification_group_manager/notification_group_manager.js | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/mail/static/src/models/notification_group_manager/notification_group_manager.js | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | odoo.define('mail/static/src/models/notification_group_manager/notification_group_manager.js', function (require) {
'use strict';
const { registerNewModel } = require('mail/static/src/model/model_core.js');
const { one2many } = require('mail/static/src/model/model_field.js');
function factory(dependencies) {
class NotificationGroupManager extends dependencies['mail.model'] {
//----------------------------------------------------------------------
// Public
//----------------------------------------------------------------------
computeGroups() {
for (const group of this.groups) {
group.delete();
}
const groups = [];
// TODO batch insert, better logic task-2258605
this.env.messaging.currentPartner.failureNotifications.forEach(notification => {
const thread = notification.message.originThread;
// Notifications are grouped by model and notification_type.
// Except for channel where they are also grouped by id because
// we want to open the actual channel in discuss or chat window
// and not its kanban/list/form view.
const channelId = thread.model === 'mail.channel' ? thread.id : null;
const id = `${thread.model}/${channelId}/${notification.notification_type}`;
const group = this.env.models['mail.notification_group'].insert({
id,
notification_type: notification.notification_type,
res_model: thread.model,
res_model_name: thread.model_name,
});
group.update({ notifications: [['link', notification]] });
// keep res_id only if all notifications are for the same record
// set null if multiple records are present in the group
let res_id = group.res_id;
if (group.res_id === undefined) {
res_id = thread.id;
} else if (group.res_id !== thread.id) {
res_id = null;
}
// keep only the most recent date from all notification messages
let date = group.date;
if (!date) {
date = notification.message.date;
} else {
date = moment.max(group.date, notification.message.date);
}
group.update({
date,
res_id,
});
// avoid linking the same group twice when adding a notification
// to an existing group
if (!groups.includes(group)) {
groups.push(group);
}
});
this.update({ groups: [['link', groups]] });
}
}
NotificationGroupManager.fields = {
groups: one2many('mail.notification_group'),
};
NotificationGroupManager.modelName = 'mail.notification_group_manager';
return NotificationGroupManager;
}
registerNewModel('mail.notification_group_manager', factory);
});
| 41.102564 | 115 | 0.524953 |
93d24b8bbb90d1c4f02414936c108cfb3616bb05 | 4,431 | asm | Assembly | Source/Levels/L1212.asm | AbePralle/FGB | 52f004b8d9d4091a2a242a012dc8c1f90d4c160d | [
"MIT"
] | null | null | null | Source/Levels/L1212.asm | AbePralle/FGB | 52f004b8d9d4091a2a242a012dc8c1f90d4c160d | [
"MIT"
] | null | null | null | Source/Levels/L1212.asm | AbePralle/FGB | 52f004b8d9d4091a2a242a012dc8c1f90d4c160d | [
"MIT"
] | null | null | null | ; L1212.asm Crouton Homeworld 1
; Generated 04.19.2001 by mlevel
; Modified 04.19.2001 by Abe Pralle
INCLUDE "Source/Defs.inc"
INCLUDE "Source/Levels.inc"

; Level-local constants: tile indices, actor sprite indices, and the slot in
; levelVars used to remember whether the overheard dialog already played.
NUM_DIALOG     EQU 3
HFENCE_INDEX   EQU 18
HULK_INDEX     EQU 43
GOBLIN_INDEX   EQU 44
VAR_OVERHEARD  EQU 0
STATE_ANYTIME  EQU 0
STATE_MAKESURE EQU 1
STATE_SUCKS    EQU 2

;---------------------------------------------------------------------
SECTION "Level1212Section",ROMX
;---------------------------------------------------------------------

; Raw dialog text blobs, addressed by the labels below.
dialog:
L1212_anytime_gtx:
    INCBIN "Data/Dialog/Talk/L1212_anytime.gtx"
L1212_yeah_gtx:
    INCBIN "Data/Dialog/Talk/L1212_yeah.gtx"
L1212_makesure_gtx:
    INCBIN "Data/Dialog/Talk/L1212_makesure.gtx"
L1212_suckstobehim_gtx:
    INCBIN "Data/Dialog/Talk/L1212_suckstobehim.gtx"

; Table of the level's four entry points, consumed by the level loader.
L1212_Contents::
    DW L1212_Load
    DW L1212_Init
    DW L1212_Check
    DW L1212_Map

;---------------------------------------------------------------------
; Load
;---------------------------------------------------------------------
; Each script is prefixed with its byte size so the engine can copy it
; into levelCheckRAM before running it.
L1212_Load:
    DW ((L1212_LoadFinished - L1212_Load2)) ;size
L1212_Load2:
    call ParseMap
    ret
L1212_LoadFinished:

;---------------------------------------------------------------------
; Map
;---------------------------------------------------------------------
L1212_Map:
    INCBIN "Data/Levels/L1212_crouton_hw1.lvl"

;---------------------------------------------------------------------
; Init
;---------------------------------------------------------------------
L1212_Init:
    DW ((L1212_InitFinished - L1212_Init2)) ;size
L1212_Init2:
    ; Register $1212 as both the join map and the respawn map.
    ld hl,$1212
    call SetJoinMap
    call SetRespawnMap
    call State0To1
    ; Start the level music.
    ld a,BANK(fgbwar_gbm)
    ld hl,fgbwar_gbm
    call InitMusic
    STDSETUPDIALOG
    ; Clear the "dialog already overheard" flag.
    xor a
    ld [levelVars+VAR_OVERHEARD],a
    ret
L1212_InitFinished:

;---------------------------------------------------------------------
; Check
;---------------------------------------------------------------------
; Per-frame script: runs copied into levelCheckRAM, hence every local call
; target is relocated via ((.label-L1212_Check2)+levelCheckRAM).
L1212_Check:
    DW ((L1212_CheckFinished - L1212_Check2)) ;size
L1212_Check2:
    call ((.animateFence-L1212_Check2)+levelCheckRAM)
    call ((.checkDialog-L1212_Check2)+levelCheckRAM)
    ret

.checkDialog
    ; Skip entirely once the dialog has been overheard this visit.
    ld a,[levelVars+VAR_OVERHEARD]
    or a
    ret nz
    ld hl,((.checkHeroInZone-L1212_Check2)+levelCheckRAM)
    xor a
    call CheckEachHero
    ret

.checkHeroInZone
    ; Callback per hero: fires the dialog only when the hero stands in zone 2.
    ld c,a
    call GetFirst
    call GetCurZone
    cp 2
    jr z,.inZone
    xor a
    ret
.inZone
    ;increment dialog number
    ; mapState cycles 1..NUM_DIALOG, wrapping back to 1.
    ld hl,mapState
    ld a,[hl]
    push af
    inc a
    cp (NUM_DIALOG+1)
    jr c,.dialogNumOkay
    ld a,1
.dialogNumOkay
    ld [hl],a
    pop af
    ; Look up the dialog text for the pre-increment state.
    ld hl,((.dialogLookup-L1212_Check2)+levelCheckRAM)
    call Lookup16
    push hl
    call MakeIdle
    pop hl
    ld de,((.afterDialog-L1212_Check2)+levelCheckRAM)
    call SetDialogSkip
    ld d,h
    ld e,l
    call SetSpeakerFromHeroIndex
    ; Hulk speaks the looked-up line, goblin always answers with "yeah".
    ld c,HULK_INDEX
    call ShowDialogAtTop
    call ClearDialog
    ld c,GOBLIN_INDEX
    ld de,L1212_yeah_gtx
    call ShowDialogAtBottom
.afterDialog
    call ClearDialogSkipForward
    call MakeNonIdle
    ; Mark the conversation as overheard so it plays at most once.
    ld a,1
    ld [levelVars+VAR_OVERHEARD],a
    ret

.animateFence
    ; Derive a 0..3 animation phase from the global update timer.
    ldio a,[updateTimer]
    rrca
    and 3
    ld b,a
    ld hl,bgTileMap+HFENCE_INDEX
    ld d,HFENCE_INDEX
    call ((.animateFourFrames-L1212_Check2)+levelCheckRAM)
    ret

.animateFourFrames
    ; Writes four consecutive BG tiles, each offset by ((phase+c) & 3) from
    ; the base tile in d, producing a scrolling 4-frame animation.
    ld c,4
.animateFourFrames_loop
    ld a,b
    add c
    and 3
    add d
    ld [hl+],a
    dec c
    jr nz,.animateFourFrames_loop
    ret

.dialogLookup
    ; Entry 0 is unused padding; mapState values 1..3 index the three lines.
    DW 0,L1212_anytime_gtx,L1212_makesure_gtx,L1212_suckstobehim_gtx
L1212_CheckFinished:

    PRINT "1212 Script Sizes (Load/Init/Check) (of $500): "
    PRINT (L1212_LoadFinished - L1212_Load2)
    PRINT " / "
    PRINT (L1212_InitFinished - L1212_Init2)
    PRINT " / "
    PRINT (L1212_CheckFinished - L1212_Check2)
    PRINT "\n"
7f2927b4a098e685cee72dfcda473db987e67b5f | 9,507 | rs | Rust | src/sys/resource.rs | rtic-scope/nix | 916bbfac03b58f181fc971b59a1d61c601f3944c | [
"MIT"
] | 1,719 | 2016-02-12T17:14:47.000Z | 2022-03-26T11:26:28.000Z | src/sys/resource.rs | rtic-scope/nix | 916bbfac03b58f181fc971b59a1d61c601f3944c | [
"MIT"
] | 1,386 | 2016-02-12T16:56:50.000Z | 2022-03-27T20:16:27.000Z | src/sys/resource.rs | rtic-scope/nix | 916bbfac03b58f181fc971b59a1d61c601f3944c | [
"MIT"
] | 524 | 2016-02-12T21:06:59.000Z | 2022-03-16T13:39:17.000Z | //! Configure the process resource limits.
use cfg_if::cfg_if;
use crate::errno::Errno;
use crate::Result;
pub use libc::rlim_t;
use std::mem;
cfg_if! {
if #[cfg(all(target_os = "linux", target_env = "gnu"))]{
use libc::{__rlimit_resource_t, rlimit, RLIM_INFINITY};
}else if #[cfg(any(
target_os = "freebsd",
target_os = "openbsd",
target_os = "netbsd",
target_os = "macos",
target_os = "ios",
target_os = "android",
target_os = "dragonfly",
all(target_os = "linux", not(target_env = "gnu"))
))]{
use libc::{c_int, rlimit, RLIM_INFINITY};
}
}
libc_enum! {
    /// Types of process resources.
    ///
    /// The Resource enum is platform dependent. Check different platform
    /// manuals for more details. Some platform links have been provided for
    /// easier reference (non-exhaustive).
    ///
    /// * [Linux](https://man7.org/linux/man-pages/man2/getrlimit.2.html)
    /// * [FreeBSD](https://www.freebsd.org/cgi/man.cgi?query=setrlimit)
    /// * [NetBSD](https://man.netbsd.org/setrlimit.2)

    // linux-gnu uses u_int as resource enum, which is implemented in libc as
    // well.
    //
    // https://gcc.gnu.org/legacy-ml/gcc/2015-08/msg00441.html
    // https://github.com/rust-lang/libc/blob/master/src/unix/linux_like/linux/gnu/mod.rs
    //
    // Note: every variant below is conditionally compiled, so the set of
    // available resources differs per target; the repr must match the C type
    // the platform's setrlimit/getrlimit expect (u32 on linux-gnu, i32
    // elsewhere).
    #[cfg_attr(all(target_os = "linux", target_env = "gnu"), repr(u32))]
    #[cfg_attr(any(
        target_os = "freebsd",
        target_os = "openbsd",
        target_os = "netbsd",
        target_os = "macos",
        target_os = "ios",
        target_os = "android",
        target_os = "dragonfly",
        all(target_os = "linux", not(target_env = "gnu"))
    ), repr(i32))]
    #[non_exhaustive]
    pub enum Resource {
        #[cfg(not(any(target_os = "freebsd", target_os = "netbsd", target_os = "openbsd")))]
        #[cfg_attr(docsrs, doc(cfg(all())))]
        /// The maximum amount (in bytes) of virtual memory the process is
        /// allowed to map.
        RLIMIT_AS,
        /// The largest size (in bytes) core(5) file that may be created.
        RLIMIT_CORE,
        /// The maximum amount of cpu time (in seconds) to be used by each
        /// process.
        RLIMIT_CPU,
        /// The maximum size (in bytes) of the data segment for a process
        RLIMIT_DATA,
        /// The largest size (in bytes) file that may be created.
        RLIMIT_FSIZE,
        /// The maximum number of open files for this process.
        RLIMIT_NOFILE,
        /// The maximum size (in bytes) of the stack segment for a process.
        RLIMIT_STACK,

        #[cfg(target_os = "freebsd")]
        #[cfg_attr(docsrs, doc(cfg(all())))]
        /// The maximum number of kqueues this user id is allowed to create.
        RLIMIT_KQUEUES,

        #[cfg(any(target_os = "android", target_os = "linux"))]
        #[cfg_attr(docsrs, doc(cfg(all())))]
        /// A limit on the combined number of flock locks and fcntl leases that
        /// this process may establish.
        RLIMIT_LOCKS,

        #[cfg(any(
            target_os = "android",
            target_os = "freebsd",
            target_os = "openbsd",
            target_os = "linux",
            target_os = "netbsd"
        ))]
        #[cfg_attr(docsrs, doc(cfg(all())))]
        /// The maximum size (in bytes) which a process may lock into memory
        /// using the mlock(2) system call.
        RLIMIT_MEMLOCK,

        #[cfg(any(target_os = "android", target_os = "linux"))]
        #[cfg_attr(docsrs, doc(cfg(all())))]
        /// A limit on the number of bytes that can be allocated for POSIX
        /// message queues for the real user ID of the calling process.
        RLIMIT_MSGQUEUE,

        #[cfg(any(target_os = "android", target_os = "linux"))]
        #[cfg_attr(docsrs, doc(cfg(all())))]
        /// A ceiling to which the process's nice value can be raised using
        /// setpriority or nice.
        RLIMIT_NICE,

        #[cfg(any(
            target_os = "android",
            target_os = "freebsd",
            target_os = "netbsd",
            target_os = "openbsd",
            target_os = "linux",
        ))]
        #[cfg_attr(docsrs, doc(cfg(all())))]
        /// The maximum number of simultaneous processes for this user id.
        RLIMIT_NPROC,

        #[cfg(target_os = "freebsd")]
        #[cfg_attr(docsrs, doc(cfg(all())))]
        /// The maximum number of pseudo-terminals this user id is allowed to
        /// create.
        RLIMIT_NPTS,

        #[cfg(any(target_os = "android",
            target_os = "freebsd",
            target_os = "netbsd",
            target_os = "openbsd",
            target_os = "linux",
        ))]
        #[cfg_attr(docsrs, doc(cfg(all())))]
        /// When there is memory pressure and swap is available, prioritize
        /// eviction of a process' resident pages beyond this amount (in bytes).
        RLIMIT_RSS,

        #[cfg(any(target_os = "android", target_os = "linux"))]
        #[cfg_attr(docsrs, doc(cfg(all())))]
        /// A ceiling on the real-time priority that may be set for this process
        /// using sched_setscheduler and sched_set‐ param.
        RLIMIT_RTPRIO,

        #[cfg(any(target_os = "linux"))]
        #[cfg_attr(docsrs, doc(cfg(all())))]
        /// A limit (in microseconds) on the amount of CPU time that a process
        /// scheduled under a real-time scheduling policy may con‐ sume without
        /// making a blocking system call.
        RLIMIT_RTTIME,

        #[cfg(any(target_os = "android", target_os = "linux"))]
        #[cfg_attr(docsrs, doc(cfg(all())))]
        /// A limit on the number of signals that may be queued for the real
        /// user ID of the calling process.
        RLIMIT_SIGPENDING,

        #[cfg(any(target_os = "freebsd", target_os = "dragonfly"))]
        #[cfg_attr(docsrs, doc(cfg(all())))]
        /// The maximum size (in bytes) of socket buffer usage for this user.
        RLIMIT_SBSIZE,

        #[cfg(target_os = "freebsd")]
        #[cfg_attr(docsrs, doc(cfg(all())))]
        /// The maximum size (in bytes) of the swap space that may be reserved
        /// or used by all of this user id's processes.
        RLIMIT_SWAP,

        #[cfg(target_os = "freebsd")]
        #[cfg_attr(docsrs, doc(cfg(all())))]
        /// An alias for RLIMIT_AS.
        RLIMIT_VMEM,
    }
}
/// Get the current processes resource limits
///
/// A value of `None` indicates the value equals to `RLIM_INFINITY` which means
/// there is no limit.
///
/// # Parameters
///
/// * `resource`: The [`Resource`] that we want to get the limits of.
///
/// # Examples
///
/// ```
/// # use nix::sys::resource::{getrlimit, Resource};
///
/// let (soft_limit, hard_limit) = getrlimit(Resource::RLIMIT_NOFILE).unwrap();
/// println!("current soft_limit: {:?}", soft_limit);
/// println!("current hard_limit: {:?}", hard_limit);
/// ```
///
/// # References
///
/// [getrlimit(2)](https://pubs.opengroup.org/onlinepubs/9699919799/functions/getrlimit.html#tag_16_215)
///
/// [`Resource`]: enum.Resource.html
pub fn getrlimit(resource: Resource) -> Result<(Option<rlim_t>, Option<rlim_t>)> {
let mut old_rlim = mem::MaybeUninit::<rlimit>::uninit();
cfg_if! {
if #[cfg(all(target_os = "linux", target_env = "gnu"))]{
let res = unsafe { libc::getrlimit(resource as __rlimit_resource_t, old_rlim.as_mut_ptr()) };
}else{
let res = unsafe { libc::getrlimit(resource as c_int, old_rlim.as_mut_ptr()) };
}
}
Errno::result(res).map(|_| {
let rlimit { rlim_cur, rlim_max } = unsafe { old_rlim.assume_init() };
(Some(rlim_cur), Some(rlim_max))
})
}
/// Set the current processes resource limits
///
/// # Parameters
///
/// * `resource`: The [`Resource`] that we want to set the limits of.
/// * `soft_limit`: The value that the kernel enforces for the corresponding
///   resource. Note: `None` input will be replaced by constant `RLIM_INFINITY`.
/// * `hard_limit`: The ceiling for the soft limit. Must be lower or equal to
///   the current hard limit for non-root users. Note: `None` input will be
///   replaced by constant `RLIM_INFINITY`.
///
/// > Note: for some os (linux_gnu), setting hard_limit to `RLIM_INFINITY` can
/// > results `EPERM` Error. So you will need to set the number explicitly.
///
/// # Examples
///
/// ```
/// # use nix::sys::resource::{setrlimit, Resource};
///
/// let soft_limit = Some(512);
/// let hard_limit = Some(1024);
/// setrlimit(Resource::RLIMIT_NOFILE, soft_limit, hard_limit).unwrap();
/// ```
///
/// # References
///
/// [setrlimit(2)](https://pubs.opengroup.org/onlinepubs/9699919799/functions/getrlimit.html#tag_16_215)
///
/// [`Resource`]: enum.Resource.html
///
/// Note: `setrlimit` provides a safe wrapper to libc's `setrlimit`.
pub fn setrlimit(
    resource: Resource,
    soft_limit: Option<rlim_t>,
    hard_limit: Option<rlim_t>,
) -> Result<()> {
    // `None` encodes "unlimited" and is translated to RLIM_INFINITY before
    // the struct is handed to libc.
    let new_rlim = rlimit {
        rlim_cur: soft_limit.unwrap_or(RLIM_INFINITY),
        rlim_max: hard_limit.unwrap_or(RLIM_INFINITY),
    };
    cfg_if! {
        if #[cfg(all(target_os = "linux", target_env = "gnu"))]{
            // glibc takes __rlimit_resource_t (u32) for the resource argument...
            let res = unsafe { libc::setrlimit(resource as __rlimit_resource_t, &new_rlim as *const rlimit) };
        }else{
            // ...other libcs take a plain c_int.
            let res = unsafe { libc::setrlimit(resource as c_int, &new_rlim as *const rlimit) };
        }
    }

    // Convert the C-style -1/errno result into nix's Result, discarding the 0.
    Errno::result(res).map(drop)
}
| 35.875472 | 110 | 0.594194 |
2f6865f8fd9f292b7ef125defd6cd4316114d8c8 | 2,508 | rs | Rust | rust/src/bin/remove_duplicates_from_sorted_array.rs | senofsky/leetcode | 8cde5d0fbe781a7e5b3e9859ea37faa5fd6e6fec | [
"MIT"
] | null | null | null | rust/src/bin/remove_duplicates_from_sorted_array.rs | senofsky/leetcode | 8cde5d0fbe781a7e5b3e9859ea37faa5fd6e6fec | [
"MIT"
] | null | null | null | rust/src/bin/remove_duplicates_from_sorted_array.rs | senofsky/leetcode | 8cde5d0fbe781a7e5b3e9859ea37faa5fd6e6fec | [
"MIT"
] | null | null | null | // Given a sorted array nums, remove the duplicates in-place such that each
// element appear only once and return the new length.
//
// Do not allocate extra space for another array, you must do this by modifying
// the input array in-place with O(1) extra memory.
//
// Example 1:
//
// Given nums = [1,1,2],
//
// Your function should return length = 2, with the first two elements of nums
// being 1 and 2 respectively.
//
// It doesn't matter what you leave beyond the returned length.
//
// Example 2:
//
// Given nums = [0,0,1,1,1,2,2,3,3,4],
//
// Your function should return length = 5, with the first five elements of nums
// being modified to 0, 1, 2, 3, and 4 respectively.
//
// It doesn't matter what values are set beyond the returned length.
//
// Clarification:
//
// Confused why the returned value is an integer but your answer is an array?
//
// Note that the input array is passed in by reference, which means modification
// to the input array will be known to the caller as well.
//
// Internally you can think of this:
//
// // nums is passed in by reference. (i.e., without making a copy)
// int len = removeDuplicates(nums);
//
// // any modification to nums in your function would be known by the caller.
// // using the length returned by your function, it prints the first len
// // elements.
// for (int i = 0; i < len; i++) {
// print(nums[i]);
// }
// TODO: Compare against other submissions
// pub fn remove_duplicates(nums: &mut Vec<i32>) -> i32 {
// if nums.is_empty() {
// return 0;
// }
//
// let array_length = nums.len();
// let mut new_index = 0;
//
// for index in 0..array_length {
// if nums[new_index] != nums[index] {
// new_index += 1;
// nums[new_index] = nums[index];
// }
// }
//
// (new_index + 1) as i32
/// Removes duplicates in-place from the sorted vector and returns the length
/// of the unique prefix. Elements past the returned length are whatever the
/// compaction pass left behind, matching the LeetCode contract.
fn remove_duplicates(nums: &mut Vec<i32>) -> i32 {
    // `kept` is both the write cursor and the count of unique values so far.
    let mut kept = 0usize;
    for read in 0..nums.len() {
        // The first element is always kept; afterwards an element survives
        // only when it differs from its predecessor in the (sorted) input.
        if read == 0 || nums[read] != nums[read - 1] {
            nums[kept] = nums[read];
            kept += 1;
        }
    }
    kept as i32
}
fn main() {
let mut nums = vec![1, 1, 2];
let new_length = remove_duplicates(&mut nums);
println!("{:?}, length = {}", nums, new_length);
let mut nums = vec![0, 0, 1, 1, 1, 2, 2, 3, 3, 4];
let new_length = remove_duplicates(&mut nums);
println!("{:?}, length = {}", nums, new_length);
}
| 28.179775 | 80 | 0.610447 |
afabdc8b173466c9869f608310797b3a08fea174 | 5,008 | rb | Ruby | mrblib/mrb_keyboard.rb | chronno/mruby-mrgss | 7f29e667576c8543ec2da274278da70ff8b9605b | [
"MIT"
] | 1 | 2015-11-25T08:29:30.000Z | 2015-11-25T08:29:30.000Z | mrblib/mrb_keyboard.rb | chronno/mruby-mrgss | 7f29e667576c8543ec2da274278da70ff8b9605b | [
"MIT"
] | null | null | null | mrblib/mrb_keyboard.rb | chronno/mruby-mrgss | 7f29e667576c8543ec2da274278da70ff8b9605b | [
"MIT"
] | 1 | 2015-11-26T22:18:07.000Z | 2015-11-26T22:18:07.000Z | #============================================================================
# ** ::MRGSS
#----------------------------------------------------------------------------
# This module contains all MRGSS Modules.
#============================================================================
module MRGSS
#------------------------------------------------------------------------
# * Keyboard
#------------------------------------------------------------------------
# This Class represents a the keyboard
#------------------------------------------------------------------------
module Keyboard
#----------------------------------------------------------------------
# Status buffers
#----------------------------------------------------------------------
@trigger = Array.new(512).fill(false)
@release = Array.new(512).fill(false)
@press = Array.new(512).fill(false)
@repeat = Array.new(512).fill(false)
@time = Array.new(512).fill(0)
#----------------------------------------------------------------------
# triggered?
#----------------------------------------------------------------------
def self.trigger?(key)
return @trigger[key]
end
#----------------------------------------------------------------------
# pressed?
#----------------------------------------------------------------------
def self.press?(key)
return @press[key]
end
#----------------------------------------------------------------------
# keyboard status update
#----------------------------------------------------------------------
def self.update(key, action)
@trigger[key] = action == 1 && @time[key] == 0
@release[key] = action == 0
@time[key] = action == 1 ? @time[key] + 1 : 0
@repeat[key] = action == 2 && @time[key] % 2 == 0
@press[key] = action != 0 && @time[key] > 1
end
#----------------------------------------------------------------------
# update character input
#----------------------------------------------------------------------
def self.method_missing(name, *args, &block)
p args.pack("U").to_s
end
#----------------------------------------------------------------------
# Keys Constants
#----------------------------------------------------------------------
KEY_UNKNOWN = -1
KEY_SPACE = 32
KEY_APOSTROPHE = 39
KEY_COMMA = 44
KEY_MINUS = 45
KEY_PERIOD = 46
KEY_SLASH = 47
KEY_0 = 48
KEY_1 = 49
KEY_2 = 50
KEY_3 = 51
KEY_4 = 52
KEY_5 = 53
KEY_6 = 54
KEY_7 = 55
KEY_8 = 56
KEY_9 = 57
KEY_SEMICOLON = 59
KEY_EQUAL = 61
KEY_A = 65
KEY_B = 66
KEY_C = 67
KEY_D = 68
KEY_E = 69
KEY_F = 70
KEY_G = 71
KEY_H = 72
KEY_I = 73
KEY_J = 74
KEY_K = 75
KEY_L = 76
KEY_M = 77
KEY_N = 78
KEY_O = 79
KEY_P = 80
KEY_Q = 81
KEY_R = 82
KEY_S = 83
KEY_T = 84
KEY_U = 85
KEY_V = 86
KEY_W = 87
KEY_X = 88
KEY_Y = 89
KEY_Z = 90
KEY_LEFT_BRACKET = 91
KEY_BACKSLASH = 92
KEY_RIGHT_BRACKET = 93
KEY_GRAVE_ACCENT = 96
KEY_WORLD_1 = 161
KEY_WORLD_2 = 162
KEY_ESCAPE = 256
KEY_ENTER = 257
KEY_TAB = 258
KEY_BACKSPACE = 259
KEY_INSERT = 260
KEY_DELETE = 261
KEY_RIGHT = 262
KEY_LEFT = 263
KEY_DOWN = 264
KEY_UP = 265
KEY_PAGE_UP = 266
KEY_PAGE_DOWN = 267
KEY_HOME = 268
KEY_END = 269
KEY_CAPS_LOCK = 280
KEY_SCROLL_LOCK = 281
KEY_NUM_LOCK = 282
KEY_PRINT_SCREEN = 283
KEY_PAUSE = 284
KEY_F1 = 290
KEY_F2 = 291
KEY_F3 = 292
KEY_F4 = 293
KEY_F5 = 294
KEY_F6 = 295
KEY_F7 = 296
KEY_F8 = 297
KEY_F9 = 298
KEY_F10 = 299
KEY_F11 = 300
KEY_F12 = 301
KEY_F13 = 302
KEY_F14 = 303
KEY_F15 = 304
KEY_F16 = 305
KEY_F17 = 306
KEY_F18 = 307
KEY_F19 = 308
KEY_F20 = 309
KEY_F21 = 310
KEY_F22 = 311
KEY_F23 = 312
KEY_F24 = 313
KEY_F25 = 314
KEY_KP_0 = 320
KEY_KP_1 = 321
KEY_KP_2 = 322
KEY_KP_3 = 323
KEY_KP_4 = 324
KEY_KP_5 = 325
KEY_KP_6 = 326
KEY_KP_7 = 327
KEY_KP_8 = 328
KEY_KP_9 = 329
KEY_KP_DECIMAL = 330
KEY_KP_DIVIDE = 331
KEY_KP_MULTIPLY = 332
KEY_KP_SUBTRACT = 333
KEY_KP_ADD = 334
KEY_KP_ENTER = 335
KEY_KP_EQUAL = 336
KEY_LEFT_SHIFT = 340
KEY_LEFT_CONTROL = 341
KEY_LEFT_ALT = 342
KEY_LEFT_SUPER = 343
KEY_RIGHT_SHIFT = 344
KEY_RIGHT_CONTROL = 345
KEY_RIGHT_ALT = 346
KEY_RIGHT_SUPER = 347
KEY_MENU = 348
KEY_LAST = KEY_MENU
end
end | 28.617143 | 77 | 0.381989 |
4a55f20e2894a0e8e39d1a5bb7bdebae8de54931 | 4,402 | js | JavaScript | src/utils.js | myurch/mock-rel | 7fa490bc6c84ac6cb1e9cea675dcd00d5a40329d | [
"MIT"
] | 3 | 2019-10-08T19:11:28.000Z | 2020-01-07T21:19:45.000Z | src/utils.js | myurch/mock-rel | 7fa490bc6c84ac6cb1e9cea675dcd00d5a40329d | [
"MIT"
] | 1 | 2021-05-10T11:29:09.000Z | 2021-05-10T11:29:09.000Z | src/utils.js | myurch/mock-rel | 7fa490bc6c84ac6cb1e9cea675dcd00d5a40329d | [
"MIT"
] | null | null | null | import * as R from 'ramda'
import {BACKREF} from './consts'
// Builds a schema field descriptor. `modelName` and `backref` are optional
// and default to null for plain (non-relational) fields.
export const createField = ({ type, modelName = null, backref = null }) => ({
  type,
  backref,
  modelName,
})
// state can be null
// Builds a fresh table (id -> row object) for `modelName` from `data_list`.
// When `id_automatic` is true (the default), ids are assigned sequentially
// starting from resolveNextid(); otherwise each row must carry its own `id`.
// `state` may be null; it is only consulted for automatic id allocation.
export const handle_add_all_models = ({modelName, data_list, id_automatic, state}) => {
    if (!(typeof(modelName) === 'string')){
        throw TypeError('mock-rel must take String for modelName')
    }
    if (id_automatic === undefined) {
        // default behavior: allocate ids for the caller
        id_automatic = true
    }
    let table = {}
    if (id_automatic) {
        let idx = resolveNextid({state, modelName})
        R.forEach((obj)=> {
            // mutates the incoming object to stamp its assigned id
            obj.id = idx
            table = R.assoc(idx.toString(), obj, table)
            idx ++
        }, data_list)
    } else {
        R.forEach((obj)=> {
            // trust the caller-provided id; keys are stringified ids
            table = R.assoc(R.prop('id', obj).toString(), obj, table)
        }, data_list)
    }
    return table
}
// Next-id helper: returns (max numeric value in `obj`) + 1, or 0 when `obj`
// is null/undefined/empty. Note: despite the name it returns max + 1, because
// resolveNextid() uses it as an id allocator over the plucked id map.
// Fixes vs. original: `const` for a never-reassigned binding, and a single
// O(n) Math.max pass instead of keys -> map -> sort (O(n log n)).
const objMax = (obj) => {
  if (!obj) {
    return 0;
  }
  const values = Object.values(obj);
  if (values.length === 0) {
    return 0;
  }
  return Math.max(...values) + 1;
}
// Picks the next id for a new `modelName` row. If the schema declares a
// custom `id_resolver` for the model it is delegated to; otherwise the
// default is (max existing id in state) + 1 via objMax.
const resolveNextid = ({state, modelName, data, schema}) => {
    const customResolver = R.path([modelName, 'id_resolver'], schema)
    if (customResolver) {
        return customResolver({state, modelName, data})
    } else {
        // look at all id's already stored in the state; return max + 1
        const ids = R.pluck(['id'], R.propOr({}, modelName, state))
        return objMax(ids)
    }
}
// For every BACKREF field in `data`, writes `nextId` into the referenced
// model's backref slot so both sides of the relationship stay linked.
// Returns the (immutably updated) state; throws if a referenced row does
// not exist.
export const handle_backref = ({schema, modelName, state, data, nextId}) => {
    if (schema) {
        R.map(fieldName => {
            const type = R.path([modelName, 'fields', fieldName], schema)
            if (R.prop('type', type) === BACKREF) {
                // data[fieldName] is expected to be a list of related row ids
                R.map(relId => {
                    // make sure id's are strings if going into assocPath()
                    const relPath = [
                        R.prop('modelName', type), // modelName
                        relId.toString(), // id
                        R.prop('backref', type), // fieldName
                    ]
                    const modelExists = R.pathOr(false,
                        R.slice(0, -1, relPath),
                        state
                    )
                    // pathOr's `false` default doubles as the existence probe
                    if ( typeof(modelExists) === typeof(false) ) {
                        throw TypeError(`Backref obj does not exist for model: ${modelName}, field: ${fieldName}`)
                    } else {
                        state = R.assocPath(relPath, nextId, state)
                    }
                }, R.prop(fieldName, data))
            }
        }, Object.keys(data))
    }
    return state
}
// Inserts `data` as a row of `modelName` in state. The id comes from
// `nextId` when given, else from resolveNextid. If a stub row already
// exists under that id (e.g. created by an earlier backref pass), the
// incoming row's fields take precedence (mergeDeepLeft) and the stub
// only fills in the gaps. Returns { state, nextId }.
export const handle_add_model = ({state, modelName, data, nextId, schema}) => {
  if (!(typeof(modelName) === 'string')){
    throw TypeError('mock-rel must take String for modelName')
  }
  if (nextId === undefined){
    nextId = resolveNextid({state, modelName, data, schema})
  }
  // add associated data
  const existingRow = R.path([modelName, nextId.toString()], state)
  let row = R.assocPath(['id'], nextId, data)
  if (existingRow) {
    row = R.mergeDeepLeft(
      row,
      existingRow
    )
  }
  state = R.assocPath([modelName, nextId.toString()], row, state)
  return {state, nextId}
}
// Validates a user-supplied schema: every model entry must declare a
// truthy "fields" value. Throws TypeError on the first offending model;
// a null/undefined schema is silently accepted.
export const checkSchemaIntegrity = (schema) => {
  if (!schema) return
  Object.keys(schema).forEach((modelName) => {
    const model = schema[modelName]
    const fields = model == null ? undefined : model['fields']
    if (!fields) {
      throw TypeError('mock-rel schema integrity error. Every model should have "fields" key')
    }
  })
}
// return boolean true if passes
// Runs the model's optional `validation` hook found in the action's schema.
// Any missing link in action.payload.schema[modelName].validation falls
// through to `true` (i.e. no validation configured means "valid").
export const checkValidation = (state, action) => {
  const payload = action == null ? undefined : action.payload
  const modelName = payload == null ? undefined : payload.modelName
  const schema = payload == null ? undefined : payload.schema
  const model = schema == null ? undefined : schema[modelName]
  const validation = model == null ? undefined : model.validation
  if (validation) {
    return validation({state, action})
  }
  return true
}
// Runs the model's optional `preAction` hook from the action's schema.
// When a hook exists its return value is passed through; otherwise the
// unchanged `{ state, action }` pair is returned (unlike checkValidation,
// which defaults to `true`).
export const checkPreAction = (state, action) => {
  const payload = action == null ? undefined : action.payload
  const modelName = payload == null ? undefined : payload.modelName
  const schema = payload == null ? undefined : payload.schema
  const model = schema == null ? undefined : schema[modelName]
  const preAction = model == null ? undefined : model.preAction
  if (preAction) {
    return preAction({state, action})
  }
  return { state, action }
}
| 31.219858 | 114 | 0.534075 |
af2ad448e88ea996a981a72136e655cc9278a886 | 8,389 | rb | Ruby | spec/sidekiq_ecs_scaler/configuration_spec.rb | shoma07/sidekiq-ecs-scaler | 12b6a5fa004e4cf96b7be4f0d0240bf6b9fbc4b7 | [
"MIT"
] | null | null | null | spec/sidekiq_ecs_scaler/configuration_spec.rb | shoma07/sidekiq-ecs-scaler | 12b6a5fa004e4cf96b7be4f0d0240bf6b9fbc4b7 | [
"MIT"
] | null | null | null | spec/sidekiq_ecs_scaler/configuration_spec.rb | shoma07/sidekiq-ecs-scaler | 12b6a5fa004e4cf96b7be4f0d0240bf6b9fbc4b7 | [
"MIT"
] | null | null | null | # frozen_string_literal: true
# Specs for SidekiqEcsScaler::Configuration: each setter validates its input
# (raising ArgumentError on bad types/values), min/max counts are kept
# mutually consistent, and derived values are exercised.
RSpec.describe SidekiqEcsScaler::Configuration do
  let(:configuration) { described_class.new }
  describe "#enabled" do
    subject { configuration.enabled }
    context "when default" do
      it { is_expected.to eq true }
    end
  end
  describe "#enabled=" do
    subject(:write) { configuration.enabled = enabled }
    context "when enabled is true" do
      let(:enabled) { true }
      it do
        expect { write }.not_to change(configuration, :enabled).from(true)
      end
    end
    context "when enabled is false" do
      let(:enabled) { false }
      it do
        expect { write }.to change(configuration, :enabled).to(false)
      end
    end
    context "when enabled is invalid" do
      let(:enabled) { "true" }
      it do
        expect { write }.to raise_error(ArgumentError)
      end
    end
  end
  describe "#logger" do
    subject { configuration.logger }
    context "when default" do
      it { is_expected.to eq Sidekiq.logger }
    end
  end
  describe "#logger=" do
    subject(:write) { configuration.logger = logger }
    context "when logger is valid" do
      let(:logger) { Logger.new(StringIO.new) }
      it do
        expect { write }.to change(configuration, :logger).to(logger)
      end
    end
    context "when logger is invalid" do
      let(:logger) { StringIO.new }
      it do
        expect { write }.to raise_error(ArgumentError)
      end
    end
  end
  describe "#queue_name" do
    subject { configuration.queue_name }
    context "when default" do
      it { is_expected.to eq "default" }
    end
  end
  describe "#queue_name=" do
    subject(:write) { configuration.queue_name = queue_name }
    context "when argument is valid" do
      let(:queue_name) { "highest" }
      it do
        expect { write }.to change(configuration, :queue_name).to("highest")
      end
    end
    context "when argument is invalid" do
      let(:queue_name) { nil }
      it do
        expect { write }.to raise_error(ArgumentError)
      end
    end
  end
  describe "#min_count" do
    subject { configuration.min_count }
    context "when default" do
      it { is_expected.to eq 1 }
    end
  end
  # min_count= / max_count= keep the pair consistent: pushing one bound past
  # the other drags the other bound along with it.
  describe "#min_count=" do
    subject(:write) { configuration.min_count = min_count }
    context "when argument is valid and less than max_count" do
      let(:min_count) { 2 }
      before do
        configuration.max_count = 3
      end
      it do
        expect { write }.to change(configuration, :min_count).to(2)
      end
      it do
        expect { write }.not_to change(configuration, :max_count)
      end
    end
    context "when argument is valid and grater than max_count" do
      let(:min_count) { 2 }
      before do
        configuration.max_count = 1
      end
      it do
        expect { write }.to change(configuration, :min_count).to(2)
      end
      it do
        expect { write }.to change(configuration, :max_count).to(2)
      end
    end
    context "when argument is invalid" do
      let(:min_count) { 0 }
      it do
        expect { write }.to raise_error(ArgumentError)
      end
    end
  end
  describe "#max_count" do
    subject { configuration.max_count }
    context "when default" do
      it { is_expected.to eq 1 }
    end
  end
  describe "#max_count=" do
    subject(:write) { configuration.max_count = max_count }
    context "when argument is valid and grater than min_count" do
      let(:max_count) { 2 }
      before do
        configuration.min_count = 1
      end
      it do
        expect { write }.to change(configuration, :max_count).to(2)
      end
      it do
        expect { write }.not_to change(configuration, :min_count)
      end
    end
    context "when argument is valid and less than min_count" do
      let(:max_count) { 2 }
      before do
        configuration.min_count = 3
      end
      it do
        expect { write }.to change(configuration, :max_count).to(2)
      end
      it do
        expect { write }.to change(configuration, :min_count).to(2)
      end
    end
    context "when argument is invalid" do
      let(:max_count) { 0 }
      it do
        expect { write }.to raise_error(ArgumentError)
      end
    end
  end
  describe "#step_count" do
    subject { configuration.step_count }
    context "when default" do
      it { is_expected.to eq 1 }
    end
  end
  describe "#step_count=" do
    subject(:write) { configuration.step_count = step_count }
    context "when argument is valid" do
      let(:step_count) { 2 }
      it do
        expect { write }.to change(configuration, :step_count).to(2)
      end
    end
    context "when argument is invalid" do
      let(:step_count) { 0 }
      it do
        expect { write }.to raise_error(ArgumentError)
      end
    end
  end
  describe "#max_latency" do
    subject { configuration.max_latency }
    context "when default" do
      it { is_expected.to eq 3600 }
    end
  end
  describe "#max_latency=" do
    subject(:write) { configuration.max_latency = max_latency }
    context "when argument is valid" do
      let(:max_latency) { 7200 }
      it do
        expect { write }.to change(configuration, :max_latency).to(7200)
      end
    end
    context "when argument is less than max count" do
      let(:max_latency) { 10 }
      before do
        configuration.max_count = 20
      end
      it do
        expect { write }.to raise_error(ArgumentError)
      end
    end
  end
  describe "#ecs_client" do
    subject { configuration.ecs_client }
    context "when default" do
      it { is_expected.to be_instance_of(::Aws::ECS::Client) }
    end
  end
  describe "#ecs_client=" do
    subject(:write) { configuration.ecs_client = ecs_client }
    context "when argument is kind of Aws::ECS::Client" do
      let(:ecs_client) do
        Class.new(::Aws::ECS::Client) do
          def initialize
            super(stub_responses: true)
          end
        end.new
      end
      it do
        write
        expect(configuration.ecs_client).to be_kind_of(::Aws::ECS::Client)
      end
    end
    context "when argument is not kind of Aws::ECS::Client" do
      let(:ecs_client) do
        Class.new.new
      end
      it do
        expect { write }.to raise_error(ArgumentError)
      end
    end
  end
  # Derived value: seconds of queue latency represented by one scaling step.
  describe "#latency_per_step_count" do
    subject { configuration.latency_per_step_count }
    context "when step_count is 1" do
      before do
        configuration.min_count = 2
        configuration.max_count = 20
        configuration.max_latency = 3600
      end
      it { is_expected.to eq 189 }
    end
    context "when step_count is 2" do
      before do
        configuration.min_count = 2
        configuration.max_count = 20
        configuration.step_count = 2
        configuration.max_latency = 3600
      end
      it { is_expected.to eq 360 }
    end
  end
  # task_meta! raises SidekiqEcsScaler::Error when ECS task metadata is absent.
  describe "#task_meta!" do
    subject(:call) { configuration.task_meta! }
    context "when task_meta is present" do
      before do
        allow(configuration).to receive(:task_meta).and_return(
          SidekiqEcsScaler::TaskMetaV4.new({ "Cluster" => "local", "TaskARN" => "ARN" })
        )
      end
      it { is_expected.to have_attributes(cluster: "local", task_arn: "ARN") }
    end
    context "when task_meta is null" do
      it do
        expect { call }.to raise_error(SidekiqEcsScaler::Error)
      end
    end
  end
  describe "#sidekiq_options" do
    subject { configuration.sidekiq_options }
    context "when default" do
      it { is_expected.to eq({ "retry" => true, "queue" => "default" }) }
    end
  end
  describe "#sidekiq_options=" do
    subject(:write) { configuration.sidekiq_options = sidekiq_options }
    context "when argument is invalid" do
      let(:sidekiq_options) { nil }
      it do
        expect { write }.to raise_error(ArgumentError)
      end
    end
    context "when argument is valid" do
      let(:sidekiq_options) { { "queue" => "scheduler" } }
      around do |example|
        original_options = SidekiqEcsScaler::Worker.sidekiq_options
        example.run
        SidekiqEcsScaler::Worker.sidekiq_options(original_options)
      end
      it do
        expect { write }.to(
          change(SidekiqEcsScaler::Worker, :sidekiq_options).to({ "retry" => true, "queue" => "scheduler" })
        )
      end
    end
  end
end
| 21.960733 | 108 | 0.611396 |
a4bc0e34e66ae500890d6f87fafbdcb2b82d69cd | 9,517 | swift | Swift | YourHealth/Specialist/Settings.swift | 9carlo6/YourHealth | f1a0ec2a188142b2ae91993e88b9f6ab2b151409 | [
"Apache-2.0"
] | null | null | null | YourHealth/Specialist/Settings.swift | 9carlo6/YourHealth | f1a0ec2a188142b2ae91993e88b9f6ab2b151409 | [
"Apache-2.0"
] | 7 | 2020-12-27T12:06:36.000Z | 2021-01-10T11:40:21.000Z | YourHealth/Specialist/Settings.swift | 9carlo6/YourHealth | f1a0ec2a188142b2ae91993e88b9f6ab2b151409 | [
"Apache-2.0"
] | null | null | null | //
// Settings.swift
// YourHealth
//
// Created by conteangelo on 06/01/2021.
//
import SwiftUI
import FirebaseCore
import Firebase
import FirebaseFirestore
import FirebaseFirestoreSwift
import SwiftUI
// Local asset file name. NOTE(review): not referenced anywhere in this file's
// visible code — confirm it is used elsewhere before relying on it.
let FILE_NAME = "images-2.jpeg"
/// Specialist settings screen: shows the profile header loaded from
/// Firestore plus Profile / Notifications / Help rows and a Logout button.
struct Settings: View {
    @Environment(\.presentationMode) var presentationMode
    // Binding the parent dashboard watches so it can refresh its content;
    // reset to false on logout.
    @Binding var with_center: Bool
    // Shared pale-pink background used for rows and the form itself.
    @State var navColor: Color = Color.init(red: 255/255, green: 240/255, blue: 240/255)
    // Controls presentation of the logout confirmation alert.
    @State private var showAlert = false
    // Profile fields populated asynchronously by infoSpecialist().
    @State private var name = ""
    @State private var profession = ""
    @State private var city = ""
    @State private var address = ""
    var body: some View {
//        NavigationView{
            Form{
                // Header section: avatar, name, profession and location.
                Section{
                    VStack{
                        Image("dottoressa1")
                            .resizable()
                            .frame(width: /*@START_MENU_TOKEN@*/100/*@END_MENU_TOKEN@*/, height: 100, alignment: .center)
                            //.cornerRadius(50)
                        Text(name)
                            .fontWeight(.semibold)
                            .font(.title)
                            .frame(minWidth: 0, maxWidth: .infinity, alignment: .center)
                        Text(profession)
                            .font(.system(.body, design: .rounded))
                            .foregroundColor(.black)
                            .aspectRatio(contentMode: .fit)
                            .frame(minWidth: 0, maxWidth: .infinity, alignment: .center)
                        Text(city + ", " + address)
                            .font(.system(.body, design: .rounded))
                            .foregroundColor(.black)
                            .aspectRatio(contentMode: .fit)
                            .frame(minWidth: 0, maxWidth: .infinity, alignment: .center)
                    }
                }.frame(minWidth: 0, maxWidth: .infinity, alignment: .center)
                .listRowBackground(navColor)
                // Menu section: Profile / Notifications / Help rows (display only
                // in the visible code) plus the Logout action.
                Section{
                    HStack{
                        ZStack{
                            Image(systemName: "person.fill")
                                .resizable()
                                .frame(width: 25, height: 25, alignment: .leading)
                                .frame(width:17, height: 20)
                        }
                        .frame(width: 40, height: 40, alignment: /*@START_MENU_TOKEN@*/.center/*@END_MENU_TOKEN@*/)
                        .background(Color("Darkpink"))
                        .cornerRadius(15)
                        Text("Profile")
                    }
                    .frame(maxWidth: .infinity, maxHeight: .infinity, alignment: .leading)
                    .listRowInsets(EdgeInsets())
                    .background(navColor.edgesIgnoringSafeArea(.all))
                    HStack{
                        ZStack{
                            Image(systemName: "mail.fill")
                                .resizable()
                                .frame(width: 20, height: 20, alignment: .leading)
                                .frame(width:17, height: 20)
                        }
                        .frame(width: 40, height: 40, alignment: /*@START_MENU_TOKEN@*/.center/*@END_MENU_TOKEN@*/)
                        .background(Color("Darkpink"))
                        .cornerRadius(15)
                        Text("Notifications")
                    }
                    .frame(maxWidth: .infinity, maxHeight: .infinity, alignment: .leading)
                    .listRowInsets(EdgeInsets())
                    .background(navColor.edgesIgnoringSafeArea(.all))
                    HStack{
                        ZStack{
                            Image("help")
                                .resizable()
                                .frame(width: 25, height: 25, alignment: .leading)
                                .frame(width:17, height: 20)
                                //.padding(.leading, 4)
                        }
                        .frame(width: 40, height: 40, alignment: /*@START_MENU_TOKEN@*/.center/*@END_MENU_TOKEN@*/)
                        .background(Color("Darkpink"))
                        .cornerRadius(15)
                        Text("Help")
                    }
                    .frame(maxWidth: .infinity, maxHeight: .infinity, alignment: .leading)
                    .listRowInsets(EdgeInsets())
                    .background(navColor.edgesIgnoringSafeArea(.all))
                    HStack{
                        ZStack{
                            Image("logout")
                                .resizable()
                                .frame(width: 25, height: 25, alignment: .leading)
                                .frame(width:17, height: 20)
                                //.padding(.leading, 4)
                        }
                        .frame(width: 40, height: 40, alignment: /*@START_MENU_TOKEN@*/.center/*@END_MENU_TOKEN@*/)
                        .background(Color("Darkpink"))
                        .cornerRadius(15)
                        Button(action: {
                            self.showAlert.toggle()
                        }){
                            Text("Logout")
                                .foregroundColor(.black)
                                .padding(.vertical)
                        }.alert(isPresented: $showAlert){
                            Alert(title: Text("Logout")
                                    .font(.title)
                                  , message: Text("Do you want to exit for YourHealth application?")
                                  ,primaryButton: .default(Text("Yes"), action: {
                                    // Signs out of Firebase Auth, flips the persisted "status"
                                    // flag and notifies listeners so the app returns to login.
                                    try! Auth.auth().signOut()
                                    UserDefaults.standard.set(false, forKey: "status")
                                    NotificationCenter.default.post(name: NSNotification.Name("status"), object: nil)
                                    // On logout this flag must be reset to false
                                    self.with_center = false
                                  })
                                  , secondaryButton: .default(Text("No")))
                        }
                    }
                    .frame(maxWidth: .infinity, maxHeight: .infinity, alignment: .leading)
                    .listRowInsets(EdgeInsets())
                    .background(navColor.edgesIgnoringSafeArea(.all))
                }
                .listRowBackground(navColor)
                .frame(maxWidth: .infinity, alignment: .leading)
            }.onAppear(perform: {
                infoSpecialist()
            })
            .background(navColor.edgesIgnoringSafeArea(.all))
    }
    /// Loads the current specialist's profile document from the
    /// "Specialists" Firestore collection into the @State fields.
    /// NOTE(review): the `as! String` force-casts crash if a field is
    /// missing or not a string — consider guarded casts.
    private func infoSpecialist(){
        let db = Firestore.firestore()
        guard let userID = Auth.auth().currentUser?.uid else { return }
        let docRef = db.collection("Specialists").document(userID)
        docRef.getDocument { (document, error) in
            if let document = document, document.exists {
                let data = document.data()
                self.name = data?["Name and Surname"] as! String
                print(name)
                self.profession = data?["Profession"] as! String
                self.city = data?["City"] as! String
                self.address = data?["Address"] as! String
            } else {
                print("Document does not exist")
            }
        }
    }
}
/*struct Settings_Previews: PreviewProvider {
static var previews: some View {
Settings()
}
}*/
| 35.511194 | 123 | 0.360513 |
0bec90adbea4903a2339131d6f5a64bc4ec4733f | 5,003 | js | JavaScript | src/utils/modernizr-custom.js | linkit360/slypee-html | 837e14086684737b4e38b2fc415d5c53e2896a0d | [
"MIT"
] | null | null | null | src/utils/modernizr-custom.js | linkit360/slypee-html | 837e14086684737b4e38b2fc415d5c53e2896a0d | [
"MIT"
] | null | null | null | src/utils/modernizr-custom.js | linkit360/slypee-html | 837e14086684737b4e38b2fc415d5c53e2896a0d | [
"MIT"
] | null | null | null | /* eslint-disable */
/*! modernizr 3.5.0 (Custom Build) | MIT *
* https://modernizr.com/download/?-cssvhunit-cssvwunit-flexbox-flexwrap-setclasses !*/
!function(e,n,t){function r(e,n){return typeof e===n}function s(){var e,n,t,s,o,i,l;for(var a in C)if(C.hasOwnProperty(a)){if(e=[],n=C[a],n.name&&(e.push(n.name.toLowerCase()),n.options&&n.options.aliases&&n.options.aliases.length))for(t=0;t<n.options.aliases.length;t++)e.push(n.options.aliases[t].toLowerCase());for(s=r(n.fn,"function")?n.fn():n.fn,o=0;o<e.length;o++)i=e[o],l=i.split("."),1===l.length?Modernizr[l[0]]=s:(!Modernizr[l[0]]||Modernizr[l[0]]instanceof Boolean||(Modernizr[l[0]]=new Boolean(Modernizr[l[0]])),Modernizr[l[0]][l[1]]=s),w.push((s?"":"no-")+l.join("-"))}}function o(e){var n=x.className,t=Modernizr._config.classPrefix||"";if(_&&(n=n.baseVal),Modernizr._config.enableJSClass){var r=new RegExp("(^|\\s)"+t+"no-js(\\s|$)");n=n.replace(r,"$1"+t+"js$2")}Modernizr._config.enableClasses&&(n+=" "+t+e.join(" "+t),_?x.className.baseVal=n:x.className=n)}function i(n,t,r){var s;if("getComputedStyle"in e){s=getComputedStyle.call(e,n,t);var o=e.console;if(null!==s)r&&(s=s.getPropertyValue(r));else if(o){var i=o.error?"error":"log";o[i].call(o,"getComputedStyle returning null, its possible modernizr test results are inaccurate")}}else s=!t&&n.currentStyle&&n.currentStyle[r];return s}function l(){return"function"!=typeof n.createElement?n.createElement(arguments[0]):_?n.createElementNS.call(n,"http://www.w3.org/2000/svg",arguments[0]):n.createElement.apply(n,arguments)}function a(){var e=n.body;return e||(e=l(_?"svg":"body"),e.fake=!0),e}function u(e,t,r,s){var o,i,u,f,d="modernizr",p=l("div"),c=a();if(parseInt(r,10))for(;r--;)u=l("div"),u.id=s?s[r]:d+(r+1),p.appendChild(u);return 
o=l("style"),o.type="text/css",o.id="s"+d,(c.fake?c:p).appendChild(o),c.appendChild(p),o.styleSheet?o.styleSheet.cssText=e:o.appendChild(n.createTextNode(e)),p.id=d,c.fake&&(c.style.background="",c.style.overflow="hidden",f=x.style.overflow,x.style.overflow="hidden",x.appendChild(c)),i=t(p,e),c.fake?(c.parentNode.removeChild(c),x.style.overflow=f,x.offsetHeight):p.parentNode.removeChild(p),!!i}function f(e,n){return!!~(""+e).indexOf(n)}function d(e){return e.replace(/([a-z])-([a-z])/g,function(e,n,t){return n+t.toUpperCase()}).replace(/^-/,"")}function p(e,n){return function(){return e.apply(n,arguments)}}function c(e,n,t){var s;for(var o in e)if(e[o]in n)return t===!1?e[o]:(s=n[e[o]],r(s,"function")?p(s,t||n):s);return!1}function m(e){return e.replace(/([A-Z])/g,function(e,n){return"-"+n.toLowerCase()}).replace(/^ms-/,"-ms-")}function h(n,r){var s=n.length;if("CSS"in e&&"supports"in e.CSS){for(;s--;)if(e.CSS.supports(m(n[s]),r))return!0;return!1}if("CSSSupportsRule"in e){for(var o=[];s--;)o.push("("+m(n[s])+":"+r+")");return o=o.join(" or "),u("@supports ("+o+") { #modernizr { position: absolute; } }",function(e){return"absolute"==i(e,null,"position")})}return t}function v(e,n,s,o){function i(){u&&(delete N.style,delete N.modElem)}if(o=r(o,"undefined")?!1:o,!r(s,"undefined")){var a=h(e,s);if(!r(a,"undefined"))return a}for(var u,p,c,m,v,y=["modernizr","tspan","samp"];!N.style&&y.length;)u=!0,N.modElem=l(y.shift()),N.style=N.modElem.style;for(c=e.length,p=0;c>p;p++)if(m=e[p],v=N.style[m],f(m,"-")&&(m=d(m)),N.style[m]!==t){if(o||r(s,"undefined"))return i(),"pfx"==n?m:!0;try{N.style[m]=s}catch(g){}if(N.style[m]!=v)return i(),"pfx"==n?m:!0}return i(),!1}function y(e,n,t,s,o){var i=e.charAt(0).toUpperCase()+e.slice(1),l=(e+" "+P.join(i+" ")+i).split(" ");return r(n,"string")||r(n,"undefined")?v(l,n,s,o):(l=(e+" "+T.join(i+" ")+i).split(" "),c(l,n,t))}function g(e,n,r){return y(e,t,t,n,r)}var 
w=[],C=[],S={_version:"3.5.0",_config:{classPrefix:"",enableClasses:!0,enableJSClass:!0,usePrefixes:!0},_q:[],on:function(e,n){var t=this;setTimeout(function(){n(t[e])},0)},addTest:function(e,n,t){C.push({name:e,fn:n,options:t})},addAsyncTest:function(e){C.push({name:null,fn:e})}},Modernizr=function(){};Modernizr.prototype=S,Modernizr=new Modernizr;var x=n.documentElement,_="svg"===x.nodeName.toLowerCase(),b=S.testStyles=u;b("#modernizr { height: 50vh; }",function(n){var t=parseInt(e.innerHeight/2,10),r=parseInt(i(n,null,"height"),10);Modernizr.addTest("cssvhunit",r==t)}),b("#modernizr { width: 50vw; }",function(n){var t=parseInt(e.innerWidth/2,10),r=parseInt(i(n,null,"width"),10);Modernizr.addTest("cssvwunit",r==t)});var z="Moz O ms Webkit",P=S._config.usePrefixes?z.split(" "):[];S._cssomPrefixes=P;var T=S._config.usePrefixes?z.toLowerCase().split(" "):[];S._domPrefixes=T;var E={elem:l("modernizr")};Modernizr._q.push(function(){delete E.elem});var N={style:E.elem.style};Modernizr._q.unshift(function(){delete N.style}),S.testAllProps=y,S.testAllProps=g,Modernizr.addTest("flexbox",g("flexBasis","1px",!0)),Modernizr.addTest("flexwrap",g("flexWrap","wrap",!0)),s(),o(w),delete S.addTest,delete S.addAsyncTest;for(var j=0;j<Modernizr._q.length;j++)Modernizr._q[j]();e.Modernizr=Modernizr}(window,document); | 1,250.75 | 4,851 | 0.670997 |
6bad43d317c3ee99f82f535ed73c7217c9720bec | 1,892 | sql | SQL | integration-test/jpa2.1-hibernate-test/src/test/resources/db/migration/V1.0__schema.sql | DBCDK/jpa-unit | c59c9ba71d7aa6c58324cb288266881e2f0fe087 | [
"Apache-2.0"
] | 31 | 2017-08-31T11:36:02.000Z | 2022-03-18T08:01:48.000Z | integration-test/jpa2.1-hibernate-test/src/test/resources/db/migration/V1.0__schema.sql | DBCDK/jpa-unit | c59c9ba71d7aa6c58324cb288266881e2f0fe087 | [
"Apache-2.0"
] | 50 | 2016-11-22T10:17:07.000Z | 2019-12-02T11:58:59.000Z | integration-test/jpa2.1-hibernate-test/src/test/resources/db/migration/V1.0__schema.sql | DBCDK/jpa-unit | c59c9ba71d7aa6c58324cb288266881e2f0fe087 | [
"Apache-2.0"
] | 7 | 2018-01-15T17:46:31.000Z | 2021-04-09T07:02:04.000Z | create table hibernate_sequences (
sequence_name varchar(50),
next_val bigint
);
-- Banking-domain test schema (Flyway migration, H2-style identity columns).
-- ACCOUNT: one row per account, typed via the TYPE discriminator column,
-- owned by a DEPOSITOR; VERSION is the optimistic-locking counter.
create table ACCOUNT (
    TYPE varchar(25) not null,
    ID bigint generated by default as identity,
    VERSION bigint,
    CREDIT_LIMIT double,
    DEPOSITOR_ID bigint not null,
    primary key (ID)
 );
-- ACCOUNT_ENTRY: a single booking (amount/date/details) on an account.
create table ACCOUNT_ENTRY (
    ID bigint generated by default as identity,
    AMOUNT double not null,
    DATE date not null,
    DETAILS varchar(50) not null,
    REFERENCE varchar(50) not null,
    TYPE varchar(50) not null,
    ACCOUNT_ID bigint,
    primary key (ID)
 );
-- ADDRESS: postal address attached to a depositor.
create table ADDRESS (
    ID bigint generated by default as identity,
    CITY varchar(25) not null,
    COUNTRY varchar(50) not null,
    STREET varchar(50) not null,
    ZIP_CODE varchar(6) not null,
    DEPOSITOR_ID bigint,
    primary key (ID)
 );
-- CONTACT_DETAIL: typed contact value (e.g. email/phone) per depositor.
create table CONTACT_DETAIL (
    ID bigint generated by default as identity,
    TYPE varchar(50) not null,
    VALUE varchar(50) not null,
    DEPOSITOR_ID bigint,
    primary key (ID)
 );
-- CREDIT_CONDITION: free-standing versioned description text.
create table CREDIT_CONDITION (
    ID bigint generated by default as identity,
    DESCRIPTION varchar(1024) not null,
    VERSION bigint,
    primary key (ID)
 );
-- DEPOSITOR: the account owner; referenced by most tables above.
create table DEPOSITOR (
    ID bigint generated by default as identity,
    NAME varchar(255) not null,
    SURNAME varchar(255) not null,
    VERSION bigint,
    primary key (ID)
 );
-- Foreign keys wiring the child tables to their owners
-- (constraint names are Hibernate-generated).
alter table ACCOUNT
   add constraint FK_3rglsd6k3e87yqm10wpqoex0v
   foreign key (DEPOSITOR_ID)
   references DEPOSITOR;
alter table ACCOUNT_ENTRY
   add constraint FK_fkto0jgas0ortrjcdirbnotwa
   foreign key (ACCOUNT_ID)
   references ACCOUNT;
alter table ADDRESS
   add constraint FK_3mw83khhtr5rj10mxtclbh6jt
   foreign key (DEPOSITOR_ID)
   references DEPOSITOR;
alter table CONTACT_DETAIL
   add constraint FK_bsh5cmmqeumnpaww9d8379ifm
   foreign key (DEPOSITOR_ID)
   references DEPOSITOR;
d1357c3cdf2f6e5902ce3983ef29f507ad7f86c2 | 431 | swift | Swift | TastyTomato/Code/Extensions/Public/CAShapeLayer/CAShapeLayer (ConvenienceInits).swift | resmio/TastyTomato | bed8ffad5953be7acb10714859eeac347be48ce2 | [
"MIT"
] | 2 | 2019-01-23T05:48:31.000Z | 2019-01-23T05:49:19.000Z | TastyTomato/Code/Extensions/Public/CAShapeLayer/CAShapeLayer (ConvenienceInits).swift | resmio/TastyTomato | bed8ffad5953be7acb10714859eeac347be48ce2 | [
"MIT"
] | 43 | 2016-07-18T15:27:08.000Z | 2019-09-17T20:05:07.000Z | TastyTomato/Code/Extensions/Public/CAShapeLayer/CAShapeLayer (ConvenienceInits).swift | resmio/TastyTomato | bed8ffad5953be7acb10714859eeac347be48ce2 | [
"MIT"
] | null | null | null | //
// CAShapeLayer (ConvenienceInits).swift
// TastyTomato
//
// Created by Jan Nash on 7/28/16.
// Copyright © 2016 resmio. All rights reserved.
//
import UIKit
// MARK: // Public
public extension CAShapeLayer {
    
    /// Creates a shape layer whose path is taken from the given Bézier path.
    @objc convenience init(path p: UIBezierPath) {
        self.init()
        path = p.cgPath
    }
    
    /// Creates a shape layer whose path outlines the given rectangle.
    @objc convenience init(rect r: CGRect) {
        let rectanglePath = UIBezierPath(rect: r)
        self.init(path: rectanglePath)
    }
}
| 18.73913 | 50 | 0.62413 |
283a0ba595dd896c3f968c99cb5721328ec61ddc | 59 | rb | Ruby | lib/capistrano/locum/sidekiq.rb | DarkWater666/locum-best-practives | 52a94bedf053e75b87e553d741a1ed4bca42b0f3 | [
"MIT"
] | null | null | null | lib/capistrano/locum/sidekiq.rb | DarkWater666/locum-best-practives | 52a94bedf053e75b87e553d741a1ed4bca42b0f3 | [
"MIT"
] | null | null | null | lib/capistrano/locum/sidekiq.rb | DarkWater666/locum-best-practives | 52a94bedf053e75b87e553d741a1ed4bca42b0f3 | [
"MIT"
] | null | null | null | load File.expand_path('../../tasks/sidekiq.cap', __FILE__)
| 29.5 | 58 | 0.711864 |
18533238f574a0b2c537431acb05e6b235e90f5d | 594 | rb | Ruby | lib/kiq_bus/client.rb | aquinofb/kiqbus | a0b7129e7ae5870f9b2a074516cde86c598d6de3 | [
"MIT"
] | 2 | 2015-12-08T14:47:33.000Z | 2015-12-09T01:52:09.000Z | lib/kiq_bus/client.rb | aquinofb/kiqbus | a0b7129e7ae5870f9b2a074516cde86c598d6de3 | [
"MIT"
] | null | null | null | lib/kiq_bus/client.rb | aquinofb/kiqbus | a0b7129e7ae5870f9b2a074516cde86c598d6de3 | [
"MIT"
] | null | null | null | module KiqBus
class Client
cattr_reader :subscribers
@@subscribers = {}
def self.subscribe channel, class_name, action=:call
@@subscribers[channel] ||= []
@@subscribers[channel] << Subscriber.new(class_name, action.to_sym)
end
def self.broadcast channel, *args
Broadcast.run(@@subscribers[channel], *args)
end
def self.unsubscribe channel, model_class, action=:call
index = @@subscribers[channel].index { |subscriber| subscriber.equal?(model_class, action) }
index >= 0 && @@subscribers[channel].delete_at(index)
end
end
end
| 28.285714 | 98 | 0.675084 |
31fce239430d6f4962574c2d6b7cbb7667b4fcd1 | 95 | sql | SQL | sql/create_refund_transaction.sql | SoftwareVerde/flipstarter | 225ffb3e269b6a3dc188bb35cf918544ede51298 | [
"MIT"
] | null | null | null | sql/create_refund_transaction.sql | SoftwareVerde/flipstarter | 225ffb3e269b6a3dc188bb35cf918544ede51298 | [
"MIT"
] | null | null | null | sql/create_refund_transaction.sql | SoftwareVerde/flipstarter | 225ffb3e269b6a3dc188bb35cf918544ede51298 | [
"MIT"
] | null | null | null | INSERT INTO refund_transactions
(
token,
commitment_id
)
VALUES
(
:token,
:commitment_id
)
| 8.636364 | 31 | 0.747368 |
fc9e79fd818449abb95fe8041ee43d248ad99080 | 1,001 | css | CSS | style.css | JuniorBecari10/Cara-Ou-Coroa | cade6291e49852de6a39ba1110cb367e7728796e | [
"MIT"
] | null | null | null | style.css | JuniorBecari10/Cara-Ou-Coroa | cade6291e49852de6a39ba1110cb367e7728796e | [
"MIT"
] | null | null | null | style.css | JuniorBecari10/Cara-Ou-Coroa | cade6291e49852de6a39ba1110cb367e7728796e | [
"MIT"
] | null | null | null | @font-face {
font-family: "Pixel";
src: url("font.ttf");
}
/* Body-copy companion to the "Pixel" heading font declared above. */
@font-face {
  font-family: "Pixel Paragraph";
  src: url("font-p.ttf");
}
/*------------------------------------------*/
/* Global reset: everything centered, pixel font by default. */
* {
  margin: 0;
  padding: 0;
  box-sizing: border-box;
  font-family: "Pixel", sans-serif;
  font-weight: normal;
  text-align: center;
}
/* Running text inside articles uses the body-copy font instead. */
article p {
  font-family: "Pixel Paragraph";
}
h1 {
  margin-top: 10px;
}
/* Coin image: pixelated scaling preserves the retro look when enlarged. */
img {
  image-rendering: pixelated;
  display: block;
  width: 200px;
  margin: 30px auto;
}
p {
  margin: 20px;
}
/* Flip button with hover and pressed (inset-shadow) states. */
button {
  padding: 5px;
  border: 2px solid black;
  box-shadow: 2px 2px 4px rgba(0, 0, 0, 0.5);
  transition: background-color 0.3s;
}
button:hover {
  background-color: #cdcdcd
}
button:active {
  box-shadow: inset 2px 2px 4px rgba(0, 0, 0, 0.5);
  background-color: #bbb
}
/* Horizontal-rule framing around the result/article area. */
article {
  border-top: 1px solid black;
  border-bottom: 1px solid black;
  width: 90%;
  margin: 20px auto;
}
| 14.507246 | 53 | 0.548452 |
8e1fb2e49a44743deae0236451fd0a0bceb2ed7b | 10,535 | swift | Swift | Sources/ComponentKit/CustomView/CircleScroll/CircleScrollView.swift | CreatorWilliam/ProjectKit | 4a4d64ac3b24b766dc02bb9581dff3625fc6bb9b | [
"MIT"
] | null | null | null | Sources/ComponentKit/CustomView/CircleScroll/CircleScrollView.swift | CreatorWilliam/ProjectKit | 4a4d64ac3b24b766dc02bb9581dff3625fc6bb9b | [
"MIT"
] | null | null | null | Sources/ComponentKit/CustomView/CircleScroll/CircleScrollView.swift | CreatorWilliam/ProjectKit | 4a4d64ac3b24b766dc02bb9581dff3625fc6bb9b | [
"MIT"
] | null | null | null | //
// CircleScrollView.swift
// ComponentKit
//
// Created by William Lee on 20/12/17.
// Copyright © 2018 William Lee. All rights reserved.
//
import UIKit
import ImageKit
/// Callbacks for page changes and taps on a CircleScrollView carousel.
public protocol CircleScrollViewDelegate: AnyObject {
  /// Called after the carousel settles on the page at `index`.
  func circleScrollView(_ view: CircleScrollView, didScrollTo index: Int)
  /// Called when the user taps the currently visible page.
  func circleScrollView(_ view: CircleScrollView, didSelectAt index: Int)
}
// MARK: - Default
public extension CircleScrollViewDelegate {
  /// Default no-op so conformers may ignore scroll-position notifications.
  func circleScrollView(_ view: CircleScrollView, didScrollTo index: Int) { }
}
/// Infinite image carousel built from three reusable panes
/// (previous / current / next) inside a paging scroll view.
public class CircleScrollView: UIView {
  
  /// Scroll direction of the carousel
  public enum Direction {
    /// Horizontal paging
    case horizontal
    /// Vertical paging
    case vertical
  }
  
  public weak var delegate: CircleScrollViewDelegate?
  /// Page indicator shown at the bottom
  public let pageControl = UIPageControl()
  /// Placeholder (local image asset name) shown in all three panes until real content loads
  public var placeholder: String? {
    didSet {
      if let name = placeholder {
        previousView.image = UIImage(named: name)
        currentView.image = UIImage(named: name)
        nextView.image = UIImage(named: name)
      }
    }
  }
  
  /// Current scroll direction
  private var direction: Direction = .horizontal
  /// Container hosting the three reusable panes
  private let scrollView: UIScrollView = UIScrollView()
  /// Pane showing the previous image
  private var previousView = UIImageView()
  /// Pane showing the current image
  private var currentView = UIImageView()
  /// Pane showing the next image
  private var nextView = UIImageView()
  // Auto-scroll timer
  private var timer: Timer?
  /// Index of the current image
  private var currentIndex: Int = 0
  /// Index of the previous image (wraps around to the end)
  private var previousIndex: Int {
    var index = currentIndex - 1
    if index < 0 { index = images.count - 1 }
    return index
  }
  /// Index of the next image (wraps around to the start)
  private var nextIndex: Int {
    var index = currentIndex + 1
    if index > images.count - 1 { index = 0 }
    return index
  }
  /// Whether the carousel advances automatically
  private var isAutoScrollable: Bool = false
  /// Data source — presumably image names or URLs; see update(view:with:) — TODO confirm
  private var images: [Any] = []
  
  public init(frame: CGRect = .zero,
              isAutoScrollable: Bool = false) {
    super.init(frame: frame)
    self.isAutoScrollable = isAutoScrollable
    setupUI()
  }
  
  required public init?(coder aDecoder: NSCoder) {
    fatalError("init(coder:) has not been implemented")
  }
  
  // Lays out the three panes side by side (or stacked) and recenters the
  // content offset on the middle pane.
  public override func layoutSubviews() {
    super.layoutSubviews()
    scrollView.frame = bounds
    let width: CGFloat = scrollView.bounds.width
    let height: CGFloat = scrollView.bounds.height
    switch direction {
    case .horizontal:
      previousView.frame = CGRect(x: 0, y: 0, width: width, height: height)
      currentView.frame = CGRect(x: width, y: 0, width: width, height: height)
      nextView.frame = CGRect(x: width * 2, y: 0, width: width, height: height)
      scrollView.contentSize = CGSize(width: width * 3, height: height)
      scrollView.contentOffset = CGPoint(x: width, y: 0)
    case .vertical:
      previousView.frame = CGRect(x: 0, y: 0, width: width, height: height)
      currentView.frame = CGRect(x: 0, y: height, width: width, height: height)
      nextView.frame = CGRect(x: 0, y: height * 2, width: width, height: height)
      scrollView.contentSize = CGSize(width: width, height: height * 3)
      scrollView.contentOffset = CGPoint(x: 0, y: height)
    }
  }
}
// MARK: - Public
public extension CircleScrollView {
  
  /// Supplies the carousel images; looping starts automatically when enabled.
  ///
  /// - Parameter items: the images to cycle through
  /// - Parameter isForce: pass true to replace an already-set image list
  func update(with items: [Any], isForce: Bool = false) {
    // Data is only initialized once, unless this is a forced update
    if images.count > 0 && isForce == false { return }
    images = items
    currentIndex = 0
    pageControl.numberOfPages = images.count
    // Guard against out-of-bounds access on an empty list
    guard images.count > 0 else { return }
    scrollView.isScrollEnabled = (images.count > 1)
    update(view: previousView, with: images[previousIndex])
    update(view: currentView, with: images[currentIndex])
    update(view: nextView, with: images[nextIndex])
    // Start or stop looping depending on configuration (2 s initial delay)
    if isAutoScrollable {
      DispatchQueue.main.asyncAfter(deadline: .now() + 2, execute: {
        self.startLoop()
      })
    } else {
      self.stopLoop()
    }
  }
}
// MARK: - UIScrollViewDelegate
extension CircleScrollView: UIScrollViewDelegate {
  public func scrollViewDidEndDecelerating(_ scrollView: UIScrollView) {
    // After a user swipe settles, recenter the panes and reload their content.
    updateContent()
  }
}
// MARK: - Zoomable
extension CircleScrollView: Zoomable {
  /// The view that gets stretched during the pull-down zoom effect.
  public var zoomView: UIView { return currentView }
  /// The container whose coordinate space anchors the zoom.
  public var zoomViewContainer: UIView { return scrollView }
  public func zoom(with offset: CGFloat) {
    // Only horizontal scrolling supports the vertical stretch-on-pull effect
    guard direction == .horizontal else { return }
    let size = scrollView.bounds.size
    guard size.height > 0 else { return }
    // Anchor at bottom-center so the pane grows upward/outward when stretched
    zoomView.layer.anchorPoint = CGPoint(x: 0.5, y: 1)
    zoomView.center = CGPoint(x: scrollView.contentSize.width / 2, y: scrollView.contentSize.height)
    // Grow only when pulled down (negative offset), keeping the aspect ratio
    if (offset > 0) { return }
    let heightOffset = abs(offset)
    let widhtOffset = abs(offset) * (size.width / size.height)
    zoomView.bounds.size.height = heightOffset + size.height
    zoomView.bounds.size.width = widhtOffset + size.width
  }
}
// MARK: - Setup
private extension CircleScrollView {
    /// One-time view hierarchy setup: scroll view, the three reusable page
    /// views, the page control and the tap recogniser.
    func setupUI() {
        // ScrollView: paging, no indicators, disabled until items arrive
        scrollView.clipsToBounds = false
        scrollView.showsVerticalScrollIndicator = false
        scrollView.showsHorizontalScrollIndicator = false
        scrollView.delegate = self
        scrollView.bounces = true
        scrollView.isPagingEnabled = true
        scrollView.backgroundColor = .clear
        scrollView.isScrollEnabled = false
        addSubview(scrollView)
        // Three page views are reused/cycled instead of one view per item
        previousView.contentMode = .scaleAspectFill
        previousView.clipsToBounds = true
        scrollView.addSubview(previousView)
        currentView.contentMode = .scaleAspectFill
        currentView.clipsToBounds = true
        scrollView.addSubview(currentView)
        nextView.contentMode = .scaleAspectFill
        nextView.clipsToBounds = true
        scrollView.addSubview(nextView)
        // Page indicator pinned to the bottom edge
        pageControl.isUserInteractionEnabled = false
        pageControl.hidesForSinglePage = true
        addSubview(pageControl)
        pageControl.layout.add { (make) in
            make.leading().trailing().bottom().equal(self)
        }
        // Single tap reports a selection of the current item
        let tapGR = UITapGestureRecognizer()
        tapGR.numberOfTapsRequired = 1
        tapGR.numberOfTouchesRequired = 1
        tapGR.addTarget(self, action: #selector(clickContent(_:)))
        addGestureRecognizer(tapGR)
    }
}
// MARK: - Action
private extension CircleScrollView {
    /// Tap handler: forwards the currently shown index to the delegate.
    @objc func clickContent(_ sender: Any) {
        guard images.count > 0 else { return }
        delegate?.circleScrollView(self, didSelectAt: currentIndex)
    }
    /// Starts the auto-scroll timer.
    func startLoop() {
        // Only loop when there is more than one item; otherwise make sure
        // any running timer is cancelled
        guard images.count > 1 else {
            stopLoop()
            return
        }
        // Already running — don't restart
        if let _ = timer { return }
        // Scroll to the next page every 5 seconds
        timer = Timer(timeInterval: 5, target: self, selector: #selector(loop), userInfo: nil, repeats: true)
        guard let temp = timer else { return }
        RunLoop.main.add(temp, forMode: RunLoop.Mode.default)
        // Fire the first tick manually after the initial interval
        DispatchQueue.main.asyncAfter(deadline: .now() + 5) {
            self.timer?.fire()
        }
    }
    /// Stops the auto-scroll timer.
    func stopLoop() {
        timer?.invalidate()
        timer = nil
    }
    @objc func loop(_ timer: Timer) {
        scrollToNext()
    }
    /// Animates back to content offset zero, which reveals `previousView`;
    /// `updateContent` then re-centres the pages.
    func scrollToPrevious() {
        var offset: CGPoint = .zero
        switch direction {
        case .horizontal: offset.x = 0
        case .vertical: offset.y = 0
        }
        // Block user interaction while the programmatic animation runs
        scrollView.isUserInteractionEnabled = false
        UIView.animate(withDuration: 0.5, animations: {
            self.scrollView.contentOffset = offset
        }, completion: { (_) in
            self.scrollView.isUserInteractionEnabled = true
            self.updateContent()
        })
    }
    /// Animates to the third page slot (offset = 2 * page size), which
    /// reveals `nextView`; `updateContent` then re-centres the pages.
    func scrollToNext() {
        var offset: CGPoint = .zero
        switch direction {
        case .horizontal: offset.x = scrollView.bounds.width * 2
        case .vertical: offset.y = scrollView.bounds.height * 2
        }
        scrollView.isUserInteractionEnabled = false
        UIView.animate(withDuration: 0.5, animations: {
            self.scrollView.contentOffset = offset
        }, completion: { (_) in
            self.scrollView.isUserInteractionEnabled = true
            self.updateContent()
        })
    }
}
// MARK: - Utility
private extension CircleScrollView {
    /// Re-centres the three-page carousel after a scroll: detects whether
    /// the user moved towards the previous or next page, updates the
    /// (wrapping) index, swaps the reusable views back into place and
    /// reloads the new neighbours.
    func updateContent() {
        defer {
            // Always sync the page indicator and notify the delegate,
            // even when no page change happened
            pageControl.currentPage = currentIndex
            delegate?.circleScrollView(self, didScrollTo: currentIndex)
        }
        var offset: CGPoint = .zero
        var isPrevious: Bool = false
        var isNext: Bool = false
        // The "resting" offset is one page size (the middle slot); being
        // below/above it means the user revealed the previous/next page
        switch direction {
        case .horizontal:
            let width: CGFloat = scrollView.bounds.width
            offset = CGPoint(x: width, y: 0)
            if scrollView.contentOffset.x < width { isPrevious = true }
            if scrollView.contentOffset.x > width { isNext = true }
        case .vertical:
            let height: CGFloat = scrollView.bounds.height
            offset = CGPoint(x: 0, y: height)
            if scrollView.contentOffset.y < height { isPrevious = true }
            if scrollView.contentOffset.y > height { isNext = true }
        }
        if isPrevious == true {
            // Update the index (wrap to the last item below zero)
            currentIndex -= 1
            if currentIndex < 0 { currentIndex = images.count - 1 }
            // Swap the view roles and their frames
            (previousView, currentView) = (currentView, previousView)
            (previousView.frame, currentView.frame) = (currentView.frame, previousView.frame)
        } else if isNext == true {
            // Update the index (wrap to the first item past the end)
            currentIndex += 1
            if currentIndex > images.count - 1 { currentIndex = 0 }
            // Swap the view roles and their frames
            (currentView, nextView) = (nextView, currentView)
            (currentView.frame, nextView.frame) = (nextView.frame, currentView.frame)
        } else {
            return
        }
        // Snap back to the middle slot and reload the new neighbours
        scrollView.contentOffset = offset
        guard previousIndex < images.count else { return }
        guard nextIndex < images.count else { return }
        update(view: previousView, with: images[previousIndex])
        update(view: nextView, with: images[nextIndex])
    }
    /// Loads `content` into `view` (expects a `UIImageView`); accepts a URL
    /// string, a `UIImage` or a `URL`. Anything else is ignored.
    func update(view: UIView, with content: Any) {
        guard let imageView = view as? UIImageView else { return }
        if let url = content as? String {
            imageView.setImage(with: url, placeholder: placeholder)
        } else if let image = content as? UIImage {
            imageView.image = image
        } else if let url = content as? URL {
            imageView.setImage(with: url, placeholder: placeholder)
        } else {
            // Nothing
        }
    }
}
| 24.614486 | 105 | 0.634551 |
62a54f34322f7250410e90a5721f004ecf592284 | 5,107 | rs | Rust | arraymath/src/vectormath.rs | jakobj/arraymath-rs | 6d4c54b29c054fb84ec3e6d528c3133f9c4b1c68 | [
"MIT"
] | null | null | null | arraymath/src/vectormath.rs | jakobj/arraymath-rs | 6d4c54b29c054fb84ec3e6d528c3133f9c4b1c68 | [
"MIT"
] | null | null | null | arraymath/src/vectormath.rs | jakobj/arraymath-rs | 6d4c54b29c054fb84ec3e6d528c3133f9c4b1c68 | [
"MIT"
] | null | null | null | macro_rules! binary_op_vector_prototype {
($function_name: ident, $input_inner: ty, $output: ty) => {
fn $function_name(&self, other: &[$input_inner]) -> $output;
};
}
// Declares a trait-method prototype for an in-place element-wise binary
// operation: `fn name(&mut self, other: &[Inner])`.
// NOTE: `$output` is accepted only for call-site symmetry with the other
// prototype macros — it is unused (the generated method returns `()`).
macro_rules! binary_op_vector_inplace_prototype {
    ($function_name: ident, $input_inner: ty, $output: ty) => {
        fn $function_name(&mut self, other: &[$input_inner]);
    };
}
// Declares a trait-method prototype for a buffered element-wise binary
// operation that writes its result into a caller-provided buffer:
// `fn name(&self, other: &[Input], into: &mut Into)`.
macro_rules! binary_op_vector_into_prototype {
    ($function_name: ident, $input: ty, $into: ty) => {
        fn $function_name(&self, other: &[$input], into: &mut $into);
    };
}
// Generates a by-value element-wise binary operation returning a new array:
// `result[i] = $op(self[i], other[i])` for every index of `self`.
// Panics if `other` is shorter than `self` (slice indexing).
macro_rules! binary_op_vector {
    ($function_name: ident, $op: expr, $input_inner: ty, $output: ty) => {
        #[inline(always)]
        fn $function_name(&self, other: &[$input_inner]) -> $output {
            // Seed the result with a copy of `self` (the element type is
            // `Copy`). The previous `[self[0]; N]` placeholder indexed the
            // array even for `N == 0` — the repeat operand of an array
            // expression is evaluated regardless of the count — so calling
            // this on a zero-length array panicked.
            let mut res: $output = *self;
            for i in 0..res.len() {
                res[i] = $op(self[i], other[i]);
            }
            res
        }
    };
}
// Generates an in-place element-wise binary operation: `$op` receives
// `&mut self[i]` and `other[i]` and mutates the left operand.
// Panics if `other` is shorter than `self` (slice indexing).
// NOTE: `$output` is unused (the generated method returns `()`); it exists
// for call-site symmetry with `binary_op_vector`.
macro_rules! binary_op_vector_inplace {
    ($function_name: ident, $op: expr, $input_inner: ty, $output: ty) => {
        #[inline(always)]
        fn $function_name(&mut self, other: &[$input_inner]) {
            for i in 0..self.len() {
                $op(&mut self[i], other[i]);
            }
        }
    };
}
// Generates a buffered element-wise binary operation that writes
// `$op(self[i], other[i])` into the caller-provided `into` buffer.
// Panics if `other` or `into` is shorter than `self` (indexing).
macro_rules! binary_op_vector_into {
    ($function_name: ident, $op: expr, $input: ty, $into: ty) => {
        #[inline(always)]
        fn $function_name(&self, other: &[$input], into: &mut $into) {
            for i in 0..self.len() {
                into[i] = $op(self[i], other[i]);
            }
        }
    };
}
/// Element-wise arithmetic between a fixed-size array and a slice of the
/// same element type. Each operation comes in three flavours: by value
/// (`addv`/`subv`), in place (`*_assign`) and into a caller-provided
/// buffer (`*_into`).
pub trait VectorMath {
    // Element type of the right-hand slice
    type InputInner;
    // Result type of the by-value operations
    type Output;
    // Addition
    binary_op_vector_prototype!(addv, Self::InputInner, Self::Output);
    binary_op_vector_inplace_prototype!(addv_assign, Self::InputInner, Self::Output);
    binary_op_vector_into_prototype!(addv_into, Self::InputInner, Self::Output);
    // Subtraction
    binary_op_vector_prototype!(subv, Self::InputInner, Self::Output);
    binary_op_vector_inplace_prototype!(subv_assign, Self::InputInner, Self::Output);
    binary_op_vector_into_prototype!(subv_into, Self::InputInner, Self::Output);
}
// Blanket implementation for all fixed-size arrays whose element type is
// `Copy` and supports addition/subtraction both by value and in place.
impl<T, const N: usize> VectorMath for [T; N]
where
    T: Copy
        + std::ops::Add<Output = T>
        + std::ops::AddAssign
        + std::ops::Sub<Output = T>
        + std::ops::SubAssign,
{
    type InputInner = T;
    type Output = [T; N];
    // Addition variants
    binary_op_vector!(
        addv,
        |lhs: T, rhs: T| lhs + rhs,
        Self::InputInner,
        Self::Output
    );
    binary_op_vector_inplace!(
        addv_assign,
        |lhs: &mut T, rhs: T| lhs.add_assign(rhs),
        Self::InputInner,
        Self::Output
    );
    binary_op_vector_into!(
        addv_into,
        |lhs: T, rhs: T| lhs + rhs,
        Self::InputInner,
        Self::Output
    );
    // Subtraction variants
    binary_op_vector!(
        subv,
        |lhs: T, rhs: T| lhs - rhs,
        Self::InputInner,
        Self::Output
    );
    binary_op_vector_inplace!(
        subv_assign,
        |lhs: &mut T, rhs: T| lhs.sub_assign(rhs),
        Self::InputInner,
        Self::Output
    );
    binary_op_vector_into!(
        subv_into,
        |lhs: T, rhs: T| lhs - rhs,
        Self::InputInner,
        Self::Output
    );
}
// Unit tests covering each of the six generated operations on `[f64; 3]`.
// Floating-point results are compared with `assert_approx_eq` to tolerate
// rounding in the expected values.
#[cfg(test)]
mod tests {
    use assert_approx_eq::assert_approx_eq;
    use super::*;
    #[test]
    fn test_addv() {
        let a = [1.2, 1.3, 1.4];
        let b = [1.3, 1.5, 2.4];
        let c = a.addv(&b);
        let c_expected: [f64; 3] = [2.5, 2.8, 3.8];
        for i in 0..c.len() {
            assert_approx_eq!(c[i], c_expected[i]);
        }
    }
    #[test]
    fn test_addv_assign() {
        let mut a = [1.2, 1.3, 1.4];
        let b = [1.3, 1.5, 2.4];
        a.addv_assign(&b);
        let a_expected: [f64; 3] = [2.5, 2.8, 3.8];
        for i in 0..a.len() {
            assert_approx_eq!(a[i], a_expected[i]);
        }
    }
    #[test]
    fn test_addv_into() {
        let a = [1.2, 1.3, 1.4];
        let b = [1.3, 1.5, 2.4];
        // Pre-filled buffer verifies every slot is overwritten
        let mut c = [99.0, 99.0, 99.0];
        a.addv_into(&b, &mut c);
        let c_expected: [f64; 3] = [2.5, 2.8, 3.8];
        for i in 0..c.len() {
            assert_approx_eq!(c[i], c_expected[i]);
        }
    }
    #[test]
    fn test_subv() {
        let a = [1.2, 1.3, 1.4];
        let b = [1.3, 1.5, 2.4];
        let c = a.subv(&b);
        let c_expected: [f64; 3] = [-0.1, -0.2, -1.0];
        for i in 0..c.len() {
            assert_approx_eq!(c[i], c_expected[i]);
        }
    }
    #[test]
    fn test_subv_assign() {
        let mut a = [1.2, 1.3, 1.4];
        let b = [1.3, 1.5, 2.4];
        a.subv_assign(&b);
        let a_expected: [f64; 3] = [-0.1, -0.2, -1.0];
        for i in 0..a.len() {
            assert_approx_eq!(a[i], a_expected[i]);
        }
    }
    #[test]
    fn test_subv_into() {
        let a = [1.2, 1.3, 1.4];
        let b = [1.3, 1.5, 2.4];
        let mut c = [99.0, 99.0, 99.0];
        a.subv_into(&b, &mut c);
        let c_expected: [f64; 3] = [-0.1, -0.2, -1.0];
        for i in 0..c.len() {
            assert_approx_eq!(c[i], c_expected[i]);
        }
    }
}
| 27.021164 | 85 | 0.508126 |
92308cd4f466bb21ede7bbdd76440154d821633b | 187 | sql | SQL | Portal/WebSystem/WCMS.Framework.SqlDabase/dbo/Stored Procedures/WebPartConfig_Del.sql | dsalunga/mPortal | 3c727231e576c4ab0c3ace21cc412a2cf344974a | [
"MIT"
] | 1 | 2018-05-08T21:06:38.000Z | 2018-05-08T21:06:38.000Z | Portal/WebSystem/WCMS.Framework.SqlDabase/dbo/Stored Procedures/WebPartConfig_Del.sql | dsalunga/mPortal | 3c727231e576c4ab0c3ace21cc412a2cf344974a | [
"MIT"
] | null | null | null | Portal/WebSystem/WCMS.Framework.SqlDabase/dbo/Stored Procedures/WebPartConfig_Del.sql | dsalunga/mPortal | 3c727231e576c4ab0c3ace21cc412a2cf344974a | [
"MIT"
] | 3 | 2017-12-19T17:51:25.000Z | 2022-02-02T03:45:43.000Z | CREATE PROCEDURE [dbo].[WebPartConfig_Del]
(
@PartConfigId int
)
AS
SET NOCOUNT ON
IF(@PartConfigId > 0)
DELETE FROM WebPartConfig
WHERE PartConfigId=@PartConfigId
RETURN | 15.583333 | 43 | 0.73262 |
0ffb7752c86edb0e3f5b3121446aab1c1999a2cf | 9,030 | swift | Swift | Examples/Litecoin/CryptoApiLib_CoreLitecoin/CryptoApiLib_CoreLitecoin/ViewController.swift | cryptoapi-project/cryptoapi-swift | 0cf1aee72e01259363bf963f82aee1c81fb093e2 | [
"MIT"
] | 1 | 2020-02-26T17:12:19.000Z | 2020-02-26T17:12:19.000Z | Examples/Litecoin/CryptoApiLib_CoreLitecoin/CryptoApiLib_CoreLitecoin/ViewController.swift | cryptoapi-project/cryptoapi-swift | 0cf1aee72e01259363bf963f82aee1c81fb093e2 | [
"MIT"
] | null | null | null | Examples/Litecoin/CryptoApiLib_CoreLitecoin/CryptoApiLib_CoreLitecoin/ViewController.swift | cryptoapi-project/cryptoapi-swift | 0cf1aee72e01259363bf963f82aee1c81fb093e2 | [
"MIT"
] | null | null | null | //
// ViewController.swift
// CryptoApiLib_CoreLitecoin
//
// Created by Alexander Eskin on 5/19/20.
// Copyright © 2020 PixelPlex. All rights reserved.
//
import CryptoApiLib
import UIKit
/// Placeholder values for the example; replace the token, addresses and
/// mnemonic with real data before running.
enum ExampleConstants {
    // CryptoAPI authorization token
    static let authToken = "Your token"
    // BIP-44 derivation paths (coin type 0' = mainnet, 1' = testnet)
    static let mainnetDerivationPath = "m/44'/0'/0'/0/0"
    static let testnetDerivationPath = "m/44'/1'/0'/0/0"
    // Transaction participants and amount (smallest units — presumably
    // litoshi; confirm against `LTCAmount`)
    static let changeAddress = "sender address"
    static let toAddress = "recipient address"
    static let sendAmount = 2100
    // Optional BIP-39 passphrase and the mnemonic words
    static let password: String? = nil
    static let mnemonicArray = ["array", "of", "your", "brainkey", "words"]
}
/// Demonstrates the CryptoAPI + CoreLitecoin flow: derive a key from a
/// mnemonic, fetch unspent outputs, build and sign a transaction, estimate
/// fees and broadcast the raw transaction.
class ViewController: UIViewController {
    /// Builds a CryptoAPI client configured for testnet with the example
    /// authorization token.
    func configCryptoApiLib() -> CryptoAPI {
        // Initialize setting for CryptoApi with your authorization token.
        let settings = Settings(authorizationToken: ExampleConstants.authToken) { configurator in
            configurator.networkType = NetworkType.testnet
        }
        let cryptoApi = CryptoAPI(settings: settings)
        return cryptoApi
    }

    override func viewDidLoad() {
        super.viewDidLoad()
        let cryptoApi = configCryptoApiLib()
        // Derive the signing key from the example mnemonic (BIP-39/BIP-32)
        let mnemonic: LTCMnemonic! = LTCMnemonic(
            words: ExampleConstants.mnemonicArray,
            password: ExampleConstants.password,
            wordListType: .english
        )
        let keychain = mnemonic.keychain.derivedKeychain(withPath: ExampleConstants.testnetDerivationPath)!
        let key = keychain.key!
        // MARK: Get outputs
        // Get address unspent outputs to calculate balance or build the transaction
        cryptoApi.ltc.addressesOutputs(addresses: [key.addressTestnet.string], status: "unspent", skip: 0, limit: nil) { result in
            switch result {
            case .success(let outputs):
                for output in outputs {
                    print("Output Value: \(output.value)")
                }
                // MARK: Build transaction
                let transactionHex = self.createTransaction(key: key, outputs: outputs)
                // MARK: Send transaction
                self.sendRawTransaction(transactionHex: transactionHex)
            case .failure(let error):
                print(error.localizedDescription)
            }
        }
        // MARK: Fee estimating
        // First of all, you need to get fee rate for kilobyte
        cryptoApi.ltc.feePerKb { result in
            switch result {
            case .success(let feeString):
                // The response is denominated in LTC (e.g. "0.00001");
                // convert to litoshis. 1 LTC == 100_000_000 litoshi — the
                // previous factor of 10_000_000 under-counted by 10x.
                let feePerKb = Double(feeString)! * 100000000
                self.estimateFee(feePerKb: LTCAmount(feePerKb))
            case .failure(let error):
                print(error)
            }
        }
    }

    /// Steps a candidate fee up by `feePerKb` per round until it covers the
    /// transaction's estimated cost, capping the fee below 1 LTC.
    func estimateFee(feePerKb: LTCAmount) {
        var resultFee = feePerKb
        let maxFee: LTCAmount = 100000000 // fee cannot be greater than 1 LTC (100000000 litoshi)
        // Fix: the loop previously compared against a constant `fee` that
        // was never mutated, so it could spin forever when the estimate
        // exceeded it; compare against the growing candidate instead.
        while resultFee < maxFee {
            let transaction = LTCTransaction() // build transaction like example above.
            let validFee = transaction.estimatedFee(withRate: feePerKb)
            if validFee <= resultFee {
                resultFee = validFee
                break
            }
            resultFee += feePerKb
        }
        // resultFee is the result of estimation of fee
        print(resultFee)
    }

    /// Builds and signs a transaction spending `outputs` with `key`, paying
    /// `ExampleConstants.sendAmount` to the recipient and returning any
    /// change to the change address. Returns the raw transaction hex.
    func createTransaction(key: LTCKey, outputs: [LTCAddressOutputResponseModel]) -> String {
        // Prepare values for transaction
        let outputs = mapOutputsResponse(model: outputs)
        let changeAddress = LTCAddress(string: ExampleConstants.changeAddress)
        let toAddress = LTCAddress(string: ExampleConstants.toAddress)
        let value = LTCAmount(ExampleConstants.sendAmount)
        let fee = LTCAmount(4000)
        let transaction = LTCTransaction()
        transaction.fee = fee
        var spentCoins = LTCAmount(0)
        // Convert each output to transaction input
        for txOut in outputs {
            let txIn = LTCTransactionInput()
            txIn.previousHash = txOut.transactionHash
            txIn.previousIndex = txOut.index
            txIn.value = txOut.value
            txIn.signatureScript = txOut.script
            transaction.addInput(txIn)
            spentCoins += txOut.value
        }
        // Prepare outputs for transaction
        let paymentOutput = LTCTransactionOutput(value: LTCAmount(value), address: toAddress)
        transaction.addOutput(paymentOutput)
        // If you have a change, then create output with your change address
        if spentCoins > (value + fee) {
            let changeValue = spentCoins - (value + fee)
            let changeOutput = LTCTransactionOutput(value: changeValue, address: changeAddress)
            transaction.addOutput(changeOutput)
        }
        // Sign the transaction: one signature per consumed output
        for i in 0..<outputs.count {
            let txOut = outputs[i]
            let txIn = transaction.inputs[i] as! LTCTransactionInput
            let hash = try! transaction.signatureHash(for: txOut.script, inputIndex: UInt32(i), hashType: .signatureHashTypeAll)
            let signature = key.signature(forHash: hash)
            var signatureForScript = signature
            let hashTypeData = LTCSignatureHashType.signatureHashTypeAll.rawValue
            var hashType = hashTypeData
            // Append the sighash-type byte to the signature
            signatureForScript?.append(&hashType, count: 1)
            let sigScript = LTCScript()
            _ = sigScript?.appendData(signatureForScript)
            _ = sigScript?.appendData(key.publicKey as Data?)
            txIn.signatureScript = sigScript
        }
        // Get a transaction hex and send it with CryptoApi
        let transactionHex = LTCHexFromData(transaction.data)!
        print(transactionHex)
        return transactionHex
    }

    /// Broadcasts a signed raw transaction and logs the resulting hash.
    func sendRawTransaction(transactionHex: String) {
        let cryptoApi = configCryptoApiLib()
        cryptoApi.ltc.sendRaw(transaction: transactionHex) { result in
            switch result {
            case .success(let response):
                print("Transaction Hash: \(response.result)")
            case .failure(let error):
                print(error)
            }
        }
    }
}
extension ViewController {
    /// Greedily picks the smallest P2PKH outputs whose combined value
    /// covers `value`. Returns `nil` when the available outputs are
    /// empty or insufficient.
    func selectNeededOutputs(for value: Int64, from: [LTCTransactionOutput]) ->
        (outs: [LTCTransactionOutput], selectedOutsAmount: LTCAmount)? {
        guard !from.isEmpty else { return nil }
        var chosen = [LTCTransactionOutput]()
        var gathered: LTCAmount = 0
        // Smallest-value-first so that the change is minimised
        for candidate in from.sorted(by: { $0.value < $1.value }) {
            if candidate.script.isPayToPublicKeyHashScript {
                chosen.append(candidate)
                gathered += candidate.value
            }
            if gathered >= value {
                break
            }
        }
        guard gathered >= value else { return nil }
        return (chosen, gathered)
    }
    /// Converts CryptoAPI output models into CoreLitecoin transaction
    /// outputs.
    func mapOutputsResponse(model: [LTCAddressOutputResponseModel]) -> [LTCTransactionOutput] {
        return model.map { item in
            let out = LTCTransactionOutput()
            out.value = LTCAmount(item.value)
            out.script = LTCScript(data: LTCDataFromHex(item.script))
            // Byte order flipped via invertHex — presumably an endianness
            // conversion of the mint hash; confirm against the API docs
            out.transactionHash = LTCDataFromHex(item.mintTransactionHash.invertHex())
            out.index = UInt32(item.mintIndex)
            out.blockHeight = item.mintBlockHeight
            return out
        }
    }
}
private extension String {
    /// Reverses the string two characters (one hex byte) at a time, e.g.
    /// "aabbcc" -> "ccbbaa". Used to flip a transaction hash's byte order.
    ///
    /// The previous implementation computed `index(startIndex, offsetBy:)`
    /// with a negative offset for odd-length inputs and crashed; a lone
    /// leading character is now kept as its own (single-character) pair.
    func invertHex() -> String {
        var reversed = String()
        reversed.reserveCapacity(count)
        var upper = endIndex
        while upper > startIndex {
            // Step back two characters, clamping at the start of the string
            let lower = index(upper, offsetBy: -2, limitedBy: startIndex) ?? startIndex
            reversed += self[lower..<upper]
            upper = lower
        }
        return reversed
    }
}
770ae318e5abbb9e683051d07265f0cd57b6a5ac | 3,146 | rs | Rust | vrp-pragmatic/tests/helpers/fixtures.rs | valerivp/vrp | 27ee30e5f4c44e051e5cec1248e606305b52fc00 | [
"Apache-2.0"
] | 1 | 2021-04-06T08:26:03.000Z | 2021-04-06T08:26:03.000Z | vrp-pragmatic/tests/helpers/fixtures.rs | valerivp/vrp | 27ee30e5f4c44e051e5cec1248e606305b52fc00 | [
"Apache-2.0"
] | null | null | null | vrp-pragmatic/tests/helpers/fixtures.rs | valerivp/vrp | 27ee30e5f4c44e051e5cec1248e606305b52fc00 | [
"Apache-2.0"
] | null | null | null | pub const SIMPLE_PROBLEM: &str = r#"
{
"plan": {
"jobs": [
{
"id": "single_job",
"deliveries": [
{
"places": [
{
"location": {
"lat": 52.5622847,
"lng": 13.4023099
},
"duration": 240.0,
"times": [
[
"2019-07-04T10:00:00Z",
"2019-07-04T16:00:00Z"
]
]
}
],
"demand": [
1
]
}
]
},
{
"id": "multi_job",
"pickups": [
{
"places": [
{
"location": {
"lat": 52.5622847,
"lng": 13.4023099
},
"duration": 240.0
}
],
"demand": [
1
],
"tag": "p1"
},
{
"places": [
{
"location": {
"lat": 52.5330881,
"lng": 13.3973059
},
"duration": 240.0
}
],
"demand": [
1
],
"tag": "p2"
}
],
"deliveries": [
{
"places": [
{
"location": {
"lat": 52.5252832,
"lng": 13.4188422
},
"duration": 240.0
}
],
"demand": [
2
],
"tag": "d1"
}
]
}
]
},
"fleet": {
"vehicles": [
{
"typeId": "vehicle",
"vehicleIds": [
"vehicle_1"
],
"profile": {
"matrix": "normal_car"
},
"costs": {
"fixed": 22.0,
"distance": 0.0002,
"time": 0.004806
},
"shifts": [
{
"start": {
"earliest": "2019-07-04T09:00:00Z",
"latest": "2019-07-04T09:30:00Z",
"location": {
"lat": 52.4664257,
"lng": 13.2812488
}
},
"end": {
"earliest": "2019-07-04T17:30:00Z",
"latest": "2019-07-04T18:00:00Z",
"location": {
"lat": 52.4664257,
"lng": 13.2812488
}
}
}
],
"capacity": [
10
]
}
],
"profiles": [
{
"name": "normal_car"
}
]
}
}
"#;
/// A 4x4 routing matrix fixture (row-major flattened) matching the
/// `normal_car` profile referenced by [`SIMPLE_PROBLEM`].
/// The JSON payload below must stay byte-identical — tests parse it as-is.
pub const SIMPLE_MATRIX: &str = r#"
{
  "profile": "normal_car",
  "travelTimes": [
    0,
    939,
    1077,
    2251,
    1003,
    0,
    645,
    2220,
    1068,
    701,
    0,
    2385,
    2603,
    2420,
    2597,
    0
  ],
  "distances": [
    0,
    4870,
    5113,
    17309,
    4580,
    0,
    2078,
    16983,
    5306,
    2688,
    0,
    15180,
    19743,
    14154,
    14601,
    0
  ]
}
"#;
| 18.08046 | 49 | 0.267006 |
83e4b872116e67f4047c4666533d0806d4e3f9e1 | 27,699 | rs | Rust | tests/known_cases.rs | arctic-hen7/bonnie | 6694d9e8d45e69ad74bf1326937eb711079a3223 | [
"MIT"
] | 41 | 2021-04-13T14:01:42.000Z | 2022-03-27T14:34:53.000Z | tests/known_cases.rs | arctic-hen7/bonnie | 6694d9e8d45e69ad74bf1326937eb711079a3223 | [
"MIT"
] | 23 | 2021-04-21T17:59:05.000Z | 2022-03-12T09:13:03.000Z | tests/known_cases.rs | arctic-hen7/bonnie | 6694d9e8d45e69ad74bf1326937eb711079a3223 | [
"MIT"
] | 4 | 2021-04-16T06:10:03.000Z | 2022-01-06T01:13:20.000Z | // Bonnie mostly follows a strategy of integration testing to mimc real usage
// This also significantly reduces the brittleness of tests
// Note that the commands specified in testing WILL ACTUALLY BE RUN, so change things here carefully!
// Commands epcified should `echo` their name so we trace them back and `exit` with some exit code
// This file handles manually-coded known cases
// All these tests are Linux-specific due to their OS-specific testing/shells (sorry!), they are marked as such for conditional compilation
use lib::{Config, BONNIE_VERSION};
// Testing helper mirroring the real Bonnie entrypoint in `main.rs`: it
// parses `cfg_str`, resolves the command for `prog_args` and reduces the
// whole run to the promise of an exit code. Warnings and command info are
// written to `output` (a plain byte vector in tests).
// NOTE: the commands specified in configs WILL ACTUALLY BE RUN.
#[cfg(test)]
fn run_e2e_test(
    cfg_str: &str,
    prog_args: Vec<String>,
    version: &str,
    output: &mut impl std::io::Write,
) -> Result<i32, String> {
    let final_cfg = Config::new(cfg_str)?.to_final(version, output)?;
    let (command, name, args) = final_cfg.get_command_for_args(&prog_args)?;
    let prepared = command.prepare(&name, &args, &final_cfg.default_shell)?;
    // Verbosity stays off here — development runs are verbose regardless
    prepared.run(&name, false, output)
}
/// A testing utility macro that asserts Bonnie returns the given exit code,
/// yielding the output of the execution (warnings, command info, etc.) as a
/// vector of lines. The config string given here does not have to contain a
/// version tag — one is prepended automatically.
///
/// Fix: the stray semicolon that previously followed the inner block made
/// the macro expand to `{ ... };` in expression position (e.g.
/// `let output = expect_exit_code!(...)`), which trips the
/// `semicolon_in_expressions_from_macros` future-compatibility lint.
#[cfg(test)]
macro_rules! expect_exit_code {
    ($exit_code:literal, $raw_cfg_str:expr, $version:expr, [ $($arg:expr),+ ]) => {
        {
            // We define a vector that warnings and command information will be printed to
            let mut output = Vec::new();
            let prog_args = vec![$($arg.to_string()), +];
            let cfg_str = "version = \"".to_string() + $version + "\"\n" + $raw_cfg_str;
            let res = run_e2e_test(&cfg_str, prog_args, $version, &mut output);
            assert_eq!(res, Ok($exit_code));
            // We know this will only be filled with `u8` bytes, so we can safely call `.unwrap()`
            let output_string = String::from_utf8(output).unwrap();
            let output_lines: Vec<String> = output_string.lines().map(|x| x.to_string()).collect();
            output_lines
        }
    }
}
// A testing utility macro that allows us to expect some error to be returned
// This returns the output of the execution (warnings, command info, etc.) as a vector of lines
// The config string given here does not have to contain any version tag, that will be added
// TODO after `error_chain` migration, test for specific errors here
#[cfg(test)]
macro_rules! expect_error {
    ($raw_cfg_str:expr, $version:expr, [ $($arg:expr),+ ]) => {
        {
            // We define a vector that warnings and command information will be printed to
            let mut output = Vec::new();
            let prog_args = vec![$($arg.to_string()), +];
            let cfg_str = "version = \"".to_string() + $version + "\"\n" + $raw_cfg_str;
            let res = run_e2e_test(&cfg_str, prog_args, $version, &mut output);
            // Log the raw result to simplify debugging unexpectedly-successful runs
            println!("{:#?}", res);
            assert!(matches!(res, Err(_)));
            // We know this will only be filled with `u8` bytes, so we can safely call `.unwrap()`
            let output_string = String::from_utf8(output).unwrap();
            let output_lines: Vec<String> = output_string.lines().map(|x| x.to_string()).collect();
            output_lines
        }
    }
}
// A utility testing macro that asserts a series of elements appears
// adjacently and in order within a vector of strings.
macro_rules! assert_contains_ordered {
    ($vec:expr, [ $($elem:expr),+ ]) => {
        {
            // Join both the haystack and the expected elements with the same
            // delimiter so ordered adjacency reduces to one substring test
            let haystack = $vec.join(" | ");
            let needle = vec![$($elem.to_string()), +].join(" | ");
            assert!(haystack.contains(&needle))
        }
    }
}
// A utility testing macro that asserts at least ONE of the given elements
// is present (by exact element equality) in a vector of strings. Despite
// the name, this is an "any-of" check, matching its call sites which pass
// a single expected element.
macro_rules! assert_contains {
    ($vec:expr, [ $($elem:expr),+ ]) => {
        {
            let candidates = vec![$($elem.to_string()), +];
            assert!(candidates.iter().any(|candidate| $vec.contains(candidate)))
        }
    }
}
// This test suite tests all the major syntactic features of Bonnie
// Simplest form: a bare key-value command string
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_kv_syntax() {
    let output = expect_exit_code!(
        0,
        r#"
[scripts]
basic = "exit 0"
        "#,
        BONNIE_VERSION,
        ["basic"]
    );
    println!("{:#?}", output);
    assert_contains!(output, ["sh, [\"-c\", \"exit 0\"]"]);
}
// `%VAR` placeholders listed in `env_vars` come from the process environment
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux (uses the `USER` environment variable, the feature itself should be fine)
fn succeeds_with_env_var_interpolation() {
    let output = expect_exit_code!(
        0,
        r#"
[scripts]
basic.cmd = "echo %USER && exit 0"
basic.env_vars = ["USER"]
        "#,
        BONNIE_VERSION,
        ["basic"]
    );
    assert_contains_ordered!(
        output,
        ["sh, [\"-c\", \"echo ".to_string() + &std::env::var("USER").unwrap() + " && exit 0\"]"]
    );
}
// `%name` placeholders listed in `args` come from positional CLI arguments
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_arg_interpolation() {
    let output = expect_exit_code!(
        0,
        r#"
[scripts]
basic.cmd = "echo %name && exit 0"
basic.args = ["name"]
        "#,
        BONNIE_VERSION,
        ["basic", "Name"]
    );
    assert_contains_ordered!(output, ["sh, [\"-c\", \"echo Name && exit 0\"]"]);
}
// Omitting a declared argument must be a hard error
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn returns_error_on_too_few_args() {
    expect_error!(
        r#"
[scripts]
basic.cmd = "echo %name && exit 0"
basic.args = ["name"]
        "#,
        BONNIE_VERSION,
        ["basic"]
    );
}
// `%%` with no surplus arguments interpolates to nothing
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_mass_arg_interpolation_and_no_args() {
    let output = expect_exit_code!(
        0,
        r#"
[scripts]
basic = "echo %% && exit 0"
        "#,
        BONNIE_VERSION,
        ["basic"]
    );
    println!("{:?}", output);
    assert_contains_ordered!(output, ["sh, [\"-c\", \"echo && exit 0\"]"]); // Note the extra space from concatenation
}
// `%%` collects a single surplus argument
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_mass_arg_interpolation_and_one_arg() {
    let output = expect_exit_code!(
        0,
        r#"
[scripts]
basic = "echo %% && exit 0"
        "#,
        BONNIE_VERSION,
        ["basic", "Test"]
    );
    assert_contains_ordered!(output, ["sh, [\"-c\", \"echo Test && exit 0\"]"]);
}
// `%%` collects multiple surplus arguments, space-separated
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_mass_arg_interpolation_and_many_args() {
    let output = expect_exit_code!(
        0,
        r#"
[scripts]
basic = "echo %% && exit 0"
        "#,
        BONNIE_VERSION,
        ["basic", "foo", "bar"]
    );
    assert_contains_ordered!(output, ["sh, [\"-c\", \"echo foo bar && exit 0\"]"]);
}
// `\%%` escapes the mass-interpolation marker, leaving a literal `%%`
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_mass_arg_interpolation_and_escaping() {
    let output = expect_exit_code!(
        0,
        r#"
[scripts]
basic = "echo %% \\%% && exit 0"
        "#,
        BONNIE_VERSION,
        ["basic", "foo", "bar"]
    );
    assert_contains_ordered!(output, ["sh, [\"-c\", \"echo foo bar %% && exit 0\"]"]);
}
// Named `%name` arguments are consumed first; `%%` takes the remainder
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_mass_arg_interpolation_and_specific_arg_interpolation() {
    let output = expect_exit_code!(
        0,
        r#"
[scripts]
basic.cmd = "echo %name %% && exit 0"
basic.args = ["name"]
        "#,
        BONNIE_VERSION,
        ["basic", "Name", "foo", "bar"]
    );
    assert_contains_ordered!(output, ["sh, [\"-c\", \"echo Name foo bar && exit 0\"]"]);
}
// This test is dependent on the contents of `.env`
// Files listed in `env_files` are loaded before env-var interpolation
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn loads_env_files() {
    let output = expect_exit_code!(
        0,
        r#"
env_files = ["src/.env"]
[scripts]
basic.cmd = "echo %SHORTGREETING && exit 0"
basic.env_vars = ["SHORTGREETING"]
        "#,
        BONNIE_VERSION,
        ["basic"]
    );
    assert_contains_ordered!(output, ["sh, [\"-c\", \"echo Hello && exit 0\"]"]);
}
// A missing env file must be a hard error
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn returns_error_on_nonexistent_env_file() {
    expect_error!(
        r#"
env_files = ["src/.ennv"] # Misspelt this line
[scripts]
basic.cmd = "echo %SHORTGREETING && exit 0"
basic.env_vars = ["SHORTGREETING"]
        "#,
        BONNIE_VERSION,
        ["basic"]
    );
}
// A syntactically invalid env file must be a hard error
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn returns_error_on_invalid_env_file() {
    expect_error!(
        r#"
env_files = ["src/.env.invalid"] # This file contains an uninclosed ' ', and is thus invalid
[scripts]
basic.cmd = "echo %INVALID_VAR && exit 0"
basic.env_vars = ["INVALID_VAR"]
        "#,
        BONNIE_VERSION,
        ["basic"]
    );
}
// Env vars, named args and `%%` can all be interpolated in one command
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_full_interpolation() {
    let output = expect_exit_code!(
        0,
        r#"
env_files = ["src/.env"]
[scripts]
basic.cmd = "echo \"%SHORTGREETING %name %%\" && exit 0"
basic.args = ["name"]
basic.env_vars = ["SHORTGREETING"]
        "#,
        BONNIE_VERSION,
        ["basic", "Name", "(extra stuff)"]
    );
    assert_contains_ordered!(
        output,
        ["sh, [\"-c\", \"echo \\\"Hello Name (extra stuff)\\\" && exit 0\"]"]
    );
}
// Array commands are chained into a single shell invocation with `&&`
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_multistage() {
    let output = expect_exit_code!(
        1,
        r#"
[scripts]
basic = ["(exit 0)", "exit 1"]
        "#,
        BONNIE_VERSION,
        ["basic"]
    );
    assert_contains_ordered!(output, ["sh, [\"-c\", \"(exit 0) && exit 1\"]"]);
}
// Multistage commands still support env-var, named-arg and `%%` interpolation
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_multistage_with_interpolation() {
    let output = expect_exit_code!(
        1,
        r#"
env_files = ["src/.env"]
[scripts]
basic.cmd = [
    "echo %SHORTGREETING %%",
    "echo %name && exit 1"
]
basic.args = ["name"]
basic.env_vars = ["SHORTGREETING"]
        "#,
        BONNIE_VERSION,
        ["basic", "Name", "foo", "bar"]
    );
    assert_contains_ordered!(
        output,
        ["sh, [\"-c\", \"echo Hello foo bar && echo Name && exit 1\"]"]
    );
}
// Subcommands are dispatched by the second CLI argument
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_kv_unordered_subcommands() {
    let cfg = r#"
[scripts]
basic.subcommands.test = "exit 0"
basic.subcommands.other = "exit 1"
    "#;
    let output1 = expect_exit_code!(0, cfg, BONNIE_VERSION, ["basic", "test"]);
    assert_contains_ordered!(output1, ["sh, [\"-c\", \"exit 0\"]"]);
    let output2 = expect_exit_code!(1, cfg, BONNIE_VERSION, ["basic", "other"]);
    assert_contains_ordered!(output2, ["sh, [\"-c\", \"exit 1\"]"]);
}
// Subcommands can themselves be multistage with per-subcommand args/env vars
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_multistage_and_interpolation_unordered_subcommands() {
    let cfg = r#"
env_files = ["src/.env"]
[scripts]
basic.subcommands.test.cmd = [
    "echo %SHORTGREETING %%",
    "echo %name && exit 1"
]
basic.subcommands.test.args = ["name"]
basic.subcommands.test.env_vars = ["SHORTGREETING"]
basic.subcommands.other = "exit 1"
    "#;
    let output1 = expect_exit_code!(1, cfg, BONNIE_VERSION, ["basic", "test", "Name", "foo bar"]);
    assert_contains_ordered!(
        output1,
        ["sh, [\"-c\", \"echo Hello foo bar && echo Name && exit 1\"]"]
    );
    let output2 = expect_exit_code!(1, cfg, BONNIE_VERSION, ["basic", "other"]);
    assert_contains_ordered!(output2, ["sh, [\"-c\", \"exit 1\"]"]);
}
// A root `cmd` runs when no subcommand is given; subcommands still dispatch
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_root_cmd_for_unordered_subcommands() {
    let cfg = r#"
[scripts]
basic.cmd = "exit 0"
basic.subcommands.test = "exit 1"
basic.subcommands.other = "exit 2"
    "#;
    let root_output = expect_exit_code!(0, cfg, BONNIE_VERSION, ["basic"]);
    assert_contains_ordered!(root_output, ["sh, [\"-c\", \"exit 0\"]"]);
    let output1 = expect_exit_code!(1, cfg, BONNIE_VERSION, ["basic", "test"]);
    assert_contains_ordered!(output1, ["sh, [\"-c\", \"exit 1\"]"]);
    let output2 = expect_exit_code!(2, cfg, BONNIE_VERSION, ["basic", "other"]);
    assert_contains_ordered!(output2, ["sh, [\"-c\", \"exit 2\"]"]);
}
// A script entry with args but no `cmd` must be a hard error
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn returns_error_on_missing_cmd() {
    expect_error!(
        r#"
[scripts]
basic.args = ["name"]
        "#,
        BONNIE_VERSION,
        ["basic", "Name"]
    );
}
// An OS-specific target overrides the generic command on a matching OS
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_os_specific_kv_cmd() {
    let output = expect_exit_code!(
        0,
        r#"
[scripts]
basic.cmd.generic = "exit 1"
basic.cmd.targets.linux = "exit 0"
        "#,
        BONNIE_VERSION,
        ["basic"]
    );
    assert_contains_ordered!(output, ["sh, [\"-c\", \"exit 0\"]"]);
}
// OS-specific targets compose with multistage commands and interpolation
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_os_specific_multistage_and_interpolation_cmd() {
    let output = expect_exit_code!(
        1,
        r#"
env_files = ["src/.env"]
[scripts]
basic.cmd.generic = "exit 2"
basic.cmd.targets.linux = [
    "echo %SHORTGREETING %%",
    "echo %name && exit 1"
]
basic.args = ["name"]
basic.env_vars = ["SHORTGREETING"]
        "#,
        BONNIE_VERSION,
        ["basic", "Name", "foo", "bar"]
    );
    println!("{:?}", output);
    assert_contains_ordered!(
        output,
        ["sh, [\"-c\", \"echo Hello foo bar && echo Name && exit 1\"]"]
    );
}
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_custom_shell() {
let output = expect_exit_code!(
0,
r#"
[scripts]
basic.cmd.exec = "exit 0"
basic.cmd.shell = ["bash", "-c", "{COMMAND}"]
"#,
BONNIE_VERSION,
["basic"]
);
assert_contains_ordered!(output, ["bash, [\"-c\", \"exit 0\"]"]);
}
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_custom_shell_with_delimiter() {
let output = expect_exit_code!(
0,
r#"
[scripts]
basic.cmd.exec = "exit 0"
basic.cmd.shell = { parts = ["bash", "-c", "{COMMAND}"], delimiter = " && " }
"#,
BONNIE_VERSION,
["basic"]
);
assert_contains_ordered!(output, ["bash, [\"-c\", \"exit 0\"]"]);
}
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_custom_shell_and_os_specificity_and_multistage_and_interpolation() {
let output = expect_exit_code!(
1,
r#"
env_files = ["src/.env"]
[scripts]
basic.cmd.generic = "exit 2"
basic.cmd.targets.linux.exec = [
"echo %SHORTGREETING %%",
"echo %name && exit 1"
]
basic.cmd.targets.linux.shell = ["bash", "-c", "{COMMAND}"]
basic.args = ["name"]
basic.env_vars = ["SHORTGREETING"]
"#,
BONNIE_VERSION,
["basic", "Name", "foo", "bar"]
);
println!("{:?}", output);
assert_contains_ordered!(
output,
["bash, [\"-c\", \"echo Hello foo bar && echo Name && exit 1\"]"]
);
}
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn returns_error_if_generic_os_specifier_not_given() {
expect_error!(
r#"
[scripts]
basic.cmd.targets.linux = "exit 0"
"#,
BONNIE_VERSION,
["basic"]
);
}
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn uses_simple_default_shell() {
let output = expect_exit_code!(
0,
r#"
default_shell = ["bash", "-c", "{COMMAND}"]
[scripts]
basic = "exit 0"
"#,
BONNIE_VERSION,
["basic"]
);
assert_contains_ordered!(output, ["bash, [\"-c\", \"exit 0\"]"]);
}
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn uses_generic_default_shell() {
let output = expect_exit_code!(
0,
r#"
default_shell.generic = ["bash", "-c", "{COMMAND}"]
[scripts]
basic = "exit 0"
"#,
BONNIE_VERSION,
["basic"]
);
assert_contains_ordered!(output, ["bash, [\"-c\", \"exit 0\"]"]);
}
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn uses_generic_default_shell_with_delimiter() {
let output = expect_exit_code!(
0,
r#"
default_shell.generic = { parts = ["bash", "-c", "{COMMAND}"], delimiter = " && " }
[scripts]
basic = "exit 0"
"#,
BONNIE_VERSION,
["basic"]
);
assert_contains_ordered!(output, ["bash, [\"-c\", \"exit 0\"]"]);
}
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn uses_os_specific_default_shell() {
let output = expect_exit_code!(
0,
r#"
default_shell.generic = ["sh", "-c", "{COMMAND}"]
default_shell.targets.linux = ["bash", "-c", "{COMMAND}"]
[scripts]
basic = "exit 0"
"#,
BONNIE_VERSION,
["basic"]
);
assert_contains_ordered!(output, ["bash, [\"-c\", \"exit 0\"]"]);
}
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_kv_simple_ordered_subcommands() {
let output = expect_exit_code!(
0,
r#"
[scripts]
basic.subcommands.test = "exit 0"
basic.subcommands.other = "exit 1"
basic.order = "test"
"#,
BONNIE_VERSION,
["basic"]
);
assert_contains_ordered!(output, ["sh, [\"-c\", \"exit 0\"]"]);
}
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_kv_complex_ordered_subcommands() {
let output = expect_exit_code!(
1,
r#"
[scripts]
basic.subcommands.test = "exit 0"
basic.subcommands.other = "exit 1"
basic.order = """
test {
Any => other
}
"""
"#,
BONNIE_VERSION,
["basic"]
);
assert_contains_ordered!(
output,
["sh, [\"-c\", \"exit 0\"]", "sh, [\"-c\", \"exit 1\"]"]
);
}
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn returns_error_on_non_global_args_for_ordered_subcommands() {
expect_error!(
r#"
[scripts]
basic.subcommands.test = "echo %name && exit 0"
basic.subcommands.test.args = ["name"] # This has to be `basic.args` instead
basic.subcommands.other = "exit 1"
basic.order = """
test {
Any => other
}
"""
"#,
BONNIE_VERSION,
["basic"]
);
}
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn returns_error_on_unordered_nesting_in_order() {
expect_error!(
r#"
[scripts]
basic.subcommands.test = "echo %name && exit 0"
basic.subcommands.test.args = ["name"] # This has to be `basic.args` instead
basic.subcommands.other = "exit 1"
basic.subcommands.nested.subcommands.test = "exit 0"
basic.subcommands.nested.subcommands.other = "exit 1"
basic.order = """
test {
Any => other
}
"""
"#,
BONNIE_VERSION,
["basic"]
);
}
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn returns_error_on_cmd_and_ordered_subcommands() {
expect_error!(
r#"
[scripts]
basic.cmd = "exit 0"
basic.subcommands.test = "exit 0"
basic.subcommands.other = "exit 1"
basic.order = """
test {
Any => other
}
"""
"#,
BONNIE_VERSION,
["basic"]
);
}
// This test should basically represent the most complex use-case of Bonnie in terms of syntax
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_everything() {
let output = expect_exit_code!(
1,
r#"
env_files = ["src/.env"]
default_env.generic = ["sh", "-c", "{COMMAND}"]
default_env.targets.linux = ["bash", "-c", "{COMMAND}"]
[scripts]
basic.subcommands.test.cmd.generic = "exit 5"
basic.subcommands.test.cmd.targets.linux.exec = [
"echo %SHORTGREETING %%",
"echo %name && exit 1"
]
basic.subcommands.test.env_vars = ["SHORTGREETING"]
basic.subcommands.test.cmd.targets.linux.shell = ["sh", "-c", "{COMMAND}"]
basic.subcommands.nested.subcommands.test = "exit 2"
basic.subcommands.nested.subcommands.other = "exit 3"
basic.subcommands.nested.order = """
test {
Any => other
}
"""
basic.args = ["name"]
basic.order = """
test {
Any => nested {
Any => test
}
}
"""
"#,
BONNIE_VERSION,
["basic", "Name", "foo", "bar"]
);
println!("{:?}", output);
assert_contains_ordered!(
output,
[
"sh, [\"-c\", \"echo Hello foo bar && echo Name && exit 1\"]",
"sh, [\"-c\", \"exit 2\"]",
"sh, [\"-c\", \"exit 3\"]",
"sh, [\"-c\", \"echo Hello foo bar && echo Name && exit 1\"]"
]
);
}
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_success_failure_order_control() {
let output1 = expect_exit_code!(
1,
r#"
[scripts]
basic.subcommands.test = "exit 0"
basic.subcommands.other = "exit 1"
basic.order = """
test {
Success => other
}
"""
"#,
BONNIE_VERSION,
["basic"]
);
assert_contains_ordered!(
output1,
["sh, [\"-c\", \"exit 0\"]", "sh, [\"-c\", \"exit 1\"]"]
);
let output2 = expect_exit_code!(
0,
r#"
[scripts]
basic.subcommands.test = "exit 1"
basic.subcommands.other = "exit 0"
basic.order = """
test {
Failure => other
}
"""
"#,
BONNIE_VERSION,
["basic"]
);
assert_contains_ordered!(
output2,
["sh, [\"-c\", \"exit 1\"]", "sh, [\"-c\", \"exit 0\"]"]
);
}
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_exit_code_order_control() {
let output1 = expect_exit_code!(
1,
r#"
[scripts]
basic.subcommands.test = "exit 0"
basic.subcommands.other = "exit 1"
basic.order = """
test {
0 => other
}
"""
"#,
BONNIE_VERSION,
["basic"]
);
assert_contains_ordered!(
output1,
["sh, [\"-c\", \"exit 0\"]", "sh, [\"-c\", \"exit 1\"]"]
);
let output2 = expect_exit_code!(
0,
r#"
[scripts]
basic.subcommands.test = "exit 1"
basic.subcommands.other = "exit 0"
basic.order = """
test {
1 => other
}
"""
"#,
BONNIE_VERSION,
["basic"]
);
assert_contains_ordered!(
output2,
["sh, [\"-c\", \"exit 1\"]", "sh, [\"-c\", \"exit 0\"]"]
);
}
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_not_exit_code_order_control() {
let output1 = expect_exit_code!(
1,
r#"
[scripts]
basic.subcommands.test = "exit 0"
basic.subcommands.other = "exit 1"
basic.order = """
test {
!1 => other
}
"""
"#,
BONNIE_VERSION,
["basic"]
);
assert_contains_ordered!(
output1,
["sh, [\"-c\", \"exit 0\"]", "sh, [\"-c\", \"exit 1\"]"]
);
let output2 = expect_exit_code!(
0,
r#"
[scripts]
basic.subcommands.test = "exit 1"
basic.subcommands.other = "exit 0"
basic.order = """
test {
!0 => other
}
"""
"#,
BONNIE_VERSION,
["basic"]
);
assert_contains_ordered!(
output2,
["sh, [\"-c\", \"exit 1\"]", "sh, [\"-c\", \"exit 0\"]"]
);
}
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_any_none_order_control() {
let output1 = expect_exit_code!(
1,
r#"
[scripts]
basic.subcommands.test = "exit 0"
basic.subcommands.other = "exit 1"
basic.order = """
test {
Any => other
}
"""
"#,
BONNIE_VERSION,
["basic"]
);
assert_contains_ordered!(
output1,
["sh, [\"-c\", \"exit 0\"]", "sh, [\"-c\", \"exit 1\"]"]
);
let output2 = expect_exit_code!(
1,
r#"
[scripts]
basic.subcommands.test = "exit 1"
basic.subcommands.other = "exit 0"
basic.order = """
test {
None => other
}
"""
"#,
BONNIE_VERSION,
["basic"]
);
assert_contains_ordered!(output2, ["sh, [\"-c\", \"exit 1\"]"]);
}
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_union_order_control() {
let output = expect_exit_code!(
1,
r#"
[scripts]
basic.subcommands.test = "exit 0"
basic.subcommands.other = "exit 1"
basic.order = """
test {
0|Success|2 => other
}
"""
"#,
BONNIE_VERSION,
["basic"]
);
assert_contains_ordered!(
output,
["sh, [\"-c\", \"exit 0\"]", "sh, [\"-c\", \"exit 1\"]"]
);
}
#[test]
#[cfg(target_os = "linux")] // This test will only work on Linux
fn succeeds_with_intersection_order_control() {
let output = expect_exit_code!(
1,
r#"
[scripts]
basic.subcommands.test = "exit 0"
basic.subcommands.other = "exit 1"
basic.order = """
test {
0+Success => other
}
"""
"#,
BONNIE_VERSION,
["basic"]
);
assert_contains_ordered!(
output,
["sh, [\"-c\", \"exit 0\"]", "sh, [\"-c\", \"exit 1\"]"]
);
}
| 29.719957 | 139 | 0.538034 |
165abf1dc0de9fdde33355d69f5e684626cf9307 | 1,313 | h | C | Headers/_SBIconWallpaperBackgroundProvider.h | MoTheNerd/oledlock | b1a79668a8f31d0c8cdfea11e14d5dc19654f380 | [
"MIT"
] | null | null | null | Headers/_SBIconWallpaperBackgroundProvider.h | MoTheNerd/oledlock | b1a79668a8f31d0c8cdfea11e14d5dc19654f380 | [
"MIT"
] | null | null | null | Headers/_SBIconWallpaperBackgroundProvider.h | MoTheNerd/oledlock | b1a79668a8f31d0c8cdfea11e14d5dc19654f380 | [
"MIT"
] | 1 | 2018-03-05T19:20:57.000Z | 2018-03-05T19:20:57.000Z | //
// Generated by class-dump 3.5 (64 bit).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by Steve Nygard.
//
#import "NSObject.h"
#import "SBWallpaperObserver.h"
#import "_UISettingsKeyObserver.h"
@class NSHashTable, NSString, SBIconColorSettings;
@interface _SBIconWallpaperBackgroundProvider : NSObject <_UISettingsKeyObserver, SBWallpaperObserver>
{
SBIconColorSettings *_colorSettings;
NSHashTable *_clients;
struct CGImage *_blurImage;
struct CGColor *_solidColor;
}
+ (id)sharedInstance;
- (void).cxx_destruct;
- (void)_updateBackgrounds;
- (void)_updateBlurForClient:(id)arg1;
- (void)_updateClient:(id)arg1;
- (void)_updateAllClients;
- (void)wallpaperGeometryDidChangeForVariant:(long long)arg1;
- (void)wallpaperLegibilitySettingsDidChange:(id)arg1 forVariant:(long long)arg2;
- (void)wallpaperDidChangeForVariant:(long long)arg1;
- (void)settings:(id)arg1 changedValueForKey:(id)arg2;
- (void)noteClientWallpaperRelativeBoundsDidChange:(id)arg1;
- (void)removeClient:(id)arg1;
- (void)addClient:(id)arg1;
- (void)dealloc;
- (id)init;
// Remaining properties
@property(readonly, copy) NSString *debugDescription;
@property(readonly, copy) NSString *description;
@property(readonly) unsigned long long hash;
@property(readonly) Class superclass;
@end
| 28.543478 | 102 | 0.765423 |
f01e36c7e52b2f29e3153f9812f722135e5763dd | 2,483 | py | Python | Curso em Video/D_045.py | tonmarcondes/UNIVESP | a66a623d4811e8f3f9e2999f09e38a4470035ae2 | [
"MIT"
] | null | null | null | Curso em Video/D_045.py | tonmarcondes/UNIVESP | a66a623d4811e8f3f9e2999f09e38a4470035ae2 | [
"MIT"
] | null | null | null | Curso em Video/D_045.py | tonmarcondes/UNIVESP | a66a623d4811e8f3f9e2999f09e38a4470035ae2 | [
"MIT"
] | null | null | null | import random
cor = {
'fim':'\033[m',
'amarelo':'\033[1;033m',
'vermelho':'\033[1;031m',
'vermelhof':'\033[7;031m',
'azul':'\033[1;034m',
'verde':'\033[1;32m',
'verdef':'\033[7;32m',
'branco':'\033[1;030m'
}
print('''
Escolha uma das opções abaixo:
\t {}1{} {}PEDRA{}:
\t {}2{} {}PAPEL{}:
\t {}3{} {}TESOURA{}:'''.format(
cor['vermelho'], cor['fim'], cor['azul'], cor['fim'],
cor['vermelho'], cor['fim'], cor['azul'], cor['fim'],
cor['vermelho'], cor['fim'], cor['azul'], cor['fim']
))
eu = int(input('\t '))
if eu == 1:
me = 'PEDRA'
elif eu == 2:
me = 'PAPEL'
else:
me = 'TESOURA'
pc = ['PEDRA', 'PAPEL', 'TESOURA']
random.shuffle(pc)
if eu < 1 or eu > 3:
print('\n\t\t{}ESCOLHA UM VALOR VÁLIDO{}\n'.format(cor['vermelho'], cor['fim']))
elif eu == 1 and pc[0] == 'PEDRA' or eu == 2 and pc[0] == 'PAPEL' or eu == 3 and pc[0] == 'TESOURA':
print('{}EU{}: {}\t\t{}PC{}: {}'.format(cor['vermelho'], cor['fim'], me, cor['vermelho'], cor['fim'], pc[0]))
print('{} EMPATE, JOGUE OUTRA VEZ {}\n'.format(cor['vermelhof'], cor['fim']))
elif eu == 1 and pc[0] == 'PAPEL':
print('{}EU{}: {}\t\t{}PC{}: {}'.format(cor['vermelho'], cor['fim'], me, cor['vermelho'], cor['fim'], pc[0]))
print('PAPEL {}EMBRULHA{} PEDRA\n'.format(cor['amarelo'], cor['fim']))
elif eu == 1 and pc[0] == 'PAPEL':
print('{}EU{}: {}\t\t{}PC{}: {}'.format(cor['vermelho'], cor['fim'], me, cor['vermelho'], cor['fim'], pc[0]))
print('PEDRA {}QUEBRA{} TESOURA\n'.format(cor['amarelo'], cor['fim']))
elif eu == 2 and pc[0] == 'PEDRA':
print('{}EU{}: {}\t\t{}PC{}: {}'.format(cor['vermelho'], cor['fim'], me, cor['vermelho'], cor['fim'], pc[0]))
print('PAPEL {}EMBRULHA{} PEDRA\n'.format(cor['amarelo'], cor['fim']))
elif eu == 2 and pc[0] == 'TESOURA':
print('{}EU{}: {}\t\t{}PC{}: {}'.format(cor['vermelho'], cor['fim'], me, cor['vermelho'], cor['fim'], pc[0]))
print('TESOURA {}CORTA{} PAPEL\n'.format(cor['amarelo'], cor['fim']))
elif eu == 3 and pc[0] == 'PEDRA':
print('{}EU{}: {}\t\t{}PC{}: {}'.format(cor['vermelho'], cor['fim'], me, cor['vermelho'], cor['fim'], pc[0]))
print('PEDRA {}QUEBRA{} TESOURA\n'.format(cor['amarelo'], cor['fim']))
else:
print('{}EU{}: {}\t\t{}PC{}: {}'.format(cor['vermelho'], cor['fim'], me, cor['vermelho'], cor['fim'], pc[0]))
print('TESOURA {}CORTA{} PAPEL\n'.format(cor['amarelo'], cor['fim']))
| 42.084746 | 114 | 0.515103 |
f06bb4aa4ae144cd78a4c0d281b10b1b55cacb4b | 865 | js | JavaScript | src/main/resources/static/app-directives.js | bjornlindstrom/javawebapp | a0e4ff0f39a14e5294db160c494486426c6f6e96 | [
"MIT"
] | null | null | null | src/main/resources/static/app-directives.js | bjornlindstrom/javawebapp | a0e4ff0f39a14e5294db160c494486426c6f6e96 | [
"MIT"
] | null | null | null | src/main/resources/static/app-directives.js | bjornlindstrom/javawebapp | a0e4ff0f39a14e5294db160c494486426c6f6e96 | [
"MIT"
] | null | null | null | 'use strict';
angular.module('appDirectives', [])
.directive('navMenu', function($location) {
return {
restrict:'E',
templateUrl: '/components/nav-menu.html',
scope: false,
link: function (scope, element) {
function setActive() {
var path = $location.path();
if (path) {
angular.forEach(element.find('li'), function (li) {
var anchor = li.querySelector('a');
if (anchor.href.match('#' + path + '(?=\\?|$)')) {
angular.element(li).addClass('active');
} else {
angular.element(li).removeClass('active');
}
});
}
}
setActive();
scope.$on('$locationChangeSuccess', setActive);
}
};
}); | 28.833333 | 72 | 0.446243 |
5b26e77017920a89cfb122390ba0ec44a43f78a3 | 189 | sql | SQL | public/dsscript/create_db.sql | duncanssmith/dart | c6c233d9b32d9ac4bf799516f38a4eb44635043e | [
"MIT"
] | null | null | null | public/dsscript/create_db.sql | duncanssmith/dart | c6c233d9b32d9ac4bf799516f38a4eb44635043e | [
"MIT"
] | null | null | null | public/dsscript/create_db.sql | duncanssmith/dart | c6c233d9b32d9ac4bf799516f38a4eb44635043e | [
"MIT"
] | null | null | null | drop database dart;
create database dart;
create user 'dart'@'localhost' identified by 'sienna';
grant all privileges on dart.* to 'dart'@'localhost' with grant option;
flush privileges;
| 23.625 | 71 | 0.761905 |
3f584348f86007940b303e786f4ae32cbf238cd5 | 683 | swift | Swift | Demo/Source/GetUserPosts.swift | UpBra/Winkie | 8b73a0ff4bcb16b9f7c8e66c73ea563f8f0431df | [
"MIT"
] | null | null | null | Demo/Source/GetUserPosts.swift | UpBra/Winkie | 8b73a0ff4bcb16b9f7c8e66c73ea563f8f0431df | [
"MIT"
] | 5 | 2020-03-17T19:44:15.000Z | 2020-03-27T14:48:01.000Z | Demo/Source/GetUserPosts.swift | UpBra/Winkie | 8b73a0ff4bcb16b9f7c8e66c73ea563f8f0431df | [
"MIT"
] | null | null | null | // —————————————————————————————————————————————————————————————————————————
//
// GetUserPosts.swift
// Copyright © 2019 gleesh. All rights reserved.
//
// —————————————————————————————————————————————————————————————————————————
import Winkie
enum Endpoint {
enum Placeholder {
static let users = "https://jsonplaceholder.typicode.com/posts"
}
}
struct PostResponse: Codable {
let userId: Int
let id: Int
let title: String
let body: String
}
struct PostsRequest: NetworkRequest {
typealias ResultType = [PostResponse]
var request: URLRequest?
init(string: String) {
guard let url = URL(string: string) else { return }
request = URLRequest(url: url)
}
}
| 17.512821 | 76 | 0.544656 |
917c88b6e4e70b0ce13ae062a5023c30f5042fb1 | 236 | lua | Lua | test-configs/sqlite3.lua | LuaDist-testing/sqltable | 3f5e42b998fd20103f7abd9fda2dc4f269f29654 | [
"MIT"
] | null | null | null | test-configs/sqlite3.lua | LuaDist-testing/sqltable | 3f5e42b998fd20103f7abd9fda2dc4f269f29654 | [
"MIT"
] | null | null | null | test-configs/sqlite3.lua | LuaDist-testing/sqltable | 3f5e42b998fd20103f7abd9fda2dc4f269f29654 | [
"MIT"
] | null | null | null | #!/usr/bin/env lua
return {
connection = {
type = 'SQLite3',
name = 'sqlite3-test',
},
table1_vendor = {
booleans = { 'flag1', 'flag2' }
},
table2_vendor = {
},
table3_vendor = {
booleans = { 'flag1', 'flag2' }
}
}
| 11.238095 | 33 | 0.54661 |
bcb87df886ced5734ff688085cdcedc23b03af10 | 97,054 | sql | SQL | src/allmznew20130315.sql | hangtoo/Tools | 6a35ab27cba075bd3feecd3c103de7f723cec109 | [
"Apache-2.0"
] | null | null | null | src/allmznew20130315.sql | hangtoo/Tools | 6a35ab27cba075bd3feecd3c103de7f723cec109 | [
"Apache-2.0"
] | null | null | null | src/allmznew20130315.sql | hangtoo/Tools | 6a35ab27cba075bd3feecd3c103de7f723cec109 | [
"Apache-2.0"
] | null | null | null | //////////////sms
select count(distinct(called)),min(send_time),
sum(if(send_time >='2013-03-15 00:00:00' and send_time<'2013-03-15 01:00:00',1,0)) w0,
sum(if(send_time >='2013-03-15 01:00:00' and send_time<'2013-03-15 02:00:00',1,0)) w1,
sum(if(send_time >='2013-03-15 02:00:00' and send_time<'2013-03-15 03:00:00',1,0)) w2,
sum(if(send_time >='2013-03-15 03:00:00' and send_time<'2013-03-15 04:00:00',1,0)) w3,
sum(if(send_time >='2013-03-15 04:00:00' and send_time<'2013-03-15 05:00:00',1,0)) w4,
sum(if(send_time >='2013-03-15 05:00:00' and send_time<'2013-03-15 06:00:00',1,0)) w5,
sum(if(send_time >='2013-03-15 06:00:00' and send_time<'2013-03-15 07:00:00',1,0)) w6,
sum(if(send_time >='2013-03-15 07:00:00' and send_time<'2013-03-15 08:00:00',1,0)) w7,
sum(if(send_time >='2013-03-15 08:00:00' and send_time<'2013-03-15 09:00:00',1,0)) w8,
sum(if(send_time >='2013-03-15 09:00:00' and send_time<'2013-03-15 10:00:00',1,0)) w9,
sum(if(send_time >='2013-03-15 10:00:00' and send_time<'2013-03-15 11:00:00',1,0)) w10,
sum(if(send_time >='2013-03-15 11:00:00' and send_time<'2013-03-15 12:00:00',1,0)) w11,
sum(if(send_time >='2013-03-15 12:00:00' and send_time<'2013-03-15 13:00:00',1,0)) w12,
sum(if(send_time >='2013-03-15 13:00:00' and send_time<'2013-03-15 14:00:00',1,0)) w13,
sum(if(send_time >='2013-03-15 14:00:00' and send_time<'2013-03-15 15:00:00',1,0)) w14,
sum(if(send_time >='2013-03-15 15:00:00' and send_time<'2013-03-15 16:00:00',1,0)) w15,
sum(if(send_time >='2013-03-15 16:00:00' and send_time<'2013-03-15 17:00:00',1,0)) w16,
sum(if(send_time >='2013-03-15 17:00:00' and send_time<'2013-03-15 18:00:00',1,0)) w17,
sum(if(send_time >='2013-03-15 18:00:00' and send_time<'2013-03-15 19:00:00',1,0)) w18,
sum(if(send_time >='2013-03-15 19:00:00' and send_time<'2013-03-15 20:00:00',1,0)) w19,
sum(if(send_time >='2013-03-15 20:00:00' and send_time<'2013-03-15 21:00:00',1,0)) w20,
sum(if(send_time >='2013-03-15 21:00:00' and send_time<'2013-03-15 22:00:00',1,0)) w21,
sum(if(send_time >='2013-03-15 22:00:00' and send_time<'2013-03-15 23:00:00',1,0)) w22,
sum(if(send_time >='2013-03-15 23:00:00' and send_time<'2013-03-15 24:00:00',1,0)) w23
from tbl_sms_log where send_time >'2013-03-15' and send_time<='2013-03-15 24:00:00' and send_type!='2'
union all select count(distinct(called)),min(send_time),
sum(if(send_time >='2013-03-16 00:00:00' and send_time<'2013-03-16 01:00:00',1,0)) w0,
sum(if(send_time >='2013-03-16 01:00:00' and send_time<'2013-03-16 02:00:00',1,0)) w1,
sum(if(send_time >='2013-03-16 02:00:00' and send_time<'2013-03-16 03:00:00',1,0)) w2,
sum(if(send_time >='2013-03-16 03:00:00' and send_time<'2013-03-16 04:00:00',1,0)) w3,
sum(if(send_time >='2013-03-16 04:00:00' and send_time<'2013-03-16 05:00:00',1,0)) w4,
sum(if(send_time >='2013-03-16 05:00:00' and send_time<'2013-03-16 06:00:00',1,0)) w5,
sum(if(send_time >='2013-03-16 06:00:00' and send_time<'2013-03-16 07:00:00',1,0)) w6,
sum(if(send_time >='2013-03-16 07:00:00' and send_time<'2013-03-16 08:00:00',1,0)) w7,
sum(if(send_time >='2013-03-16 08:00:00' and send_time<'2013-03-16 09:00:00',1,0)) w8,
sum(if(send_time >='2013-03-16 09:00:00' and send_time<'2013-03-16 10:00:00',1,0)) w9,
sum(if(send_time >='2013-03-16 10:00:00' and send_time<'2013-03-16 11:00:00',1,0)) w10,
sum(if(send_time >='2013-03-16 11:00:00' and send_time<'2013-03-16 12:00:00',1,0)) w11,
sum(if(send_time >='2013-03-16 12:00:00' and send_time<'2013-03-16 13:00:00',1,0)) w12,
sum(if(send_time >='2013-03-16 13:00:00' and send_time<'2013-03-16 14:00:00',1,0)) w13,
sum(if(send_time >='2013-03-16 14:00:00' and send_time<'2013-03-16 15:00:00',1,0)) w14,
sum(if(send_time >='2013-03-16 15:00:00' and send_time<'2013-03-16 16:00:00',1,0)) w15,
sum(if(send_time >='2013-03-16 16:00:00' and send_time<'2013-03-16 17:00:00',1,0)) w16,
sum(if(send_time >='2013-03-16 17:00:00' and send_time<'2013-03-16 18:00:00',1,0)) w17,
sum(if(send_time >='2013-03-16 18:00:00' and send_time<'2013-03-16 19:00:00',1,0)) w18,
sum(if(send_time >='2013-03-16 19:00:00' and send_time<'2013-03-16 20:00:00',1,0)) w19,
sum(if(send_time >='2013-03-16 20:00:00' and send_time<'2013-03-16 21:00:00',1,0)) w20,
sum(if(send_time >='2013-03-16 21:00:00' and send_time<'2013-03-16 22:00:00',1,0)) w21,
sum(if(send_time >='2013-03-16 22:00:00' and send_time<'2013-03-16 23:00:00',1,0)) w22,
sum(if(send_time >='2013-03-16 23:00:00' and send_time<'2013-03-16 24:00:00',1,0)) w23
from tbl_sms_log where send_time >'2013-03-16' and send_time<='2013-03-16 24:00:00' and send_type!='2'
union all select count(distinct(called)),min(send_time),
sum(if(send_time >='2013-03-17 00:00:00' and send_time<'2013-03-17 01:00:00',1,0)) w0,
sum(if(send_time >='2013-03-17 01:00:00' and send_time<'2013-03-17 02:00:00',1,0)) w1,
sum(if(send_time >='2013-03-17 02:00:00' and send_time<'2013-03-17 03:00:00',1,0)) w2,
sum(if(send_time >='2013-03-17 03:00:00' and send_time<'2013-03-17 04:00:00',1,0)) w3,
sum(if(send_time >='2013-03-17 04:00:00' and send_time<'2013-03-17 05:00:00',1,0)) w4,
sum(if(send_time >='2013-03-17 05:00:00' and send_time<'2013-03-17 06:00:00',1,0)) w5,
sum(if(send_time >='2013-03-17 06:00:00' and send_time<'2013-03-17 07:00:00',1,0)) w6,
sum(if(send_time >='2013-03-17 07:00:00' and send_time<'2013-03-17 08:00:00',1,0)) w7,
sum(if(send_time >='2013-03-17 08:00:00' and send_time<'2013-03-17 09:00:00',1,0)) w8,
sum(if(send_time >='2013-03-17 09:00:00' and send_time<'2013-03-17 10:00:00',1,0)) w9,
sum(if(send_time >='2013-03-17 10:00:00' and send_time<'2013-03-17 11:00:00',1,0)) w10,
sum(if(send_time >='2013-03-17 11:00:00' and send_time<'2013-03-17 12:00:00',1,0)) w11,
sum(if(send_time >='2013-03-17 12:00:00' and send_time<'2013-03-17 13:00:00',1,0)) w12,
sum(if(send_time >='2013-03-17 13:00:00' and send_time<'2013-03-17 14:00:00',1,0)) w13,
sum(if(send_time >='2013-03-17 14:00:00' and send_time<'2013-03-17 15:00:00',1,0)) w14,
sum(if(send_time >='2013-03-17 15:00:00' and send_time<'2013-03-17 16:00:00',1,0)) w15,
sum(if(send_time >='2013-03-17 16:00:00' and send_time<'2013-03-17 17:00:00',1,0)) w16,
sum(if(send_time >='2013-03-17 17:00:00' and send_time<'2013-03-17 18:00:00',1,0)) w17,
sum(if(send_time >='2013-03-17 18:00:00' and send_time<'2013-03-17 19:00:00',1,0)) w18,
sum(if(send_time >='2013-03-17 19:00:00' and send_time<'2013-03-17 20:00:00',1,0)) w19,
sum(if(send_time >='2013-03-17 20:00:00' and send_time<'2013-03-17 21:00:00',1,0)) w20,
sum(if(send_time >='2013-03-17 21:00:00' and send_time<'2013-03-17 22:00:00',1,0)) w21,
sum(if(send_time >='2013-03-17 22:00:00' and send_time<'2013-03-17 23:00:00',1,0)) w22,
sum(if(send_time >='2013-03-17 23:00:00' and send_time<'2013-03-17 24:00:00',1,0)) w23
from tbl_sms_log where send_time >'2013-03-17' and send_time<='2013-03-17 24:00:00' and send_type!='2'
union all select count(distinct(called)),min(send_time),
sum(if(send_time >='2013-03-18 00:00:00' and send_time<'2013-03-18 01:00:00',1,0)) w0,
sum(if(send_time >='2013-03-18 01:00:00' and send_time<'2013-03-18 02:00:00',1,0)) w1,
sum(if(send_time >='2013-03-18 02:00:00' and send_time<'2013-03-18 03:00:00',1,0)) w2,
sum(if(send_time >='2013-03-18 03:00:00' and send_time<'2013-03-18 04:00:00',1,0)) w3,
sum(if(send_time >='2013-03-18 04:00:00' and send_time<'2013-03-18 05:00:00',1,0)) w4,
sum(if(send_time >='2013-03-18 05:00:00' and send_time<'2013-03-18 06:00:00',1,0)) w5,
sum(if(send_time >='2013-03-18 06:00:00' and send_time<'2013-03-18 07:00:00',1,0)) w6,
sum(if(send_time >='2013-03-18 07:00:00' and send_time<'2013-03-18 08:00:00',1,0)) w7,
sum(if(send_time >='2013-03-18 08:00:00' and send_time<'2013-03-18 09:00:00',1,0)) w8,
sum(if(send_time >='2013-03-18 09:00:00' and send_time<'2013-03-18 10:00:00',1,0)) w9,
sum(if(send_time >='2013-03-18 10:00:00' and send_time<'2013-03-18 11:00:00',1,0)) w10,
sum(if(send_time >='2013-03-18 11:00:00' and send_time<'2013-03-18 12:00:00',1,0)) w11,
sum(if(send_time >='2013-03-18 12:00:00' and send_time<'2013-03-18 13:00:00',1,0)) w12,
sum(if(send_time >='2013-03-18 13:00:00' and send_time<'2013-03-18 14:00:00',1,0)) w13,
sum(if(send_time >='2013-03-18 14:00:00' and send_time<'2013-03-18 15:00:00',1,0)) w14,
sum(if(send_time >='2013-03-18 15:00:00' and send_time<'2013-03-18 16:00:00',1,0)) w15,
sum(if(send_time >='2013-03-18 16:00:00' and send_time<'2013-03-18 17:00:00',1,0)) w16,
sum(if(send_time >='2013-03-18 17:00:00' and send_time<'2013-03-18 18:00:00',1,0)) w17,
sum(if(send_time >='2013-03-18 18:00:00' and send_time<'2013-03-18 19:00:00',1,0)) w18,
sum(if(send_time >='2013-03-18 19:00:00' and send_time<'2013-03-18 20:00:00',1,0)) w19,
sum(if(send_time >='2013-03-18 20:00:00' and send_time<'2013-03-18 21:00:00',1,0)) w20,
sum(if(send_time >='2013-03-18 21:00:00' and send_time<'2013-03-18 22:00:00',1,0)) w21,
sum(if(send_time >='2013-03-18 22:00:00' and send_time<'2013-03-18 23:00:00',1,0)) w22,
sum(if(send_time >='2013-03-18 23:00:00' and send_time<'2013-03-18 24:00:00',1,0)) w23
from tbl_sms_log where send_time >'2013-03-18' and send_time<='2013-03-18 24:00:00' and send_type!='2'
union all select count(distinct(called)),min(send_time),
sum(if(send_time >='2013-03-19 00:00:00' and send_time<'2013-03-19 01:00:00',1,0)) w0,
sum(if(send_time >='2013-03-19 01:00:00' and send_time<'2013-03-19 02:00:00',1,0)) w1,
sum(if(send_time >='2013-03-19 02:00:00' and send_time<'2013-03-19 03:00:00',1,0)) w2,
sum(if(send_time >='2013-03-19 03:00:00' and send_time<'2013-03-19 04:00:00',1,0)) w3,
sum(if(send_time >='2013-03-19 04:00:00' and send_time<'2013-03-19 05:00:00',1,0)) w4,
sum(if(send_time >='2013-03-19 05:00:00' and send_time<'2013-03-19 06:00:00',1,0)) w5,
sum(if(send_time >='2013-03-19 06:00:00' and send_time<'2013-03-19 07:00:00',1,0)) w6,
sum(if(send_time >='2013-03-19 07:00:00' and send_time<'2013-03-19 08:00:00',1,0)) w7,
sum(if(send_time >='2013-03-19 08:00:00' and send_time<'2013-03-19 09:00:00',1,0)) w8,
sum(if(send_time >='2013-03-19 09:00:00' and send_time<'2013-03-19 10:00:00',1,0)) w9,
sum(if(send_time >='2013-03-19 10:00:00' and send_time<'2013-03-19 11:00:00',1,0)) w10,
sum(if(send_time >='2013-03-19 11:00:00' and send_time<'2013-03-19 12:00:00',1,0)) w11,
sum(if(send_time >='2013-03-19 12:00:00' and send_time<'2013-03-19 13:00:00',1,0)) w12,
sum(if(send_time >='2013-03-19 13:00:00' and send_time<'2013-03-19 14:00:00',1,0)) w13,
sum(if(send_time >='2013-03-19 14:00:00' and send_time<'2013-03-19 15:00:00',1,0)) w14,
sum(if(send_time >='2013-03-19 15:00:00' and send_time<'2013-03-19 16:00:00',1,0)) w15,
sum(if(send_time >='2013-03-19 16:00:00' and send_time<'2013-03-19 17:00:00',1,0)) w16,
sum(if(send_time >='2013-03-19 17:00:00' and send_time<'2013-03-19 18:00:00',1,0)) w17,
sum(if(send_time >='2013-03-19 18:00:00' and send_time<'2013-03-19 19:00:00',1,0)) w18,
sum(if(send_time >='2013-03-19 19:00:00' and send_time<'2013-03-19 20:00:00',1,0)) w19,
sum(if(send_time >='2013-03-19 20:00:00' and send_time<'2013-03-19 21:00:00',1,0)) w20,
sum(if(send_time >='2013-03-19 21:00:00' and send_time<'2013-03-19 22:00:00',1,0)) w21,
sum(if(send_time >='2013-03-19 22:00:00' and send_time<'2013-03-19 23:00:00',1,0)) w22,
sum(if(send_time >='2013-03-19 23:00:00' and send_time<'2013-03-19 24:00:00',1,0)) w23
from tbl_sms_log where send_time >'2013-03-19' and send_time<='2013-03-19 24:00:00' and send_type!='2'
union all select count(distinct(called)),min(send_time),
sum(if(send_time >='2013-03-20 00:00:00' and send_time<'2013-03-20 01:00:00',1,0)) w0,
sum(if(send_time >='2013-03-20 01:00:00' and send_time<'2013-03-20 02:00:00',1,0)) w1,
sum(if(send_time >='2013-03-20 02:00:00' and send_time<'2013-03-20 03:00:00',1,0)) w2,
sum(if(send_time >='2013-03-20 03:00:00' and send_time<'2013-03-20 04:00:00',1,0)) w3,
sum(if(send_time >='2013-03-20 04:00:00' and send_time<'2013-03-20 05:00:00',1,0)) w4,
sum(if(send_time >='2013-03-20 05:00:00' and send_time<'2013-03-20 06:00:00',1,0)) w5,
sum(if(send_time >='2013-03-20 06:00:00' and send_time<'2013-03-20 07:00:00',1,0)) w6,
sum(if(send_time >='2013-03-20 07:00:00' and send_time<'2013-03-20 08:00:00',1,0)) w7,
sum(if(send_time >='2013-03-20 08:00:00' and send_time<'2013-03-20 09:00:00',1,0)) w8,
sum(if(send_time >='2013-03-20 09:00:00' and send_time<'2013-03-20 10:00:00',1,0)) w9,
sum(if(send_time >='2013-03-20 10:00:00' and send_time<'2013-03-20 11:00:00',1,0)) w10,
sum(if(send_time >='2013-03-20 11:00:00' and send_time<'2013-03-20 12:00:00',1,0)) w11,
sum(if(send_time >='2013-03-20 12:00:00' and send_time<'2013-03-20 13:00:00',1,0)) w12,
sum(if(send_time >='2013-03-20 13:00:00' and send_time<'2013-03-20 14:00:00',1,0)) w13,
sum(if(send_time >='2013-03-20 14:00:00' and send_time<'2013-03-20 15:00:00',1,0)) w14,
sum(if(send_time >='2013-03-20 15:00:00' and send_time<'2013-03-20 16:00:00',1,0)) w15,
sum(if(send_time >='2013-03-20 16:00:00' and send_time<'2013-03-20 17:00:00',1,0)) w16,
sum(if(send_time >='2013-03-20 17:00:00' and send_time<'2013-03-20 18:00:00',1,0)) w17,
sum(if(send_time >='2013-03-20 18:00:00' and send_time<'2013-03-20 19:00:00',1,0)) w18,
sum(if(send_time >='2013-03-20 19:00:00' and send_time<'2013-03-20 20:00:00',1,0)) w19,
sum(if(send_time >='2013-03-20 20:00:00' and send_time<'2013-03-20 21:00:00',1,0)) w20,
sum(if(send_time >='2013-03-20 21:00:00' and send_time<'2013-03-20 22:00:00',1,0)) w21,
sum(if(send_time >='2013-03-20 22:00:00' and send_time<'2013-03-20 23:00:00',1,0)) w22,
sum(if(send_time >='2013-03-20 23:00:00' and send_time<'2013-03-20 24:00:00',1,0)) w23
from tbl_sms_log where send_time >'2013-03-20' and send_time<='2013-03-20 24:00:00' and send_type!='2'
union all select count(distinct(called)),min(send_time),
sum(if(send_time >='2013-03-21 00:00:00' and send_time<'2013-03-21 01:00:00',1,0)) w0,
sum(if(send_time >='2013-03-21 01:00:00' and send_time<'2013-03-21 02:00:00',1,0)) w1,
sum(if(send_time >='2013-03-21 02:00:00' and send_time<'2013-03-21 03:00:00',1,0)) w2,
sum(if(send_time >='2013-03-21 03:00:00' and send_time<'2013-03-21 04:00:00',1,0)) w3,
sum(if(send_time >='2013-03-21 04:00:00' and send_time<'2013-03-21 05:00:00',1,0)) w4,
sum(if(send_time >='2013-03-21 05:00:00' and send_time<'2013-03-21 06:00:00',1,0)) w5,
sum(if(send_time >='2013-03-21 06:00:00' and send_time<'2013-03-21 07:00:00',1,0)) w6,
sum(if(send_time >='2013-03-21 07:00:00' and send_time<'2013-03-21 08:00:00',1,0)) w7,
sum(if(send_time >='2013-03-21 08:00:00' and send_time<'2013-03-21 09:00:00',1,0)) w8,
sum(if(send_time >='2013-03-21 09:00:00' and send_time<'2013-03-21 10:00:00',1,0)) w9,
sum(if(send_time >='2013-03-21 10:00:00' and send_time<'2013-03-21 11:00:00',1,0)) w10,
sum(if(send_time >='2013-03-21 11:00:00' and send_time<'2013-03-21 12:00:00',1,0)) w11,
sum(if(send_time >='2013-03-21 12:00:00' and send_time<'2013-03-21 13:00:00',1,0)) w12,
sum(if(send_time >='2013-03-21 13:00:00' and send_time<'2013-03-21 14:00:00',1,0)) w13,
sum(if(send_time >='2013-03-21 14:00:00' and send_time<'2013-03-21 15:00:00',1,0)) w14,
sum(if(send_time >='2013-03-21 15:00:00' and send_time<'2013-03-21 16:00:00',1,0)) w15,
sum(if(send_time >='2013-03-21 16:00:00' and send_time<'2013-03-21 17:00:00',1,0)) w16,
sum(if(send_time >='2013-03-21 17:00:00' and send_time<'2013-03-21 18:00:00',1,0)) w17,
sum(if(send_time >='2013-03-21 18:00:00' and send_time<'2013-03-21 19:00:00',1,0)) w18,
sum(if(send_time >='2013-03-21 19:00:00' and send_time<'2013-03-21 20:00:00',1,0)) w19,
sum(if(send_time >='2013-03-21 20:00:00' and send_time<'2013-03-21 21:00:00',1,0)) w20,
sum(if(send_time >='2013-03-21 21:00:00' and send_time<'2013-03-21 22:00:00',1,0)) w21,
sum(if(send_time >='2013-03-21 22:00:00' and send_time<'2013-03-21 23:00:00',1,0)) w22,
sum(if(send_time >='2013-03-21 23:00:00' and send_time<'2013-03-21 24:00:00',1,0)) w23
from tbl_sms_log where send_time >'2013-03-21' and send_time<='2013-03-21 24:00:00' and send_type!='2'
-- ======================= presetting =======================
-- Per-day hourly breakdown of presetting activity, 2013-03-15 through 2013-03-21.
-- One result row per day: earliest settime, distinct MDN count, and 24 hourly
-- bucket counts (w0..w23).
--
-- Fixes vs. the original query:
--   * '<day> 24:00:00' is not a valid DATETIME literal; each day is now the
--     half-open window [day 00:00:00, next-day 00:00:00).
--   * The original WHERE used settime > '<day>', which silently dropped rows at
--     exactly midnight even though bucket w0 was written to count them.
--   * The 24 hand-written range pairs per day collapse to HOUR(settime), which
--     is equivalent once the WHERE clause restricts rows to a single day.
select min(settime),count(distinct(mdn)),
sum(if(hour(settime)=0,1,0)) w0,
sum(if(hour(settime)=1,1,0)) w1,
sum(if(hour(settime)=2,1,0)) w2,
sum(if(hour(settime)=3,1,0)) w3,
sum(if(hour(settime)=4,1,0)) w4,
sum(if(hour(settime)=5,1,0)) w5,
sum(if(hour(settime)=6,1,0)) w6,
sum(if(hour(settime)=7,1,0)) w7,
sum(if(hour(settime)=8,1,0)) w8,
sum(if(hour(settime)=9,1,0)) w9,
sum(if(hour(settime)=10,1,0)) w10,
sum(if(hour(settime)=11,1,0)) w11,
sum(if(hour(settime)=12,1,0)) w12,
sum(if(hour(settime)=13,1,0)) w13,
sum(if(hour(settime)=14,1,0)) w14,
sum(if(hour(settime)=15,1,0)) w15,
sum(if(hour(settime)=16,1,0)) w16,
sum(if(hour(settime)=17,1,0)) w17,
sum(if(hour(settime)=18,1,0)) w18,
sum(if(hour(settime)=19,1,0)) w19,
sum(if(hour(settime)=20,1,0)) w20,
sum(if(hour(settime)=21,1,0)) w21,
sum(if(hour(settime)=22,1,0)) w22,
sum(if(hour(settime)=23,1,0)) w23
from tbl_presetting_log where settime >='2013-03-15 00:00:00' and settime<'2013-03-16 00:00:00'
union all select min(settime),count(distinct(mdn)),
sum(if(hour(settime)=0,1,0)) w0,
sum(if(hour(settime)=1,1,0)) w1,
sum(if(hour(settime)=2,1,0)) w2,
sum(if(hour(settime)=3,1,0)) w3,
sum(if(hour(settime)=4,1,0)) w4,
sum(if(hour(settime)=5,1,0)) w5,
sum(if(hour(settime)=6,1,0)) w6,
sum(if(hour(settime)=7,1,0)) w7,
sum(if(hour(settime)=8,1,0)) w8,
sum(if(hour(settime)=9,1,0)) w9,
sum(if(hour(settime)=10,1,0)) w10,
sum(if(hour(settime)=11,1,0)) w11,
sum(if(hour(settime)=12,1,0)) w12,
sum(if(hour(settime)=13,1,0)) w13,
sum(if(hour(settime)=14,1,0)) w14,
sum(if(hour(settime)=15,1,0)) w15,
sum(if(hour(settime)=16,1,0)) w16,
sum(if(hour(settime)=17,1,0)) w17,
sum(if(hour(settime)=18,1,0)) w18,
sum(if(hour(settime)=19,1,0)) w19,
sum(if(hour(settime)=20,1,0)) w20,
sum(if(hour(settime)=21,1,0)) w21,
sum(if(hour(settime)=22,1,0)) w22,
sum(if(hour(settime)=23,1,0)) w23
from tbl_presetting_log where settime >='2013-03-16 00:00:00' and settime<'2013-03-17 00:00:00'
union all select min(settime),count(distinct(mdn)),
sum(if(hour(settime)=0,1,0)) w0,
sum(if(hour(settime)=1,1,0)) w1,
sum(if(hour(settime)=2,1,0)) w2,
sum(if(hour(settime)=3,1,0)) w3,
sum(if(hour(settime)=4,1,0)) w4,
sum(if(hour(settime)=5,1,0)) w5,
sum(if(hour(settime)=6,1,0)) w6,
sum(if(hour(settime)=7,1,0)) w7,
sum(if(hour(settime)=8,1,0)) w8,
sum(if(hour(settime)=9,1,0)) w9,
sum(if(hour(settime)=10,1,0)) w10,
sum(if(hour(settime)=11,1,0)) w11,
sum(if(hour(settime)=12,1,0)) w12,
sum(if(hour(settime)=13,1,0)) w13,
sum(if(hour(settime)=14,1,0)) w14,
sum(if(hour(settime)=15,1,0)) w15,
sum(if(hour(settime)=16,1,0)) w16,
sum(if(hour(settime)=17,1,0)) w17,
sum(if(hour(settime)=18,1,0)) w18,
sum(if(hour(settime)=19,1,0)) w19,
sum(if(hour(settime)=20,1,0)) w20,
sum(if(hour(settime)=21,1,0)) w21,
sum(if(hour(settime)=22,1,0)) w22,
sum(if(hour(settime)=23,1,0)) w23
from tbl_presetting_log where settime >='2013-03-17 00:00:00' and settime<'2013-03-18 00:00:00'
union all select min(settime),count(distinct(mdn)),
sum(if(hour(settime)=0,1,0)) w0,
sum(if(hour(settime)=1,1,0)) w1,
sum(if(hour(settime)=2,1,0)) w2,
sum(if(hour(settime)=3,1,0)) w3,
sum(if(hour(settime)=4,1,0)) w4,
sum(if(hour(settime)=5,1,0)) w5,
sum(if(hour(settime)=6,1,0)) w6,
sum(if(hour(settime)=7,1,0)) w7,
sum(if(hour(settime)=8,1,0)) w8,
sum(if(hour(settime)=9,1,0)) w9,
sum(if(hour(settime)=10,1,0)) w10,
sum(if(hour(settime)=11,1,0)) w11,
sum(if(hour(settime)=12,1,0)) w12,
sum(if(hour(settime)=13,1,0)) w13,
sum(if(hour(settime)=14,1,0)) w14,
sum(if(hour(settime)=15,1,0)) w15,
sum(if(hour(settime)=16,1,0)) w16,
sum(if(hour(settime)=17,1,0)) w17,
sum(if(hour(settime)=18,1,0)) w18,
sum(if(hour(settime)=19,1,0)) w19,
sum(if(hour(settime)=20,1,0)) w20,
sum(if(hour(settime)=21,1,0)) w21,
sum(if(hour(settime)=22,1,0)) w22,
sum(if(hour(settime)=23,1,0)) w23
from tbl_presetting_log where settime >='2013-03-18 00:00:00' and settime<'2013-03-19 00:00:00'
union all select min(settime),count(distinct(mdn)),
sum(if(hour(settime)=0,1,0)) w0,
sum(if(hour(settime)=1,1,0)) w1,
sum(if(hour(settime)=2,1,0)) w2,
sum(if(hour(settime)=3,1,0)) w3,
sum(if(hour(settime)=4,1,0)) w4,
sum(if(hour(settime)=5,1,0)) w5,
sum(if(hour(settime)=6,1,0)) w6,
sum(if(hour(settime)=7,1,0)) w7,
sum(if(hour(settime)=8,1,0)) w8,
sum(if(hour(settime)=9,1,0)) w9,
sum(if(hour(settime)=10,1,0)) w10,
sum(if(hour(settime)=11,1,0)) w11,
sum(if(hour(settime)=12,1,0)) w12,
sum(if(hour(settime)=13,1,0)) w13,
sum(if(hour(settime)=14,1,0)) w14,
sum(if(hour(settime)=15,1,0)) w15,
sum(if(hour(settime)=16,1,0)) w16,
sum(if(hour(settime)=17,1,0)) w17,
sum(if(hour(settime)=18,1,0)) w18,
sum(if(hour(settime)=19,1,0)) w19,
sum(if(hour(settime)=20,1,0)) w20,
sum(if(hour(settime)=21,1,0)) w21,
sum(if(hour(settime)=22,1,0)) w22,
sum(if(hour(settime)=23,1,0)) w23
from tbl_presetting_log where settime >='2013-03-19 00:00:00' and settime<'2013-03-20 00:00:00'
union all select min(settime),count(distinct(mdn)),
sum(if(hour(settime)=0,1,0)) w0,
sum(if(hour(settime)=1,1,0)) w1,
sum(if(hour(settime)=2,1,0)) w2,
sum(if(hour(settime)=3,1,0)) w3,
sum(if(hour(settime)=4,1,0)) w4,
sum(if(hour(settime)=5,1,0)) w5,
sum(if(hour(settime)=6,1,0)) w6,
sum(if(hour(settime)=7,1,0)) w7,
sum(if(hour(settime)=8,1,0)) w8,
sum(if(hour(settime)=9,1,0)) w9,
sum(if(hour(settime)=10,1,0)) w10,
sum(if(hour(settime)=11,1,0)) w11,
sum(if(hour(settime)=12,1,0)) w12,
sum(if(hour(settime)=13,1,0)) w13,
sum(if(hour(settime)=14,1,0)) w14,
sum(if(hour(settime)=15,1,0)) w15,
sum(if(hour(settime)=16,1,0)) w16,
sum(if(hour(settime)=17,1,0)) w17,
sum(if(hour(settime)=18,1,0)) w18,
sum(if(hour(settime)=19,1,0)) w19,
sum(if(hour(settime)=20,1,0)) w20,
sum(if(hour(settime)=21,1,0)) w21,
sum(if(hour(settime)=22,1,0)) w22,
sum(if(hour(settime)=23,1,0)) w23
from tbl_presetting_log where settime >='2013-03-20 00:00:00' and settime<'2013-03-21 00:00:00'
union all select min(settime),count(distinct(mdn)),
sum(if(hour(settime)=0,1,0)) w0,
sum(if(hour(settime)=1,1,0)) w1,
sum(if(hour(settime)=2,1,0)) w2,
sum(if(hour(settime)=3,1,0)) w3,
sum(if(hour(settime)=4,1,0)) w4,
sum(if(hour(settime)=5,1,0)) w5,
sum(if(hour(settime)=6,1,0)) w6,
sum(if(hour(settime)=7,1,0)) w7,
sum(if(hour(settime)=8,1,0)) w8,
sum(if(hour(settime)=9,1,0)) w9,
sum(if(hour(settime)=10,1,0)) w10,
sum(if(hour(settime)=11,1,0)) w11,
sum(if(hour(settime)=12,1,0)) w12,
sum(if(hour(settime)=13,1,0)) w13,
sum(if(hour(settime)=14,1,0)) w14,
sum(if(hour(settime)=15,1,0)) w15,
sum(if(hour(settime)=16,1,0)) w16,
sum(if(hour(settime)=17,1,0)) w17,
sum(if(hour(settime)=18,1,0)) w18,
sum(if(hour(settime)=19,1,0)) w19,
sum(if(hour(settime)=20,1,0)) w20,
sum(if(hour(settime)=21,1,0)) w21,
sum(if(hour(settime)=22,1,0)) w22,
sum(if(hour(settime)=23,1,0)) w23
from tbl_presetting_log where settime >='2013-03-21 00:00:00' and settime<'2013-03-22 00:00:00'
-- per-user presetting counts (original comment was GBK mojibake; wording lost — presumably "presetting by registered users", confirm with report owner)
-- Per-day presetting events joined to registered MDNs, 2013-03-15 .. 2013-03-21.
-- Each row: earliest check_time that day and the number of (t1,t2) join rows
-- whose settime also falls in that same day.
-- NOTE(review): the join on mdn multiplies rows when an MDN has several
-- presetting entries — presumably intentional ("events by known users");
-- confirm against the report consumer.
--
-- Fixes vs. the original:
--   * '<day> 24:00:00' is not a valid DATETIME literal and '2013-03-15 '
--     carried a stray trailing space; both replaced by half-open windows
--     [day 00:00:00, next-day 00:00:00).
--   * check_time > '<day>' dropped rows at exactly midnight; now >=.
select min(t1.check_time),sum(if(t2.settime >='2013-03-15 00:00:00' and t2.settime<'2013-03-16 00:00:00',1,0))
from tbl_mdn t1,tbl_presetting_log t2 where t1.mdn=t2.mdn and t1.check_time >='2013-03-15 00:00:00' and t1.check_time<'2013-03-16 00:00:00'
union all
select min(t1.check_time),sum(if(t2.settime >='2013-03-16 00:00:00' and t2.settime<'2013-03-17 00:00:00',1,0))
from tbl_mdn t1,tbl_presetting_log t2 where t1.mdn=t2.mdn and t1.check_time >='2013-03-16 00:00:00' and t1.check_time<'2013-03-17 00:00:00'
union all
select min(t1.check_time),sum(if(t2.settime >='2013-03-17 00:00:00' and t2.settime<'2013-03-18 00:00:00',1,0))
from tbl_mdn t1,tbl_presetting_log t2 where t1.mdn=t2.mdn and t1.check_time >='2013-03-17 00:00:00' and t1.check_time<'2013-03-18 00:00:00'
union all
select min(t1.check_time),sum(if(t2.settime >='2013-03-18 00:00:00' and t2.settime<'2013-03-19 00:00:00',1,0))
from tbl_mdn t1,tbl_presetting_log t2 where t1.mdn=t2.mdn and t1.check_time >='2013-03-18 00:00:00' and t1.check_time<'2013-03-19 00:00:00'
union all
select min(t1.check_time),sum(if(t2.settime >='2013-03-19 00:00:00' and t2.settime<'2013-03-20 00:00:00',1,0))
from tbl_mdn t1,tbl_presetting_log t2 where t1.mdn=t2.mdn and t1.check_time >='2013-03-19 00:00:00' and t1.check_time<'2013-03-20 00:00:00'
union all
select min(t1.check_time),sum(if(t2.settime >='2013-03-20 00:00:00' and t2.settime<'2013-03-21 00:00:00',1,0))
from tbl_mdn t1,tbl_presetting_log t2 where t1.mdn=t2.mdn and t1.check_time >='2013-03-20 00:00:00' and t1.check_time<'2013-03-21 00:00:00'
union all
select min(t1.check_time),sum(if(t2.settime >='2013-03-21 00:00:00' and t2.settime<'2013-03-22 00:00:00',1,0))
from tbl_mdn t1,tbl_presetting_log t2 where t1.mdn=t2.mdn and t1.check_time >='2013-03-21 00:00:00' and t1.check_time<'2013-03-22 00:00:00'
-- ======================= call_log =======================
-- Per-day call-log summary, 2013-03-15 .. 2013-03-21. Each row: distinct
-- called numbers, distinct callers, total calls, and calls that selected a
-- menu (menuid != 0).
-- Fix vs. the original: the last branch ended with
-- calltime <= '2013-03-21 24:00:00' (an invalid DATETIME literal); it now
-- uses the same half-open [day, next day) window as the other six branches.
select count(distinct(called)),count(distinct(caller)),count(caller),sum(if(menuid!=0,1,0)) from tbl_call_log
where calltime>='2013-03-15' and calltime<'2013-03-16'
union all
select count(distinct(called)),count(distinct(caller)),count(caller),sum(if(menuid!=0,1,0)) from tbl_call_log
where calltime>='2013-03-16' and calltime<'2013-03-17'
union all
select count(distinct(called)),count(distinct(caller)),count(caller),sum(if(menuid!=0,1,0)) from tbl_call_log
where calltime>='2013-03-17' and calltime<'2013-03-18'
union all
select count(distinct(called)),count(distinct(caller)),count(caller),sum(if(menuid!=0,1,0)) from tbl_call_log
where calltime>='2013-03-18' and calltime<'2013-03-19'
union all
select count(distinct(called)),count(distinct(caller)),count(caller),sum(if(menuid!=0,1,0)) from tbl_call_log
where calltime>='2013-03-19' and calltime<'2013-03-20'
union all
select count(distinct(called)),count(distinct(caller)),count(caller),sum(if(menuid!=0,1,0)) from tbl_call_log
where calltime>='2013-03-20' and calltime<'2013-03-21'
union all
select count(distinct(called)),count(distinct(caller)),count(caller),sum(if(menuid!=0,1,0)) from tbl_call_log
where calltime>='2013-03-21' and calltime<'2013-03-22'
-- hourly call distribution (original comment was GBK mojibake; wording lost — confirm intent with report owner)
select count(caller),calltime,
sum(if(calltime >='2013-03-15 00:00:00' and calltime<'2013-03-15 01:00:00',1,0)) w0,
sum(if(calltime >='2013-03-15 01:00:00' and calltime<'2013-03-15 02:00:00',1,0)) w1,
sum(if(calltime >='2013-03-15 02:00:00' and calltime<'2013-03-15 03:00:00',1,0)) w2,
sum(if(calltime >='2013-03-15 03:00:00' and calltime<'2013-03-15 04:00:00',1,0)) w3,
sum(if(calltime >='2013-03-15 04:00:00' and calltime<'2013-03-15 05:00:00',1,0)) w4,
sum(if(calltime >='2013-03-15 05:00:00' and calltime<'2013-03-15 06:00:00',1,0)) w5,
sum(if(calltime >='2013-03-15 06:00:00' and calltime<'2013-03-15 07:00:00',1,0)) w6,
sum(if(calltime >='2013-03-15 07:00:00' and calltime<'2013-03-15 08:00:00',1,0)) w7,
sum(if(calltime >='2013-03-15 08:00:00' and calltime<'2013-03-15 09:00:00',1,0)) w8,
sum(if(calltime >='2013-03-15 09:00:00' and calltime<'2013-03-15 10:00:00',1,0)) w9,
sum(if(calltime >='2013-03-15 10:00:00' and calltime<'2013-03-15 11:00:00',1,0)) w10,
sum(if(calltime >='2013-03-15 11:00:00' and calltime<'2013-03-15 12:00:00',1,0)) w11,
sum(if(calltime >='2013-03-15 12:00:00' and calltime<'2013-03-15 13:00:00',1,0)) w12,
sum(if(calltime >='2013-03-15 13:00:00' and calltime<'2013-03-15 14:00:00',1,0)) w13,
sum(if(calltime >='2013-03-15 14:00:00' and calltime<'2013-03-15 15:00:00',1,0)) w14,
sum(if(calltime >='2013-03-15 15:00:00' and calltime<'2013-03-15 16:00:00',1,0)) w15,
sum(if(calltime >='2013-03-15 16:00:00' and calltime<'2013-03-15 17:00:00',1,0)) w16,
sum(if(calltime >='2013-03-15 17:00:00' and calltime<'2013-03-15 18:00:00',1,0)) w17,
sum(if(calltime >='2013-03-15 18:00:00' and calltime<'2013-03-15 19:00:00',1,0)) w18,
sum(if(calltime >='2013-03-15 19:00:00' and calltime<'2013-03-15 20:00:00',1,0)) w19,
sum(if(calltime >='2013-03-15 20:00:00' and calltime<'2013-03-15 21:00:00',1,0)) w20,
sum(if(calltime >='2013-03-15 21:00:00' and calltime<'2013-03-15 22:00:00',1,0)) w21,
sum(if(calltime >='2013-03-15 22:00:00' and calltime<'2013-03-15 23:00:00',1,0)) w22,
sum(if(calltime >='2013-03-15 23:00:00' and calltime<='2013-03-15 24:00:00',1,0)) w23
from tbl_call_log where calltime>='2013-03-15' and calltime<'2013-03-15 24:00:00'
union all select count(caller),calltime,
sum(if(calltime >='2013-03-16 00:00:00' and calltime<'2013-03-16 01:00:00',1,0)) w0,
sum(if(calltime >='2013-03-16 01:00:00' and calltime<'2013-03-16 02:00:00',1,0)) w1,
sum(if(calltime >='2013-03-16 02:00:00' and calltime<'2013-03-16 03:00:00',1,0)) w2,
sum(if(calltime >='2013-03-16 03:00:00' and calltime<'2013-03-16 04:00:00',1,0)) w3,
sum(if(calltime >='2013-03-16 04:00:00' and calltime<'2013-03-16 05:00:00',1,0)) w4,
sum(if(calltime >='2013-03-16 05:00:00' and calltime<'2013-03-16 06:00:00',1,0)) w5,
sum(if(calltime >='2013-03-16 06:00:00' and calltime<'2013-03-16 07:00:00',1,0)) w6,
sum(if(calltime >='2013-03-16 07:00:00' and calltime<'2013-03-16 08:00:00',1,0)) w7,
sum(if(calltime >='2013-03-16 08:00:00' and calltime<'2013-03-16 09:00:00',1,0)) w8,
sum(if(calltime >='2013-03-16 09:00:00' and calltime<'2013-03-16 10:00:00',1,0)) w9,
sum(if(calltime >='2013-03-16 10:00:00' and calltime<'2013-03-16 11:00:00',1,0)) w10,
sum(if(calltime >='2013-03-16 11:00:00' and calltime<'2013-03-16 12:00:00',1,0)) w11,
sum(if(calltime >='2013-03-16 12:00:00' and calltime<'2013-03-16 13:00:00',1,0)) w12,
sum(if(calltime >='2013-03-16 13:00:00' and calltime<'2013-03-16 14:00:00',1,0)) w13,
sum(if(calltime >='2013-03-16 14:00:00' and calltime<'2013-03-16 15:00:00',1,0)) w14,
sum(if(calltime >='2013-03-16 15:00:00' and calltime<'2013-03-16 16:00:00',1,0)) w15,
sum(if(calltime >='2013-03-16 16:00:00' and calltime<'2013-03-16 17:00:00',1,0)) w16,
sum(if(calltime >='2013-03-16 17:00:00' and calltime<'2013-03-16 18:00:00',1,0)) w17,
sum(if(calltime >='2013-03-16 18:00:00' and calltime<'2013-03-16 19:00:00',1,0)) w18,
sum(if(calltime >='2013-03-16 19:00:00' and calltime<'2013-03-16 20:00:00',1,0)) w19,
sum(if(calltime >='2013-03-16 20:00:00' and calltime<'2013-03-16 21:00:00',1,0)) w20,
sum(if(calltime >='2013-03-16 21:00:00' and calltime<'2013-03-16 22:00:00',1,0)) w21,
sum(if(calltime >='2013-03-16 22:00:00' and calltime<'2013-03-16 23:00:00',1,0)) w22,
sum(if(calltime >='2013-03-16 23:00:00' and calltime<='2013-03-16 24:00:00',1,0)) w23
from tbl_call_log where calltime>='2013-03-16' and calltime<'2013-03-16 24:00:00'
union all select count(caller),calltime,
sum(if(calltime >='2013-03-17 00:00:00' and calltime<'2013-03-17 01:00:00',1,0)) w0,
sum(if(calltime >='2013-03-17 01:00:00' and calltime<'2013-03-17 02:00:00',1,0)) w1,
sum(if(calltime >='2013-03-17 02:00:00' and calltime<'2013-03-17 03:00:00',1,0)) w2,
sum(if(calltime >='2013-03-17 03:00:00' and calltime<'2013-03-17 04:00:00',1,0)) w3,
sum(if(calltime >='2013-03-17 04:00:00' and calltime<'2013-03-17 05:00:00',1,0)) w4,
sum(if(calltime >='2013-03-17 05:00:00' and calltime<'2013-03-17 06:00:00',1,0)) w5,
sum(if(calltime >='2013-03-17 06:00:00' and calltime<'2013-03-17 07:00:00',1,0)) w6,
sum(if(calltime >='2013-03-17 07:00:00' and calltime<'2013-03-17 08:00:00',1,0)) w7,
sum(if(calltime >='2013-03-17 08:00:00' and calltime<'2013-03-17 09:00:00',1,0)) w8,
sum(if(calltime >='2013-03-17 09:00:00' and calltime<'2013-03-17 10:00:00',1,0)) w9,
sum(if(calltime >='2013-03-17 10:00:00' and calltime<'2013-03-17 11:00:00',1,0)) w10,
sum(if(calltime >='2013-03-17 11:00:00' and calltime<'2013-03-17 12:00:00',1,0)) w11,
sum(if(calltime >='2013-03-17 12:00:00' and calltime<'2013-03-17 13:00:00',1,0)) w12,
sum(if(calltime >='2013-03-17 13:00:00' and calltime<'2013-03-17 14:00:00',1,0)) w13,
sum(if(calltime >='2013-03-17 14:00:00' and calltime<'2013-03-17 15:00:00',1,0)) w14,
sum(if(calltime >='2013-03-17 15:00:00' and calltime<'2013-03-17 16:00:00',1,0)) w15,
sum(if(calltime >='2013-03-17 16:00:00' and calltime<'2013-03-17 17:00:00',1,0)) w16,
sum(if(calltime >='2013-03-17 17:00:00' and calltime<'2013-03-17 18:00:00',1,0)) w17,
sum(if(calltime >='2013-03-17 18:00:00' and calltime<'2013-03-17 19:00:00',1,0)) w18,
sum(if(calltime >='2013-03-17 19:00:00' and calltime<'2013-03-17 20:00:00',1,0)) w19,
sum(if(calltime >='2013-03-17 20:00:00' and calltime<'2013-03-17 21:00:00',1,0)) w20,
sum(if(calltime >='2013-03-17 21:00:00' and calltime<'2013-03-17 22:00:00',1,0)) w21,
sum(if(calltime >='2013-03-17 22:00:00' and calltime<'2013-03-17 23:00:00',1,0)) w22,
sum(if(calltime >='2013-03-17 23:00:00' and calltime<='2013-03-17 24:00:00',1,0)) w23
from tbl_call_log where calltime>='2013-03-17' and calltime<'2013-03-17 24:00:00'
union all select count(caller),calltime,
sum(if(calltime >='2013-03-18 00:00:00' and calltime<'2013-03-18 01:00:00',1,0)) w0,
sum(if(calltime >='2013-03-18 01:00:00' and calltime<'2013-03-18 02:00:00',1,0)) w1,
sum(if(calltime >='2013-03-18 02:00:00' and calltime<'2013-03-18 03:00:00',1,0)) w2,
sum(if(calltime >='2013-03-18 03:00:00' and calltime<'2013-03-18 04:00:00',1,0)) w3,
sum(if(calltime >='2013-03-18 04:00:00' and calltime<'2013-03-18 05:00:00',1,0)) w4,
sum(if(calltime >='2013-03-18 05:00:00' and calltime<'2013-03-18 06:00:00',1,0)) w5,
sum(if(calltime >='2013-03-18 06:00:00' and calltime<'2013-03-18 07:00:00',1,0)) w6,
sum(if(calltime >='2013-03-18 07:00:00' and calltime<'2013-03-18 08:00:00',1,0)) w7,
sum(if(calltime >='2013-03-18 08:00:00' and calltime<'2013-03-18 09:00:00',1,0)) w8,
sum(if(calltime >='2013-03-18 09:00:00' and calltime<'2013-03-18 10:00:00',1,0)) w9,
sum(if(calltime >='2013-03-18 10:00:00' and calltime<'2013-03-18 11:00:00',1,0)) w10,
sum(if(calltime >='2013-03-18 11:00:00' and calltime<'2013-03-18 12:00:00',1,0)) w11,
sum(if(calltime >='2013-03-18 12:00:00' and calltime<'2013-03-18 13:00:00',1,0)) w12,
sum(if(calltime >='2013-03-18 13:00:00' and calltime<'2013-03-18 14:00:00',1,0)) w13,
sum(if(calltime >='2013-03-18 14:00:00' and calltime<'2013-03-18 15:00:00',1,0)) w14,
sum(if(calltime >='2013-03-18 15:00:00' and calltime<'2013-03-18 16:00:00',1,0)) w15,
sum(if(calltime >='2013-03-18 16:00:00' and calltime<'2013-03-18 17:00:00',1,0)) w16,
sum(if(calltime >='2013-03-18 17:00:00' and calltime<'2013-03-18 18:00:00',1,0)) w17,
sum(if(calltime >='2013-03-18 18:00:00' and calltime<'2013-03-18 19:00:00',1,0)) w18,
sum(if(calltime >='2013-03-18 19:00:00' and calltime<'2013-03-18 20:00:00',1,0)) w19,
sum(if(calltime >='2013-03-18 20:00:00' and calltime<'2013-03-18 21:00:00',1,0)) w20,
sum(if(calltime >='2013-03-18 21:00:00' and calltime<'2013-03-18 22:00:00',1,0)) w21,
sum(if(calltime >='2013-03-18 22:00:00' and calltime<'2013-03-18 23:00:00',1,0)) w22,
sum(if(calltime >='2013-03-18 23:00:00' and calltime<='2013-03-18 24:00:00',1,0)) w23
from tbl_call_log where calltime>='2013-03-18' and calltime<'2013-03-18 24:00:00'
union all select count(caller),calltime,
sum(if(calltime >='2013-03-19 00:00:00' and calltime<'2013-03-19 01:00:00',1,0)) w0,
sum(if(calltime >='2013-03-19 01:00:00' and calltime<'2013-03-19 02:00:00',1,0)) w1,
sum(if(calltime >='2013-03-19 02:00:00' and calltime<'2013-03-19 03:00:00',1,0)) w2,
sum(if(calltime >='2013-03-19 03:00:00' and calltime<'2013-03-19 04:00:00',1,0)) w3,
sum(if(calltime >='2013-03-19 04:00:00' and calltime<'2013-03-19 05:00:00',1,0)) w4,
sum(if(calltime >='2013-03-19 05:00:00' and calltime<'2013-03-19 06:00:00',1,0)) w5,
sum(if(calltime >='2013-03-19 06:00:00' and calltime<'2013-03-19 07:00:00',1,0)) w6,
sum(if(calltime >='2013-03-19 07:00:00' and calltime<'2013-03-19 08:00:00',1,0)) w7,
sum(if(calltime >='2013-03-19 08:00:00' and calltime<'2013-03-19 09:00:00',1,0)) w8,
sum(if(calltime >='2013-03-19 09:00:00' and calltime<'2013-03-19 10:00:00',1,0)) w9,
sum(if(calltime >='2013-03-19 10:00:00' and calltime<'2013-03-19 11:00:00',1,0)) w10,
sum(if(calltime >='2013-03-19 11:00:00' and calltime<'2013-03-19 12:00:00',1,0)) w11,
sum(if(calltime >='2013-03-19 12:00:00' and calltime<'2013-03-19 13:00:00',1,0)) w12,
sum(if(calltime >='2013-03-19 13:00:00' and calltime<'2013-03-19 14:00:00',1,0)) w13,
sum(if(calltime >='2013-03-19 14:00:00' and calltime<'2013-03-19 15:00:00',1,0)) w14,
sum(if(calltime >='2013-03-19 15:00:00' and calltime<'2013-03-19 16:00:00',1,0)) w15,
sum(if(calltime >='2013-03-19 16:00:00' and calltime<'2013-03-19 17:00:00',1,0)) w16,
sum(if(calltime >='2013-03-19 17:00:00' and calltime<'2013-03-19 18:00:00',1,0)) w17,
sum(if(calltime >='2013-03-19 18:00:00' and calltime<'2013-03-19 19:00:00',1,0)) w18,
sum(if(calltime >='2013-03-19 19:00:00' and calltime<'2013-03-19 20:00:00',1,0)) w19,
sum(if(calltime >='2013-03-19 20:00:00' and calltime<'2013-03-19 21:00:00',1,0)) w20,
sum(if(calltime >='2013-03-19 21:00:00' and calltime<'2013-03-19 22:00:00',1,0)) w21,
sum(if(calltime >='2013-03-19 22:00:00' and calltime<'2013-03-19 23:00:00',1,0)) w22,
sum(if(calltime >='2013-03-19 23:00:00' and calltime<='2013-03-19 24:00:00',1,0)) w23
from tbl_call_log where calltime>='2013-03-19' and calltime<'2013-03-19 24:00:00'
union all select count(caller),calltime,
sum(if(calltime >='2013-03-20 00:00:00' and calltime<'2013-03-20 01:00:00',1,0)) w0,
sum(if(calltime >='2013-03-20 01:00:00' and calltime<'2013-03-20 02:00:00',1,0)) w1,
sum(if(calltime >='2013-03-20 02:00:00' and calltime<'2013-03-20 03:00:00',1,0)) w2,
sum(if(calltime >='2013-03-20 03:00:00' and calltime<'2013-03-20 04:00:00',1,0)) w3,
sum(if(calltime >='2013-03-20 04:00:00' and calltime<'2013-03-20 05:00:00',1,0)) w4,
sum(if(calltime >='2013-03-20 05:00:00' and calltime<'2013-03-20 06:00:00',1,0)) w5,
sum(if(calltime >='2013-03-20 06:00:00' and calltime<'2013-03-20 07:00:00',1,0)) w6,
sum(if(calltime >='2013-03-20 07:00:00' and calltime<'2013-03-20 08:00:00',1,0)) w7,
sum(if(calltime >='2013-03-20 08:00:00' and calltime<'2013-03-20 09:00:00',1,0)) w8,
sum(if(calltime >='2013-03-20 09:00:00' and calltime<'2013-03-20 10:00:00',1,0)) w9,
sum(if(calltime >='2013-03-20 10:00:00' and calltime<'2013-03-20 11:00:00',1,0)) w10,
sum(if(calltime >='2013-03-20 11:00:00' and calltime<'2013-03-20 12:00:00',1,0)) w11,
sum(if(calltime >='2013-03-20 12:00:00' and calltime<'2013-03-20 13:00:00',1,0)) w12,
sum(if(calltime >='2013-03-20 13:00:00' and calltime<'2013-03-20 14:00:00',1,0)) w13,
sum(if(calltime >='2013-03-20 14:00:00' and calltime<'2013-03-20 15:00:00',1,0)) w14,
sum(if(calltime >='2013-03-20 15:00:00' and calltime<'2013-03-20 16:00:00',1,0)) w15,
sum(if(calltime >='2013-03-20 16:00:00' and calltime<'2013-03-20 17:00:00',1,0)) w16,
sum(if(calltime >='2013-03-20 17:00:00' and calltime<'2013-03-20 18:00:00',1,0)) w17,
sum(if(calltime >='2013-03-20 18:00:00' and calltime<'2013-03-20 19:00:00',1,0)) w18,
sum(if(calltime >='2013-03-20 19:00:00' and calltime<'2013-03-20 20:00:00',1,0)) w19,
sum(if(calltime >='2013-03-20 20:00:00' and calltime<'2013-03-20 21:00:00',1,0)) w20,
sum(if(calltime >='2013-03-20 21:00:00' and calltime<'2013-03-20 22:00:00',1,0)) w21,
sum(if(calltime >='2013-03-20 22:00:00' and calltime<'2013-03-20 23:00:00',1,0)) w22,
sum(if(calltime >='2013-03-20 23:00:00' and calltime<='2013-03-20 24:00:00',1,0)) w23
from tbl_call_log where calltime>='2013-03-20' and calltime<'2013-03-20 24:00:00'
union all select count(caller),calltime,
sum(if(calltime >='2013-03-21 00:00:00' and calltime<'2013-03-21 01:00:00',1,0)) w0,
sum(if(calltime >='2013-03-21 01:00:00' and calltime<'2013-03-21 02:00:00',1,0)) w1,
sum(if(calltime >='2013-03-21 02:00:00' and calltime<'2013-03-21 03:00:00',1,0)) w2,
sum(if(calltime >='2013-03-21 03:00:00' and calltime<'2013-03-21 04:00:00',1,0)) w3,
sum(if(calltime >='2013-03-21 04:00:00' and calltime<'2013-03-21 05:00:00',1,0)) w4,
sum(if(calltime >='2013-03-21 05:00:00' and calltime<'2013-03-21 06:00:00',1,0)) w5,
sum(if(calltime >='2013-03-21 06:00:00' and calltime<'2013-03-21 07:00:00',1,0)) w6,
sum(if(calltime >='2013-03-21 07:00:00' and calltime<'2013-03-21 08:00:00',1,0)) w7,
sum(if(calltime >='2013-03-21 08:00:00' and calltime<'2013-03-21 09:00:00',1,0)) w8,
sum(if(calltime >='2013-03-21 09:00:00' and calltime<'2013-03-21 10:00:00',1,0)) w9,
sum(if(calltime >='2013-03-21 10:00:00' and calltime<'2013-03-21 11:00:00',1,0)) w10,
sum(if(calltime >='2013-03-21 11:00:00' and calltime<'2013-03-21 12:00:00',1,0)) w11,
sum(if(calltime >='2013-03-21 12:00:00' and calltime<'2013-03-21 13:00:00',1,0)) w12,
sum(if(calltime >='2013-03-21 13:00:00' and calltime<'2013-03-21 14:00:00',1,0)) w13,
sum(if(calltime >='2013-03-21 14:00:00' and calltime<'2013-03-21 15:00:00',1,0)) w14,
sum(if(calltime >='2013-03-21 15:00:00' and calltime<'2013-03-21 16:00:00',1,0)) w15,
sum(if(calltime >='2013-03-21 16:00:00' and calltime<'2013-03-21 17:00:00',1,0)) w16,
sum(if(calltime >='2013-03-21 17:00:00' and calltime<'2013-03-21 18:00:00',1,0)) w17,
sum(if(calltime >='2013-03-21 18:00:00' and calltime<'2013-03-21 19:00:00',1,0)) w18,
sum(if(calltime >='2013-03-21 19:00:00' and calltime<'2013-03-21 20:00:00',1,0)) w19,
sum(if(calltime >='2013-03-21 20:00:00' and calltime<'2013-03-21 21:00:00',1,0)) w20,
sum(if(calltime >='2013-03-21 21:00:00' and calltime<'2013-03-21 22:00:00',1,0)) w21,
sum(if(calltime >='2013-03-21 22:00:00' and calltime<'2013-03-21 23:00:00',1,0)) w22,
sum(if(calltime >='2013-03-21 23:00:00' and calltime<='2013-03-21 24:00:00',1,0)) w23
from tbl_call_log where calltime>='2013-03-21' and calltime<'2013-03-21 24:00:00'
-- erl: per-hour call-duration sums over cticall, one row per day
-- Hourly "erl" (traffic-intensity) report over cticall for 2013-03-15 ..
-- 2013-03-21: one output row per StartDate (ServiceType=1 only), columns
-- w0..w23 holding the summed call-duration units for hours 00:00-23:59,
-- each divided by 216000.
--
-- NOTE(review): SECOND(CallDuration) returns only the seconds COMPONENT
-- (0-59) of a TIME value, not the total duration in seconds.  If
-- CallDuration can exceed one minute, TIME_TO_SEC(CallDuration) is
-- probably what was intended -- confirm against the data.
-- NOTE(review): the divisor 216000 (= 60 * 3600) presumably converts the
-- summed units into erlangs -- verify against CallDuration's actual unit.
--
-- UNION ALL replaces the original UNION: each branch is restricted to a
-- different StartDate, so there are no true duplicates to remove, and a
-- plain UNION would (a) pay for a needless dedup pass and (b) silently
-- collapse two days that happen to produce byte-identical rows (e.g. two
-- days with no matching calls both yield a NULL/zero row).
select StartDate,
sum(if(StartTime >='00:00:00' and StartTime<'01:00:00',SECOND(CallDuration),0))/216000 w0,
sum(if(StartTime >='01:00:00' and StartTime<'02:00:00',SECOND(CallDuration),0))/216000 w1,
sum(if(StartTime >='02:00:00' and StartTime<'03:00:00',SECOND(CallDuration),0))/216000 w2,
sum(if(StartTime >='03:00:00' and StartTime<'04:00:00',SECOND(CallDuration),0))/216000 w3,
sum(if(StartTime >='04:00:00' and StartTime<'05:00:00',SECOND(CallDuration),0))/216000 w4,
sum(if(StartTime >='05:00:00' and StartTime<'06:00:00',SECOND(CallDuration),0))/216000 w5,
sum(if(StartTime >='06:00:00' and StartTime<'07:00:00',SECOND(CallDuration),0))/216000 w6,
sum(if(StartTime >='07:00:00' and StartTime<'08:00:00',SECOND(CallDuration),0))/216000 w7,
sum(if(StartTime >='08:00:00' and StartTime<'09:00:00',SECOND(CallDuration),0))/216000 w8,
sum(if(StartTime >='09:00:00' and StartTime<'10:00:00',SECOND(CallDuration),0))/216000 w9,
sum(if(StartTime >='10:00:00' and StartTime<'11:00:00',SECOND(CallDuration),0))/216000 w10,
sum(if(StartTime >='11:00:00' and StartTime<'12:00:00',SECOND(CallDuration),0))/216000 w11,
sum(if(StartTime >='12:00:00' and StartTime<'13:00:00',SECOND(CallDuration),0))/216000 w12,
sum(if(StartTime >='13:00:00' and StartTime<'14:00:00',SECOND(CallDuration),0))/216000 w13,
sum(if(StartTime >='14:00:00' and StartTime<'15:00:00',SECOND(CallDuration),0))/216000 w14,
sum(if(StartTime >='15:00:00' and StartTime<'16:00:00',SECOND(CallDuration),0))/216000 w15,
sum(if(StartTime >='16:00:00' and StartTime<'17:00:00',SECOND(CallDuration),0))/216000 w16,
sum(if(StartTime >='17:00:00' and StartTime<'18:00:00',SECOND(CallDuration),0))/216000 w17,
sum(if(StartTime >='18:00:00' and StartTime<'19:00:00',SECOND(CallDuration),0))/216000 w18,
sum(if(StartTime >='19:00:00' and StartTime<'20:00:00',SECOND(CallDuration),0))/216000 w19,
sum(if(StartTime >='20:00:00' and StartTime<'21:00:00',SECOND(CallDuration),0))/216000 w20,
sum(if(StartTime >='21:00:00' and StartTime<'22:00:00',SECOND(CallDuration),0))/216000 w21,
sum(if(StartTime >='22:00:00' and StartTime<'23:00:00',SECOND(CallDuration),0))/216000 w22,
sum(if(StartTime >='23:00:00' and StartTime<'24:00:00',SECOND(CallDuration),0))/216000 w23
from cticall where ServiceType=1 and StartDate='15-03-2013'
union all
select StartDate,
sum(if(StartTime >='00:00:00' and StartTime<'01:00:00',SECOND(CallDuration),0))/216000 w0,
sum(if(StartTime >='01:00:00' and StartTime<'02:00:00',SECOND(CallDuration),0))/216000 w1,
sum(if(StartTime >='02:00:00' and StartTime<'03:00:00',SECOND(CallDuration),0))/216000 w2,
sum(if(StartTime >='03:00:00' and StartTime<'04:00:00',SECOND(CallDuration),0))/216000 w3,
sum(if(StartTime >='04:00:00' and StartTime<'05:00:00',SECOND(CallDuration),0))/216000 w4,
sum(if(StartTime >='05:00:00' and StartTime<'06:00:00',SECOND(CallDuration),0))/216000 w5,
sum(if(StartTime >='06:00:00' and StartTime<'07:00:00',SECOND(CallDuration),0))/216000 w6,
sum(if(StartTime >='07:00:00' and StartTime<'08:00:00',SECOND(CallDuration),0))/216000 w7,
sum(if(StartTime >='08:00:00' and StartTime<'09:00:00',SECOND(CallDuration),0))/216000 w8,
sum(if(StartTime >='09:00:00' and StartTime<'10:00:00',SECOND(CallDuration),0))/216000 w9,
sum(if(StartTime >='10:00:00' and StartTime<'11:00:00',SECOND(CallDuration),0))/216000 w10,
sum(if(StartTime >='11:00:00' and StartTime<'12:00:00',SECOND(CallDuration),0))/216000 w11,
sum(if(StartTime >='12:00:00' and StartTime<'13:00:00',SECOND(CallDuration),0))/216000 w12,
sum(if(StartTime >='13:00:00' and StartTime<'14:00:00',SECOND(CallDuration),0))/216000 w13,
sum(if(StartTime >='14:00:00' and StartTime<'15:00:00',SECOND(CallDuration),0))/216000 w14,
sum(if(StartTime >='15:00:00' and StartTime<'16:00:00',SECOND(CallDuration),0))/216000 w15,
sum(if(StartTime >='16:00:00' and StartTime<'17:00:00',SECOND(CallDuration),0))/216000 w16,
sum(if(StartTime >='17:00:00' and StartTime<'18:00:00',SECOND(CallDuration),0))/216000 w17,
sum(if(StartTime >='18:00:00' and StartTime<'19:00:00',SECOND(CallDuration),0))/216000 w18,
sum(if(StartTime >='19:00:00' and StartTime<'20:00:00',SECOND(CallDuration),0))/216000 w19,
sum(if(StartTime >='20:00:00' and StartTime<'21:00:00',SECOND(CallDuration),0))/216000 w20,
sum(if(StartTime >='21:00:00' and StartTime<'22:00:00',SECOND(CallDuration),0))/216000 w21,
sum(if(StartTime >='22:00:00' and StartTime<'23:00:00',SECOND(CallDuration),0))/216000 w22,
sum(if(StartTime >='23:00:00' and StartTime<'24:00:00',SECOND(CallDuration),0))/216000 w23
from cticall where ServiceType=1 and StartDate='16-03-2013'
union all
select StartDate,
sum(if(StartTime >='00:00:00' and StartTime<'01:00:00',SECOND(CallDuration),0))/216000 w0,
sum(if(StartTime >='01:00:00' and StartTime<'02:00:00',SECOND(CallDuration),0))/216000 w1,
sum(if(StartTime >='02:00:00' and StartTime<'03:00:00',SECOND(CallDuration),0))/216000 w2,
sum(if(StartTime >='03:00:00' and StartTime<'04:00:00',SECOND(CallDuration),0))/216000 w3,
sum(if(StartTime >='04:00:00' and StartTime<'05:00:00',SECOND(CallDuration),0))/216000 w4,
sum(if(StartTime >='05:00:00' and StartTime<'06:00:00',SECOND(CallDuration),0))/216000 w5,
sum(if(StartTime >='06:00:00' and StartTime<'07:00:00',SECOND(CallDuration),0))/216000 w6,
sum(if(StartTime >='07:00:00' and StartTime<'08:00:00',SECOND(CallDuration),0))/216000 w7,
sum(if(StartTime >='08:00:00' and StartTime<'09:00:00',SECOND(CallDuration),0))/216000 w8,
sum(if(StartTime >='09:00:00' and StartTime<'10:00:00',SECOND(CallDuration),0))/216000 w9,
sum(if(StartTime >='10:00:00' and StartTime<'11:00:00',SECOND(CallDuration),0))/216000 w10,
sum(if(StartTime >='11:00:00' and StartTime<'12:00:00',SECOND(CallDuration),0))/216000 w11,
sum(if(StartTime >='12:00:00' and StartTime<'13:00:00',SECOND(CallDuration),0))/216000 w12,
sum(if(StartTime >='13:00:00' and StartTime<'14:00:00',SECOND(CallDuration),0))/216000 w13,
sum(if(StartTime >='14:00:00' and StartTime<'15:00:00',SECOND(CallDuration),0))/216000 w14,
sum(if(StartTime >='15:00:00' and StartTime<'16:00:00',SECOND(CallDuration),0))/216000 w15,
sum(if(StartTime >='16:00:00' and StartTime<'17:00:00',SECOND(CallDuration),0))/216000 w16,
sum(if(StartTime >='17:00:00' and StartTime<'18:00:00',SECOND(CallDuration),0))/216000 w17,
sum(if(StartTime >='18:00:00' and StartTime<'19:00:00',SECOND(CallDuration),0))/216000 w18,
sum(if(StartTime >='19:00:00' and StartTime<'20:00:00',SECOND(CallDuration),0))/216000 w19,
sum(if(StartTime >='20:00:00' and StartTime<'21:00:00',SECOND(CallDuration),0))/216000 w20,
sum(if(StartTime >='21:00:00' and StartTime<'22:00:00',SECOND(CallDuration),0))/216000 w21,
sum(if(StartTime >='22:00:00' and StartTime<'23:00:00',SECOND(CallDuration),0))/216000 w22,
sum(if(StartTime >='23:00:00' and StartTime<'24:00:00',SECOND(CallDuration),0))/216000 w23
from cticall where ServiceType=1 and StartDate='17-03-2013'
union all
select StartDate,
sum(if(StartTime >='00:00:00' and StartTime<'01:00:00',SECOND(CallDuration),0))/216000 w0,
sum(if(StartTime >='01:00:00' and StartTime<'02:00:00',SECOND(CallDuration),0))/216000 w1,
sum(if(StartTime >='02:00:00' and StartTime<'03:00:00',SECOND(CallDuration),0))/216000 w2,
sum(if(StartTime >='03:00:00' and StartTime<'04:00:00',SECOND(CallDuration),0))/216000 w3,
sum(if(StartTime >='04:00:00' and StartTime<'05:00:00',SECOND(CallDuration),0))/216000 w4,
sum(if(StartTime >='05:00:00' and StartTime<'06:00:00',SECOND(CallDuration),0))/216000 w5,
sum(if(StartTime >='06:00:00' and StartTime<'07:00:00',SECOND(CallDuration),0))/216000 w6,
sum(if(StartTime >='07:00:00' and StartTime<'08:00:00',SECOND(CallDuration),0))/216000 w7,
sum(if(StartTime >='08:00:00' and StartTime<'09:00:00',SECOND(CallDuration),0))/216000 w8,
sum(if(StartTime >='09:00:00' and StartTime<'10:00:00',SECOND(CallDuration),0))/216000 w9,
sum(if(StartTime >='10:00:00' and StartTime<'11:00:00',SECOND(CallDuration),0))/216000 w10,
sum(if(StartTime >='11:00:00' and StartTime<'12:00:00',SECOND(CallDuration),0))/216000 w11,
sum(if(StartTime >='12:00:00' and StartTime<'13:00:00',SECOND(CallDuration),0))/216000 w12,
sum(if(StartTime >='13:00:00' and StartTime<'14:00:00',SECOND(CallDuration),0))/216000 w13,
sum(if(StartTime >='14:00:00' and StartTime<'15:00:00',SECOND(CallDuration),0))/216000 w14,
sum(if(StartTime >='15:00:00' and StartTime<'16:00:00',SECOND(CallDuration),0))/216000 w15,
sum(if(StartTime >='16:00:00' and StartTime<'17:00:00',SECOND(CallDuration),0))/216000 w16,
sum(if(StartTime >='17:00:00' and StartTime<'18:00:00',SECOND(CallDuration),0))/216000 w17,
sum(if(StartTime >='18:00:00' and StartTime<'19:00:00',SECOND(CallDuration),0))/216000 w18,
sum(if(StartTime >='19:00:00' and StartTime<'20:00:00',SECOND(CallDuration),0))/216000 w19,
sum(if(StartTime >='20:00:00' and StartTime<'21:00:00',SECOND(CallDuration),0))/216000 w20,
sum(if(StartTime >='21:00:00' and StartTime<'22:00:00',SECOND(CallDuration),0))/216000 w21,
sum(if(StartTime >='22:00:00' and StartTime<'23:00:00',SECOND(CallDuration),0))/216000 w22,
sum(if(StartTime >='23:00:00' and StartTime<'24:00:00',SECOND(CallDuration),0))/216000 w23
from cticall where ServiceType=1 and StartDate='18-03-2013'
union all
select StartDate,
sum(if(StartTime >='00:00:00' and StartTime<'01:00:00',SECOND(CallDuration),0))/216000 w0,
sum(if(StartTime >='01:00:00' and StartTime<'02:00:00',SECOND(CallDuration),0))/216000 w1,
sum(if(StartTime >='02:00:00' and StartTime<'03:00:00',SECOND(CallDuration),0))/216000 w2,
sum(if(StartTime >='03:00:00' and StartTime<'04:00:00',SECOND(CallDuration),0))/216000 w3,
sum(if(StartTime >='04:00:00' and StartTime<'05:00:00',SECOND(CallDuration),0))/216000 w4,
sum(if(StartTime >='05:00:00' and StartTime<'06:00:00',SECOND(CallDuration),0))/216000 w5,
sum(if(StartTime >='06:00:00' and StartTime<'07:00:00',SECOND(CallDuration),0))/216000 w6,
sum(if(StartTime >='07:00:00' and StartTime<'08:00:00',SECOND(CallDuration),0))/216000 w7,
sum(if(StartTime >='08:00:00' and StartTime<'09:00:00',SECOND(CallDuration),0))/216000 w8,
sum(if(StartTime >='09:00:00' and StartTime<'10:00:00',SECOND(CallDuration),0))/216000 w9,
sum(if(StartTime >='10:00:00' and StartTime<'11:00:00',SECOND(CallDuration),0))/216000 w10,
sum(if(StartTime >='11:00:00' and StartTime<'12:00:00',SECOND(CallDuration),0))/216000 w11,
sum(if(StartTime >='12:00:00' and StartTime<'13:00:00',SECOND(CallDuration),0))/216000 w12,
sum(if(StartTime >='13:00:00' and StartTime<'14:00:00',SECOND(CallDuration),0))/216000 w13,
sum(if(StartTime >='14:00:00' and StartTime<'15:00:00',SECOND(CallDuration),0))/216000 w14,
sum(if(StartTime >='15:00:00' and StartTime<'16:00:00',SECOND(CallDuration),0))/216000 w15,
sum(if(StartTime >='16:00:00' and StartTime<'17:00:00',SECOND(CallDuration),0))/216000 w16,
sum(if(StartTime >='17:00:00' and StartTime<'18:00:00',SECOND(CallDuration),0))/216000 w17,
sum(if(StartTime >='18:00:00' and StartTime<'19:00:00',SECOND(CallDuration),0))/216000 w18,
sum(if(StartTime >='19:00:00' and StartTime<'20:00:00',SECOND(CallDuration),0))/216000 w19,
sum(if(StartTime >='20:00:00' and StartTime<'21:00:00',SECOND(CallDuration),0))/216000 w20,
sum(if(StartTime >='21:00:00' and StartTime<'22:00:00',SECOND(CallDuration),0))/216000 w21,
sum(if(StartTime >='22:00:00' and StartTime<'23:00:00',SECOND(CallDuration),0))/216000 w22,
sum(if(StartTime >='23:00:00' and StartTime<'24:00:00',SECOND(CallDuration),0))/216000 w23
from cticall where ServiceType=1 and StartDate='19-03-2013'
union all
select StartDate,
sum(if(StartTime >='00:00:00' and StartTime<'01:00:00',SECOND(CallDuration),0))/216000 w0,
sum(if(StartTime >='01:00:00' and StartTime<'02:00:00',SECOND(CallDuration),0))/216000 w1,
sum(if(StartTime >='02:00:00' and StartTime<'03:00:00',SECOND(CallDuration),0))/216000 w2,
sum(if(StartTime >='03:00:00' and StartTime<'04:00:00',SECOND(CallDuration),0))/216000 w3,
sum(if(StartTime >='04:00:00' and StartTime<'05:00:00',SECOND(CallDuration),0))/216000 w4,
sum(if(StartTime >='05:00:00' and StartTime<'06:00:00',SECOND(CallDuration),0))/216000 w5,
sum(if(StartTime >='06:00:00' and StartTime<'07:00:00',SECOND(CallDuration),0))/216000 w6,
sum(if(StartTime >='07:00:00' and StartTime<'08:00:00',SECOND(CallDuration),0))/216000 w7,
sum(if(StartTime >='08:00:00' and StartTime<'09:00:00',SECOND(CallDuration),0))/216000 w8,
sum(if(StartTime >='09:00:00' and StartTime<'10:00:00',SECOND(CallDuration),0))/216000 w9,
sum(if(StartTime >='10:00:00' and StartTime<'11:00:00',SECOND(CallDuration),0))/216000 w10,
sum(if(StartTime >='11:00:00' and StartTime<'12:00:00',SECOND(CallDuration),0))/216000 w11,
sum(if(StartTime >='12:00:00' and StartTime<'13:00:00',SECOND(CallDuration),0))/216000 w12,
sum(if(StartTime >='13:00:00' and StartTime<'14:00:00',SECOND(CallDuration),0))/216000 w13,
sum(if(StartTime >='14:00:00' and StartTime<'15:00:00',SECOND(CallDuration),0))/216000 w14,
sum(if(StartTime >='15:00:00' and StartTime<'16:00:00',SECOND(CallDuration),0))/216000 w15,
sum(if(StartTime >='16:00:00' and StartTime<'17:00:00',SECOND(CallDuration),0))/216000 w16,
sum(if(StartTime >='17:00:00' and StartTime<'18:00:00',SECOND(CallDuration),0))/216000 w17,
sum(if(StartTime >='18:00:00' and StartTime<'19:00:00',SECOND(CallDuration),0))/216000 w18,
sum(if(StartTime >='19:00:00' and StartTime<'20:00:00',SECOND(CallDuration),0))/216000 w19,
sum(if(StartTime >='20:00:00' and StartTime<'21:00:00',SECOND(CallDuration),0))/216000 w20,
sum(if(StartTime >='21:00:00' and StartTime<'22:00:00',SECOND(CallDuration),0))/216000 w21,
sum(if(StartTime >='22:00:00' and StartTime<'23:00:00',SECOND(CallDuration),0))/216000 w22,
sum(if(StartTime >='23:00:00' and StartTime<'24:00:00',SECOND(CallDuration),0))/216000 w23
from cticall where ServiceType=1 and StartDate='20-03-2013'
union all
select StartDate,
sum(if(StartTime >='00:00:00' and StartTime<'01:00:00',SECOND(CallDuration),0))/216000 w0,
sum(if(StartTime >='01:00:00' and StartTime<'02:00:00',SECOND(CallDuration),0))/216000 w1,
sum(if(StartTime >='02:00:00' and StartTime<'03:00:00',SECOND(CallDuration),0))/216000 w2,
sum(if(StartTime >='03:00:00' and StartTime<'04:00:00',SECOND(CallDuration),0))/216000 w3,
sum(if(StartTime >='04:00:00' and StartTime<'05:00:00',SECOND(CallDuration),0))/216000 w4,
sum(if(StartTime >='05:00:00' and StartTime<'06:00:00',SECOND(CallDuration),0))/216000 w5,
sum(if(StartTime >='06:00:00' and StartTime<'07:00:00',SECOND(CallDuration),0))/216000 w6,
sum(if(StartTime >='07:00:00' and StartTime<'08:00:00',SECOND(CallDuration),0))/216000 w7,
sum(if(StartTime >='08:00:00' and StartTime<'09:00:00',SECOND(CallDuration),0))/216000 w8,
sum(if(StartTime >='09:00:00' and StartTime<'10:00:00',SECOND(CallDuration),0))/216000 w9,
sum(if(StartTime >='10:00:00' and StartTime<'11:00:00',SECOND(CallDuration),0))/216000 w10,
sum(if(StartTime >='11:00:00' and StartTime<'12:00:00',SECOND(CallDuration),0))/216000 w11,
sum(if(StartTime >='12:00:00' and StartTime<'13:00:00',SECOND(CallDuration),0))/216000 w12,
sum(if(StartTime >='13:00:00' and StartTime<'14:00:00',SECOND(CallDuration),0))/216000 w13,
sum(if(StartTime >='14:00:00' and StartTime<'15:00:00',SECOND(CallDuration),0))/216000 w14,
sum(if(StartTime >='15:00:00' and StartTime<'16:00:00',SECOND(CallDuration),0))/216000 w15,
sum(if(StartTime >='16:00:00' and StartTime<'17:00:00',SECOND(CallDuration),0))/216000 w16,
sum(if(StartTime >='17:00:00' and StartTime<'18:00:00',SECOND(CallDuration),0))/216000 w17,
sum(if(StartTime >='18:00:00' and StartTime<'19:00:00',SECOND(CallDuration),0))/216000 w18,
sum(if(StartTime >='19:00:00' and StartTime<'20:00:00',SECOND(CallDuration),0))/216000 w19,
sum(if(StartTime >='20:00:00' and StartTime<'21:00:00',SECOND(CallDuration),0))/216000 w20,
sum(if(StartTime >='21:00:00' and StartTime<'22:00:00',SECOND(CallDuration),0))/216000 w21,
sum(if(StartTime >='22:00:00' and StartTime<'23:00:00',SECOND(CallDuration),0))/216000 w22,
sum(if(StartTime >='23:00:00' and StartTime<'24:00:00',SECOND(CallDuration),0))/216000 w23
from cticall where ServiceType=1 and StartDate='21-03-2013';
-- bulk-load space-delimited daily call logs into cticall
load data infile "c:/cti/call_2013-03-15_.log" replace into table cticall character set gbk fields terminated by " " enclosed by "" lines terminated by "\r\n";
load data infile "c:/cti/call_2013-03-16_.log" replace into table cticall character set gbk fields terminated by " " enclosed by "" lines terminated by "\r\n";
load data infile "c:/cti/call_2013-03-17_.log" replace into table cticall character set gbk fields terminated by " " enclosed by "" lines terminated by "\r\n";
load data infile "c:/cti/call_2013-03-18_.log" replace into table cticall character set gbk fields terminated by " " enclosed by "" lines terminated by "\r\n";
load data infile "c:/cti/call_2013-03-19_.log" replace into table cticall character set gbk fields terminated by " " enclosed by "" lines terminated by "\r\n";
load data infile "c:/cti/call_2013-03-20_.log" replace into table cticall character set gbk fields terminated by " " enclosed by "" lines terminated by "\r\n";
load data infile "c:/cti/call_2013-03-21_.log" replace into table cticall character set gbk fields terminated by " " enclosed by "" lines terminated by "\r\n";
-- bulk-load comma-delimited (CSV) daily call logs into cticall2
load data infile "c:/cti/call_2013-03-15_.csv" replace into table cticall2 character set gbk fields terminated by "," enclosed by "" lines terminated by "\r\n";
load data infile "c:/cti/call_2013-03-16_.csv" replace into table cticall2 character set gbk fields terminated by "," enclosed by "" lines terminated by "\r\n";
load data infile "c:/cti/call_2013-03-17_.csv" replace into table cticall2 character set gbk fields terminated by "," enclosed by "" lines terminated by "\r\n";
load data infile "c:/cti/call_2013-03-18_.csv" replace into table cticall2 character set gbk fields terminated by "," enclosed by "" lines terminated by "\r\n";
load data infile "c:/cti/call_2013-03-19_.csv" replace into table cticall2 character set gbk fields terminated by "," enclosed by "" lines terminated by "\r\n";
load data infile "c:/cti/call_2013-03-20_.csv" replace into table cticall2 character set gbk fields terminated by "," enclosed by "" lines terminated by "\r\n";
load data infile "c:/cti/call_2013-03-21_.csv" replace into table cticall2 character set gbk fields terminated by "," enclosed by "" lines terminated by "\r\n";
-- bulk-load comma-delimited (CSV) daily call logs into cticallmsh
load data infile "c:/ctimsh/call_2013-03-15_.csv" replace into table cticallmsh character set gbk fields terminated by "," enclosed by "" lines terminated by "\r\n";
load data infile "c:/ctimsh/call_2013-03-16_.csv" replace into table cticallmsh character set gbk fields terminated by "," enclosed by "" lines terminated by "\r\n";
load data infile "c:/ctimsh/call_2013-03-17_.csv" replace into table cticallmsh character set gbk fields terminated by "," enclosed by "" lines terminated by "\r\n";
load data infile "c:/ctimsh/call_2013-03-18_.csv" replace into table cticallmsh character set gbk fields terminated by "," enclosed by "" lines terminated by "\r\n";
load data infile "c:/ctimsh/call_2013-03-19_.csv" replace into table cticallmsh character set gbk fields terminated by "," enclosed by "" lines terminated by "\r\n";
load data infile "c:/ctimsh/call_2013-03-20_.csv" replace into table cticallmsh character set gbk fields terminated by "," enclosed by "" lines terminated by "\r\n";
load data infile "c:/ctimsh/call_2013-03-21_.csv" replace into table cticallmsh character set gbk fields terminated by "," enclosed by "" lines terminated by "\r\n";
-- erl: per-hour call-duration sums over cticall2, one row per day
select StartDate,
sum(if(StartTime >='00:00:00' and StartTime<'01:00:00',SECOND(CallDuration),0))/216000 w0,
sum(if(StartTime >='01:00:00' and StartTime<'02:00:00',SECOND(CallDuration),0))/216000 w1,
sum(if(StartTime >='02:00:00' and StartTime<'03:00:00',SECOND(CallDuration),0))/216000 w2,
sum(if(StartTime >='03:00:00' and StartTime<'04:00:00',SECOND(CallDuration),0))/216000 w3,
sum(if(StartTime >='04:00:00' and StartTime<'05:00:00',SECOND(CallDuration),0))/216000 w4,
sum(if(StartTime >='05:00:00' and StartTime<'06:00:00',SECOND(CallDuration),0))/216000 w5,
sum(if(StartTime >='06:00:00' and StartTime<'07:00:00',SECOND(CallDuration),0))/216000 w6,
sum(if(StartTime >='07:00:00' and StartTime<'08:00:00',SECOND(CallDuration),0))/216000 w7,
sum(if(StartTime >='08:00:00' and StartTime<'09:00:00',SECOND(CallDuration),0))/216000 w8,
sum(if(StartTime >='09:00:00' and StartTime<'10:00:00',SECOND(CallDuration),0))/216000 w9,
sum(if(StartTime >='10:00:00' and StartTime<'11:00:00',SECOND(CallDuration),0))/216000 w10,
sum(if(StartTime >='11:00:00' and StartTime<'12:00:00',SECOND(CallDuration),0))/216000 w11,
sum(if(StartTime >='12:00:00' and StartTime<'13:00:00',SECOND(CallDuration),0))/216000 w12,
sum(if(StartTime >='13:00:00' and StartTime<'14:00:00',SECOND(CallDuration),0))/216000 w13,
sum(if(StartTime >='14:00:00' and StartTime<'15:00:00',SECOND(CallDuration),0))/216000 w14,
sum(if(StartTime >='15:00:00' and StartTime<'16:00:00',SECOND(CallDuration),0))/216000 w15,
sum(if(StartTime >='16:00:00' and StartTime<'17:00:00',SECOND(CallDuration),0))/216000 w16,
sum(if(StartTime >='17:00:00' and StartTime<'18:00:00',SECOND(CallDuration),0))/216000 w17,
sum(if(StartTime >='18:00:00' and StartTime<'19:00:00',SECOND(CallDuration),0))/216000 w18,
sum(if(StartTime >='19:00:00' and StartTime<'20:00:00',SECOND(CallDuration),0))/216000 w19,
sum(if(StartTime >='20:00:00' and StartTime<'21:00:00',SECOND(CallDuration),0))/216000 w20,
sum(if(StartTime >='21:00:00' and StartTime<'22:00:00',SECOND(CallDuration),0))/216000 w21,
sum(if(StartTime >='22:00:00' and StartTime<'23:00:00',SECOND(CallDuration),0))/216000 w22,
sum(if(StartTime >='23:00:00' and StartTime<'24:00:00',SECOND(CallDuration),0))/216000 w23
from cticall2 where ServiceType=1 and StartDate='15-03-2013'
union
select StartDate,
sum(if(StartTime >='00:00:00' and StartTime<'01:00:00',SECOND(CallDuration),0))/216000 w0,
sum(if(StartTime >='01:00:00' and StartTime<'02:00:00',SECOND(CallDuration),0))/216000 w1,
sum(if(StartTime >='02:00:00' and StartTime<'03:00:00',SECOND(CallDuration),0))/216000 w2,
sum(if(StartTime >='03:00:00' and StartTime<'04:00:00',SECOND(CallDuration),0))/216000 w3,
sum(if(StartTime >='04:00:00' and StartTime<'05:00:00',SECOND(CallDuration),0))/216000 w4,
sum(if(StartTime >='05:00:00' and StartTime<'06:00:00',SECOND(CallDuration),0))/216000 w5,
sum(if(StartTime >='06:00:00' and StartTime<'07:00:00',SECOND(CallDuration),0))/216000 w6,
sum(if(StartTime >='07:00:00' and StartTime<'08:00:00',SECOND(CallDuration),0))/216000 w7,
sum(if(StartTime >='08:00:00' and StartTime<'09:00:00',SECOND(CallDuration),0))/216000 w8,
sum(if(StartTime >='09:00:00' and StartTime<'10:00:00',SECOND(CallDuration),0))/216000 w9,
sum(if(StartTime >='10:00:00' and StartTime<'11:00:00',SECOND(CallDuration),0))/216000 w10,
sum(if(StartTime >='11:00:00' and StartTime<'12:00:00',SECOND(CallDuration),0))/216000 w11,
sum(if(StartTime >='12:00:00' and StartTime<'13:00:00',SECOND(CallDuration),0))/216000 w12,
sum(if(StartTime >='13:00:00' and StartTime<'14:00:00',SECOND(CallDuration),0))/216000 w13,
sum(if(StartTime >='14:00:00' and StartTime<'15:00:00',SECOND(CallDuration),0))/216000 w14,
sum(if(StartTime >='15:00:00' and StartTime<'16:00:00',SECOND(CallDuration),0))/216000 w15,
sum(if(StartTime >='16:00:00' and StartTime<'17:00:00',SECOND(CallDuration),0))/216000 w16,
sum(if(StartTime >='17:00:00' and StartTime<'18:00:00',SECOND(CallDuration),0))/216000 w17,
sum(if(StartTime >='18:00:00' and StartTime<'19:00:00',SECOND(CallDuration),0))/216000 w18,
sum(if(StartTime >='19:00:00' and StartTime<'20:00:00',SECOND(CallDuration),0))/216000 w19,
sum(if(StartTime >='20:00:00' and StartTime<'21:00:00',SECOND(CallDuration),0))/216000 w20,
sum(if(StartTime >='21:00:00' and StartTime<'22:00:00',SECOND(CallDuration),0))/216000 w21,
sum(if(StartTime >='22:00:00' and StartTime<'23:00:00',SECOND(CallDuration),0))/216000 w22,
sum(if(StartTime >='23:00:00' and StartTime<'24:00:00',SECOND(CallDuration),0))/216000 w23
from cticall2 where ServiceType=1 and StartDate='16-03-2013'
union
select StartDate,
sum(if(StartTime >='00:00:00' and StartTime<'01:00:00',SECOND(CallDuration),0))/216000 w0,
sum(if(StartTime >='01:00:00' and StartTime<'02:00:00',SECOND(CallDuration),0))/216000 w1,
sum(if(StartTime >='02:00:00' and StartTime<'03:00:00',SECOND(CallDuration),0))/216000 w2,
sum(if(StartTime >='03:00:00' and StartTime<'04:00:00',SECOND(CallDuration),0))/216000 w3,
sum(if(StartTime >='04:00:00' and StartTime<'05:00:00',SECOND(CallDuration),0))/216000 w4,
sum(if(StartTime >='05:00:00' and StartTime<'06:00:00',SECOND(CallDuration),0))/216000 w5,
sum(if(StartTime >='06:00:00' and StartTime<'07:00:00',SECOND(CallDuration),0))/216000 w6,
sum(if(StartTime >='07:00:00' and StartTime<'08:00:00',SECOND(CallDuration),0))/216000 w7,
sum(if(StartTime >='08:00:00' and StartTime<'09:00:00',SECOND(CallDuration),0))/216000 w8,
sum(if(StartTime >='09:00:00' and StartTime<'10:00:00',SECOND(CallDuration),0))/216000 w9,
sum(if(StartTime >='10:00:00' and StartTime<'11:00:00',SECOND(CallDuration),0))/216000 w10,
sum(if(StartTime >='11:00:00' and StartTime<'12:00:00',SECOND(CallDuration),0))/216000 w11,
sum(if(StartTime >='12:00:00' and StartTime<'13:00:00',SECOND(CallDuration),0))/216000 w12,
sum(if(StartTime >='13:00:00' and StartTime<'14:00:00',SECOND(CallDuration),0))/216000 w13,
sum(if(StartTime >='14:00:00' and StartTime<'15:00:00',SECOND(CallDuration),0))/216000 w14,
sum(if(StartTime >='15:00:00' and StartTime<'16:00:00',SECOND(CallDuration),0))/216000 w15,
sum(if(StartTime >='16:00:00' and StartTime<'17:00:00',SECOND(CallDuration),0))/216000 w16,
sum(if(StartTime >='17:00:00' and StartTime<'18:00:00',SECOND(CallDuration),0))/216000 w17,
sum(if(StartTime >='18:00:00' and StartTime<'19:00:00',SECOND(CallDuration),0))/216000 w18,
sum(if(StartTime >='19:00:00' and StartTime<'20:00:00',SECOND(CallDuration),0))/216000 w19,
sum(if(StartTime >='20:00:00' and StartTime<'21:00:00',SECOND(CallDuration),0))/216000 w20,
sum(if(StartTime >='21:00:00' and StartTime<'22:00:00',SECOND(CallDuration),0))/216000 w21,
sum(if(StartTime >='22:00:00' and StartTime<'23:00:00',SECOND(CallDuration),0))/216000 w22,
sum(if(StartTime >='23:00:00' and StartTime<'24:00:00',SECOND(CallDuration),0))/216000 w23
from cticall2 where ServiceType=1 and StartDate='17-03-2013'
union
select StartDate,
sum(if(StartTime >='00:00:00' and StartTime<'01:00:00',SECOND(CallDuration),0))/216000 w0,
sum(if(StartTime >='01:00:00' and StartTime<'02:00:00',SECOND(CallDuration),0))/216000 w1,
sum(if(StartTime >='02:00:00' and StartTime<'03:00:00',SECOND(CallDuration),0))/216000 w2,
sum(if(StartTime >='03:00:00' and StartTime<'04:00:00',SECOND(CallDuration),0))/216000 w3,
sum(if(StartTime >='04:00:00' and StartTime<'05:00:00',SECOND(CallDuration),0))/216000 w4,
sum(if(StartTime >='05:00:00' and StartTime<'06:00:00',SECOND(CallDuration),0))/216000 w5,
sum(if(StartTime >='06:00:00' and StartTime<'07:00:00',SECOND(CallDuration),0))/216000 w6,
sum(if(StartTime >='07:00:00' and StartTime<'08:00:00',SECOND(CallDuration),0))/216000 w7,
sum(if(StartTime >='08:00:00' and StartTime<'09:00:00',SECOND(CallDuration),0))/216000 w8,
sum(if(StartTime >='09:00:00' and StartTime<'10:00:00',SECOND(CallDuration),0))/216000 w9,
sum(if(StartTime >='10:00:00' and StartTime<'11:00:00',SECOND(CallDuration),0))/216000 w10,
sum(if(StartTime >='11:00:00' and StartTime<'12:00:00',SECOND(CallDuration),0))/216000 w11,
sum(if(StartTime >='12:00:00' and StartTime<'13:00:00',SECOND(CallDuration),0))/216000 w12,
sum(if(StartTime >='13:00:00' and StartTime<'14:00:00',SECOND(CallDuration),0))/216000 w13,
sum(if(StartTime >='14:00:00' and StartTime<'15:00:00',SECOND(CallDuration),0))/216000 w14,
sum(if(StartTime >='15:00:00' and StartTime<'16:00:00',SECOND(CallDuration),0))/216000 w15,
sum(if(StartTime >='16:00:00' and StartTime<'17:00:00',SECOND(CallDuration),0))/216000 w16,
sum(if(StartTime >='17:00:00' and StartTime<'18:00:00',SECOND(CallDuration),0))/216000 w17,
sum(if(StartTime >='18:00:00' and StartTime<'19:00:00',SECOND(CallDuration),0))/216000 w18,
sum(if(StartTime >='19:00:00' and StartTime<'20:00:00',SECOND(CallDuration),0))/216000 w19,
sum(if(StartTime >='20:00:00' and StartTime<'21:00:00',SECOND(CallDuration),0))/216000 w20,
sum(if(StartTime >='21:00:00' and StartTime<'22:00:00',SECOND(CallDuration),0))/216000 w21,
sum(if(StartTime >='22:00:00' and StartTime<'23:00:00',SECOND(CallDuration),0))/216000 w22,
sum(if(StartTime >='23:00:00' and StartTime<'24:00:00',SECOND(CallDuration),0))/216000 w23
from cticall2 where ServiceType=1 and StartDate='18-03-2013'
union
select StartDate,
sum(if(StartTime >='00:00:00' and StartTime<'01:00:00',SECOND(CallDuration),0))/216000 w0,
sum(if(StartTime >='01:00:00' and StartTime<'02:00:00',SECOND(CallDuration),0))/216000 w1,
sum(if(StartTime >='02:00:00' and StartTime<'03:00:00',SECOND(CallDuration),0))/216000 w2,
sum(if(StartTime >='03:00:00' and StartTime<'04:00:00',SECOND(CallDuration),0))/216000 w3,
sum(if(StartTime >='04:00:00' and StartTime<'05:00:00',SECOND(CallDuration),0))/216000 w4,
sum(if(StartTime >='05:00:00' and StartTime<'06:00:00',SECOND(CallDuration),0))/216000 w5,
sum(if(StartTime >='06:00:00' and StartTime<'07:00:00',SECOND(CallDuration),0))/216000 w6,
sum(if(StartTime >='07:00:00' and StartTime<'08:00:00',SECOND(CallDuration),0))/216000 w7,
sum(if(StartTime >='08:00:00' and StartTime<'09:00:00',SECOND(CallDuration),0))/216000 w8,
sum(if(StartTime >='09:00:00' and StartTime<'10:00:00',SECOND(CallDuration),0))/216000 w9,
sum(if(StartTime >='10:00:00' and StartTime<'11:00:00',SECOND(CallDuration),0))/216000 w10,
sum(if(StartTime >='11:00:00' and StartTime<'12:00:00',SECOND(CallDuration),0))/216000 w11,
sum(if(StartTime >='12:00:00' and StartTime<'13:00:00',SECOND(CallDuration),0))/216000 w12,
sum(if(StartTime >='13:00:00' and StartTime<'14:00:00',SECOND(CallDuration),0))/216000 w13,
sum(if(StartTime >='14:00:00' and StartTime<'15:00:00',SECOND(CallDuration),0))/216000 w14,
sum(if(StartTime >='15:00:00' and StartTime<'16:00:00',SECOND(CallDuration),0))/216000 w15,
sum(if(StartTime >='16:00:00' and StartTime<'17:00:00',SECOND(CallDuration),0))/216000 w16,
sum(if(StartTime >='17:00:00' and StartTime<'18:00:00',SECOND(CallDuration),0))/216000 w17,
sum(if(StartTime >='18:00:00' and StartTime<'19:00:00',SECOND(CallDuration),0))/216000 w18,
sum(if(StartTime >='19:00:00' and StartTime<'20:00:00',SECOND(CallDuration),0))/216000 w19,
sum(if(StartTime >='20:00:00' and StartTime<'21:00:00',SECOND(CallDuration),0))/216000 w20,
sum(if(StartTime >='21:00:00' and StartTime<'22:00:00',SECOND(CallDuration),0))/216000 w21,
sum(if(StartTime >='22:00:00' and StartTime<'23:00:00',SECOND(CallDuration),0))/216000 w22,
sum(if(StartTime >='23:00:00' and StartTime<'24:00:00',SECOND(CallDuration),0))/216000 w23
from cticall2 where ServiceType=1 and StartDate='19-03-2013'
union
select StartDate,
sum(if(StartTime >='00:00:00' and StartTime<'01:00:00',SECOND(CallDuration),0))/216000 w0,
sum(if(StartTime >='01:00:00' and StartTime<'02:00:00',SECOND(CallDuration),0))/216000 w1,
sum(if(StartTime >='02:00:00' and StartTime<'03:00:00',SECOND(CallDuration),0))/216000 w2,
sum(if(StartTime >='03:00:00' and StartTime<'04:00:00',SECOND(CallDuration),0))/216000 w3,
sum(if(StartTime >='04:00:00' and StartTime<'05:00:00',SECOND(CallDuration),0))/216000 w4,
sum(if(StartTime >='05:00:00' and StartTime<'06:00:00',SECOND(CallDuration),0))/216000 w5,
sum(if(StartTime >='06:00:00' and StartTime<'07:00:00',SECOND(CallDuration),0))/216000 w6,
sum(if(StartTime >='07:00:00' and StartTime<'08:00:00',SECOND(CallDuration),0))/216000 w7,
sum(if(StartTime >='08:00:00' and StartTime<'09:00:00',SECOND(CallDuration),0))/216000 w8,
sum(if(StartTime >='09:00:00' and StartTime<'10:00:00',SECOND(CallDuration),0))/216000 w9,
sum(if(StartTime >='10:00:00' and StartTime<'11:00:00',SECOND(CallDuration),0))/216000 w10,
sum(if(StartTime >='11:00:00' and StartTime<'12:00:00',SECOND(CallDuration),0))/216000 w11,
sum(if(StartTime >='12:00:00' and StartTime<'13:00:00',SECOND(CallDuration),0))/216000 w12,
sum(if(StartTime >='13:00:00' and StartTime<'14:00:00',SECOND(CallDuration),0))/216000 w13,
sum(if(StartTime >='14:00:00' and StartTime<'15:00:00',SECOND(CallDuration),0))/216000 w14,
sum(if(StartTime >='15:00:00' and StartTime<'16:00:00',SECOND(CallDuration),0))/216000 w15,
sum(if(StartTime >='16:00:00' and StartTime<'17:00:00',SECOND(CallDuration),0))/216000 w16,
sum(if(StartTime >='17:00:00' and StartTime<'18:00:00',SECOND(CallDuration),0))/216000 w17,
sum(if(StartTime >='18:00:00' and StartTime<'19:00:00',SECOND(CallDuration),0))/216000 w18,
sum(if(StartTime >='19:00:00' and StartTime<'20:00:00',SECOND(CallDuration),0))/216000 w19,
sum(if(StartTime >='20:00:00' and StartTime<'21:00:00',SECOND(CallDuration),0))/216000 w20,
sum(if(StartTime >='21:00:00' and StartTime<'22:00:00',SECOND(CallDuration),0))/216000 w21,
sum(if(StartTime >='22:00:00' and StartTime<'23:00:00',SECOND(CallDuration),0))/216000 w22,
sum(if(StartTime >='23:00:00' and StartTime<'24:00:00',SECOND(CallDuration),0))/216000 w23
from cticall2 where ServiceType=1 and StartDate='20-03-2013'
union
select StartDate,
sum(if(StartTime >='00:00:00' and StartTime<'01:00:00',SECOND(CallDuration),0))/216000 w0,
sum(if(StartTime >='01:00:00' and StartTime<'02:00:00',SECOND(CallDuration),0))/216000 w1,
sum(if(StartTime >='02:00:00' and StartTime<'03:00:00',SECOND(CallDuration),0))/216000 w2,
sum(if(StartTime >='03:00:00' and StartTime<'04:00:00',SECOND(CallDuration),0))/216000 w3,
sum(if(StartTime >='04:00:00' and StartTime<'05:00:00',SECOND(CallDuration),0))/216000 w4,
sum(if(StartTime >='05:00:00' and StartTime<'06:00:00',SECOND(CallDuration),0))/216000 w5,
sum(if(StartTime >='06:00:00' and StartTime<'07:00:00',SECOND(CallDuration),0))/216000 w6,
sum(if(StartTime >='07:00:00' and StartTime<'08:00:00',SECOND(CallDuration),0))/216000 w7,
sum(if(StartTime >='08:00:00' and StartTime<'09:00:00',SECOND(CallDuration),0))/216000 w8,
sum(if(StartTime >='09:00:00' and StartTime<'10:00:00',SECOND(CallDuration),0))/216000 w9,
sum(if(StartTime >='10:00:00' and StartTime<'11:00:00',SECOND(CallDuration),0))/216000 w10,
sum(if(StartTime >='11:00:00' and StartTime<'12:00:00',SECOND(CallDuration),0))/216000 w11,
sum(if(StartTime >='12:00:00' and StartTime<'13:00:00',SECOND(CallDuration),0))/216000 w12,
sum(if(StartTime >='13:00:00' and StartTime<'14:00:00',SECOND(CallDuration),0))/216000 w13,
sum(if(StartTime >='14:00:00' and StartTime<'15:00:00',SECOND(CallDuration),0))/216000 w14,
sum(if(StartTime >='15:00:00' and StartTime<'16:00:00',SECOND(CallDuration),0))/216000 w15,
sum(if(StartTime >='16:00:00' and StartTime<'17:00:00',SECOND(CallDuration),0))/216000 w16,
sum(if(StartTime >='17:00:00' and StartTime<'18:00:00',SECOND(CallDuration),0))/216000 w17,
sum(if(StartTime >='18:00:00' and StartTime<'19:00:00',SECOND(CallDuration),0))/216000 w18,
sum(if(StartTime >='19:00:00' and StartTime<'20:00:00',SECOND(CallDuration),0))/216000 w19,
sum(if(StartTime >='20:00:00' and StartTime<'21:00:00',SECOND(CallDuration),0))/216000 w20,
sum(if(StartTime >='21:00:00' and StartTime<'22:00:00',SECOND(CallDuration),0))/216000 w21,
sum(if(StartTime >='22:00:00' and StartTime<'23:00:00',SECOND(CallDuration),0))/216000 w22,
sum(if(StartTime >='23:00:00' and StartTime<'24:00:00',SECOND(CallDuration),0))/216000 w23
from cticall2 where ServiceType=1 and StartDate='21-03-2013';
-- NOTE(review): several section headers below were mojibake (encoding-damaged);
-- the replacement comments describe what the queries visibly do — confirm intent.

-- Distinct users over 2013-01-21 .. 2013-03-21 in the presetting log vs. the call log.
select count(distinct(mdn)) from tbl_presetting_log where settime >'2013-01-21' and settime<='2013-03-21 24:00:00'
select count(distinct(called)) from tbl_call_log where calltime>='2013-01-21' and calltime<='2013-03-21 24:00:00'

-- Distinct MDNs that received a send_type=2 SMS (implicit join of tbl_mdn and tbl_sms_log).
select count(distinct(mdn)) from tbl_mdn,tbl_sms_log where called=mdn and send_type=2;

-- Distinct recipients / total rows of send_type='2' SMS.
select count(distinct(called)) from tbl_sms_log where send_type='2'
select count(1) from tbl_sms_log where send_type='2'

-- Secretary-number (emsh) statistics ----------------------------------------------------------------------

-- Registered users acquired via recommendation (recommended MDN also present in tbl_register).
select count(distinct(t2.mdn)) from tbl_recom_list t1,tbl_register t2 where t1.mdn!='N/A' and t1.recomdn=t2.mdn

-- Distinct recommenders and total recommendation rows.
select count(distinct(recomdn)),count(1) from tbl_recom_list t1 where t1.mdn!='N/A'

-- Registered-user counts per day, 2013-03-15 .. 2013-03-21 (one SELECT per day,
-- stacked with UNION ALL).
-- NOTE(review): registertime> is exclusive, so a row at exactly midnight falls out
-- of every bucket — confirm whether >= was intended for the day boundaries.
SELECT count(vicemdn) FROM TBL_REGISTER where registertime>'2013-03-15' and registertime<'2013-03-16'
union all
SELECT count(vicemdn) FROM TBL_REGISTER where registertime>'2013-03-16' and registertime<'2013-03-17'
union all
SELECT count(vicemdn) FROM TBL_REGISTER where registertime>'2013-03-17' and registertime<'2013-03-18'
union all
SELECT count(vicemdn) FROM TBL_REGISTER where registertime>'2013-03-18' and registertime<'2013-03-19'
union all
SELECT count(vicemdn) FROM TBL_REGISTER where registertime>'2013-03-19' and registertime<'2013-03-20'
union all
SELECT count(vicemdn) FROM TBL_REGISTER where registertime>'2013-03-20' and registertime<'2013-03-21'
union all
SELECT count(vicemdn) FROM TBL_REGISTER where registertime>'2013-03-21' and registertime<='2013-03-21 23:59:59.999'

-- Number pool totals: row count plus summed flag and status columns.
select sum(1),sum(flag) f,sum(status) s from tbl_mdnpool
-- Check-in log: first check-in time and row count per day, 2013-03-15 .. 2013-03-21.
select min(t1.checkintime),sum(1)
from tbl_checkin t1 where t1.checkintime >'2013-03-15' and t1.checkintime<'2013-03-16'
union all
select min(t1.checkintime),sum(1)
from tbl_checkin t1 where t1.checkintime >'2013-03-16' and t1.checkintime<'2013-03-17'
union all
select min(t1.checkintime),sum(1)
from tbl_checkin t1 where t1.checkintime >'2013-03-17' and t1.checkintime<'2013-03-18'
union all
select min(t1.checkintime),sum(1)
from tbl_checkin t1 where t1.checkintime >'2013-03-18' and t1.checkintime<'2013-03-19'
union all
select min(t1.checkintime),sum(1)
from tbl_checkin t1 where t1.checkintime >'2013-03-19' and t1.checkintime<'2013-03-20'
union all
select min(t1.checkintime),sum(1)
from tbl_checkin t1 where t1.checkintime >'2013-03-20' and t1.checkintime<'2013-03-21'
union all
select min(t1.checkintime),sum(1)
from tbl_checkin t1 where t1.checkintime >'2013-03-21' and t1.checkintime<='2013-03-21 23:59:59.999'
-- Recommendation log: first recommendation time, distinct recommenders and row
-- count per day, 2013-03-15 .. 2013-03-21 (rows with mdn='N/A' excluded).
select min(t1.recomtime),count(distinct(recomdn)),sum(1)
from tbl_recom_list t1 where t1.recomtime >'2013-03-15' and t1.recomtime<'2013-03-16' and mdn!='N/A'
union all
select min(t1.recomtime),count(distinct(recomdn)),sum(1)
from tbl_recom_list t1 where t1.recomtime >'2013-03-16' and t1.recomtime<'2013-03-17' and mdn!='N/A'
union all
select min(t1.recomtime),count(distinct(recomdn)),sum(1)
from tbl_recom_list t1 where t1.recomtime >'2013-03-17' and t1.recomtime<'2013-03-18' and mdn!='N/A'
union all
select min(t1.recomtime),count(distinct(recomdn)),sum(1)
from tbl_recom_list t1 where t1.recomtime >'2013-03-18' and t1.recomtime<'2013-03-19' and mdn!='N/A'
union all
select min(t1.recomtime),count(distinct(recomdn)),sum(1)
from tbl_recom_list t1 where t1.recomtime >'2013-03-19' and t1.recomtime<'2013-03-20' and mdn!='N/A'
union all
select min(t1.recomtime),count(distinct(recomdn)),sum(1)
from tbl_recom_list t1 where t1.recomtime >'2013-03-20' and t1.recomtime<'2013-03-21' and mdn!='N/A'
union all
select min(t1.recomtime),count(distinct(recomdn)),sum(1)
from tbl_recom_list t1 where t1.recomtime >'2013-03-21' and t1.recomtime<='2013-03-21 23:59:59.999' and mdn!='N/A'
-- Daily call line (completed calls only, endflag=1): first log time,
-- completed-call count (sum(endflag)) and caller_start count per day,
-- 2013-03-15 .. 2013-03-21.
select min(t1.logtime),sum(endflag),count(caller_start)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >'2013-03-15' and t1.logtime<'2013-03-16' and endflag=1
union all
select min(t1.logtime),sum(endflag),count(caller_start)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >'2013-03-16' and t1.logtime<'2013-03-17' and endflag=1
union all
select min(t1.logtime),sum(endflag),count(caller_start)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >'2013-03-17' and t1.logtime<'2013-03-18' and endflag=1
union all
select min(t1.logtime),sum(endflag),count(caller_start)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >'2013-03-18' and t1.logtime<'2013-03-19' and endflag=1
union all
select min(t1.logtime),sum(endflag),count(caller_start)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >'2013-03-19' and t1.logtime<'2013-03-20' and endflag=1
union all
select min(t1.logtime),sum(endflag),count(caller_start)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >'2013-03-20' and t1.logtime<'2013-03-21' and endflag=1
union all
select min(t1.logtime),sum(endflag),count(caller_start)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >'2013-03-21' and t1.logtime<='2013-03-21 23:59:59.999' and endflag=1
-- Daily call line (all rows): total row count, caller_start count and first log
-- time per day, 2013-03-15 .. 2013-03-21.
-- NOTE(review): the original header mentioned "minutes", but these queries count
-- rows, not durations — confirm which was intended.
select sum(1),count(caller_start),min(t1.logtime)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >'2013-03-15' and t1.logtime<'2013-03-16'
union all
select sum(1),count(caller_start),min(t1.logtime)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >'2013-03-16' and t1.logtime<'2013-03-17'
union all
select sum(1),count(caller_start),min(t1.logtime)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >'2013-03-17' and t1.logtime<'2013-03-18'
union all
select sum(1),count(caller_start),min(t1.logtime)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >'2013-03-18' and t1.logtime<'2013-03-19'
union all
select sum(1),count(caller_start),min(t1.logtime)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >'2013-03-19' and t1.logtime<'2013-03-20'
union all
select sum(1),count(caller_start),min(t1.logtime)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >'2013-03-20' and t1.logtime<'2013-03-21'
union all
select sum(1),count(caller_start),min(t1.logtime)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >'2013-03-21' and t1.logtime<='2013-03-21 23:59:59.999'
-- NOTE(review): the original header was mojibake; from the queries this is an
-- hourly breakdown for 2013-03-21: per hour bucket (second column = hour index
-- 0-23) the first log time, completed-call count (sum(endflag)) and total rows.
-- NOTE(review): the final bucket ends before 23:59:59.999 and so can drop the
-- last millisecond of the day — confirm the boundary.
select min(t1.logtime),0,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 00:00:00.000' and t1.logtime<'2013-03-21 01:00:00.000'
union all
select min(t1.logtime),1,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 01:00:00.000' and t1.logtime<'2013-03-21 02:00:00.000'
union all
select min(t1.logtime),2,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 02:00:00.000' and t1.logtime<'2013-03-21 03:00:00.000'
union all
select min(t1.logtime),3,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 03:00:00.000' and t1.logtime<'2013-03-21 04:00:00.000'
union all
select min(t1.logtime),4,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 04:00:00.000' and t1.logtime<'2013-03-21 05:00:00.000'
union all
select min(t1.logtime),5,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 05:00:00.000' and t1.logtime<'2013-03-21 06:00:00.000'
union all
select min(t1.logtime),6,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 06:00:00.000' and t1.logtime<'2013-03-21 07:00:00.000'
union all
select min(t1.logtime),7,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 07:00:00.000' and t1.logtime<'2013-03-21 08:00:00.000'
union all
select min(t1.logtime),8,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 08:00:00.000' and t1.logtime<'2013-03-21 09:00:00.000'
union all
select min(t1.logtime),9,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 09:00:00.000' and t1.logtime<'2013-03-21 10:00:00.000'
union all
select min(t1.logtime),10,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 10:00:00.000' and t1.logtime<'2013-03-21 11:00:00.000'
union all
select min(t1.logtime),11,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 11:00:00.000' and t1.logtime<'2013-03-21 12:00:00.000'
union all
select min(t1.logtime),12,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 12:00:00.000' and t1.logtime<'2013-03-21 13:00:00.000'
union all
select min(t1.logtime),13,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 13:00:00.000' and t1.logtime<'2013-03-21 14:00:00.000'
union all
select min(t1.logtime),14,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 14:00:00.000' and t1.logtime<'2013-03-21 15:00:00.000'
union all
select min(t1.logtime),15,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 15:00:00.000' and t1.logtime<'2013-03-21 16:00:00.000'
union all
select min(t1.logtime),16,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 16:00:00.000' and t1.logtime<'2013-03-21 17:00:00.000'
union all
select min(t1.logtime),17,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 17:00:00.000' and t1.logtime<'2013-03-21 18:00:00.000'
union all
select min(t1.logtime),18,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 18:00:00.000' and t1.logtime<'2013-03-21 19:00:00.000'
union all
select min(t1.logtime),19,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 19:00:00.000' and t1.logtime<'2013-03-21 20:00:00.000'
union all
select min(t1.logtime),20,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 20:00:00.000' and t1.logtime<'2013-03-21 21:00:00.000'
union all
select min(t1.logtime),21,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 21:00:00.000' and t1.logtime<'2013-03-21 22:00:00.000'
union all
select min(t1.logtime),22,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 22:00:00.000' and t1.logtime<'2013-03-21 23:00:00.000'
union all
select min(t1.logtime),23,sum(endflag) ,sum(1)
from TBL_ORIGNALCALLLOG t1 where t1.logtime >='2013-03-21 23:00:00.000' and t1.logtime<'2013-03-21 23:59:59.999'
| 78.143317 | 166 | 0.68025 |
543b29dd098b0e15c0ceac7e8dddef016251d811 | 503 | go | Go | internal/homan/domain/usecase/remove.go | anantadwi13/homan | 302e4e10b4a89a86143c647c15bb145b06fc54e8 | [
"MIT"
] | null | null | null | internal/homan/domain/usecase/remove.go | anantadwi13/homan | 302e4e10b4a89a86143c647c15bb145b06fc54e8 | [
"MIT"
] | null | null | null | internal/homan/domain/usecase/remove.go | anantadwi13/homan | 302e4e10b4a89a86143c647c15bb145b06fc54e8 | [
"MIT"
] | null | null | null | package usecase
import (
"context"
)
// UcRemoveParams carries the input of the remove use case.
type UcRemoveParams struct {
	// Name identifies the service to remove.
	Name string
}
// UcRemove is the use case that removes the service named in params.
type UcRemove interface {
	// Execute performs the removal within ctx and reports failures through the
	// package's Error type (see the ErrorUcRemove* values below).
	Execute(ctx context.Context, params *UcRemoveParams) Error
}
// User-facing errors that the remove use case can report.
var (
	ErrorUcRemoveSystemNotRunning = NewErrorUser("system service is not running")
	ErrorUcRemoveParamsNotFound   = NewErrorUser("please specify parameters")
	ErrorUcRemoveServiceNotFound  = NewErrorUser("service is not found")
	ErrorUcRemovePostExecution    = NewErrorUser("something went wrong while doing post-execution")
)
| 23.952381 | 96 | 0.791252 |
6e6a911db27cdbdfab553f775e0d9a2ad69387de | 387 | kt | Kotlin | app/src/main/java/com/grumpyshoe/lumos/core/data/src/preferences/PreferenceManager.kt | lumos-app/lumos-android | 376a6363c669a324b52c6728ac6e834144a8e863 | [
"MIT"
] | null | null | null | app/src/main/java/com/grumpyshoe/lumos/core/data/src/preferences/PreferenceManager.kt | lumos-app/lumos-android | 376a6363c669a324b52c6728ac6e834144a8e863 | [
"MIT"
] | null | null | null | app/src/main/java/com/grumpyshoe/lumos/core/data/src/preferences/PreferenceManager.kt | lumos-app/lumos-android | 376a6363c669a324b52c6728ac6e834144a8e863 | [
"MIT"
] | null | null | null | package com.grumpyshoe.lumos.core.data.src.preferences
/**
* interface for PreferenceManager
*
* Created by Thomas Cirksena on 10.06.19.
* Copyright © 2019 Thomas Cirksena. All rights reserved.
*/
interface PreferenceManager {
    /** Persists the server base address. */
    fun setServerAddress(baseUrl: String)
    /** Returns the stored server base address, or null if none was saved. */
    fun getServerAddress(): String?
    /** Returns the stored client name, or null if none was saved. */
    fun getClientName(): String?
    /** Persists the client name. */
    fun setClientName(clientName: String)
} | 27.642857 | 57 | 0.744186 |
32e0e5e175e778f2919046cb88f4d9b83a9c54e6 | 1,525 | swift | Swift | AILogManagerDemo/AILogManagerDemo/AILogManager/AILogger.swift | tiantiankaixin/MBlogDemo | 939852f505e1ee39468359388926f30e4617f827 | [
"MIT"
] | 5 | 2020-06-04T07:26:36.000Z | 2021-12-08T08:59:09.000Z | AILogManagerDemo/AILogManagerDemo/AILogManager/AILogger.swift | tiantiankaixin/MBlogDemo | 939852f505e1ee39468359388926f30e4617f827 | [
"MIT"
] | null | null | null | AILogManagerDemo/AILogManagerDemo/AILogManager/AILogger.swift | tiantiankaixin/MBlogDemo | 939852f505e1ee39468359388926f30e4617f827 | [
"MIT"
] | 4 | 2020-06-08T08:09:54.000Z | 2021-05-19T11:08:06.000Z | //
// AILogger.swift
// AILogManagerDemo
//
// Created by mal on 2020/5/21.
// Copyright © 2020 mal. All rights reserved.
//
import Foundation
import CocoaLumberjack
private let kLogPrefix = "AILog"
/// Contract for a file-based logger built on CocoaLumberjack.
protocol AILoggerProtocol {
    /// Directory that this logger's files are written to, or nil if it cannot
    /// be determined/created.
    static func directoryPath() -> String?
    /// Number of log files to keep (presumably the rolling-file limit used by
    /// the log manager — confirm where it is consumed).
    static func logFilesCount() -> UInt
    /// Maximum size of a single log file, in bytes.
    static func logFileSize() -> UInt64
    /// CocoaLumberjack context id that identifies messages from this logger.
    static func context() -> Int
    /// How long a log file is kept, in seconds.
    static func logFileSaveTime() -> TimeInterval
    /// Writes the given values to the log at info level.
    static func log(_ input: Any...)
}
/// Default implementations shared by all loggers.
extension AILoggerProtocol {
    /// Default retention: one day (in seconds).
    static func logFileSaveTime() -> TimeInterval {
        return 3600 * 24
    }
    /// Default: keep a single log file.
    static func logFilesCount() -> UInt {
        return 1
    }
    /// Default: 3 MB per log file.
    static func logFileSize() -> UInt64 {
        return 1024 * 1024 * 3
    }
    /// Logs the interpolated arguments synchronously at `.info` level, tagged
    /// with this logger's context so file destinations can filter by logger.
    /// NOTE(review): `"\(input)"` renders the variadic array (e.g. `["msg"]`),
    /// not a plain joined string — confirm this formatting is intended.
    static func log(_ input: Any...) {
        _DDLogMessage("\(input)", level: DDDefaultLogLevel, flag: .info, context: context(), file: #file, function: #function, line: #line, tag: nil, asynchronous: false, ddlog: DDLog.sharedInstance)
    }
}
/// Logger for general application messages; writes under "<prefix>AILog"
/// in the app's Documents sandbox.
class AIDefaultLogger: AILoggerProtocol {
    /// Creates (if needed) and returns the directory used for default logs.
    static func directoryPath() -> String? {
        let directoryName = kLogPrefix + "AILog"
        return AIFileManager.createDirectory(dirName: directoryName, sandBoxType: .inDocument)
    }
    /// CocoaLumberjack context id for the default logger.
    static func context() -> Int {
        return 1000
    }
}
/// Logger dedicated to download activity; writes under "<prefix>AIDownloadLog"
/// in the app's Documents sandbox.
class AIDownloadLogger: AILoggerProtocol {
    /// Creates (if needed) and returns the directory used for download logs.
    static func directoryPath() -> String? {
        let directoryName = kLogPrefix + "AIDownloadLog"
        return AIFileManager.createDirectory(dirName: directoryName, sandBoxType: .inDocument)
    }
    /// CocoaLumberjack context id for the download logger.
    static func context() -> Int {
        return 1001
    }
| 25.416667 | 200 | 0.645902 |
39d8146eece753c3f584a5490496625de86e6021 | 937 | js | JavaScript | build/views/mergeOptions.js | bjnortier/triptych | 3f61ac1b74842ac48d8a1eaef9e71becbcfb1e8a | [
"MIT"
] | null | null | null | build/views/mergeOptions.js | bjnortier/triptych | 3f61ac1b74842ac48d8a1eaef9e71becbcfb1e8a | [
"MIT"
] | null | null | null | build/views/mergeOptions.js | bjnortier/triptych | 3f61ac1b74842ac48d8a1eaef9e71becbcfb1e8a | [
"MIT"
] | null | null | null | 'use strict';
var keys = require('lodash.keys');
/**
 * Merge two option objects.
 *
 * Keys listed in `criteria.concatenations` are joined with a single space when
 * both sides define a truthy value; every other key must be equal on both
 * sides, otherwise a merge-conflict error is thrown.
 *
 * @param {Object} a - first options object
 * @param {Object} b - second options object
 * @param {Object} [criteria] - optional `{ concatenations: string[] }`
 * @returns {Object} the merged options
 * @throws {Error} when a non-concatenation key differs between `a` and `b`
 */
function mergeOptions(a, b, criteria) {
  criteria = criteria || {};
  var concatenations = criteria.concatenations || [];
  var result = {};

  // Concatenation keys: join with a space when present (truthy) on both sides,
  // otherwise take whichever side has a value.
  concatenations.forEach(function (key) {
    if (a[key] && b[key]) {
      result[key] = a[key] + ' ' + b[key];
    } else if (a[key]) {
      result[key] = a[key];
    } else if (b[key]) {
      result[key] = b[key];
    }
  });

  // Copy every non-concatenation key of `source`, failing if `other` defines
  // the same key with a different value. (Native Object.keys replaces the
  // lodash.keys dependency; behavior is identical for plain objects.)
  function copyNonConcatenated(source, other) {
    Object.keys(source).forEach(function (key) {
      if (concatenations.indexOf(key) === -1) {
        if (other.hasOwnProperty(key) && other[key] !== source[key]) {
          throw new Error('option merge conflict: ' + key);
        }
        result[key] = source[key];
      }
    });
  }
  copyNonConcatenated(a, b);
  copyNonConcatenated(b, a);

  return result;
}
module.exports = mergeOptions;
11c3c96a34780f5e315d74b34076ffab1bf8a2f3 | 224 | sql | SQL | tests/testdata/provider/testdata_pg_hstore.sql | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | null | null | null | tests/testdata/provider/testdata_pg_hstore.sql | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | null | null | null | tests/testdata/provider/testdata_pg_hstore.sql | dyna-mis/Hilabeling | cb7d5d4be29624a20c8a367162dbc6fd779b2b52 | [
"MIT"
] | 1 | 2021-12-25T08:40:30.000Z | 2021-12-25T08:40:30.000Z |
-- Test fixture: PostgreSQL table with an hstore column (QGIS provider tests).
CREATE EXTENSION IF NOT EXISTS hstore;
DROP TABLE IF EXISTS qgis_test.dict;
-- Table whose "value" column stores key=>value pairs.
CREATE TABLE qgis_test.dict
(
  pk SERIAL NOT NULL PRIMARY KEY,
  value hstore
);
-- Seed one row with two hstore pairs: a=>b and 1=>2.
INSERT INTO qgis_test.dict(value)
VALUES
  ('a=>b,1=>2');
| 14.933333 | 38 | 0.714286 |
aff7c3f1c6048a1e36ebbd7a35699702a2fb5263 | 991 | swift | Swift | igen_templates/login/UseCaseMock.swift | tuan188/MGiGen | 8aa505255b95173adbebe7d1e90f83269bd3343f | [
"MIT"
] | 15 | 2019-04-04T00:57:58.000Z | 2022-02-09T02:35:48.000Z | igen_templates/login/UseCaseMock.swift | tuan188/MGiGen | 8aa505255b95173adbebe7d1e90f83269bd3343f | [
"MIT"
] | null | null | null | igen_templates/login/UseCaseMock.swift | tuan188/MGiGen | 8aa505255b95173adbebe7d1e90f83269bd3343f | [
"MIT"
] | 8 | 2019-05-17T02:39:13.000Z | 2021-12-01T07:17:24.000Z | @testable import {{ project }}
import Dto
import RxSwift
import ValidatedPropertyKit
/// Template-generated mock of the `{{ name }}UseCaseType` protocol for unit
/// tests: each method records that it was called and returns a stub value the
/// test can preconfigure. (`{{ name }}` / `{{ project }}` are code-generation
/// placeholders, filled in when the template is rendered.)
final class {{ name }}UseCaseMock: {{ name }}UseCaseType {
    // MARK: - validateUserName
    var validateUserNameCalled = false  // flipped to true on first call
    var validateUserNameReturnValue = ValidationResult.success(())

    func validateUserName(_ username: String) -> ValidationResult {
        validateUserNameCalled = true
        return validateUserNameReturnValue
    }

    // MARK: - validatePassword
    var validatePasswordCalled = false  // flipped to true on first call
    var validatePasswordReturnValue = ValidationResult.success(())

    func validatePassword(_ password: String) -> ValidationResult {
        validatePasswordCalled = true
        return validatePasswordReturnValue
    }

    // MARK: - login
    var loginCalled = false  // flipped to true on first call
    var loginReturnValue = Observable.just(())

    func login(dto: LoginDto) -> Observable<Void> {
        loginCalled = true
        return loginReturnValue
    }
}
| 26.078947 | 67 | 0.673058 |
0b5e60b401ce650ac7ee0444f30c678dd26b85f7 | 6,554 | swift | Swift | arcgis-ios-sdk-samples/Scenes/Get elevation at a point/GetElevationPointViewController.swift | mlph-etorozco/gardensbythebay-arcgis-ios | 559f4e171cca60e7ef4a3018356220205c32e2b8 | [
"Apache-2.0"
] | 292 | 2015-01-06T19:14:21.000Z | 2022-03-23T02:35:38.000Z | arcgis-ios-sdk-samples/Scenes/Get elevation at a point/GetElevationPointViewController.swift | feifei-shen/arcgis-runtime-samples-ios | 6967286dbe41bd389f18b9de32b92c4374920a70 | [
"Apache-2.0"
] | 542 | 2015-01-05T23:15:30.000Z | 2022-03-22T17:13:59.000Z | arcgis-ios-sdk-samples/Scenes/Get elevation at a point/GetElevationPointViewController.swift | feifei-shen/arcgis-runtime-samples-ios | 6967286dbe41bd389f18b9de32b92c4374920a70 | [
"Apache-2.0"
] | 359 | 2015-01-10T13:55:34.000Z | 2022-03-16T14:29:34.000Z | // Copyright 2019 Esri.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import UIKit
import ArcGIS
/// A view controller that manages the interface of the Get Elevation at a Point
/// sample.
class GetElevationPointViewController: UIViewController {
    /// The scene view managed by the view controller.
    @IBOutlet var sceneView: AGSSceneView! {
        didSet {
            // Initialize a scene.
            sceneView.scene = makeScene()
            sceneView.touchDelegate = self
            // Set scene's viewpoint.
            let camera = AGSCamera(latitude: 28.42, longitude: 83.9, altitude: 10000.0, heading: 10.0, pitch: 80.0, roll: 0.0)
            sceneView.setViewpointCamera(camera)
            sceneView.graphicsOverlays.add(graphicsOverlay)
        }
    }
    /// The graphics overlay used to show a graphic at the tapped point.
    /// Placement is `.relative`, so graphics sit relative to the surface.
    private let graphicsOverlay: AGSGraphicsOverlay = {
        let graphicsOverlay = AGSGraphicsOverlay()
        graphicsOverlay.renderingMode = .dynamic
        graphicsOverlay.sceneProperties?.surfacePlacement = .relative
        return graphicsOverlay
    }()
    /// Creates a scene.
    ///
    /// - Returns: A new `AGSScene` object with a base surface configured with
    /// an elevation source.
    private func makeScene() -> AGSScene {
        let scene = AGSScene(basemapType: .imageryWithLabels)
        let surface = AGSSurface()
        // Create an elevation source.
        let elevationURL = URL(string: "https://elevation3d.arcgis.com/arcgis/rest/services/WorldElevation3D/Terrain3D/ImageServer")
        let elevationSource = AGSArcGISTiledElevationSource(url: elevationURL!)
        // Add the elevation source to the surface.
        surface.elevationSources.append(elevationSource)
        scene.baseSurface = surface
        return scene
    }
    /// Dismisses the elevation popover and removes the associated graphic.
    /// Also clears the viewpoint-changed handler installed when the popover
    /// was shown, so it stops firing after dismissal.
    func dismissElevationPopover() {
        guard presentedViewController != nil else { return }
        dismiss(animated: false)
        sceneView.viewpointChangedHandler = nil
        graphicsOverlay.graphics.removeAllObjects()
    }
    // MARK: UIViewController
    override func viewDidLoad() {
        super.viewDidLoad()
        // Add the source code button item to the right of navigation bar.
        (self.navigationItem.rightBarButtonItem as! SourceCodeBarButtonItem).filenames = ["GetElevationPointViewController", "ElevationViewController"]
    }
    // Dismiss the popover on size/orientation changes so it is not left
    // anchored to a stale screen point.
    override func viewWillTransition(to size: CGSize, with coordinator: UIViewControllerTransitionCoordinator) {
        super.viewWillTransition(to: size, with: coordinator)
        dismissElevationPopover()
    }
}
// MARK: - AGSGeoViewTouchDelegate
extension GetElevationPointViewController: AGSGeoViewTouchDelegate {
    /// Handles a tap: drops (or moves) a marker at the tapped base-surface
    /// point, asynchronously queries the surface elevation there, and presents
    /// the result in a popover anchored at the tap location.
    func geoView(_ geoView: AGSGeoView, didTapAtScreenPoint screenPoint: CGPoint, mapPoint: AGSPoint) {
        if let relativeSurfacePoint = sceneView?.screen(toBaseSurface: screenPoint) {
            dismiss(animated: false)
            // Get the tapped point
            let point = AGSPoint(x: relativeSurfacePoint.x, y: relativeSurfacePoint.y, spatialReference: .wgs84())
            if let graphic = graphicsOverlay.graphics.firstObject as? AGSGraphic {
                // Move the symbol to the tapped point.
                graphic.geometry = point
            } else {
                // Create the symbol at the tapped point.
                let marker = AGSSimpleMarkerSceneSymbol(style: .sphere, color: .red, height: 100, width: 100, depth: 100, anchorPosition: .center)
                let graphic = AGSGraphic(geometry: point, symbol: marker)
                graphicsOverlay.graphics.add(graphic)
            }
            // Get the surface elevation at the surface point.
            self.sceneView.scene?.baseSurface!.elevation(for: relativeSurfacePoint) { (results: Double, error: Error?) in
                if let error = error {
                    self.presentAlert(error: error)
                } else {
                    self.showPopover(elevation: results, popoverPoint: screenPoint)
                }
            }
        }
        // Dismiss the elevation popover and hide the graphic when the user
        // interacts with the scene.
        sceneView.viewpointChangedHandler = { [weak self] in
            DispatchQueue.main.async {
                self?.dismissElevationPopover()
            }
        }
    }
    /// Builds and presents the popover that displays `elevation` (meters),
    /// anchored at `popoverPoint` in the scene view.
    private func showPopover(elevation: Double, popoverPoint: CGPoint) {
        guard let elevationViewController = storyboard?.instantiateViewController(withIdentifier: "ElevationViewController") as? ElevationViewController else {
            return
        }
        // Setup the controller to display as a popover.
        elevationViewController.modalPresentationStyle = .popover
        elevationViewController.elevation = Measurement(value: elevation, unit: UnitLength.meters)
        if let popoverPresentationController = elevationViewController.popoverPresentationController {
            popoverPresentationController.delegate = self
            // Let touches pass through to the scene so the user can keep
            // interacting while the popover is visible.
            popoverPresentationController.passthroughViews = [sceneView]
            popoverPresentationController.sourceRect = CGRect(origin: popoverPoint, size: .zero)
            popoverPresentationController.sourceView = sceneView
        }
        present(elevationViewController, animated: false)
    }
}
extension GetElevationPointViewController: UIAdaptivePresentationControllerDelegate, UIPopoverPresentationControllerDelegate {
    /// Returning `.none` keeps the popover presentation even in compact size
    /// classes (prevents adaptation to a full-screen/sheet presentation).
    func adaptivePresentationStyle(for controller: UIPresentationController, traitCollection: UITraitCollection) -> UIModalPresentationStyle {
        return .none
    }
    func popoverPresentationControllerDidDismissPopover(_ popoverPresentationController: UIPopoverPresentationController) {
        // Clear selection when popover is dismissed.
        graphicsOverlay.graphics.removeAllObjects()
    }
}
| 43.986577 | 159 | 0.675465 |
e153949864508c3793ba5909adf1667096dd6b8f | 594 | kt | Kotlin | app/src/main/java/com/ainsigne/travelappdemo/viewmodels/VenueItemsViewModelFactory.kt | cominteract/TravelAppDemoAndroid | 5f2332f8ac4781c7dc52444432b77c3133cf5744 | [
"MIT"
] | null | null | null | app/src/main/java/com/ainsigne/travelappdemo/viewmodels/VenueItemsViewModelFactory.kt | cominteract/TravelAppDemoAndroid | 5f2332f8ac4781c7dc52444432b77c3133cf5744 | [
"MIT"
] | null | null | null | app/src/main/java/com/ainsigne/travelappdemo/viewmodels/VenueItemsViewModelFactory.kt | cominteract/TravelAppDemoAndroid | 5f2332f8ac4781c7dc52444432b77c3133cf5744 | [
"MIT"
] | null | null | null | package com.ainsigne.travelappdemo.viewmodels
import androidx.lifecycle.ViewModel
import androidx.lifecycle.ViewModelProvider
import com.ainsigne.travelappdemo.data.VenueItemsRepository
import com.ainsigne.travelappdemo.interfaces.ItemRepository
/**
 * Factory that creates a [VenueItemsViewModel] backed by the given [ItemRepository].
 */
class VenueItemsViewModelFactory(var repo: ItemRepository) :
    ViewModelProvider.NewInstanceFactory() {

    @Suppress("UNCHECKED_CAST")
    override fun <T : ViewModel> create(modelClass: Class<T>): T {
        // Always builds a VenueItemsViewModel regardless of modelClass; the
        // cast is unchecked by design, matching NewInstanceFactory's contract.
        return VenueItemsViewModel(repo) as T
    }
}
| 33 | 103 | 0.813131 |
56d05ef03c12938cc7381edb5785cc3b3cd90ba0 | 2,044 | ts | TypeScript | packages/compiler-dom/src/transforms/Transition.ts | btea/vue-next | 0cf9ae62be21a6180f909e03091f087254ae3e52 | [
"MIT"
] | 2,198 | 2022-01-17T10:25:48.000Z | 2022-03-31T16:41:11.000Z | packages/compiler-dom/src/transforms/Transition.ts | btea/vue-next | 0cf9ae62be21a6180f909e03091f087254ae3e52 | [
"MIT"
] | 486 | 2022-01-17T10:59:41.000Z | 2022-03-31T10:28:18.000Z | packages/compiler-dom/src/transforms/Transition.ts | btea/vue-next | 0cf9ae62be21a6180f909e03091f087254ae3e52 | [
"MIT"
] | 555 | 2022-01-17T12:55:31.000Z | 2022-03-31T14:13:40.000Z | import {
NodeTransform,
NodeTypes,
ElementTypes,
ComponentNode,
IfBranchNode
} from '@vue/compiler-core'
import { TRANSITION } from '../runtimeHelpers'
import { createDOMCompilerError, DOMErrorCodes } from '../errors'
export const transformTransition: NodeTransform = (node, context) => {
if (
node.type === NodeTypes.ELEMENT &&
node.tagType === ElementTypes.COMPONENT
) {
const component = context.isBuiltInComponent(node.tag)
if (component === TRANSITION) {
return () => {
if (!node.children.length) {
return
}
// warn multiple transition children
if (hasMultipleChildren(node)) {
context.onError(
createDOMCompilerError(
DOMErrorCodes.X_TRANSITION_INVALID_CHILDREN,
{
start: node.children[0].loc.start,
end: node.children[node.children.length - 1].loc.end,
source: ''
}
)
)
}
// check if it's s single child w/ v-show
// if yes, inject "persisted: true" to the transition props
const child = node.children[0]
if (child.type === NodeTypes.ELEMENT) {
for (const p of child.props) {
if (p.type === NodeTypes.DIRECTIVE && p.name === 'show') {
node.props.push({
type: NodeTypes.ATTRIBUTE,
name: 'persisted',
value: undefined,
loc: node.loc
})
}
}
}
}
}
}
}
function hasMultipleChildren(node: ComponentNode | IfBranchNode): boolean {
// #1352 filter out potential comment nodes.
const children = (node.children = node.children.filter(
c =>
c.type !== NodeTypes.COMMENT &&
!(c.type === NodeTypes.TEXT && !c.content.trim())
))
const child = children[0]
return (
children.length !== 1 ||
child.type === NodeTypes.FOR ||
(child.type === NodeTypes.IF && child.branches.some(hasMultipleChildren))
)
}
| 28.788732 | 77 | 0.559198 |
95950cf4f48799fb06c27c094d5b247eff4fa60b | 1,694 | swift | Swift | Calendule/MainView.swift | alexph9/CalenduleAPP | a5973c26d31124b1954fbe5215bfe6af019d7cc8 | [
"MIT"
] | null | null | null | Calendule/MainView.swift | alexph9/CalenduleAPP | a5973c26d31124b1954fbe5215bfe6af019d7cc8 | [
"MIT"
] | null | null | null | Calendule/MainView.swift | alexph9/CalenduleAPP | a5973c26d31124b1954fbe5215bfe6af019d7cc8 | [
"MIT"
] | null | null | null | //
// MainView.swift
// Calendule
//
// Created by Alex on 5/9/17.
// Copyright © 2017 Talentum. All rights reserved.
//
import UIKit
import FirebaseAuth
class MainView: UIViewController {
@IBOutlet weak var logOutButton: UIButton!
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view.
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
/*
// MARK: - Navigation
// In a storyboard-based application, you will often want to do a little preparation before navigation
override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
// Get the new view controller using segue.destinationViewController.
// Pass the selected object to the new view controller.
}
*/
@IBAction func logOut(_ sender: Any) {
if Auth.auth().currentUser != nil {
print("Main 1 \(Auth.auth().currentUser!)")
do {
try? Auth.auth().signOut()
} catch let logOutError {
print(logOutError)
}
}
print("Main 2 \(Auth.auth().currentUser)")
/*
let storyboard = UIStoryboard(name: "Main", bundle: nil)
let signInVC = storyboard.instantiateViewController(withIdentifier: "LoginViewController")
self.present(signInVC, animated: true, completion: nil)
*/
//Al cerrar esta vista, vuelve a la pantalla de inicio cerrando la sesion de la cuenta de Google
dismiss(animated: true, completion: nil)
}
}
| 27.770492 | 106 | 0.614522 |
e6f7458d9fae3817e01e6c495a9672a40c8a98f8 | 200 | lua | Lua | scripts/doors/locations/AcuityLakefront.lua | SaltContainer/PokemonPlatinumMapRandoTracker | 56238f03fc862f075dae5b064bc492714f094f7f | [
"MIT"
] | null | null | null | scripts/doors/locations/AcuityLakefront.lua | SaltContainer/PokemonPlatinumMapRandoTracker | 56238f03fc862f075dae5b064bc492714f094f7f | [
"MIT"
] | 1 | 2022-01-14T01:36:35.000Z | 2022-01-16T23:18:22.000Z | scripts/doors/locations/AcuityLakefront.lua | SaltContainer/PokemonPlatinumMapRandoTracker | 56238f03fc862f075dae5b064bc492714f094f7f | [
"MIT"
] | null | null | null | local acuity_lakefront_0 = DoorSlot("acuity_lakefront","0")
local acuity_lakefront_0_hub = DoorSlotHub("acuity_lakefront","0",acuity_lakefront_0)
acuity_lakefront_0:setHubIcon(acuity_lakefront_0_hub)
| 50 | 85 | 0.855 |
22d57b6305fc46c5dc2410c753d32601420db007 | 629 | h | C | Sudoku_CLI/Sudoku.h | Simik31/Sudoku_CLI | 4e87e29ae8b912fb52d3d2db68c9205b270049e1 | [
"WTFPL"
] | null | null | null | Sudoku_CLI/Sudoku.h | Simik31/Sudoku_CLI | 4e87e29ae8b912fb52d3d2db68c9205b270049e1 | [
"WTFPL"
] | null | null | null | Sudoku_CLI/Sudoku.h | Simik31/Sudoku_CLI | 4e87e29ae8b912fb52d3d2db68c9205b270049e1 | [
"WTFPL"
] | null | null | null | #pragma once
#ifndef SUDOKU_H
#define SUDOKU_H
#include <vector>
#include <Windows.h>
class Sudoku
{
public:
Sudoku();
Sudoku(const std::vector<int>& initial_state);
bool is_solved();
bool is_filled();
int increment(const COORD& coord, const int value = 1, const bool cls = true);
static int get_state(const Sudoku& sudoku, const COORD& cursor);
static bool get_initial(const Sudoku& sudoku, const COORD& cursor);
void fill_number(const COORD& cursor, const int number, const bool update = true);
void test_if_solved();
private:
bool solved = false;
std::vector<int> state;
std::vector<bool> initial;
};
#endif | 20.966667 | 83 | 0.72973 |
b5f81c343b0e24261b3b783196948064518402de | 6,974 | rs | Rust | backend/server/src/state.rs | hgzimmerman/SWEN344-web-project | 39c7f51d43646c1cf7d8ba4686195ef2c23a2a43 | [
"MIT"
] | 1 | 2020-12-28T01:44:40.000Z | 2020-12-28T01:44:40.000Z | backend/server/src/state.rs | hgzimmerman/SWEN344-web-project | 39c7f51d43646c1cf7d8ba4686195ef2c23a2a43 | [
"MIT"
] | 72 | 2019-01-26T14:34:11.000Z | 2019-04-30T00:27:21.000Z | backend/server/src/state.rs | hgzimmerman/SWEN344-web-project | 39c7f51d43646c1cf7d8ba4686195ef2c23a2a43 | [
"MIT"
] | null | null | null | //! Represents the shared server resources that all requests may utilize.
use crate::{error::Error, server_auth::secret_filter};
use apply::Apply;
use authorization::Secret;
use egg_mode::KeyPair;
use hyper::{
client::{connect::dns::GaiResolver, HttpConnector},
Body, Client,
};
use hyper_tls::HttpsConnector;
use pool::{init_pool, Pool, PoolConfig, PooledConn, DATABASE_URL};
use rand::{distributions::Alphanumeric, thread_rng, Rng};
use std::path::PathBuf;
use warp::{Filter, Rejection};
/// Simplified type for representing a HttpClient.
pub type HttpsClient = Client<HttpsConnector<HttpConnector<GaiResolver>>, Body>;
/// State that is passed around to all of the api handlers.
/// It can be used to acquire connections to the database,
/// or to reference the key that signs the access tokens.
///
/// These entities are acquired by running a filter function that brings them
/// into the scope of the relevant api.
pub struct State {
/// A pool of database connections.
database_connection_pool: Pool,
/// The secret key.
secret: Secret,
/// Https client
https: HttpsClient,
/// Twitter consumer token
twitter_consumer_token: KeyPair,
/// The path to the server directory.
/// This allows file resources to have a common reference point when determining from where to serve assets.
server_lib_root: PathBuf,
/// Is the server running in a production environment
is_production: bool,
}
/// Configuration object for creating the state.
///
/// If unspecified, it will default to a sane default.
#[derive(Debug, Default)]
pub struct StateConfig {
pub secret: Option<Secret>,
pub max_pool_size: Option<u32>,
pub server_lib_root: Option<PathBuf>,
pub is_production: bool,
}
impl State {
/// Creates a new state.
pub fn new(conf: StateConfig) -> Self {
const RANDOM_KEY_LENGTH: usize = 200;
let secret = conf.secret.unwrap_or_else(|| {
// Generate a new random key if none is provided.
thread_rng()
.sample_iter(&Alphanumeric)
.take(RANDOM_KEY_LENGTH)
.collect::<String>()
.apply(|s| Secret::new(&s))
});
let pool_conf = PoolConfig {
max_connections: conf.max_pool_size,
..Default::default()
};
let pool = init_pool(DATABASE_URL, pool_conf);
let https = HttpsConnector::new(4).unwrap();
let client = Client::builder().build::<_, _>(https);
let twitter_con_token = get_twitter_con_token();
let root = conf.server_lib_root.unwrap_or_else(|| PathBuf::from("./"));
State {
database_connection_pool: pool, //db_filter(pool),
secret,
https: client,
twitter_consumer_token: twitter_con_token.clone(),
server_lib_root: root,
is_production: conf.is_production,
}
}
/// Gets a pooled connection to the database.
pub fn db(&self) -> impl Filter<Extract = (PooledConn,), Error = Rejection> + Clone {
/// Filter that exposes connections to the database to individual filter requests
fn db_filter(pool: Pool) -> impl Filter<Extract = (PooledConn,), Error = Rejection> + Clone {
fn get_conn_from_pool(pool: &Pool) -> Result<PooledConn, Rejection> {
pool.clone()
.get() // Will get the connection from the pool, or wait a specified time until one becomes available.
.map_err(|_| {
log::error!("Pool exhausted: could not get database connection.");
Error::DatabaseUnavailable.reject()
})
}
warp::any().and_then(move || -> Result<PooledConn, Rejection> { get_conn_from_pool(&pool) })
}
db_filter(self.database_connection_pool.clone())
}
/// Gets the secret used for authoring JWTs
pub fn secret(&self) -> impl Filter<Extract = (Secret,), Error = Rejection> + Clone {
secret_filter(self.secret.clone())
}
/// Gets the https client used for making dependent api calls.
pub fn https_client(&self) -> impl Filter<Extract = (HttpsClient,), Error = Rejection> + Clone {
/// Function that creates the HttpClient filter.
fn http_filter(
client: HttpsClient,
) -> impl Filter<Extract = (HttpsClient,), Error = Rejection> + Clone {
// This needs to be able to return a Result w/a Rejection, because there is no way to specify the type of
// warp::never::Never because it is private, precluding the possibility of using map instead of and_then().
// This adds space overhead, but not nearly as much as using a boxed filter.
warp::any().and_then(move || -> Result<HttpsClient, Rejection> { Ok(client.clone()) })
}
http_filter(self.https.clone())
}
/// Access the twitter consumer token.
pub fn twitter_consumer_token(&self) -> impl Filter<Extract = (KeyPair,), Error = Rejection> + Clone {
fn twitter_consumer_token_filter(twitter_consumer_token: KeyPair) -> impl Filter<Extract = (KeyPair,), Error = Rejection> + Clone {
warp::any().and_then(move || -> Result<KeyPair, Rejection> { Ok(twitter_consumer_token.clone()) })
}
twitter_consumer_token_filter(self.twitter_consumer_token.clone())
}
pub fn server_lib_root(&self) -> PathBuf {
self.server_lib_root.clone()
}
pub fn is_production(&self) -> bool {
self.is_production
}
/// Creates a new state object from an existing object pool.
/// This is useful if using fixtures.
#[cfg(test)]
pub fn testing_init(pool: Pool, secret: Secret) -> Self {
use std::time::Duration;
let https = HttpsConnector::new(1).unwrap();
let client = Client::builder()
.keep_alive_timeout(Some(Duration::new(12, 0)))
.build::<_, Body>(https);
let twitter_con_token = get_twitter_con_token();
State {
database_connection_pool: pool,
secret,
https: client,
twitter_consumer_token: twitter_con_token,
server_lib_root: PathBuf::from("./"), // THIS makes the assumption that the tests are run from the backend/server dir.
is_production: false,
}
}
}
/// Gets the connection key pair for the serer.
/// This represents the authenticity of the application
fn get_twitter_con_token() -> KeyPair {
// TODO move getting these into a config object, or get them directly from the filesystem.
// These definitely shouldn't be in source code, but I don't care,
// I just want this to work right now. Also, this is a school project.
const KEY: &str = "Pq2sA4Lfbovd4SLQhSQ6UPEVg";
const SECRET: &str = "uK6U7Xqj2QThlm6H3y8dKSH3itZgpo9AVhR5or80X9umZc62ln";
egg_mode::KeyPair::new(KEY, SECRET)
}
| 38.530387 | 139 | 0.640379 |
03238bae7fd8ef8cfd2a51fbb2836df0d34129e6 | 485 | asm | Assembly | Microprocessor_Interfacing_CSE_2006/Applications_Lab_10/divisibility.asm | aadhityasw/VIT-Labs | 2c449f64f4fdd8c0ed5f2b51d05a7c586e6ab2ab | [
"CC0-1.0"
] | 2 | 2021-11-18T05:30:24.000Z | 2022-03-07T06:28:06.000Z | Microprocessor_Interfacing_CSE_2006/Applications_Lab_10/divisibility.asm | aadhityasw/VIT-Labs | 2c449f64f4fdd8c0ed5f2b51d05a7c586e6ab2ab | [
"CC0-1.0"
] | null | null | null | Microprocessor_Interfacing_CSE_2006/Applications_Lab_10/divisibility.asm | aadhityasw/VIT-Labs | 2c449f64f4fdd8c0ed5f2b51d05a7c586e6ab2ab | [
"CC0-1.0"
] | 3 | 2021-10-14T01:10:34.000Z | 2022-03-18T14:33:52.000Z | ASSUME CS:CODE,DS:DATA
DATA SEGMENT
arr db 31d, 23d, 61d, 23d, 44d, 00d
count db 00h
DATA ENDS
CODE SEGMENT
START:
mov ax,DATA
mov ds,ax
mov cx,06h
mov bl,00h
mov bh,04d
mov si, OFFSET arr
L1:
mov ax,0000h
mov al,[si]
add si,01h
div bh
cmp ah,00h
jne L2
inc bl
L2:
loop L1
mov ax,0000h
mov ah,02h
add bl,48d
mov dl,bl
int 21h
hlt
CODE ENDS
END START | 15.645161 | 39 | 0.527835 |
1613963ca0d2273372c4d6ee11c717de43a248a1 | 1,336 | h | C | Source/santad/Logs/SNTSimpleMaildir.h | khangthk/santa | a65c91874b00a325b91c65f3612828b7f242fa39 | [
"Apache-2.0"
] | null | null | null | Source/santad/Logs/SNTSimpleMaildir.h | khangthk/santa | a65c91874b00a325b91c65f3612828b7f242fa39 | [
"Apache-2.0"
] | null | null | null | Source/santad/Logs/SNTSimpleMaildir.h | khangthk/santa | a65c91874b00a325b91c65f3612828b7f242fa39 | [
"Apache-2.0"
] | null | null | null | /// Copyright 2021 Google Inc. All rights reserved.
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
#import <Foundation/Foundation.h>
#import "Source/common/Santa.pbobjc.h"
#import "Source/santad/Logs/SNTLogOutput.h"
NS_ASSUME_NONNULL_BEGIN
@interface SNTSimpleMaildir : NSObject<SNTLogOutput>
- (instancetype)initWithBaseDirectory:(NSString *)baseDirectory
filenamePrefix:(NSString *)filenamePrefix
fileSizeThreshold:(size_t)fileSiszeThreshold
directorySizeThreshold:(size_t)directorySizeThreshold
maxTimeBetweenFlushes:(NSTimeInterval)maxTimeBetweenFlushes
NS_DESIGNATED_INITIALIZER;
- (instancetype)init NS_UNAVAILABLE;
- (void)logEvent:(SNTPBSantaMessage *)message;
- (void)flush;
@end
NS_ASSUME_NONNULL_END
| 33.4 | 79 | 0.730539 |
c69fef60b5c19c6680daf2380323be5cd3081dad | 1,441 | sql | SQL | Portal/Server/Head/src/Service/DataAccessLayer/UpgradeScripts/0016.sql | jackobo/jackobs-code | d028554dfc14b735664737043ad58143a78903ef | [
"MIT"
] | null | null | null | Portal/Server/Head/src/Service/DataAccessLayer/UpgradeScripts/0016.sql | jackobo/jackobs-code | d028554dfc14b735664737043ad58143a78903ef | [
"MIT"
] | 3 | 2022-02-19T06:21:43.000Z | 2022-03-02T02:32:16.000Z | Portal/Server/Head/src/Service/DataAccessLayer/UpgradeScripts/0016.sql | jackobo/jackobs-code | d028554dfc14b735664737043ad58143a78903ef | [
"MIT"
] | null | null | null | sp_rename 'GameVersion_DownloadUri', 'GameVersion_Regulation'
GO
sp_rename 'GamingComponentVersion_DownloadUri', 'GamingComponentVersion_Regulation'
GO
ALTER PROCEDURE [dbo].[GetGamesVersionsAtDate] (@date datetime)
AS
BEGIN
-- SET NOCOUNT ON added to prevent extra result sets from
-- interfering with SELECT statements.
SET NOCOUNT ON;
SELECT Game.MainGameType, Game.GameName, Game.IsExternal, Latest.Technology, GameVersion_1.VersionFolder, GameVersion_1.VersionAsLong,
GameVersion_1.CreatedDate, GameVersion_1.CreatedBy, GameVersion_1.TriggeredBy, GameVersion_Regulation.Regulation,
GameVersion_Regulation.DownloadUri
FROM (SELECT Game_ID, Technology, MAX(CreatedDate) AS LastDate
FROM GameVersion
WHERE (CreatedDate < @date)
GROUP BY Game_ID, Technology) AS Latest INNER JOIN
Game ON Latest.Game_ID = Game.Game_ID INNER JOIN
GameVersion AS GameVersion_1 ON Latest.Game_ID = GameVersion_1.Game_ID AND Latest.Technology = GameVersion_1.Technology AND
Latest.LastDate = GameVersion_1.CreatedDate INNER JOIN
GameVersion_Regulation ON GameVersion_1.GameVersion_ID = GameVersion_Regulation.GameVersion_ID
order by MainGameType, Regulation
END
GO
| 45.03125 | 149 | 0.673838 |
ad08fc5f631a6f411e1dec2737124a4dc6464f47 | 4,580 | rs | Rust | src/sys/statvfs.rs | srikwit/nix | 3492e9f50605f8f3352400bfe5276722967bd174 | [
"MIT"
] | null | null | null | src/sys/statvfs.rs | srikwit/nix | 3492e9f50605f8f3352400bfe5276722967bd174 | [
"MIT"
] | null | null | null | src/sys/statvfs.rs | srikwit/nix | 3492e9f50605f8f3352400bfe5276722967bd174 | [
"MIT"
] | null | null | null | //! Get filesystem statistics
//!
//! See [the man pages](http://pubs.opengroup.org/onlinepubs/9699919799/functions/fstatvfs.html)
//! for more details.
use std::mem;
use std::os::unix::io::AsRawFd;
use libc::{self, c_ulong};
use errno::Errno;
use {NixPath, Result};
libc_bitflags!(
/// File system mount Flags
#[repr(C)]
#[derive(Default)]
pub struct FsFlags: c_ulong {
/// Read Only
ST_RDONLY;
/// Do not allow the set-uid bits to have an effect
ST_NOSUID;
/// Do not interpret character or block-special devices
#[cfg(any(target_os = "android", target_os = "linux"))]
ST_NODEV;
/// Do not allow execution of binaries on the filesystem
#[cfg(any(target_os = "android", target_os = "linux"))]
ST_NOEXEC;
/// All IO should be done synchronously
#[cfg(any(target_os = "android", target_os = "linux"))]
ST_SYNCHRONOUS;
/// Allow mandatory locks on the filesystem
#[cfg(any(target_os = "android", target_os = "linux"))]
ST_MANDLOCK;
/// Write on file/directory/symlink
#[cfg(target_os = "linux")]
ST_WRITE;
/// Append-only file
#[cfg(target_os = "linux")]
ST_APPEND;
/// Immutable file
#[cfg(target_os = "linux")]
ST_IMMUTABLE;
/// Do not update access times on files
#[cfg(any(target_os = "android", target_os = "linux"))]
ST_NOATIME;
/// Do not update access times on files
#[cfg(any(target_os = "android", target_os = "linux"))]
ST_NODIRATIME;
/// Update access time relative to modify/change time
#[cfg(any(target_os = "android", all(target_os = "linux", not(target_env = "musl"))))]
ST_RELATIME;
}
);
/// Wrapper around the POSIX `statvfs` struct
///
/// For more information see the [`statvfs(3)` man pages](http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/sys_statvfs.h.html).
#[repr(transparent)]
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct Statvfs(libc::statvfs);
impl Statvfs {
/// get the file system block size
pub fn block_size(&self) -> c_ulong {
self.0.f_bsize
}
/// Get the fundamental file system block size
pub fn fragment_size(&self) -> c_ulong {
self.0.f_frsize
}
/// Get the number of blocks.
///
/// Units are in units of `fragment_size()`
pub fn blocks(&self) -> libc::fsblkcnt_t {
self.0.f_blocks
}
/// Get the number of free blocks in the file system
pub fn blocks_free(&self) -> libc::fsblkcnt_t {
self.0.f_bfree
}
/// Get the number of free blocks for unprivileged users
pub fn blocks_available(&self) -> libc::fsblkcnt_t {
self.0.f_bavail
}
/// Get the total number of file inodes
pub fn files(&self) -> libc::fsfilcnt_t {
self.0.f_files
}
/// Get the number of free file inodes
pub fn files_free(&self) -> libc::fsfilcnt_t {
self.0.f_ffree
}
/// Get the number of free file inodes for unprivileged users
pub fn files_available(&self) -> libc::fsfilcnt_t {
self.0.f_favail
}
/// Get the file system id
pub fn filesystem_id(&self) -> c_ulong {
self.0.f_fsid
}
/// Get the mount flags
pub fn flags(&self) -> FsFlags {
FsFlags::from_bits_truncate(self.0.f_flag)
}
/// Get the maximum filename length
pub fn name_max(&self) -> c_ulong {
self.0.f_namemax
}
}
/// Return a `Statvfs` object with information about the `path`
pub fn statvfs<P: ?Sized + NixPath>(path: &P) -> Result<Statvfs> {
unsafe {
Errno::clear();
let mut stat = mem::MaybeUninit::<libc::statvfs>::uninit();
let res = path.with_nix_path(|path| libc::statvfs(path.as_ptr(), stat.as_mut_ptr()))?;
Errno::result(res).map(|_| Statvfs(stat.assume_init()))
}
}
/// Return a `Statvfs` object with information about `fd`
pub fn fstatvfs<T: AsRawFd>(fd: &T) -> Result<Statvfs> {
unsafe {
Errno::clear();
let mut stat = mem::MaybeUninit::<libc::statvfs>::uninit();
Errno::result(libc::fstatvfs(fd.as_raw_fd(), stat.as_mut_ptr()))
.map(|_| Statvfs(stat.assume_init()))
}
}
#[cfg(test)]
mod test {
use std::fs::File;
use sys::statvfs::*;
#[test]
fn statvfs_call() {
statvfs("/".as_bytes()).unwrap();
}
#[test]
fn fstatvfs_call() {
let root = File::open("/").unwrap();
fstatvfs(&root).unwrap();
}
}
| 28.987342 | 135 | 0.596507 |
e9b2d4f1d8f53ce5050975bd2b1bd07d9dd5e1a0 | 21,464 | swift | Swift | VernierCaliper2/VCVernierView.swift | jiunwei/VernierCaliper2 | 2b0726cebe41cf63bbb7eb9b33617848e5dd4e09 | [
"Unlicense",
"MIT"
] | null | null | null | VernierCaliper2/VCVernierView.swift | jiunwei/VernierCaliper2 | 2b0726cebe41cf63bbb7eb9b33617848e5dd4e09 | [
"Unlicense",
"MIT"
] | null | null | null | VernierCaliper2/VCVernierView.swift | jiunwei/VernierCaliper2 | 2b0726cebe41cf63bbb7eb9b33617848e5dd4e09 | [
"Unlicense",
"MIT"
] | null | null | null | //
// VCVernierView.swift
// VernierCaliper2
//
// Created by Jiun Wei Chia on 30/1/17.
// Copyright © 2017 Jiun Wei Chia. All rights reserved.
//
import UIKit
@IBDesignable class VCVernierView: UIView {
// MARK: - Type definitions
enum Precision: String {
case point01 = "0.01 cm"
case point005 = "0.005 cm"
}
enum DraggedComponent {
case vernierScale(delta: CGSize)
case object(delta: CGSize)
case none
}
// MARK: - Constants
let margin: CGFloat = 20.0
let width: CGFloat = 580.0
let height: CGFloat = 180.0
let point01Width: CGFloat = 130.0
let point005Width: CGFloat = 230.0
// MARK: - Properties
var precision = Precision.point01 {
didSet {
point01LinesLayer.isHidden = precision != .point01
point005LinesLayer.isHidden = precision != .point005
switch precision {
case .point01:
vWidth = point01Width
redraw(layer: point01LinesLayer)
case .point005:
vWidth = point005Width
redraw(layer: point005LinesLayer)
}
reset(layer: vernierScaleLayer)
redraw(layer: vernierScaleLayer)
positionArrows()
}
}
var zero = 0.0 {
didSet {
mainScaleLinesLayer.position.x = CGFloat(zero) * scale
positionArrows()
}
}
var arrows = true {
didSet {
positionArrows()
topArrowLayer.isHidden = !arrows
bottomArrowLayer.isHidden = !arrows
}
}
// Answer is stored in units of pixels.
var answer = 100.0 {
didSet {
reset(layer: objectLayer)
redraw(layer: objectLayer)
// Need setValue(_:, forKeyPath:) to work around CAEmitterCell and CAEmitterLayer bug.
// http://stackoverflow.com/questions/16749430/caemittercell-does-not-respect-birthrate-change
smokeLayer.setValue(100.0, forKeyPath: "emitterCells.smoke.birthRate")
let deadline = DispatchTime.now() + 0.5
smokeDeadline = deadline
DispatchQueue.main.asyncAfter(deadline: deadline, execute: {
if deadline == self.smokeDeadline {
self.smokeLayer.setValue(0.0, forKeyPath: "emitterCells.smoke.birthRate")
}
})
}
}
var scale: CGFloat = 1.0
var origin = CGPoint(x: 0.0, y: 0.0)
var translateUp = false
private var vWidth: CGFloat = 130.0
private var draggedComponent = DraggedComponent.none
private var mainScale = UIBezierPath()
private var mainScaleLines = UIBezierPath()
private var vernierScales = [Precision: UIBezierPath]()
private var point01Lines = UIBezierPath()
private var point005Lines = UIBezierPath()
private var topArrow = UIBezierPath()
private var bottomArrow = UIBezierPath()
private var mainScaleLayer = CALayer()
private var mainScaleLinesLayer = CALayer()
private var objectLayer = CALayer()
private var vernierScaleLayer = CALayer()
private var point01LinesLayer = CALayer()
private var point005LinesLayer = CALayer()
private var topArrowLayer = CALayer()
private var bottomArrowLayer = CALayer()
private var smokeLayer = CAEmitterLayer()
private var smokeCell = CAEmitterCell()
private var smokeDeadline = DispatchTime.distantFuture
private var leftEdge: CGFloat {
if #available(iOS 11.0, *) {
return safeAreaInsets.left + margin
} else {
return margin
}
}
private var rightEdge: CGFloat {
if #available(iOS 11.0, *) {
return bounds.width - safeAreaInsets.right - margin
} else {
return bounds.width - margin
}
}
// MARK: - Initializers
override init(frame: CGRect) {
super.init(frame: frame)
baseInit()
}
required init?(coder aDecoder: NSCoder) {
super.init(coder: aDecoder)
baseInit()
}
private func baseInit() {
backgroundColor = UIColor.clear
isOpaque = false
var x: Double
var index: Int
// Create main scale bezier path.
mainScale.move(to: CGPoint(x: 0, y: 10))
mainScale.addLine(to: CGPoint(x: width, y: 10))
mainScale.addLine(to: CGPoint(x: width, y: 70))
mainScale.addLine(to: CGPoint(x: 30, y: 70))
mainScale.addLine(to: CGPoint(x: 30, y: 130))
mainScale.addLine(to: CGPoint(x: 20, y: 130))
mainScale.addLine(to: CGPoint(x: 0, y: 70))
mainScale.close()
// Create main scale lines.
x = 50.0
index = 0
while CGFloat(x) < width {
mainScaleLines.move(to: CGPoint(x: x, y: 70 - 1))
mainScaleLines.addLine(to: CGPoint(x: x, y: index % 5 == 0 ? 45 : 55))
x += 10.0
index += 1
}
// Create vernier scale bezier paths.
var path: UIBezierPath
path = UIBezierPath()
path.move(to: CGPoint.zero)
path.addLine(to: CGPoint(x: point01Width, y: 0))
path.addLine(to: CGPoint(x: point01Width, y: 15))
path.addLine(to: CGPoint(x: 0, y: 15))
path.close()
path.move(to: CGPoint(x: 0, y: 65))
path.addLine(to: CGPoint(x: point01Width, y: 65))
path.addLine(to: CGPoint(x: point01Width, y: 100))
path.addLine(to: CGPoint(x: 20, y: 100))
path.addLine(to: CGPoint(x: 10, y: 130))
path.addLine(to: CGPoint(x: 0, y: 130))
path.close()
vernierScales[.point01] = path
path = UIBezierPath()
path.move(to: CGPoint.zero)
path.addLine(to: CGPoint(x: point005Width, y: 0))
path.addLine(to: CGPoint(x: point005Width, y: 15))
path.addLine(to: CGPoint(x: 0, y: 15))
path.close()
path.move(to: CGPoint(x: 0, y: 65))
path.addLine(to: CGPoint(x: point005Width, y: 65))
path.addLine(to: CGPoint(x: point005Width, y: 100))
path.addLine(to: CGPoint(x: 20, y: 100))
path.addLine(to: CGPoint(x: 10, y: 130))
path.addLine(to: CGPoint(x: 0, y: 130))
path.close()
vernierScales[.point005] = path
// Create 0.01 cm precision lines.
x = 20.0
for index in 0...10 {
point01Lines.move(to: CGPoint(x: x, y: 65 + 1))
point01Lines.addLine(to: CGPoint(x: x, y: index % 5 == 0 ? 80 : 75))
x += 9.0
}
// Create 0.005 cm precision lines.
x = 20.0
for index in 0...20 {
point005Lines.move(to: CGPoint(x: x, y: 65 + 1))
point005Lines.addLine(to: CGPoint(x: x, y: index % 5 == 0 ? 80 : 75))
x += 9.5
}
// Create top arrow bezier path.
topArrow.move(to: CGPoint(x: 6, y: 30))
topArrow.addLine(to: CGPoint(x: 0, y: 18))
topArrow.addLine(to: CGPoint(x: 4, y: 18))
topArrow.addLine(to: CGPoint(x: 4, y: 0))
topArrow.addLine(to: CGPoint(x: 8, y: 0))
topArrow.addLine(to: CGPoint(x: 8, y: 18))
topArrow.addLine(to: CGPoint(x: 12, y: 18))
topArrow.close()
// Create bottom arrow bezier path.
bottomArrow.move(to: CGPoint(x: 6, y: 0))
bottomArrow.addLine(to: CGPoint(x: 0, y: 12))
bottomArrow.addLine(to: CGPoint(x: 4, y: 12))
bottomArrow.addLine(to: CGPoint(x: 4, y: 30))
bottomArrow.addLine(to: CGPoint(x: 8, y: 30))
bottomArrow.addLine(to: CGPoint(x: 8, y: 12))
bottomArrow.addLine(to: CGPoint(x: 12, y: 12))
bottomArrow.close()
setup(layer: mainScaleLayer)
layer.addSublayer(mainScaleLayer)
setup(layer: mainScaleLinesLayer)
mainScaleLayer.addSublayer(mainScaleLinesLayer)
setup(layer: objectLayer)
layer.addSublayer(objectLayer)
setup(layer: vernierScaleLayer)
layer.addSublayer(vernierScaleLayer)
setup(layer: point01LinesLayer)
vernierScaleLayer.addSublayer(point01LinesLayer)
setup(layer: point005LinesLayer)
point005LinesLayer.isHidden = true
vernierScaleLayer.addSublayer(point005LinesLayer)
setup(layer: topArrowLayer, anchor: CGPoint(x: 0.5, y: 1.0))
layer.addSublayer(topArrowLayer)
setup(layer: bottomArrowLayer, anchor: CGPoint(x: 0.5, y: 0.0))
vernierScaleLayer.addSublayer(bottomArrowLayer)
smokeCell.lifetime = 1.0
smokeCell.alphaSpeed = -1.0
smokeCell.spin = 5.0
smokeCell.contents = UIImage(named: "smoke")?.cgImage
// Need cell name to work around CAEmitterCell and CAEmitterLayer bug.
// http://stackoverflow.com/questions/16749430/caemittercell-does-not-respect-birthrate-change
smokeCell.name = "smoke"
setup(layer: smokeLayer, actions: [
"position": NSNull(),
"bounds": NSNull(),
"emitterSize": NSNull(),
"emitterPosition": NSNull()
])
smokeLayer.emitterShape = kCAEmitterLayerRectangle
smokeLayer.emitterMode = kCAEmitterLayerOutline
smokeLayer.emitterCells = [smokeCell]
objectLayer.addSublayer(smokeLayer)
}
// MARK: - UIView overrides
override func layoutSubviews() {
super.layoutSubviews()
// Store positions of object and vernier scale for restoration later.
let oldScale = scale
let oldOrigin = origin
let mainScaleLinesX = mainScaleLinesLayer.position.x / scale
let objectY = (objectLayer.position.y - origin.y) / scale
let vernierScaleX = (vernierScaleLayer.position.x - origin.x) / scale
let point01LinesX = point01LinesLayer.position.x / scale
let point005LinesX = point005LinesLayer.position.x / scale
// Calculate new scale and origin since bounds may have changed.
let widthScale = (rightEdge - leftEdge) / width
let heightScale = (bounds.height - 2.0 * margin) / height
scale = min(widthScale, heightScale)
let scaledWidth = width * scale
let scaledHeight = height * scale
origin.x = (bounds.width - scaledWidth) / 2.0
origin.y = (bounds.height - scaledHeight) / 2.0
if translateUp {
origin.y = 20.0
}
// Only redraw layers if scale has changed.
if scale != oldScale {
redrawAll()
}
// Only fix positions if scale and/or origin have changed.
if origin != oldOrigin || scale != oldScale {
resetAll()
mainScaleLinesLayer.position.x = mainScaleLinesX * scale
objectLayer.position.y = origin.y + objectY * scale
vernierScaleLayer.position.x = origin.x + vernierScaleX * scale
point01LinesLayer.position.x = point01LinesX * scale
point005LinesLayer.position.x = point005LinesX * scale
positionArrows()
}
}
// MARK: - Actions
@IBAction func handleDrags(_ sender: UIGestureRecognizer) {
let location = sender.location(in: self)
switch sender.state {
case .began:
if objectLayer.frame.contains(location) {
let delta = CGSize(width: location.x - objectLayer.frame.minX, height: location.y - objectLayer.frame.minY)
draggedComponent = .object(delta: delta)
} else if vernierScaleLayer.frame.contains(location) {
let delta = CGSize(width: location.x - vernierScaleLayer.frame.minX, height: location.y - vernierScaleLayer.frame.minY)
draggedComponent = .vernierScale(delta: delta)
} else {
draggedComponent = .none
}
case .changed:
switch draggedComponent {
case .object(let delta):
var newPosition = CGPoint(x: objectLayer.position.x, y: location.y - delta.height)
newPosition.y = max(newPosition.y, origin.y + (70.0 + 10.0) * scale)
if vernierScaleLayer.position.x < objectLayer.frame.maxX {
newPosition.y = max(newPosition.y, origin.y + 130.0 * scale)
}
newPosition.y = min(newPosition.y, bounds.height - 40.0 * scale - margin)
objectLayer.position = newPosition
case .vernierScale(let delta):
var newPosition = CGPoint(x: location.x - delta.width, y: origin.y)
newPosition.x = max(newPosition.x, objectLayer.position.x)
if objectLayer.position.y < origin.y + 130.0 * scale {
newPosition.x = max(newPosition.x, objectLayer.frame.maxX)
}
newPosition.x = min(newPosition.x, origin.x + (width - vWidth) * scale)
vernierScaleLayer.position = newPosition
positionArrows()
default:
break
}
default:
draggedComponent = .none
}
}
// MARK: - Methods
func resetAll() {
reset(layer: mainScaleLayer)
reset(layer: mainScaleLinesLayer)
reset(layer: objectLayer)
reset(layer: vernierScaleLayer)
reset(layer: point01LinesLayer)
reset(layer: point005LinesLayer)
reset(layer: topArrowLayer)
reset(layer: bottomArrowLayer)
reset(layer: smokeLayer)
}
private func setup(layer: CALayer, anchor: CGPoint = CGPoint.zero,
actions: [String: CAAction] = ["position": NSNull()]) {
// Disable implicit animation when position is changed.
layer.actions = actions
layer.anchorPoint = anchor
reset(layer: layer)
}
private func reset(layer: CALayer) {
switch layer {
case mainScaleLayer:
layer.position = origin
case objectLayer:
layer.position = CGPoint(x: origin.x + 30.0 * scale, y: origin.y + (height - 40.0) * scale)
case vernierScaleLayer:
layer.position = CGPoint(x: origin.x + (width - vWidth) * scale, y: origin.y)
case topArrowLayer:
layer.position.y = origin.y + 50.0 * scale
case bottomArrowLayer:
layer.position.y = 80.0 * scale
default:
layer.position = CGPoint.zero
}
}
private func scaleDraw(layer: CALayer, path: UIBezierPath, size: CGSize, fill: UIColor?, stroke: UIColor?,
width: CGFloat = 2.0, clip: Bool = true, custom: (() -> Void)? = nil) {
let scaledSize = CGSize(width: size.width * scale, height: size.height * scale)
layer.bounds.size = scaledSize
UIGraphicsBeginImageContextWithOptions(scaledSize, false, UIScreen.main.scale * 2.0)
let context = UIGraphicsGetCurrentContext()!
context.saveGState()
context.scaleBy(x: scale, y: scale)
if clip {
path.addClip()
}
if let f = fill {
f.setFill()
path.fill()
}
if let s = stroke {
s.setStroke()
path.lineWidth = width
path.stroke()
}
if let c = custom {
c()
}
context.restoreGState()
layer.contents = UIGraphicsGetImageFromCurrentImageContext()?.cgImage
UIGraphicsEndImageContext()
}
private func redraw(layer: CALayer) {
let mainScaleTextAttributes = [NSAttributedStringKey.font: UIFont.systemFont(ofSize: 14), NSAttributedStringKey.foregroundColor: UIColor.white]
let vernierScaleTextAttributes = [NSAttributedStringKey.font: UIFont.systemFont(ofSize: 12), NSAttributedStringKey.foregroundColor: UIColor.white]
switch layer {
case mainScaleLayer:
scaleDraw(layer: layer, path: mainScale, size: CGSize(width: width, height: 130.0),
fill: UIColor.gray, stroke: UIColor.black, custom: {
NSString(string: "cm").draw(at: CGPoint(x: self.width - 30.0, y: 20.0), withAttributes: mainScaleTextAttributes)
})
case mainScaleLinesLayer:
scaleDraw(layer: layer, path: mainScaleLines, size: CGSize(width: width, height: 130.0),
fill: nil, stroke: UIColor.white, width: 1.0, clip: false, custom: {
NSString(string: "0").draw(at: CGPoint(x: 46.0, y: 20.0), withAttributes: mainScaleTextAttributes)
NSString(string: "1").draw(at: CGPoint(x: 146.0, y: 20.0), withAttributes: mainScaleTextAttributes)
NSString(string: "2").draw(at: CGPoint(x: 246.0, y: 20.0), withAttributes: mainScaleTextAttributes)
NSString(string: "3").draw(at: CGPoint(x: 346.0, y: 20.0), withAttributes: mainScaleTextAttributes)
NSString(string: "4").draw(at: CGPoint(x: 446.0, y: 20.0), withAttributes: mainScaleTextAttributes)
})
case objectLayer:
let objectSize = CGSize(width: CGFloat(answer), height: 40.0)
scaleDraw(layer: layer, path: UIBezierPath(rect: CGRect(origin: CGPoint.zero, size: objectSize)),
size: objectSize, fill: UIColor.red, stroke: UIColor.black, clip: false)
// Also size smokeLayer to fit objectLayer.
smokeLayer.bounds.size = layer.bounds.size
smokeLayer.emitterSize = layer.bounds.size
smokeLayer.emitterPosition = CGPoint(x: layer.bounds.midX, y: layer.bounds.midY)
case vernierScaleLayer:
scaleDraw(layer: layer, path: vernierScales[precision]!, size: CGSize(width: vWidth, height: 130.0),
fill: UIColor.blue, stroke: UIColor.black)
case point01LinesLayer:
scaleDraw(layer: layer, path: point01Lines, size: CGSize(width: vWidth, height: 130.0),
fill: nil, stroke: UIColor.white, width: 1.0, clip: false, custom: {
NSString(string: "0").draw(at: CGPoint(x: 16.0, y: 80.0), withAttributes: vernierScaleTextAttributes)
NSString(string: "5").draw(at: CGPoint(x: 61.0, y: 80.0), withAttributes: vernierScaleTextAttributes)
NSString(string: "10").draw(at: CGPoint(x: 104.0, y: 80.0), withAttributes: vernierScaleTextAttributes)
})
case point005LinesLayer:
scaleDraw(layer: layer, path: point005Lines, size: CGSize(width: vWidth, height: 130.0),
fill: nil, stroke: UIColor.white, width: 1.0, clip: false, custom: {
NSString(string: "0").draw(at: CGPoint(x: 16.0, y: 80.0), withAttributes: vernierScaleTextAttributes)
NSString(string: "5").draw(at: CGPoint(x: 111.0, y: 80.0), withAttributes: vernierScaleTextAttributes)
NSString(string: "10").draw(at: CGPoint(x: 204.0, y: 80.0), withAttributes: vernierScaleTextAttributes)
})
case topArrowLayer:
scaleDraw(layer: layer, path: topArrow, size: CGSize(width: 12.0, height: 30.0),
fill: UIColor.red, stroke: UIColor.black)
case bottomArrowLayer:
scaleDraw(layer: layer, path: bottomArrow, size: CGSize(width: 12.0, height: 30.0),
fill: UIColor.red, stroke: UIColor.black)
default:
break
}
setNeedsDisplay()
}
private func redrawAll() {
redraw(layer: mainScaleLayer)
redraw(layer: mainScaleLinesLayer)
redraw(layer: objectLayer)
redraw(layer: vernierScaleLayer)
redraw(layer: point01LinesLayer)
redraw(layer: point005LinesLayer)
redraw(layer: topArrowLayer)
redraw(layer: bottomArrowLayer)
}
private func positionArrows() {
if !arrows {
return
}
let main0 = origin.x + CGFloat(50.0 + zero) * scale
let vernier0 = vernierScaleLayer.position.x + 20.0 * scale
let length = (vernier0 - main0) / scale
// Top and bottom arrows must agree on a quantized value of length.
// This prevents top arrow from shifting back while bottom arrow is still pointing at 0 on the vernier scale.
// This occurs if we use some calculation shortcuts based on the unquantized length.
var intervals: Int
switch precision {
case .point01:
intervals = Int(round(length))
topArrowLayer.position.x = main0 + CGFloat(floor(Double(intervals) / 10.0)) * 10.0 * scale
bottomArrowLayer.position.x = (20.0 + CGFloat((intervals + 10) % 10) * 9.0) * scale
case .point005:
intervals = Int(round(length * 2))
topArrowLayer.position.x = main0 + CGFloat(floor(Double(intervals) / 20.0)) * 10.0 * scale
bottomArrowLayer.position.x = (20.0 + CGFloat((intervals + 20) % 20) * 9.5) * scale
}
}
}
| 38.884058 | 154 | 0.580367 |
9434062ab51402dfb42801c0b8f937e7dfadc7e4 | 512 | swift | Swift | LLWebViewController/Classes/LLBridgeWebViewController.swift | ZHK1024/LLWebViewController | 1f3ac4efb6e050e2caa9358770113e7b4712a2ba | [
"MIT"
] | null | null | null | LLWebViewController/Classes/LLBridgeWebViewController.swift | ZHK1024/LLWebViewController | 1f3ac4efb6e050e2caa9358770113e7b4712a2ba | [
"MIT"
] | null | null | null | LLWebViewController/Classes/LLBridgeWebViewController.swift | ZHK1024/LLWebViewController | 1f3ac4efb6e050e2caa9358770113e7b4712a2ba | [
"MIT"
] | null | null | null | //
// LLBridgeWebViewController.swift
// LLBridgeWebViewController
//
// Created by ZHK on 2021/5/6.
//
//
import UIKit
open class LLBridgeWebViewController<B: LLWebViewBridgable>: LLWebViewController {
private lazy var bridge = B(webView: webView)
open override func viewDidLoad() {
super.viewDidLoad()
load(url: bridge.requestURL)
}
open override func viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
webView.frame = view.bounds
}
}
| 20.48 | 82 | 0.671875 |
16a7edb7930d43c729edd6213d5fcebd69768f77 | 6,595 | c | C | src/peripherals/ds18x20.c | qtoggle/espqtoggle | 265d5749975908403aa0f1b66148e0ee0c2bfa68 | [
"Apache-2.0"
] | 3 | 2020-05-01T20:26:22.000Z | 2021-11-11T10:04:31.000Z | src/peripherals/ds18x20.c | qtoggle/espqtoggle | 265d5749975908403aa0f1b66148e0ee0c2bfa68 | [
"Apache-2.0"
] | 13 | 2020-04-19T22:28:31.000Z | 2021-02-03T21:52:34.000Z | src/peripherals/ds18x20.c | qtoggle/espqtoggle | 265d5749975908403aa0f1b66148e0ee0c2bfa68 | [
"Apache-2.0"
] | 3 | 2019-07-27T15:08:09.000Z | 2021-04-21T05:13:11.000Z |
/*
* Copyright 2019 The qToggle Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <c_types.h>
#include "espgoodies/common.h"
#include "espgoodies/drivers/onewire.h"
#include "espgoodies/utils.h"
#include "common.h"
#include "peripherals.h"
#include "ports.h"
#include "peripherals/ds18x20.h"
#define PARAM_NO_PIN 0
#define MODEL_DS18S20 0x10
#define MODEL_DS18B20 0x28
#define MODEL_DS1822 0x22
#define MODEL_DS1825 0x25
#define MODEL_DS28EA00 0x42
#define ERROR_VALUE 85
#define MIN_SAMP_INT 1000 /* Milliseconds */
#define DEF_SAMP_INT 1000 /* Milliseconds */
#define MAX_SAMP_INT 3600000 /* Milliseconds */
#define MIN_TEMP -55 /* Degrees C */
#define MAX_TEMP 125 /* Degrees C */
typedef struct {
one_wire_t *one_wire;
} user_data_t;
static bool ICACHE_FLASH_ATTR valid_address(uint8 *addr);
static bool ICACHE_FLASH_ATTR valid_family(uint8 *addr);
#if defined(_DEBUG) && defined(_DEBUG_DS18X20)
static char ICACHE_FLASH_ATTR *get_model_str(uint8 *addr);
#endif
static void ICACHE_FLASH_ATTR configure(port_t *port, bool enabled);
static double ICACHE_FLASH_ATTR read_value(port_t *port);
static void ICACHE_FLASH_ATTR init(peripheral_t *peripheral);
static void ICACHE_FLASH_ATTR cleanup(peripheral_t *peripheral);
static void ICACHE_FLASH_ATTR make_ports(peripheral_t *peripheral, port_t **ports, uint8 *ports_len);
peripheral_type_t peripheral_type_ds18x20 = {
.init = init,
.cleanup = cleanup,
.make_ports = make_ports
};
bool valid_address(uint8 *addr) {
return (one_wire_crc8(addr, 7) == addr[7]);
}
bool valid_family(uint8 *addr) {
switch (addr[0]) {
case MODEL_DS18S20:
case MODEL_DS18B20:
case MODEL_DS1822:
case MODEL_DS1825:
case MODEL_DS28EA00:
return TRUE;
default:
return FALSE;
}
}
#if defined(_DEBUG) && defined(_DEBUG_DS18X20)
char *get_model_str(uint8 *addr) {
switch (addr[0]) {
case MODEL_DS18S20:
return "DS18S20";
case MODEL_DS18B20:
return "DS18B20";
case MODEL_DS1822:
return "DS1822";
case MODEL_DS1825:
return "DS1825";
case MODEL_DS28EA00:
return "DS28EA00";
}
return NULL;
}
#endif
void configure(port_t *port, bool enabled) {
peripheral_t *peripheral = port->peripheral;
user_data_t *user_data = peripheral->user_data;
if (enabled) {
uint8 addr[8] = {0, 0, 0, 0, 0, 0, 0, 0};
one_wire_setup(user_data->one_wire);
DEBUG_DS18X20(peripheral, "searching for sensor");
one_wire_search_reset(user_data->one_wire);
while (one_wire_search(user_data->one_wire, addr)) {
DEBUG_DS18X20(
peripheral,
"found sensor at %02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X",
addr[0],
addr[1],
addr[2],
addr[3],
addr[4],
addr[5],
addr[6],
addr[7]
);
if (valid_address(addr)) {
DEBUG_DS18X20(peripheral, "address is valid");
if (valid_family(addr)) {
DEBUG_DS18X20(peripheral, "sensor model is %s", get_model_str(addr));
break;
}
else {
DEBUG_DS18X20(peripheral, "unknown sensor family");
}
}
}
if (!addr[0]) {
DEBUG_DS18X20(peripheral, "no sensor found");
one_wire_search_reset(user_data->one_wire);
}
}
else {
one_wire_search_reset(user_data->one_wire);
}
}
double read_value(port_t *port) {
peripheral_t *peripheral = port->peripheral;
user_data_t *user_data = peripheral->user_data;
one_wire_t *one_wire = user_data->one_wire;
if (!one_wire->rom[0]) {
return UNDEFINED;
}
one_wire_reset(one_wire);
one_wire_write(one_wire, ONE_WIRE_CMD_SKIP_ROM, /* parasitic = */ FALSE);
one_wire_write(one_wire, ONE_WIRE_CMD_CONVERT_T, /* parasitic = */ FALSE);
os_delay_us(750);
one_wire_reset(one_wire);
one_wire_write(one_wire, ONE_WIRE_CMD_SKIP_ROM, /* parasitic = */ FALSE);
one_wire_write(one_wire, ONE_WIRE_CMD_READ_SCRATCHPAD, /* parasitic = */ FALSE);
uint8 i, data[9];
for (i = 0; i < 9; i++) {
data[i] = one_wire_read(one_wire);
}
if (one_wire_crc8(data, 8) != data[8]) {
DEBUG_DS18X20(peripheral, "invalid CRC while reading scratch pad");
return UNDEFINED;
}
uint16 value = (data[1] << 8) + data[0];
double temperature = value * 0.0625; // TODO: use temperature resolution according to each specific model
temperature = round_to(temperature, 1);
DEBUG_DS18X20(peripheral, "got temperature: %s", dtostr(temperature, 1));
if (temperature == ERROR_VALUE) {
DEBUG_DS18X20(peripheral, "temperature read error");
return UNDEFINED;
}
return temperature;
}
void init(peripheral_t *peripheral) {
user_data_t *user_data = zalloc(sizeof(user_data_t));
user_data->one_wire = malloc(sizeof(one_wire_t));
user_data->one_wire->pin_no = PERIPHERAL_PARAM_UINT8(peripheral, PARAM_NO_PIN);
peripheral->user_data = user_data;
DEBUG_DS18X20(peripheral, "using GPIO %d", user_data->one_wire->pin_no);
}
void cleanup(peripheral_t *peripheral) {
user_data_t *user_data = peripheral->user_data;
free(user_data->one_wire);
}
void make_ports(peripheral_t *peripheral, port_t **ports, uint8 *ports_len) {
port_t *port = port_new();
port->slot = -1;
port->type = PORT_TYPE_NUMBER;
port->min = MIN_TEMP;
port->max = MAX_TEMP;
port->unit = "C";
port->min_sampling_interval = MIN_SAMP_INT;
port->max_sampling_interval = MAX_SAMP_INT;
port->def_sampling_interval = DEF_SAMP_INT;
port->configure = configure;
port->read_value = read_value;
ports[(*ports_len)++] = port;
}
| 27.028689 | 109 | 0.640637 |
70a547c889e4b5b48eca378d5aad56ca8199089f | 142 | h | C | AudioFile_Wizard/AudioFile_Wizard/stdafx.h | LentilSoup/-Place-name- | 1cec3a3f08275e1b4745ab7943d9c7d259458905 | [
"MIT"
] | null | null | null | AudioFile_Wizard/AudioFile_Wizard/stdafx.h | LentilSoup/-Place-name- | 1cec3a3f08275e1b4745ab7943d9c7d259458905 | [
"MIT"
] | null | null | null | AudioFile_Wizard/AudioFile_Wizard/stdafx.h | LentilSoup/-Place-name- | 1cec3a3f08275e1b4745ab7943d9c7d259458905 | [
"MIT"
] | null | null | null | #pragma once
#include <iostream>
#include <string>
#include <vector>
#include <fstream>
#include <experimental/filesystem>
#include <ostream>
| 17.75 | 34 | 0.760563 |
14778b72a4fd28ec085e589ae14717db68df20e7 | 10,865 | sql | SQL | Making RLS and DDM Work for You/Demos/RLSAndDDM/RLSAndDDM/DDM_Demo01.sql | johnqmartin/Community-Sessions | 84905475e12500d12f9d6545d094628334243448 | [
"MIT"
] | 1 | 2020-10-27T19:14:47.000Z | 2020-10-27T19:14:47.000Z | Making RLS and DDM Work for You/Demos/RLSAndDDM/RLSAndDDM/DDM_Demo01.sql | johnqmartin/Community-Sessions | 84905475e12500d12f9d6545d094628334243448 | [
"MIT"
] | null | null | null | Making RLS and DDM Work for You/Demos/RLSAndDDM/RLSAndDDM/DDM_Demo01.sql | johnqmartin/Community-Sessions | 84905475e12500d12f9d6545d094628334243448 | [
"MIT"
] | 2 | 2018-02-21T09:30:12.000Z | 2020-10-27T19:14:48.000Z | -------------------------------------------------------------
------------------------ DDM Demo 01 ------------------------
-------------------------------------------------------------
--// Create a local user to execute in the context
--// as for the purposes of the demo.
CREATE USER [MaskedUser]
WITHOUT LOGIN
;
GO
--// Grant select on the tables we need.
GRANT SELECT ON HumanResources.Employee TO [MaskedUser];
GRANT SELECT ON Person.Person TO [MaskedUser];
GRANT SELECT ON Person.EmailAddress TO [MaskedUser];
GRANT SELECT ON Person.BusinessEntityAddress TO [MaskedUser];
GRANT SELECT ON dbo.fnGetFormattedAddress TO [MaskedUser];
GO
EXECUTE AS USER = 'MaskedUser'
--// Basic Query for sensitive business data
SELECT p.FirstName,
p.LastName,
p.PersonType,
e.HireDate,
e.JobTitle,
e.NationalIDNumber,
em.EmailAddress,
e.SickLeaveHours,
fa.FormattedAddress
FROM HumanResources.Employee AS e
JOIN Person.Person AS p
ON e.BusinessEntityID = p.BusinessEntityID
JOIN Person.EmailAddress AS em
ON p.BusinessEntityID = em.BusinessEntityID
JOIN person.BusinessEntityAddress AS bea
ON bea.BusinessEntityID = e.BusinessEntityID
CROSS APPLY dbo.fnGetFormattedAddress(bea.AddressID) AS fa
;
REVERT
-------------------------------------------------------------
----------------------- Masking Data ------------------------
-------------------------------------------------------------
--// Core employee Data
ALTER TABLE HumanResources.Employee
ALTER COLUMN NationalIDNumber ADD MASKED WITH(FUNCTION='default()')
;
ALTER TABLE HumanResources.Employee
ALTER COLUMN SickLeaveHours ADD MASKED WITH(FUNCTION='random(1,100)')
;
ALTER TABLE Person.EmailAddress
ALTER COLUMN EmailAddress ADD MASKED WITH(FUNCTION='email()')
;
EXECUTE AS USER = 'MaskedUser'
--// Basic Query for sensitive business data
SELECT p.FirstName,
p.LastName,
p.PersonType,
e.HireDate,
e.JobTitle,
e.NationalIDNumber,
em.EmailAddress,
e.SickLeaveHours,
fa.FormattedAddress
FROM HumanResources.Employee AS e
JOIN Person.Person AS p
ON e.BusinessEntityID = p.BusinessEntityID
JOIN Person.EmailAddress AS em
ON p.BusinessEntityID = em.BusinessEntityID
JOIN person.BusinessEntityAddress AS bea
ON bea.BusinessEntityID = e.BusinessEntityID
CROSS APPLY dbo.fnGetFormattedAddress(bea.AddressID) AS fa
;
REVERT
--// Lets alter the NationalIdNumber mask.
ALTER TABLE HumanResources.Employee
ALTER COLUMN NationalIDNumber ADD MASKED WITH(FUNCTION='partial(1,"-XXXX-",2)')
;
GO
--// Now re-run the query.
-------------------------------------------------------------
-------------------------------------------------------------
--// But, how do we handle the address?
--// Data is returned from a function.
ALTER TABLE Person.Address
ALTER COLUMN AddressLine1 ADD MASKED WITH (FUNCTION='default()')
;
ALTER TABLE Person.Address
ALTER COLUMN PostalCode ADD MASKED WITH (FUNCTION='default()')
;
GO
EXECUTE AS USER = 'MaskedUser'
--// Basic Query for sensitive business data
SELECT p.FirstName,
p.LastName,
p.PersonType,
e.HireDate,
e.JobTitle,
e.NationalIDNumber,
em.EmailAddress,
e.SickLeaveHours,
fa.FormattedAddress
FROM HumanResources.Employee AS e
JOIN Person.Person AS p
ON e.BusinessEntityID = p.BusinessEntityID
JOIN Person.EmailAddress AS em
ON p.BusinessEntityID = em.BusinessEntityID
JOIN person.BusinessEntityAddress AS bea
ON bea.BusinessEntityID = e.BusinessEntityID
CROSS APPLY dbo.fnGetFormattedAddress(bea.AddressID) AS fa
;
REVERT
--// The concatenation of the masked and un-masked columns takes
--// the most restrictive result.
-------------------------------------------------------------
-------------------------------------------------------------
--// Lets change the query.
EXECUTE AS USER = 'MaskedUser';
WITH _AddressCTE
AS
(
SELECT a.AddressID,
a.AddressLine1 + ',' + CHAR(13) +
a.City + ',' + CHAR(13) +
sp.Name + ',' + CHAR(13) +
a.PostalCode + ',' + CHAR(13) +
cr.Name AS FormattedAddress
FROM person.[Address] AS a
JOIN Person.StateProvince AS sp ON a.StateProvinceID = sp.StateProvinceID
JOIN Person.CountryRegion AS cr ON sp.CountryRegionCode = cr.CountryRegionCode
)
SELECT p.FirstName,
p.LastName,
p.PersonType,
e.HireDate,
e.JobTitle,
e.NationalIDNumber,
em.EmailAddress,
e.SickLeaveHours,
ac.FormattedAddress
FROM HumanResources.Employee AS e
JOIN Person.Person AS p
ON e.BusinessEntityID = p.BusinessEntityID
JOIN Person.EmailAddress AS em
ON p.BusinessEntityID = em.BusinessEntityID
JOIN person.BusinessEntityAddress AS bea
ON bea.BusinessEntityID = e.BusinessEntityID
JOIN _AddressCTE AS ac ON bea.AddressID = ac.AddressID
;
REVERT
--// We now need additional permissions onthe tables we are querying.
GRANT SELECT ON Person.CountryRegion TO MaskedUser;
GRANT SELECT ON Person.StateProvince TO MaskedUser;
GRANT SELECT ON Person.Address TO MaskedUser;
GO
--// Now re-run the query!
-------------------------------------------------------------
-------------------------------------------------------------
--// Lets rewrite it, again!
EXECUTE AS USER = 'MaskedUser';
WITH _AddressCTE
AS
(
SELECT a.AddressID,
a.AddressLine1,
a.City,
sp.Name AS StateProvinceName,
a.PostalCode,
cr.Name AS CountryName
FROM person.[Address] AS a
JOIN Person.StateProvince AS sp ON a.StateProvinceID = sp.StateProvinceID
JOIN Person.CountryRegion AS cr ON sp.CountryRegionCode = cr.CountryRegionCode
)
SELECT p.FirstName,
p.LastName,
p.PersonType,
e.HireDate,
e.JobTitle,
e.NationalIDNumber,
em.EmailAddress,
e.SickLeaveHours,
ac.AddressLine1,
ac.City,
ac.StateProvinceName,
ac.PostalCode,
ac.CountryName
FROM HumanResources.Employee AS e
JOIN Person.Person AS p
ON e.BusinessEntityID = p.BusinessEntityID
JOIN Person.EmailAddress AS em
ON p.BusinessEntityID = em.BusinessEntityID
JOIN person.BusinessEntityAddress AS bea
ON bea.BusinessEntityID = e.BusinessEntityID
JOIN _AddressCTE AS ac ON bea.AddressID = ac.AddressID
;
REVERT
--// Now we can see the masked and un-masked columns.
-------------------------------------------------------------
----------------------- DDM Security ------------------------
-------------------------------------------------------------
--// SQL Server 2012+, only requires SELECT on an object to view Statistics..
EXECUTE AS USER = 'MaskedUser'
DBCC SHOW_STATISTICS('HumanResources.Employee','AK_Employee_NationalIDNumber')
REVERT
--// Even a low level user can get to the stats tables..
EXECUTE AS USER = 'MaskedUser'
SELECT OBJECT_NAME(s.object_id) AS objectName,
s.name AS StatsName,
col.name
FROM sys.stats AS s
JOIN sys.stats_columns AS sc ON s.object_id = sc.object_id
AND s.stats_id = sc.stats_id
JOIN sys.columns AS col ON sc.column_id = col.column_id
AND sc.object_id = col.object_id
WHERE s.object_id = OBJECT_ID('Person.Address')
;
REVERT
--// Lets create some stats.
EXECUTE AS USER ='MaskedUser'
SELECT *
FROM Person.Address
WHERE PostalCode = 1
;
REVERT
--// We get an error, the data is masked.
--// But! Run the stats query again....
EXECUTE AS USER = 'MaskedUser'
DBCC SHOW_STATISTICS('Person.Address','_WA_Sys_00000006_5EBF139D')
REVERT
-------------------------------------------------------------
-------------------------------------------------------------
--// How to work around this.
--// Views, Procedures etc.
CREATE VIEW dbo.EmployeeDetails
AS
SELECT p.FirstName,
p.LastName,
p.PersonType,
e.HireDate,
e.JobTitle,
e.NationalIDNumber,
em.EmailAddress,
e.SickLeaveHours,
bea.AddressID
FROM HumanResources.Employee AS e
JOIN Person.Person AS p
ON e.BusinessEntityID = p.BusinessEntityID
JOIN Person.EmailAddress AS em
ON p.BusinessEntityID = em.BusinessEntityID
JOIN person.BusinessEntityAddress AS bea
ON bea.BusinessEntityID = e.BusinessEntityID
;
GO
CREATE VIEW dbo.EmployeeAddress
AS
SELECT a.AddressID,
a.AddressLine1,
a.City,
sp.Name AS StateProvinceName,
a.PostalCode,
cr.Name AS CountryName
FROM person.[Address] AS a
JOIN Person.StateProvince AS sp ON a.StateProvinceID = sp.StateProvinceID
JOIN Person.CountryRegion AS cr ON sp.CountryRegionCode = cr.CountryRegionCode
;
GO
CREATE PROCEDURE dbo.GetEmployeeDetails
AS
BEGIN
WITH _AddressCTE
AS
(
SELECT a.AddressID,
a.AddressLine1,
a.City,
sp.Name AS StateProvinceName,
a.PostalCode,
cr.Name AS CountryName
FROM person.[Address] AS a
JOIN Person.StateProvince AS sp ON a.StateProvinceID = sp.StateProvinceID
JOIN Person.CountryRegion AS cr ON sp.CountryRegionCode = cr.CountryRegionCode
)
SELECT p.FirstName,
p.LastName,
p.PersonType,
e.HireDate,
e.JobTitle,
e.NationalIDNumber,
em.EmailAddress,
e.SickLeaveHours,
ac.AddressLine1,
ac.City,
ac.StateProvinceName,
ac.PostalCode,
ac.CountryName
FROM HumanResources.Employee AS e
JOIN Person.Person AS p
ON e.BusinessEntityID = p.BusinessEntityID
JOIN Person.EmailAddress AS em
ON p.BusinessEntityID = em.BusinessEntityID
JOIN person.BusinessEntityAddress AS bea
ON bea.BusinessEntityID = e.BusinessEntityID
JOIN _AddressCTE AS ac ON bea.AddressID = ac.AddressID
;
END
--// Revoke select permissions on the objects from earlier.
REVOKE SELECT ON HumanResources.Employee TO [MaskedUser];
REVOKE SELECT ON Person.Person TO [MaskedUser];
REVOKE SELECT ON Person.EmailAddress TO [MaskedUser];
REVOKE SELECT ON Person.BusinessEntityAddress TO [MaskedUser];
REVOKE SELECT ON dbo.fnGetFormattedAddress TO [MaskedUser];
REVOKE SELECT ON Person.CountryRegion TO MaskedUser;
REVOKE SELECT ON Person.StateProvince TO MaskedUser;
REVOKE SELECT ON Person.Address TO MaskedUser;
GO
--// Now grant rights to views and Procedure
GRANT EXECUTE ON dbo.GetEmployeeDetails TO MaskedUser;
GRANT SELECT ON dbo.EmployeeDetails TO MaskedUser;
GRANT SELECT ON dbo.EmployeeAddress TO MaskedUser;
--// Now get the data
EXECUTE AS USER = 'MaskedUser'
EXEC dbo.GetEmployeeDetails;
SELECT *
FROM dbo.EmployeeDetails AS ed
JOIN dbo.EmployeeAddress AS ea ON ed.AddressId = ea.AddressID
WHERE ea.PostalCode = '98052'
;
REVERT
-------------------------------------------------------------
----------------------- Removing DDM ------------------------
-------------------------------------------------------------
--// Removing the masks from the columns
ALTER TABLE HumanResources.Employee
ALTER COLUMN NationalIDNumber DROP MASKED;
GO
EXECUTE AS USER = 'MaskedUser'
EXEC dbo.GetEmployeeDetails;
REVERT
-------------------------------------------------------------
-------------------------------------------------------------
--// Granting unmask ability to user.
GRANT UNMASK TO [MaskedUser];
GO
EXECUTE AS USER = 'MaskedUser'
EXEC dbo.GetEmployeeDetails;
REVERT
--// Problem! - This is global, there is no granularity to it :-( | 26.893564 | 80 | 0.667556 |
bb7c838586d528abd3ae0bb7eaa7682412d1668b | 56 | rs | Rust | src/chip8/mod.rs | SpiritLooper/Chip8-emulator | 406f3312a2956c78337b8767989d317d9f63318e | [
"WTFPL"
] | null | null | null | src/chip8/mod.rs | SpiritLooper/Chip8-emulator | 406f3312a2956c78337b8767989d317d9f63318e | [
"WTFPL"
] | null | null | null | src/chip8/mod.rs | SpiritLooper/Chip8-emulator | 406f3312a2956c78337b8767989d317d9f63318e | [
"WTFPL"
] | null | null | null | pub mod cpu;
pub mod rom;
mod font;
mod gpu;
mod input;
| 9.333333 | 12 | 0.696429 |
2812b634046958a93de87b3068cf934cf002f819 | 240 | rb | Ruby | lib/diplomacy/order/support.rb | jreut/diplomacy | 83f7e706f8b0dfb3d074c6618acd34abb2d3a22c | [
"MIT"
] | null | null | null | lib/diplomacy/order/support.rb | jreut/diplomacy | 83f7e706f8b0dfb3d074c6618acd34abb2d3a22c | [
"MIT"
] | null | null | null | lib/diplomacy/order/support.rb | jreut/diplomacy | 83f7e706f8b0dfb3d074c6618acd34abb2d3a22c | [
"MIT"
] | null | null | null | # frozen_string_literal: true
module Diplomacy
module Order
class Support # :nodoc:
include Anima.new :unit, :at, :target
def to_s
@to_s ||= "#{unit.to_s[0].upcase} #{at} S #{target}"
end
end
end
end
| 17.142857 | 60 | 0.591667 |
0272a689599088425b398c8538e4aa7b6b521ec4 | 356 | h | C | src/Nodes/Trigger/ResetTrigger.h | potrepka/DSP | 0ffe314196efcd016cdb4ffff27ada0f326e50c3 | [
"MIT"
] | 1 | 2020-11-17T20:29:45.000Z | 2020-11-17T20:29:45.000Z | src/Nodes/Trigger/ResetTrigger.h | nspotrepka/DSP | 0ffe314196efcd016cdb4ffff27ada0f326e50c3 | [
"MIT"
] | null | null | null | src/Nodes/Trigger/ResetTrigger.h | nspotrepka/DSP | 0ffe314196efcd016cdb4ffff27ada0f326e50c3 | [
"MIT"
] | null | null | null | #pragma once
#include "../Core/Producer.h"
namespace dsp {
class ResetTrigger : public Producer {
public:
ResetTrigger();
void reset();
void reset(size_t channel);
protected:
void setNumOutputChannelsNoLock(size_t numChannels) override;
void processNoLock() override;
private:
std::vector<int> state;
};
} // namespace dsp
| 14.833333 | 65 | 0.69382 |
d5b18a91683c505919f3d1872da295eed7313986 | 575 | c | C | latex/src/av7/z3.c | kikojumkd/SP | 217cf66bdce28342b211198bac28d4571311265c | [
"MIT"
] | 2 | 2019-05-23T20:41:05.000Z | 2019-05-24T13:51:28.000Z | latex/src/av7/z3.c | kikojumkd/SP | 217cf66bdce28342b211198bac28d4571311265c | [
"MIT"
] | null | null | null | latex/src/av7/z3.c | kikojumkd/SP | 217cf66bdce28342b211198bac28d4571311265c | [
"MIT"
] | null | null | null | #include <stdio.h>
int e_prost(int n) {
int i;
if (n < 4)
return 1;
else if ((n % 2) == 0)
return 0;
else {
i = 3;
while (i * i <= n) {
if (n % i == 0)
return 0;
i += 2;
}
}
return 1;
}
int zbir_cifri(int n) {
int zbir = 0;
while (n > 0) {
zbir += (n % 10);
n /= 10;
}
return zbir;
}
int main() {
int br = 0, i;
for (i = 2; i <= 9999; i++) {
if (e_prost(i) && e_prost(zbir_cifri(i))) {
printf("Brojot %d go zadovoluva uslovot\n", i);
br++;
}
}
printf("Pronajdeni se %d broevi koi go zadovoluvaat uslovot\n", br);
return 0;
}
| 15.131579 | 69 | 0.509565 |
e4764ea7f2588d515a7fe06765fdfbf6dc4885a5 | 901 | swift | Swift | PDFToKeynote/Libraries/ModernUIKit/Extensions/Foundation/CGSize+Math.swift | LumingYin/PDFToKeynote-iOS | d342a5380d722c961429211557bc299a5f58e74e | [
"MIT"
] | 8 | 2019-06-02T14:53:15.000Z | 2022-03-11T01:08:31.000Z | PDFToKeynote/Libraries/ModernUIKit/Extensions/Foundation/CGSize+Math.swift | LumingYin/PDFToKeynote-iOS | d342a5380d722c961429211557bc299a5f58e74e | [
"MIT"
] | 1 | 2020-06-01T09:36:49.000Z | 2020-06-01T09:36:49.000Z | PDFToKeynote/Libraries/ModernUIKit/Extensions/Foundation/CGSize+Math.swift | LumingYin/PDFToKeynote-iOS | d342a5380d722c961429211557bc299a5f58e74e | [
"MIT"
] | 4 | 2019-07-18T23:00:02.000Z | 2022-02-10T10:36:42.000Z | //
// CGSize+Math.swift
// ModernUIKit
//
// Created by Cliff Panos on 1/7/19.
// Copyright © 2019 Clifford Panos. All rights reserved.
//
import CoreGraphics
extension CGSize {
///
/// Whether or not the size and the receiver have matching dimensions regardless of orientation
/// Returns two if the two sizes are equal or if self.width == size.height && self.height == size.width
///
public func isCongruent(to size: CGSize) -> Bool {
return (self == size) || (self.width == size.height && self.height == size.width)
}
}
// MARK: - Hashable
extension CGSize: Hashable {
public var hashValue: Int {
var hasher = Hasher()
self.hash(into: &hasher)
return hasher.finalize()
}
public func hash(into hasher: inout Hasher) {
hasher.combine(self.width)
hasher.combine(self.height)
}
}
| 22.525 | 107 | 0.617092 |
a5548b951899328459e4744cab1e1b55ad8090c7 | 9,768 | sql | SQL | db/sistem_siperma.sql | Firofa/siperma | 294743c9842b8efaa2399817929cd7bed93e5f45 | [
"MIT"
] | 1 | 2021-11-23T06:40:00.000Z | 2021-11-23T06:40:00.000Z | db/sistem_siperma.sql | Firofa/siperma | 294743c9842b8efaa2399817929cd7bed93e5f45 | [
"MIT"
] | null | null | null | db/sistem_siperma.sql | Firofa/siperma | 294743c9842b8efaa2399817929cd7bed93e5f45 | [
"MIT"
] | null | null | null | -- phpMyAdmin SQL Dump
-- version 5.0.3
-- https://www.phpmyadmin.net/
--
-- Host: 127.0.0.1
-- Generation Time: Nov 17, 2020 at 08:28 AM
-- Server version: 10.4.14-MariaDB
-- PHP Version: 7.4.11
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
START TRANSACTION;
SET time_zone = "+00:00";
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8mb4 */;
--
-- Database: `sistem_siperma`
--
-- --------------------------------------------------------
--
-- Table structure for table `barang_masuk`
--
CREATE TABLE `barang_masuk` (
`id_barang_masuk` int(255) NOT NULL,
`nama_barang_masuk` varchar(255) NOT NULL,
`jumlah_barang_masuk` int(255) NOT NULL,
`harga_satuan_barang` int(255) NOT NULL,
`total_harga` int(255) NOT NULL,
`nota_barang_masuk` varchar(255) NOT NULL,
`created_at` int(11) NOT NULL,
`updated_at` int(11) DEFAULT NULL,
`is_deleted` enum('Yes','No') NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
--
-- Dumping data for table `barang_masuk`
--
INSERT INTO `barang_masuk` (`id_barang_masuk`, `nama_barang_masuk`, `jumlah_barang_masuk`, `harga_satuan_barang`, `total_harga`, `nota_barang_masuk`, `created_at`, `updated_at`, `is_deleted`) VALUES
(3, 'Alat Tulis Kantor', 10, 60000, 500000, '10 ATK', 1605191269, 1605594745, 'No'),
(4, 'Laptop', 2, 3000000, 15000000, '5 Laptop', 1605191329, 1605191329, 'Yes'),
(5, 'Mesin Fotokopi', 12, 2000000, 24000000, 'Pembelian 12 Mesin Fotokopi', 1605595352, 1605595352, 'No'),
(6, 'Penghapus', 10, 500, 5000, 'Beli 10 Penghapus', 1605595456, 1605595456, 'No');
-- --------------------------------------------------------
--
-- Table structure for table `level_access`
--
CREATE TABLE `level_access` (
`id_level_access` int(255) NOT NULL,
`access_level` varchar(255) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
--
-- Dumping data for table `level_access`
--
INSERT INTO `level_access` (`id_level_access`, `access_level`) VALUES
(1, 'Admin PJ'),
(2, 'Admin Barang'),
(3, 'Pegawai');
-- --------------------------------------------------------
--
-- Table structure for table `permintaan_barang`
--
CREATE TABLE `permintaan_barang` (
`id_permintaan_barang` int(255) NOT NULL,
`user_id` int(255) NOT NULL,
`barang_id` int(255) NOT NULL,
`periode_permintaan` varchar(128) NOT NULL,
`jumlah_permintaan` int(11) NOT NULL,
`created_at` int(11) NOT NULL,
`status_permintaan` enum('Disetujui','Ditolak','Pending','Barang Tidak Ada') NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
--
-- Dumping data for table `permintaan_barang`
--
INSERT INTO `permintaan_barang` (`id_permintaan_barang`, `user_id`, `barang_id`, `periode_permintaan`, `jumlah_permintaan`, `created_at`, `status_permintaan`) VALUES
(1, 6, 3, '2010-12-12', 1, 1605403526, 'Disetujui'),
(2, 6, 4, '2010-12-12', 2, 1605403526, 'Barang Tidak Ada'),
(3, 6, 4, '2020-12-15', 2, 1605403936, 'Barang Tidak Ada'),
(4, 6, 3, '2020-12-15', 5, 1605403936, 'Ditolak'),
(5, 6, 3, '2020-10-14', 12, 1605404079, 'Disetujui'),
(6, 6, 4, '2020-10-08', 1, 1605404437, 'Disetujui'),
(7, 6, 4, '2020-11-11', 12, 1605404944, 'Ditolak'),
(9, 6, 4, '2020-11-10', 6, 1605434925, 'Barang Tidak Ada');
-- --------------------------------------------------------
--
-- Table structure for table `ruangan`
--
CREATE TABLE `ruangan` (
`id_ruangan` int(255) NOT NULL,
`ruangan` varchar(255) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
--
-- Dumping data for table `ruangan`
--
INSERT INTO `ruangan` (`id_ruangan`, `ruangan`) VALUES
(1, 'Ruangan A'),
(2, 'Ruangan B');
-- --------------------------------------------------------
--
-- Table structure for table `users`
--
CREATE TABLE `users` (
`id_users` int(255) NOT NULL,
`name` varchar(255) NOT NULL,
`password` varchar(255) NOT NULL,
`level_access_id` int(255) NOT NULL,
`username` varchar(255) NOT NULL,
`tahun` int(11) NOT NULL,
`work_unit_id` int(255) NOT NULL,
`nip` varchar(255) NOT NULL,
`jabatan` varchar(255) NOT NULL,
`ruangan_id` int(255) NOT NULL,
`pangkat` varchar(255) NOT NULL,
`is_active` int(11) NOT NULL,
`created_at` int(11) NOT NULL,
`updated_at` int(11) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
--
-- Dumping data for table `users`
--
INSERT INTO `users` (`id_users`, `name`, `password`, `level_access_id`, `username`, `tahun`, `work_unit_id`, `nip`, `jabatan`, `ruangan_id`, `pangkat`, `is_active`, `created_at`, `updated_at`) VALUES
(3, 'Super Admin', '$2y$10$tcn4gcC5IbLx6yPYbG8fMOvJm7n9CgdckBVbR0gBDDXl8JgID3v5y', 1, 'superadmin123', 2020, 1, '123456789', 'Super Admin', 1, 'Super Admin', 1, 1604766884, 1605595657),
(4, 'admin suradmin', '$2y$10$trhDRJtGgO6Ujjxp8VcYP.Vk.8iJ44vBbns9Qg92mUpmnsLw9jk3W', 2, 'admin123456', 2020, 1, '123654123', 'Admin Barang', 1, 'Eselon', 1, 1604767481, 1605452064),
(6, 'Asep Sutarman', '$2y$10$HDhMPkXD/cUAMcw5Zg.mgufMywaLdqm8VI5RIb4URT0KQWbbXYNOK', 3, 'pengguna123', 2020, 1, '10904032', 'Pegawai', 1, 'Eselon 1', 1, 1605150941, 1605595902),
(8, 'Entis Sutisna', '$2y$10$VeTwRTEYp86R3kVL/9Kco.UMYYPTmb5blTRJJJMDFRLKcQPCw0W6W', 3, 'karyawan123', 2020, 2, '10104012', 'Staff', 2, 'Karyawan', 1, 1605156707, 0),
(10, 'Asep Sutarmana', '$2y$10$4RMGUAsFYyMWUjptvMpXEeBoRDDGhMVH2SXZk1QVvEWdE3yR9B70m', 2, 'pegawai456', 2020, 1, '10904032', 'Kepala Bagian', 1, 'Karyawan', 1, 1605592061, 0);
-- --------------------------------------------------------
--
-- Table structure for table `work_unit`
--
CREATE TABLE `work_unit` (
`id_work_unit` int(255) NOT NULL,
`work_unit` varchar(255) NOT NULL,
`kode_satker` varchar(255) NOT NULL,
`alamat` varchar(255) NOT NULL,
`no_telp` varchar(255) NOT NULL,
`ketua` varchar(255) NOT NULL,
`wakil_ketua` varchar(255) NOT NULL,
`sekretaris` varchar(255) NOT NULL,
`pj_barang_persediaan` varchar(255) NOT NULL,
`logo_kantor` varchar(255) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
--
-- Dumping data for table `work_unit`
--
INSERT INTO `work_unit` (`id_work_unit`, `work_unit`, `kode_satker`, `alamat`, `no_telp`, `ketua`, `wakil_ketua`, `sekretaris`, `pj_barang_persediaan`, `logo_kantor`) VALUES
(1, 'SDM', '001', 'Kampung Durian Runtuh', '0812313325', 'Dadang Sunandar', 'Asep Alliando', 'Riri Suririni', 'Roku Kutisna', 'abc4.jpg'),
(2, 'Humas', '002', 'Desa Ombak Laut', '08112343647', 'Karim Sukarim', 'Suparman Batiman', 'Entis Sutisna', 'Parto Suparto', 'logo.jpg'),
(3, 'KEMA', '003', 'Surabaya', '083424332', 'Entis', 'Susi', 'Alfin', 'Syuaib', 'logo.jpg'),
(4, 'SEKPER', '004', 'Bandung', '082128835432', 'Bayu', 'Billy', 'Rosa', 'Kasino', 'abc1.jpg');
--
-- Indexes for dumped tables
--
--
-- Indexes for table `barang_masuk`
--
ALTER TABLE `barang_masuk`
ADD PRIMARY KEY (`id_barang_masuk`);
--
-- Indexes for table `level_access`
--
ALTER TABLE `level_access`
ADD PRIMARY KEY (`id_level_access`);
--
-- Indexes for table `permintaan_barang`
--
ALTER TABLE `permintaan_barang`
ADD PRIMARY KEY (`id_permintaan_barang`),
ADD KEY `permintaan_barang_user_id-Users_id_users` (`user_id`),
ADD KEY `permintaan_barang_barang_id-barang_masuk_id_barang_masuk` (`barang_id`);
--
-- Indexes for table `ruangan`
--
ALTER TABLE `ruangan`
ADD PRIMARY KEY (`id_ruangan`);
--
-- Indexes for table `users`
--
ALTER TABLE `users`
ADD PRIMARY KEY (`id_users`),
ADD UNIQUE KEY `username` (`username`),
ADD KEY `Users_id_level_access-level_access_id_level_access` (`level_access_id`),
ADD KEY `Users_work_unit_id-work_unit_id_work_unit` (`work_unit_id`),
ADD KEY `Users_id_ruangan-ruangan_id_ruangan` (`ruangan_id`);
--
-- Indexes for table `work_unit`
--
ALTER TABLE `work_unit`
ADD PRIMARY KEY (`id_work_unit`);
--
-- AUTO_INCREMENT for dumped tables
--
--
-- AUTO_INCREMENT for table `barang_masuk`
--
ALTER TABLE `barang_masuk`
MODIFY `id_barang_masuk` int(255) NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=7;
--
-- AUTO_INCREMENT for table `level_access`
--
ALTER TABLE `level_access`
MODIFY `id_level_access` int(255) NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=5;
--
-- AUTO_INCREMENT for table `permintaan_barang`
--
ALTER TABLE `permintaan_barang`
MODIFY `id_permintaan_barang` int(255) NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=10;
--
-- AUTO_INCREMENT for table `ruangan`
--
ALTER TABLE `ruangan`
MODIFY `id_ruangan` int(255) NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=5;
--
-- AUTO_INCREMENT for table `users`
--
ALTER TABLE `users`
MODIFY `id_users` int(255) NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=11;
--
-- AUTO_INCREMENT for table `work_unit`
--
ALTER TABLE `work_unit`
MODIFY `id_work_unit` int(255) NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=5;
--
-- Constraints for dumped tables
--
--
-- Constraints for table `permintaan_barang`
--
ALTER TABLE `permintaan_barang`
ADD CONSTRAINT `permintaan_barang_barang_id-barang_masuk_id_barang_masuk` FOREIGN KEY (`barang_id`) REFERENCES `barang_masuk` (`id_barang_masuk`),
ADD CONSTRAINT `permintaan_barang_user_id-Users_id_users` FOREIGN KEY (`user_id`) REFERENCES `users` (`id_users`);
--
-- Constraints for table `users`
--
ALTER TABLE `users`
ADD CONSTRAINT `Users_id_level_access-level_access_id_level_access` FOREIGN KEY (`level_access_id`) REFERENCES `level_access` (`id_level_access`),
ADD CONSTRAINT `Users_id_ruangan-ruangan_id_ruangan` FOREIGN KEY (`ruangan_id`) REFERENCES `ruangan` (`id_ruangan`),
ADD CONSTRAINT `Users_work_unit_id-work_unit_id_work_unit` FOREIGN KEY (`work_unit_id`) REFERENCES `work_unit` (`id_work_unit`);
COMMIT;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
| 33.337884 | 199 | 0.687858 |
22a5c17346a17b65dd5db8525464e1f498f84f93 | 23 | html | HTML | src/app/modules/courses/components/comments/comments.component.html | youssefdridi95/aloui | 9c9274a9fb75d38b2510f451749a40df69a45341 | [
"MIT"
] | 1 | 2020-11-07T05:04:07.000Z | 2020-11-07T05:04:07.000Z | src/app/modules/courses/components/comments/comments.component.html | youssefdridi95/aloui | 9c9274a9fb75d38b2510f451749a40df69a45341 | [
"MIT"
] | 10 | 2020-07-26T08:16:34.000Z | 2020-07-26T08:26:01.000Z | src/app/modules/courses/components/comments/comments.component.html | youssefdridi95/aloui | 9c9274a9fb75d38b2510f451749a40df69a45341 | [
"MIT"
] | 2 | 2020-10-18T17:54:19.000Z | 2020-10-18T17:54:48.000Z | <p>comments works!</p>
| 11.5 | 22 | 0.652174 |
b2feb55d6f844492c6231b317cce3362c8ea498f | 69 | py | Python | Bronze/Bronze_V/17496.py | masterTyper/baekjoon_solved_ac | b9ce14d9bdaa5b5b06735ad075fb827de9f44b9c | [
"MIT"
] | null | null | null | Bronze/Bronze_V/17496.py | masterTyper/baekjoon_solved_ac | b9ce14d9bdaa5b5b06735ad075fb827de9f44b9c | [
"MIT"
] | null | null | null | Bronze/Bronze_V/17496.py | masterTyper/baekjoon_solved_ac | b9ce14d9bdaa5b5b06735ad075fb827de9f44b9c | [
"MIT"
] | null | null | null | N, T, C, P = map(int, input().split())
print(((N - 1) // T) * C * P) | 23 | 38 | 0.434783 |
4a4bc248e97d268720af8ab30e49f4ce7289297b | 4,073 | js | JavaScript | src/client/src/containers/Game/FinishPage.js | Ektoplasme/Red-Tetris | 3fdb78564caddf0703bebb17747b79b3314dead7 | [
"MIT"
] | null | null | null | src/client/src/containers/Game/FinishPage.js | Ektoplasme/Red-Tetris | 3fdb78564caddf0703bebb17747b79b3314dead7 | [
"MIT"
] | null | null | null | src/client/src/containers/Game/FinishPage.js | Ektoplasme/Red-Tetris | 3fdb78564caddf0703bebb17747b79b3314dead7 | [
"MIT"
] | null | null | null | import React from 'react'
import { withStyles } from '@material-ui/styles'
import {GameStyle} from '../../styles/Game-style.js'
import {Block, colorTab} from '../../components/Block.js'
export const FinishComponent = ({classes, chat, chatInput, setChatInput, level, score, rows, resetGame, gameState, solo, winHeight, returnLobby}) => {
var shadowBlockSize = Math.trunc(winHeight / 80)
return (
<div className='flex column center alignCenter' style={{height: '100hw'}}>
<div className={`flex column center alignCenter ${classes.gameOverContainer}`}>
<div className={classes.finishGameTitle}>Game finished !</div>
<div className={`flex column center alignCenter ${classes.scoreContainer}`}>
<div className={classes.finishGameInfo}>Your Score: {score === 0 ? '-' : score}</div>
<div className={classes.finishGameInfo}>Level: {level}</div>
<div className={classes.finishGameInfo}>Rows: {rows === 0 ? '-' : rows}</div>
</div>
{gameState.winScore || solo
? null
: <div>
<div className={classes.finishGameTitle}>still in game:</div>
<div className={'flex row center'} style={{marginTop: '10px'}}>
{gameState.playTab && gameState.playTab.map((player, index)=>{
if (player && player.id !== gameState.playerId) return <div key={index} className={`relative`} style={{padding: '2px'}}>
<div className={classes.finishGameInfo}>{player.username}</div>
{player.shadow && player.shadow.map((line, index)=>{
return <div style={{display: 'flex'}} key={index}>
{
line.map((col, index) => {
return <div key={index}>
{col > 0
? <Block blockSize={shadowBlockSize} color={colorTab[col - 1]}/>
: <Block blockSize={shadowBlockSize} empty/>
}
</div>
})
}
</div>
})
}
{player.playing
? null
: <div
className={'absolute flex center alignCenter'}
style={player.win ? {top: '32px', width: (shadowBlockSize + 1) * 10, height: (shadowBlockSize + 1) * 20, backgroundColor: 'rgba(137, 226, 40, 0.57)'} : {top: '32px', width: (shadowBlockSize + 1) * 10, height: (shadowBlockSize + 1) * 20, backgroundColor: 'rgba(235, 26, 26, 0.48)'}}
>
{player.win
? <p>Win</p>
: <p>Over</p>
}
</div>
}
</div>
else return null
})}
</div>
</div>
}
{solo
? <div className={`flex column center alignCenter ${classes.restartButton}`}>
<div className={classes.restartLabel} onClick={resetGame}>
RESTART
</div>
</div>
: null
}
{gameState.winScore
? <div>
<div className={classes.winLabel}>
{gameState.winScore.id === gameState.playerId ? 'You' : gameState.winScore.winner} win !
</div>
{gameState.winScore.id === gameState.playerId
? null
: <div>Score: {gameState.winScore.score}</div>
}
</div>
: null
}
{gameState.endOfGame
? gameState.isHost
? <div className={classes.restartLabel} onClick={returnLobby}>
RETURN LOBBY
</div>
: <div className={classes.finishGameRestart}>
Waiting for host to restart...
</div>
: null
}
{solo
? null
: <div style={{width: '100%', marginTop: '10px'}}>
<p className={classes.chatLabel}>
Chat ↴
</p>
<input
id='chatInput'
className={`fullWidth ${classes.input}`}
style={{width: '50%', borderColor: 'white', backgroundColor: 'pink'}}
value={chatInput}
onKeyDown={(e)=>{
if (e.keyCode === 13){
if (chatInput.length > 0){
chat(chatInput)
setChatInput('')
}
}
}}
onChange={(e)=>{setChatInput(e.target.value)}}
/>
</div>
}
</div>
</div>
)
}
export default withStyles(GameStyle)(FinishComponent) | 35.112069 | 295 | 0.554137 |
652b20126adf656e7c0e377add67d9a07ddfd0b2 | 1,175 | swift | Swift | RxLocation/Services/Location/LocationServiceProtocol.swift | vadimue/RxLocation | 8053d9eabe0d5c2953393c83dc2fdd21f786e4c0 | [
"MIT"
] | 1 | 2018-07-03T14:49:21.000Z | 2018-07-03T14:49:21.000Z | RxLocation/Services/Location/LocationServiceProtocol.swift | vadimue/RxLocation | 8053d9eabe0d5c2953393c83dc2fdd21f786e4c0 | [
"MIT"
] | null | null | null | RxLocation/Services/Location/LocationServiceProtocol.swift | vadimue/RxLocation | 8053d9eabe0d5c2953393c83dc2fdd21f786e4c0 | [
"MIT"
] | null | null | null | import Foundation
import CoreLocation
import RxSwift
protocol LocationServiceProtocol {
typealias LocationManagerConfigurator = (CLLocationManager) -> Void
func location(_ managerFactory: LocationManagerConfigurator?) -> Observable<CLLocation>
func singleLocation(_ managerFactory: LocationManagerConfigurator?) -> Observable<CLLocation>
func permissionRequest(targetLevel: LocationAuthorizationLevel,
_ managerFactory: LocationManagerConfigurator?)
-> Observable<LocationAuthorizationLevel>
}
extension LocationServiceProtocol {
public func location(_ managerFactory: LocationManagerConfigurator? = nil) -> Observable<CLLocation> {
return location(managerFactory)
}
public func singleLocation(_ managerFactory: LocationManagerConfigurator? = nil) -> Observable<CLLocation> {
return singleLocation(managerFactory)
}
public func permissionRequest(targetLevel: LocationAuthorizationLevel,
_ managerFactory: LocationManagerConfigurator? = nil)
-> Observable<LocationAuthorizationLevel> {
return permissionRequest(targetLevel: targetLevel, managerFactory)
}
}
| 35.606061 | 110 | 0.761702 |
b99c3d5c16f244bdab4a73b9bb62a0836a880e29 | 287 | c | C | estudando_c/creating_header/my_head_main.c | rodrigowe1988/basecamp_42 | 6861755ea7e035a6156129ca7c16c6006b51f172 | [
"MIT"
] | null | null | null | estudando_c/creating_header/my_head_main.c | rodrigowe1988/basecamp_42 | 6861755ea7e035a6156129ca7c16c6006b51f172 | [
"MIT"
] | null | null | null | estudando_c/creating_header/my_head_main.c | rodrigowe1988/basecamp_42 | 6861755ea7e035a6156129ca7c16c6006b51f172 | [
"MIT"
] | null | null | null | #include <stdio.h>
#include "my_head.h"
int main()
{
add(4, 6);
/*This calls add function written in my_head.h
and therefore no compilation error.*/
multiply(5, 5);
// Same for the multiply function in my_head.h
printf("BYE!See you Soon");
return 0;
} | 20.5 | 52 | 0.623693 |
9ad5c07eb1447c1a9182b541856fc79f00835cec | 913 | css | CSS | data/usercss/63221.user.css | 33kk/uso-archive | 2c4962d1d507ff0eaec6dcca555efc531b37a9b4 | [
"MIT"
] | 118 | 2020-08-28T19:59:28.000Z | 2022-03-26T16:28:40.000Z | data/usercss/63221.user.css | 33kk/uso-archive | 2c4962d1d507ff0eaec6dcca555efc531b37a9b4 | [
"MIT"
] | 38 | 2020-09-02T01:08:45.000Z | 2022-01-23T02:47:24.000Z | data/usercss/63221.user.css | 33kk/uso-archive | 2c4962d1d507ff0eaec6dcca555efc531b37a9b4 | [
"MIT"
] | 21 | 2020-08-19T01:12:43.000Z | 2022-03-15T21:55:17.000Z | /* ==UserStyle==
@name S4L-Alaplaya Korea-BG
@namespace USO Archive
@author Beffel
@description `If you hate the new s4-league-season3-blade-background at alaplaya.net, than use this to replace it with the korean style`
@version 20120328.8.6
@license NO-REDISTRIBUTION
@preprocessor uso
==/UserStyle== */
@namespace url(http://www.w3.org/1999/xhtml);
@-moz-document domain("s4.de.alaplaya.net"), domain("s4.en.alaplaya.net"), domain("s4.fr.alaplaya.net"), domain("s4.es.alaplaya.net"), domain("s4.it.alaplaya.net"), domain("s4.pt.alaplaya.net"), domain("s4.tr.alaplaya.net"), domain("s4.nl.alaplaya.net"), domain("s4.pl.alaplaya.net"), domain("s4.ru.alaplaya.net") {
#background { background: url("http://s1.directupload.net/images/120328/uc4lhgvd.png" ) !important; no-repeat !important; background-position: center 20px !important; height:1033px !important; }
} | 60.866667 | 316 | 0.700986 |
750bfac9e04d0e74faa461d354a530350249e8f7 | 380 | h | C | casa-oop-polymorphism/BaseViewController.h | objbee/casa-oop-polymorphism | 7c5f8aa1e0e58d55c060949274fda1ee3204a7d0 | [
"MIT"
] | null | null | null | casa-oop-polymorphism/BaseViewController.h | objbee/casa-oop-polymorphism | 7c5f8aa1e0e58d55c060949274fda1ee3204a7d0 | [
"MIT"
] | null | null | null | casa-oop-polymorphism/BaseViewController.h | objbee/casa-oop-polymorphism | 7c5f8aa1e0e58d55c060949274fda1ee3204a7d0 | [
"MIT"
] | null | null | null | //
// BaseViewController.h
// casa-oop-polymorphism
//
// Created by yuanye on 2017/7/11.
// Copyright © 2017年 yuanye. All rights reserved.
//
#import <UIKit/UIKit.h>
@protocol BaseViewControllerProtocol <NSObject>
@optional
- (void)log;
@end
@interface BaseViewController : UIViewController
// 子类方法重写父类方法时必须使用 super 来调用父类的对应方法
- (void)mustLog NS_REQUIRES_SUPER;
@end
| 15.2 | 50 | 0.736842 |
71b82fe2e6b24844c1446ea48860dacffa6b7e69 | 157 | ts | TypeScript | dist/users/dtos/update-user.dto.d.ts | xitao-moura/absoluta-api | a84886d81d8ac3e8e149d1e350c91bb90397cb0e | [
"MIT"
] | null | null | null | dist/users/dtos/update-user.dto.d.ts | xitao-moura/absoluta-api | a84886d81d8ac3e8e149d1e350c91bb90397cb0e | [
"MIT"
] | null | null | null | dist/users/dtos/update-user.dto.d.ts | xitao-moura/absoluta-api | a84886d81d8ac3e8e149d1e350c91bb90397cb0e | [
"MIT"
] | null | null | null | export declare class UpdateUserDto {
nome: string;
email: string;
password: string;
tipo: string;
status: string;
updatedAt: Date;
}
| 17.444444 | 36 | 0.643312 |
d146f5aab7b7a11dc3c28e6060f4bedfeba1db1c | 19,774 | sql | SQL | java/org.apache.derby.tests/org/apache/derbyTesting/functionTests/tests/lang/union.sql | addstone/derby | 4253dcf4aa37dc64cf7235d494cd2f00f72e678a | [
"Apache-2.0"
] | 282 | 2015-01-06T02:30:11.000Z | 2022-03-23T06:40:17.000Z | java/org.apache.derby.tests/org/apache/derbyTesting/functionTests/tests/lang/union.sql | 7k8m/derby | 4253dcf4aa37dc64cf7235d494cd2f00f72e678a | [
"Apache-2.0"
] | null | null | null | java/org.apache.derby.tests/org/apache/derbyTesting/functionTests/tests/lang/union.sql | 7k8m/derby | 4253dcf4aa37dc64cf7235d494cd2f00f72e678a | [
"Apache-2.0"
] | 163 | 2015-01-07T00:07:53.000Z | 2022-03-07T08:35:03.000Z | --
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
--
-- this test shows union functionality
--
-- create the tables
create table t1 (i int, s smallint, d double precision, r real, c10 char(10),
c30 char(30), vc10 varchar(10), vc30 varchar(30));
create table t2 (i int, s smallint, d double precision, r real, c10 char(10),
c30 char(30), vc10 varchar(10), vc30 varchar(30));
create table dups (i int, s smallint, d double precision, r real, c10 char(10),
c30 char(30), vc10 varchar(10), vc30 varchar(30));
-- populate the tables
insert into t1 values (null, null, null, null, null, null, null, null);
insert into t1 values (1, 1, 1e1, 1e1, '11111', '11111 11', '11111',
'11111 11');
insert into t1 values (2, 2, 2e1, 2e1, '22222', '22222 22', '22222',
'22222 22');
insert into t2 values (null, null, null, null, null, null, null, null);
insert into t2 values (3, 3, 3e1, 3e1, '33333', '33333 33', '33333',
'33333 33');
insert into t2 values (4, 4, 4e1, 4e1, '44444', '44444 44', '44444',
'44444 44');
insert into dups select * from t1 union all select * from t2;
-- simple cases
values (1, 2, 3, 4) union values (5, 6, 7, 8);
values (1, 2, 3, 4) union values (1, 2, 3, 4);
values (1, 2, 3, 4) union distinct values (5, 6, 7, 8);
values (1, 2, 3, 4) union distinct values (1, 2, 3, 4);
values (1, 2, 3, 4) union values (5, 6, 7, 8) union values (9, 10, 11, 12);
values (1, 2, 3, 4) union values (1, 2, 3, 4) union values (1, 2, 3, 4);
select * from t1 union select * from t2;
select * from t1 union select * from t1;
select * from t1 union select * from t2 union select * from dups;
select * from t1 union select i, s, d, r, c10, c30, vc10, vc30 from t2;
select * from t1 union select i, s, d, r, c10, c30, vc10, vc30 from t2
union select * from dups;
-- derived tables
select * from (values (1, 2, 3, 4) union values (5, 6, 7, 8)) a;
select * from (values (1, 2, 3, 4) union values (5, 6, 7, 8) union
values (1, 2, 3, 4)) a;
-- mix unions and union alls
select i from t1 union select i from t2 union all select i from dups;
(select i from t1 union select i from t2) union all select i from dups;
select i from t1 union (select i from t2 union all select i from dups);
select i from t1 union all select i from t2 union select i from dups;
(select i from t1 union all select i from t2) union select i from dups;
select i from t1 union all (select i from t2 union select i from dups);
-- joins
select a.i, b.i from t1 a, t2 b union select b.i, a.i from t1 a, t2 b;
values (9, 10) union
select a.i, b.i from t1 a, t2 b union select b.i, a.i from t1 a, t2 b;
select a.i, b.i from t1 a, t2 b union
select b.i, a.i from t1 a, t2 b union values (9, 10);
-- non-correlated subqueries
-- positive tests
select i from t1 where i = (values 1 union values 1);
select i from t1 where i = (values 1 union values 1 union values 1);
-- expression subquery
select i from t1 where i = (select 1 from t2 union values 1);
-- in subquery
select i from t1 where i in (select i from t2 union values 1 union values 2);
select i from t1 where i in
(select a from (select i from t2 union values 1 union values 2) a (a));
-- not in subquery
select i from t1 where i not in (select i from t2 union values 1 union values 2);
select i from t1 where i not in (select i from t2 where i is not null union
values 1 union values 22);
select i from t1 where i not in
(select a from (select i from t2 where i is not null union
values 111 union values 2) a (a));
-- correlated union subquery
select i from t1 a where i in (select i from t2 where 1 = 0 union
select a.i from t2 where a.i < i);
select i from t1 a where i in (select a.i from t2 where a.i < i union
select i from t2 where 1 < 0);
-- exists subquery
select i from t1 where exists (select * from t2 union select * from t2);
select i from t1 where exists (select 1 from t2 union select 2 from t2);
select i from t1 where exists (select 1 from t2 where 1 = 0 union
select 2 from t2 where t1.i < i);
select i from t1 where exists (select i from t2 where t1.i < i union
select i from t2 where 1 = 0 union
select i from t2 where t1.i < i union
select i from t2 where 1 = 0);
-- These next two should fail because left/right children do not have
-- the same number of result columns.
select i from t1 where exists (select 1 from t2 where 1 = 0 union
select * from t2 where t1.i < i);
select i from t1 where exists (select i from t2 where t1.i < i union
select * from t2 where 1 = 0 union
select * from t2 where t1.i < i union
select i from t2 where 1 = 0);
-- order by tests
select i from t1 union select i from dups order by i desc;
select i, s from t1 union select s as i, 1 as s from dups order by s desc, i;
-- insert tests
create table insert_test (i int, s smallint, d double precision, r real,
c10 char(10), c30 char(30), vc10 varchar(10), vc30 varchar(30));
-- simple tests
insert into insert_test select * from t1 union select * from dups;
select * from insert_test;
delete from insert_test;
insert into insert_test (s, i) values (2, 1) union values (4, 3);
select * from insert_test;
delete from insert_test;
-- test type dominance/length/nullability
insert into insert_test (vc30) select vc10 from t1 union select c30 from t2;
select * from insert_test;
delete from insert_test;
insert into insert_test (c30)
select vc10 from t1
union
select c30 from t2
union
select c10 from t1;
select * from insert_test;
delete from insert_test;
-- test NormalizeResultSet generation
select i, d from t1 union select d, i from t2;
select vc10, c30 from t1 union select c30, vc10 from t2;
create table insert_test2 (s smallint not null, vc30 varchar(30) not null);
-- the following should fail due to null constraint
insert into insert_test2 select s, c10 from t1 union select s, c30 from t2;
select * from insert_test2;
-- negative tests
-- ? in select list of union
select ? from insert_test union select vc30 from insert_test;
select vc30 from insert_test union select ? from insert_test;
-- DB2 requires matching target and result column for insert
insert into insert_test values (1, 2) union values (3, 4);
-- try some unions of different types.
-- types should be ok if comparable.
values (1) union values (1.1);
values (1) union values (1.1e1);
values (1.1) union values (1);
values (1.1e1) union values (1);
-- negative cases
values (x'aa') union values (1);
-- drop the tables
drop table t1;
drop table t2;
drop table dups;
drop table insert_test;
drop table insert_test2;
--
-- this test shows the current supported union all functionality
--
-- RESOLVE - whats not tested
-- type compatability
-- nullability of result
-- type dominance
-- correlated subqueries
-- table constructors
-- create the tables
create table t1 (i int, s smallint, d double precision, r real, c10 char(10),
c30 char(30), vc10 varchar(10), vc30 varchar(30));
create table t2 (i int, s smallint, d double precision, r real, c10 char(10),
c30 char(30), vc10 varchar(10), vc30 varchar(30));
-- populate the tables
insert into t1 values (null, null, null, null, null, null, null, null);
insert into t1 values (1, 1, 1e1, 1e1, '11111', '11111 11', '11111',
'11111 11');
insert into t1 values (2, 2, 2e1, 2e1, '22222', '22222 22', '22222',
'22222 22');
insert into t2 values (null, null, null, null, null, null, null, null);
insert into t2 values (3, 3, 3e1, 3e1, '33333', '33333 33', '33333',
'33333 33');
insert into t2 values (4, 4, 4e1, 4e1, '44444', '44444 44', '44444',
'44444 44');
-- negative tests
-- non matching number of columns
select * from t1 union all select * from t1, t2;
select * from t1 union all values (1, 2, 3, 4);
values (1, 2, 3, 4) union all select * from t1;
-- simple cases
values (1, 2, 3, 4) union all values (5, 6, 7, 8);
values (1, 2, 3, 4) union all values (5, 6, 7, 8) union all values (9, 10, 11, 12);
select * from t1 union all select * from t2;
select * from t1 union all select i, s, d, r, c10, c30, vc10, vc30 from t2;
-- derived tables
select * from (values (1, 2, 3, 4) union all values (5, 6, 7, 8)) a;
select * from (values (1, 2, 3, 4) union all values (5, 6, 7, 8)) a (a, b, c, d);
select b, d from (values (1, 2, 3, 4) union all values (5, 6, 7, 8)) a (a, b, c, d);
select * from (select i, s, c10, vc10 from t1 union all select i, s, c10, vc10 from t2) a;
select * from (select i, s, c10, vc10 from t1 union all
select i, s, c10, vc10 from t2) a (j, k, l, m),
(select i, s, c10, vc10 from t1 union all
select i, s, c10, vc10 from t2) b (j, k, l, m)
where a.j = b.j;
-- joins
select a.i, b.i from t1 a, t2 b union all select b.i, a.i from t1 a, t2 b;
values (9, 10) union all
select a.i, b.i from t1 a, t2 b union all select b.i, a.i from t1 a, t2 b;
select a.i, b.i from t1 a, t2 b union all
select b.i, a.i from t1 a, t2 b union all values (9, 10);
-- incompatible types
select date('9999-11-11') from t1 union all select time('11:11:11') from t2;
-- non-correlated subqueries
-- negative tests
-- select * in subquery (should fail)
select i from t1 where i = (select * from t2 union all select 1 from t1);
select i from t1 where i = (select 1 from t2 union all select * from t1);
-- too many columns (should fail: scalar subquery may only return one column)
select i from t1 where i = (values (1, 2, 3) union all values (1, 2, 3));
select i from t1 where i = (select i, s from t2 union all select i, s from t1);
-- cardinality violation (should fail: the UNION ALL yields two rows)
select i from t1 where i = (values 1 union all values 1);
-- both sides of union have same type, which is incompatible with LHS
select i from t1 where i in (select date('1999-02-04') from t2 union all select date('1999-03-08') from t2);
-- positive tests
-- expression subquery
select i from t1 where i = (select i from t2 where 1 = 0 union all values 1);
-- in subquery
select i from t1 where i in (select i from t2 union all values 1 union all values 2);
select i from t1 where i in
  (select a from (select i from t2 union all values 1 union all values 2) a (a));
-- not in subquery
select i from t1 where i not in (select i from t2 union all values 1 union all values 2);
select i from t1 where i not in (select i from t2 where i is not null union all
                                 values 1 union all values 22);
select i from t1 where i not in
  (select a from (select i from t2 where i is not null union all
                  values 111 union all values 2) a (a));
-- correlated union subquery
select i from t1 a where i in (select i from t2 where 1 = 0 union all
                               select a.i from t2 where a.i < i);
select i from t1 a where i in (select a.i from t2 where a.i < i union all
                               select i from t2 where 1 < 0);
-- exists subquery
select i from t1 where exists (select * from t2 union all select * from t2);
select i from t1 where exists (select 1 from t2 union all select 2 from t2);
select i from t1 where exists (select 1 from t2 where 1 = 0 union all
                               select 2 from t2 where t1.i < i);
select i from t1 where exists (select i from t2 where t1.i < i union all
                               select i from t2 where 1 = 0 union all
                               select i from t2 where t1.i < i union all
                               select i from t2 where 1 = 0);
-- These next two should fail because left/right children do not have
-- the same number of result columns.
select i from t1 where exists (select 1 from t2 where 1 = 0 union all
                               select * from t2 where t1.i < i);
select i from t1 where exists (select i from t2 where t1.i < i union all
                               select * from t2 where 1 = 0 union all
                               select * from t2 where t1.i < i union all
                               select i from t2 where 1 = 0);
-- insert tests
create table insert_test (i int, s smallint, d double precision, r real, c10 char(10),
                          c30 char(30), vc10 varchar(10), vc30 varchar(30));
-- simple tests
insert into insert_test select * from t1 union all select * from t2;
select * from insert_test;
delete from insert_test;
insert into insert_test (s, i) values (2, 1) union all values (4, 3);
select * from insert_test;
delete from insert_test;
-- type conversions between union all and target table
insert into insert_test select s, i, r, d, vc10, vc30, c10, c30 from t1 union all
                        select s, i, r, d, vc10, vc30, c10, vc30 from t2;
select * from insert_test;
delete from insert_test;
-- test type dominance/length/nullability
select vc10 from t1 union all select c30 from t2;
insert into insert_test (vc30) select vc10 from t1 union all select c30 from t2;
select * from insert_test;
delete from insert_test;
insert into insert_test (c30)
  select vc10 from t1
  union all
  select c30 from t2
  union all
  select c10 from t1;
select * from insert_test;
delete from insert_test;
-- test NormalizeResultSet generation
select i, d from t1 union all select d, i from t2;
select vc10, c30 from t1 union all select c30, vc10 from t2;
create table insert_test2 (s smallint not null, vc30 varchar(30) not null);
-- the following should fail due to null constraint
insert into insert_test2 select s, c10 from t1 union all select s, c30 from t2;
select * from insert_test2;
-- negative tests
-- ? in select list of union (both should fail)
select ? from insert_test union all select vc30 from insert_test;
select vc30 from insert_test union all select ? from insert_test;
-- DB2 requires matching target and result columns (should fail)
insert into insert_test values (1, 2) union all values (3, 4);
-- Beetle 4454 - test multiple union alls in a subquery
select vc10 from (select vc10 from t1 union all
                  select vc10 from t1 union all
                  select vc10 from t1 union all
                  select vc10 from t1 union all
                  select vc10 from t1 union all
                  select vc10 from t1 union all
                  select vc10 from t1) t;
-- force union all on right side
select vc10 from (select vc10 from t1 union all (select vc10 from t1 union all
                                                 select vc10 from t1)) t;
-- drop the tables
drop table t1;
drop table t2;
drop table insert_test;
drop table insert_test2;
-- DERBY-1967
-- NULLIF with UNION throws SQLSTATE 23502.
create table a (f1 varchar(10));
create table b (f2 varchar(10));
insert into b values('test');
-- this used to throw 23502
select nullif('x','x') as f0, f1 from a
union all
select nullif('x','x') as f0, nullif('x','x') as f1 from b;
drop table a;
drop table b;
-- Same scenario again, this time with integer columns.
create table a (f1 int);
create table b (f2 int);
insert into b values(1);
-- ok
select nullif('x','x') as f0, f1 from a
union all
select nullif('x','x') as f0, nullif(1,1) as f1 from b;
drop table a;
drop table b;
-- DERBY-681. Check union with group by/having
create table o (name varchar(20), ord int);
create table a (ord int, amount int);
create view v1 (vx, vy)
  as select name, sum(ord) from o where ord > 0 group by name, ord
     having ord <= ANY (select ord from a);
select vx, vy from v1
union select vx, sum(vy) from v1 group by vx, vy having (vy / 2) > 15;
drop view v1;
drop table o;
drop table a;
-- DERBY-1852: Incorrect results when a UNION U1 (with no "ALL") appears
-- in the FROM list of a SELECT query, AND there are duplicate rows
-- across the left and/or right result sets of U1, AND U1 is the left or
-- right child of another set operator.
create table t1 (i int, j int);
create table t2 (i int, j int);
-- Note the deliberate duplicates within and across t1/t2.
insert into t1 values (1, 2), (2, 4), (3, 6), (4, 8), (5, 10);
insert into t2 values (1, 2), (2, -4), (3, 6), (4, -8), (5, 10);
insert into t2 values (3, 6), (4, 8), (3, -6), (4, -8);
-- U1 is left child of another UNION; top-level query.
select * from t1 union select * from t2 union select * from t1;
-- U1 is left child of another UNION; subquery in FROM list.
select * from
  (select * from t1 union select * from t2 union select * from t1) x;
-- Same kind of thing, but in the form of a view (which is a
-- more likely use-case).
create view uv as
  select * from t1 union select * from t2 union select * from t1;
select * from uv;
drop view uv;
-- U1 is left child of a UNION ALL; top-level query.
select * from t1 union select * from t2 union all select * from t1;
-- U1 is left child of a UNION ALL; subquery in FROM list.
select * from
  (select * from t1 union select * from t2 union all select * from t1) x;
-- U1 is left child of an EXCEPT; top-level query.
select * from t1 union select * from t2 except select * from t1;
-- U1 is left child of an EXCEPT; subquery in FROM list.
select * from
  (select * from t1 union select * from t2 except select * from t1) x;
-- U1 is left child of an EXCEPT ALL; top-level query.
select * from t1 union select * from t2 except all select * from t1;
-- U1 is left child of an EXCEPT ALL; subquery in FROM list.
select * from
  (select * from t1 union select * from t2 except all select * from t1) x;
-- U1 is left child of an INTERSECT; top-level query.
-- Note: intersect has higher precedence than union so we have to use
-- quotes to force the UNION to be a child of the intersect.
(select * from t1 union select * from t2) intersect select * from t2;
-- U1 is left child of an INTERSECT; subquery in FROM list.
create view iv as
  (select * from t1 union select * from t2) intersect select * from t2;
select * from iv;
drop view iv;
-- U1 is left child of an INTERSECT ALL; top-level query.
(select * from t1 union select * from t2) intersect all select * from t2;
-- U1 is left child of an INTERSECT ALL; subquery in FROM list.
create view iv as
  (select * from t1 union select * from t2) intersect all select * from t2;
select * from iv;
drop view iv;
-- Just as a sanity check, make sure things work if U1 is a child of
-- an explicit JoinNode (since JoinNode is an instanceof TableOperatorNode
-- and TableOperatorNode is where the bug for DERBY-1852 was fixed).
select * from
  (select * from t1 union select * from t2) x2 left join t2 on x2.i = t2.i;
-- cleanup.
drop table t1;
drop table t2;
-- Regression test for DERBY-4391. These UNION queries used to throw a
-- NullPointerException during compilation. Now all of them should compile
-- successfully, but some of them fail during execution if their subqueries
-- return more than one row.
create table d4391(a int not null primary key, b int);
insert into d4391 values (0, 4), (1, 3), (2, 2), (3, 1), (4, 0);
-- Each scalar subquery below uses UNION (no ALL): duplicates collapse,
-- but a subquery that still yields more than one row fails at execution.
select * from d4391 where a < (values 2 union values 2);
select * from d4391 where a < (select 4 from d4391 union select b from d4391);
select * from d4391 where a < (select a+b from d4391 union select 4 from d4391);
select * from d4391 where a < (select a+b from d4391 union select a from d4391);
select * from d4391 where a < (select sum(a) from d4391 union select sum(b) from d4391);
drop table d4391;
-- Regression test for DERBY-4411. The predicate 1=0 used to be lost when the
-- SELECT statement was compiled, and the statement would fail with a message
-- saying that a scalar sub-query should return exactly one row.
create table d4411(a int primary key, b int);
insert into d4411 values (0, 4), (1, 3), (2, 2), (3, 1), (4, 0);
select * from d4411 where a < (values 2 union select b from d4411 where 1=0);
drop table d4411;
| 38.545809 | 108 | 0.689137 |
b2f1d9ab07a35f78efc77316abd28bebe9c01e76 | 4,004 | py | Python | tests/test_nameko_prometheus.py | alfaro28/nameko-prometheus | 0f50006b1510eef375712a1b7c4bd00d5f08eb1b | [
"Apache-2.0"
] | null | null | null | tests/test_nameko_prometheus.py | alfaro28/nameko-prometheus | 0f50006b1510eef375712a1b7c4bd00d5f08eb1b | [
"Apache-2.0"
] | null | null | null | tests/test_nameko_prometheus.py | alfaro28/nameko-prometheus | 0f50006b1510eef375712a1b7c4bd00d5f08eb1b | [
"Apache-2.0"
] | null | null | null | import pytest
from nameko.events import EventDispatcher, event_handler
from nameko.rpc import rpc
from nameko.testing.services import entrypoint_hook, entrypoint_waiter
from nameko.web.handlers import http
from prometheus_client import REGISTRY, Counter
from nameko_prometheus import PrometheusMetrics
@pytest.fixture
def config(rabbit_config, web_config):
    """Single merged config dict for ``container_factory``.

    Combines the nameko-provided rabbit and web fixtures; entries from
    ``web_config`` overwrite clashing keys from ``rabbit_config``.
    """
    merged = rabbit_config.copy()
    merged.update(web_config)
    return merged
@pytest.fixture(autouse=True)
def reset_prometheus_registry():
    """Drop all ``my_service`` collectors from the global registry.

    Runs automatically before every test (``autouse=True``) so metrics
    registered by a previous test's service do not leak into the next one.
    """
    stale = [
        collector
        for collector, names in REGISTRY._collector_to_names.items()
        if any(name.startswith("my_service") for name in names)
    ]
    for collector in stale:
        REGISTRY.unregister(collector)
# Module-level custom metric, incremented by MyService.update_counter and
# expected to show up in the /metrics output alongside the default metrics.
my_counter = Counter("my_counter", "My counter")
class MyService:
    """Minimal nameko service used as the target of the metric tests.

    Exposes RPC, HTTP and event-handler entrypoints so the
    ``PrometheusMetrics`` dependency can observe each entrypoint type.
    """

    name = "my_service"

    metrics = PrometheusMetrics()
    dispatcher = EventDispatcher()

    @rpc
    def update_counter(self):
        """Increment the module-level custom counter."""
        my_counter.inc()

    @http("GET", "/metrics")
    def expose_metrics(self, request):
        """Serve the Prometheus scrape endpoint."""
        return self.metrics.expose_metrics(request)

    @http("GET", "/error")
    def raise_error(self, request):
        """Always fail, so HTTP 500 metrics can be asserted."""
        raise ValueError("poof")

    @rpc
    def emit_event(self):
        """Dispatch ``my_event``, consumed by handle_event below."""
        self.dispatcher("my_event", {"foo": "bar"})

    @event_handler("my_service", "my_event")
    def handle_event(self, payload):
        """Consume ``my_event`` and return a marker string."""
        return f"handled: {payload}"
def test_expose_default_metrics(config, container_factory, web_session):
    """RPC invocation counts appear in the scraped metrics."""
    container = container_factory(MyService, config)
    container.start()
    # Invoke the RPC entrypoint twice so the counter reads exactly 2.0.
    with entrypoint_hook(container, "update_counter") as update_counter:
        update_counter()
        update_counter()
    response = web_session.get("/metrics")
    # assert that default metrics are exposed in Prometheus text format
    assert f"TYPE {MyService.name}_rpc_requests_total counter" in response.text
    assert (
        f'{MyService.name}_rpc_requests_total{{method_name="update_counter"}} 2.0'
        in response.text
    )
def test_expose_custom_metrics(config, container_factory, web_session):
    """A user-defined prometheus_client metric is exposed as well."""
    container = container_factory(MyService, config)
    container.start()
    with entrypoint_hook(container, "update_counter") as update_counter:
        for _ in range(2):
            update_counter()
    metrics_text = web_session.get("/metrics").text
    assert "my_counter_total" in metrics_text
def test_expose_event_handler_metrics(config, container_factory, web_session):
    """Event-handler entrypoints produce counter and latency metrics."""
    container = container_factory(MyService, config)
    container.start()
    # Dispatch my_event and block until handle_event has consumed it,
    # so the metrics are guaranteed to be recorded before scraping.
    with entrypoint_waiter(container, "handle_event"):
        with entrypoint_hook(container, "emit_event") as emit_event:
            emit_event()
    response = web_session.get("/metrics")
    assert f"TYPE {MyService.name}_events_total counter" in response.text
    assert f"TYPE {MyService.name}_events_latency_seconds histogram" in response.text
    assert (
        f'{MyService.name}_events_total{{event_type="my_event",source_service="my_service"}} 1.0'
        in response.text
    )
def test_http_metrics_collected_on_exception(config, container_factory, web_session):
    """A failing HTTP entrypoint is still counted, with status code 500."""
    container = container_factory(MyService, config)
    container.start()
    # Hit the always-raising endpoint, then scrape the metrics.
    web_session.get("/error")
    metrics_text = web_session.get("/metrics").text
    expected = (
        f"{MyService.name}_http_requests_total"
        f'{{endpoint="/error",http_method="GET",status_code="500"}} 1.0'
    )
    assert expected in metrics_text
def test_override_default_metric_prefix(config, container_factory, web_session):
    """A per-service ``prefix`` config entry replaces the default prefix."""
    prefix = "my_prefix"
    # Configure the override under the PROMETHEUS config key.
    config.update({"PROMETHEUS": {MyService.name: {"prefix": prefix}}})
    container = container_factory(MyService, config)
    container.start()
    with entrypoint_hook(container, "update_counter") as update_counter:
        update_counter()
    response = web_session.get("/metrics")
    assert f"TYPE {prefix}_rpc_requests_total counter" in response.text
| 33.932203 | 108 | 0.727273 |
bcadfeee49382d5f9a0b72089c70c1bde2e0675c | 2,223 | js | JavaScript | src/pages/blog.js | juancarloselorriaga/dnl | cb603ef3b2f61bfde1b53c40679251abed725f3e | [
"MIT"
] | 1 | 2019-10-05T17:54:30.000Z | 2019-10-05T17:54:30.000Z | src/pages/blog.js | juancarloselorriaga/dnl | cb603ef3b2f61bfde1b53c40679251abed725f3e | [
"MIT"
] | null | null | null | src/pages/blog.js | juancarloselorriaga/dnl | cb603ef3b2f61bfde1b53c40679251abed725f3e | [
"MIT"
] | null | null | null | import React from "react"
import { graphql, useStaticQuery } from "gatsby"
import styled from "styled-components"
import Layout from "../components/layout/layout.component"
import Hero from "../components/hero/hero.component"
import HeroTitle from "../components/hero-title/hero-title.component"
import Subscribe from "../components/subscribe/subscribe.component"
import Seccion from "../components/seccion/seccion.component"
import Center from "../components/center/center.component"
import Card from "../components/card/card.component"
import Title from "../components/title/title.component"
import SEO from "../components/SEO/SEO.component"
// Static GraphQL query: the blog hero image plus every Contentful post
// (id, title, slug, publish date, author, summary and fluid image).
const getBlogPosts = graphql`
  query {
    blogImage: file(relativePath: { eq: "blog.jpg" }) {
      childImageSharp {
        fluid {
          ...GatsbyImageSharpFluid
        }
      }
    }
    blogPosts: allContentfulPosts {
      edges {
        node {
          id: contentful_id
          title
          slug
          publicado
          autor
          resumen {
            resumen
          }
          img: imagen {
            fluid {
              ...GatsbyContentfulFluid
            }
          }
        }
      }
    }
  }
`
const AreaTemplate = ({ className }) => {
const response = useStaticQuery(getBlogPosts)
const blogPosts = response.blogPosts.edges
const blogImage = response.blogImage
return (
<Layout>
<SEO title="Blog" />
<Hero centered extraBottom img={blogImage.childImageSharp} className={className}>
<HeroTitle
centered
smaller
title="Noticias, tendencias y blog"
subtitle="Encuentra información relevante para mejorar tu negocio o servicio."
/>
</Hero>
<Seccion white arched>
<Center white>
<Title title="Nuestro blog" />
{blogPosts.map(({ node }) => {
return (
<Card
key={node.id}
content={node.resumen.resumen}
items={node}
textoBoton="Ver post"
/>
)
})}
</Center>
</Seccion>
<Subscribe />
</Layout>
)
}
// styled-components wrapper adding page-scoped CSS for the .boton class.
export default styled(AreaTemplate)`
  .boton {
    margin-top: 1.5rem;
  }
`
| 25.551724 | 88 | 0.582996 |
6548c87fb2cf655fda082e4ff6638d4c0183f910 | 3,477 | kt | Kotlin | src/main/kotlin/com/github/devcordde/devcordbot/commands/owners/CleanupCommand.kt | T1Il/DevcordBot | f7c48cfd6421d847b4ad75e07661ed2417c9a4a7 | [
"Apache-2.0"
] | null | null | null | src/main/kotlin/com/github/devcordde/devcordbot/commands/owners/CleanupCommand.kt | T1Il/DevcordBot | f7c48cfd6421d847b4ad75e07661ed2417c9a4a7 | [
"Apache-2.0"
] | null | null | null | src/main/kotlin/com/github/devcordde/devcordbot/commands/owners/CleanupCommand.kt | T1Il/DevcordBot | f7c48cfd6421d847b4ad75e07661ed2417c9a4a7 | [
"Apache-2.0"
] | null | null | null | /*
* Copyright 2020 Daniel Scherf & Michael Rittmeister & Julian König
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.devcordde.devcordbot.commands.owners
import com.github.devcordde.devcordbot.command.AbstractCommand
import com.github.devcordde.devcordbot.command.CommandCategory
import com.github.devcordde.devcordbot.command.CommandPlace
import com.github.devcordde.devcordbot.command.context.Context
import com.github.devcordde.devcordbot.command.permission.Permission
import com.github.devcordde.devcordbot.constants.Embeds
import com.github.devcordde.devcordbot.database.DatabaseDevCordUser
import com.github.devcordde.devcordbot.database.Tag
import mu.KotlinLogging
import net.dv8tion.jda.api.entities.Guild
import net.dv8tion.jda.api.entities.User
import org.jetbrains.exposed.sql.transactions.transaction
/**
 * Cleanup command.
 *
 * Deletes rank (level/XP) database rows of users who are no longer guild
 * members, reassigns tags authored by such users to the bot itself, and
 * reports both counts back to the invoker.
 */
class CleanupCommand : AbstractCommand() {
    override val aliases: List<String> = listOf("cleanup")
    override val displayName: String = "Cleanup"
    override val description: String = "Entfernt die Level von ungültigen Membern"
    override val usage: String = "<tagname>"
    override val category: CommandCategory = CommandCategory.BOT_OWNER
    override val permission: Permission = Permission.BOT_OWNER
    override val commandPlace: CommandPlace = CommandPlace.ALL

    private val logger = KotlinLogging.logger {}

    override suspend fun execute(context: Context) {
        val guild = context.bot.guild
        // Purge rank rows first, then rewrite tag authorship.
        val cleanedUsers = cleanupRanks(guild)
        val cleanedTags = cleanupTags(guild, context.bot.jda.selfUser)
        return context.respond(
            Embeds.info(
                "Erfolgreich ausgeführt!",
                """
                    Entfernte User: $cleanedUsers
                    Veränderte Tags: $cleanedTags
                """
            )
        ).queue()
    }

    /** Deletes rank entries whose user left the guild; returns how many were removed. */
    private fun cleanupRanks(guild: Guild): Int {
        var clearedEntries = 0
        transaction {
            DatabaseDevCordUser.all().forEach {
                if (!isMemberOfGuild(guild, it.userID)) {
                    logger.info { "User gelöscht: ID ${it.userID}, Level: ${it.level}, XP: ${it.experience}" }
                    it.delete()
                    clearedEntries++
                }
            }
        }
        return clearedEntries
    }

    /** Reassigns tags of departed authors to [selfUser]; returns how many were changed. */
    private fun cleanupTags(guild: Guild, selfUser: User): Int {
        var movedEntries = 0
        transaction {
            Tag.all().forEach {
                if (!isMemberOfGuild(guild, it.author)) {
                    logger.info { "Autor geändert: Alter Author: ${it.author}, Name: ${it.name}" }
                    it.author = selfUser.idLong
                    movedEntries++
                }
            }
        }
        return movedEntries
    }

    // NOTE(review): uses getMemberById, i.e. the member cache — confirm all
    // members are cached, otherwise uncached members would be treated as gone.
    private fun isMemberOfGuild(guild: Guild, userID: Long): Boolean {
        return guild.getMemberById(userID) != null
    }
}
| 36.989362 | 110 | 0.657463 |
2b62c2c5dd9bc7a54501bb31912c50937f9203c9 | 1,352 | swift | Swift | Exampel/DemoController.swift | wufeiyue/TLPageView | 05206ab5649a04226199028f92a323dcf24a485c | [
"MIT"
] | 2 | 2018-07-12T15:19:53.000Z | 2018-12-13T03:49:00.000Z | Exampel/DemoController.swift | wufeiyue/TLPageView | 05206ab5649a04226199028f92a323dcf24a485c | [
"MIT"
] | null | null | null | Exampel/DemoController.swift | wufeiyue/TLPageView | 05206ab5649a04226199028f92a323dcf24a485c | [
"MIT"
] | 1 | 2019-12-17T12:30:41.000Z | 2019-12-17T12:30:41.000Z | //
// DemoController.swift
// Exampel
//
// Created by Charles on 2018/7/12.
// Copyright © 2018 Charles. All rights reserved.
//
import UIKit
class DemoController: UIViewController {

    override func viewDidLoad() {
        super.viewDidLoad()

        // Label showing this controller's title.
        let desc = UILabel()
        desc.text = self.title
        desc.frame = CGRect(x: 50, y: 200, width: 100, height: 30)
        view.addSubview(desc)

        // Button that pushes a demo controller when tapped.
        let btn = UIButton(type: .custom)
        btn.setTitle("点击 push", for: .normal)
        btn.addTarget(self, action: #selector(pushClick), for: .touchUpInside)
        btn.frame = CGRect(x: 50, y: 100, width: 90, height: 60)
        view.addSubview(btn)
        // Do any additional setup after loading the view.
    }

    @objc private func pushClick() {
        let vc = UIViewController()
        vc.view.backgroundColor = .red
        vc.navigationItem.title = "push title"
        // Bug fix: push the configured `vc`. Previously a fresh, blank
        // UIViewController() was pushed and the configured one was discarded.
        navigationController?.show(vc, sender: nil)
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        print("viewWillAppear == " + (navigationItem.title ?? ""))
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        print("viewWillDisappear == " + (navigationItem.title ?? ""))
    }
}
| 27.04 | 78 | 0.607988 |
cb2eb9c379ba8f1b243b56d2570d91967aa512c5 | 1,458 | go | Go | go-snippets/go-routines-example.go | ferralucho/mercado-libre-accelerator | fee70eadd708b73383f2c9314ff3d0d8fd359f6a | [
"MIT"
] | null | null | null | go-snippets/go-routines-example.go | ferralucho/mercado-libre-accelerator | fee70eadd708b73383f2c9314ff3d0d8fd359f6a | [
"MIT"
] | null | null | null | go-snippets/go-routines-example.go | ferralucho/mercado-libre-accelerator | fee70eadd708b73383f2c9314ff3d0d8fd359f6a | [
"MIT"
] | null | null | null | package main
import (
"fmt"
"net/http"
"sync"
)
func miFuncion(wg *sync.WaitGroup) {
fmt.Println("Dentro de la goroutine")
wg.Done()
}
/*
func main() {
fmt.Println("Inicio del programa")
var wg sync.WaitGroup
wg.Add(1)
//cuenta cuantos hilos de ejecucion espera que todos los procesos terminen
go miFuncion(&wg)
wg.Wait()
fmt.Printf("Fin del programa")
}
*/
/*
func main() {
fmt.Println("Hello World")
var waitgroup sync.WaitGroup
waitgroup.Add(1)
go func() {
fmt.Println("Inside my goroutine")
waitgroup.Done()
}()
waitgroup.Wait()
fmt.Println("Finished Execution")
}
*/
/*
go func(url string) {
fmt.Println(url)
}(url)
*/
// urls lists the endpoints fetched concurrently by enviarRequest.
var urls = []string{
	"https://www.google.com",
	"https://www.lavoz.com.ar",
	"https://www.mercadolibre.com",
}
// recuperar fetches url, prints its HTTP status and signals wg when done.
//
// Bug fix: on a failed request the original fell through to res.Status,
// dereferencing a nil *http.Response. We now log the error and return
// early; the response body is also closed to avoid leaking the connection.
func recuperar(url string, wg *sync.WaitGroup) {
	defer wg.Done()
	fmt.Println(url)
	res, err := http.Get(url)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer res.Body.Close()
	fmt.Println(res.Status)
}
// enviarRequest handles "/": it fetches every entry in urls concurrently,
// waits for all fetches to complete and then answers the HTTP request.
func enviarRequest(w http.ResponseWriter, r *http.Request) {
	fmt.Println("Enviamos request al endpoint")
	var waitgroup sync.WaitGroup
	for _, url := range urls {
		waitgroup.Add(1)
		go recuperar(url, &waitgroup)
	}
	// Block until every recuperar goroutine has called Done.
	waitgroup.Wait()
	fmt.Println("Devuelve una respuesta")
	fmt.Println("Proceso terminado")
	fmt.Fprint(w, "Proceso terminado")
}
// handleRequest registers the root handler and serves on :8080.
// http.ListenAndServe always returns a non-nil error once the server
// stops; the original discarded it silently, now it is reported.
func handleRequest() {
	http.HandleFunc("/", enviarRequest)
	if err := http.ListenAndServe(":8080", nil); err != nil {
		fmt.Println(err)
	}
}
// main starts the HTTP server (blocks until the server stops).
func main() {
	handleRequest()
}
| 16.953488 | 75 | 0.663923 |
b987fd5f95ece0daf5b38a3605fec49589a42050 | 1,068 | c | C | SetWindowTitles.c | khval/BetterFakeMode | aace8a1355e36a6cf2591a88c2859987f506c4bc | [
"MIT"
] | 1 | 2021-05-30T19:48:11.000Z | 2021-05-30T19:48:11.000Z | SetWindowTitles.c | khval/BetterFakeMode | aace8a1355e36a6cf2591a88c2859987f506c4bc | [
"MIT"
] | 25 | 2021-04-23T21:02:44.000Z | 2021-08-03T22:15:51.000Z | SetWindowTitles.c | khval/BetterFakeMode | aace8a1355e36a6cf2591a88c2859987f506c4bc | [
"MIT"
] | null | null | null |
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <proto/exec.h>
#include <proto/dos.h>
#include <proto/layers.h>
#include <proto/intuition.h>
#include <proto/graphics.h>
#include <exec/emulation.h>
#include "common.h"
#include "helper/screen.h"
extern APTR video_mutex;
extern void RenderWindow(struct Window *win);
/*
 * Replacement for intuition.library SetWindowTitles().
 *
 * Duplicates the given window and screen title strings into the window
 * structure (freeing the previous pointers first), then re-renders the
 * window. The whole update runs under video_mutex.
 *
 * NOTE(review): a NULL winStr/srcStr leaves the corresponding title
 * unchanged rather than clearing it -- confirm this matches the intended
 * SetWindowTitles() semantics for this fake mode.
 */
void fake_SetWindowTitles( struct Window *win, const char *winStr, const char *srcStr )
{
	/* Verbose file:function:line tracing while this fake is debugged. */
	FPrintf( output,"%s:%s:%ld\n",__FILE__,__FUNCTION__,__LINE__);

	MutexObtain(video_mutex);

	FPrintf( output,"Title: %08lx, ScreenTitle: %08lx\n", win -> Title, win -> ScreenTitle);

	if (winStr)
	{
		/* assumes win->Title was strdup'ed by this layer -- TODO confirm before free */
		if (win -> Title) free(win -> Title);
		win -> Title = strdup( winStr );
	}

	FPrintf( output,"%s:%s:%ld\n",__FILE__,__FUNCTION__,__LINE__);

	if (srcStr)
	{
		if (win -> ScreenTitle) free(win -> ScreenTitle);
		win -> ScreenTitle = strdup( srcStr );
	}

	FPrintf( output,"%s:%s:%ld\n",__FILE__,__FUNCTION__,__LINE__);

	/* Redraw so the new titles become visible, still under the mutex. */
	RenderWindow(win);

	MutexRelease(video_mutex);

	FPrintf( output,"%s:%s:%ld\n",__FILE__,__FUNCTION__,__LINE__);
}
| 21.36 | 88 | 0.694757 |
dd46f1ea57e286ad26cc41c600738b42a43ed68c | 120 | kt | Kotlin | magic-modules-plugin/src/main/kotlin/io/labs/dotanuki/magicmodules/internal/model/CanonicalModuleName.kt | DevSrSouza/magic-modules | f22bf9f7ac74f1ebbf31410f95a10a189a8d28a4 | [
"MIT"
] | 43 | 2020-04-22T01:04:56.000Z | 2021-09-17T19:45:51.000Z | magic-modules-plugin/src/main/kotlin/io/labs/dotanuki/magicmodules/internal/model/CanonicalModuleName.kt | DevSrSouza/magic-modules | f22bf9f7ac74f1ebbf31410f95a10a189a8d28a4 | [
"MIT"
] | 2 | 2020-08-24T19:31:16.000Z | 2022-01-02T14:39:16.000Z | magic-modules-plugin/src/main/kotlin/io/labs/dotanuki/magicmodules/internal/model/CanonicalModuleName.kt | DevSrSouza/magic-modules | f22bf9f7ac74f1ebbf31410f95a10a189a8d28a4 | [
"MIT"
] | 3 | 2020-04-24T03:00:28.000Z | 2020-07-18T02:12:10.000Z | package io.labs.dotanuki.magicmodules.internal.model
/**
 * Type-safe wrapper around a module's canonical name.
 *
 * Declared as an inline class so the wrapper has no runtime cost.
 */
internal inline class CanonicalModuleName(
    val value: String
)
4bbe31d6277493df0adb783218f3e2b796fe89e2 | 350 | lua | Lua | src/premake5.lua | psiberx/RED4.RTTIDumper | 12e5088d1ca8aac99e64d71452dc9b2300e3f9d9 | [
"MIT"
] | 2 | 2021-04-20T01:44:50.000Z | 2022-03-26T21:06:14.000Z | src/premake5.lua | rfuzzo/RED4.RTTIDumper | 749b02ca74bb8e8bbdf0cf36c4d33733f63689da | [
"MIT"
] | 5 | 2021-03-05T23:22:34.000Z | 2021-06-17T20:37:11.000Z | src/premake5.lua | rfuzzo/RED4.RTTIDumper | 749b02ca74bb8e8bbdf0cf36c4d33733f63689da | [
"MIT"
] | 5 | 2021-03-12T09:11:43.000Z | 2022-03-26T21:06:15.000Z | project("RED4.RTTIDumper")
targetdir(paths.build())
kind("SharedLib")
language("C++")
pchheader("stdafx.hpp")
pchsource("stdafx.cpp")
includedirs(
{
".",
paths.deps("json", "single_include"),
paths.deps("red4ext.sdk", "include")
})
files(
{
"**.cpp",
"**.hpp"
})
| 16.666667 | 45 | 0.494286 |