hexsha stringlengths 40 40 | size int64 5 1.05M | ext stringclasses 98
values | lang stringclasses 21
values | max_stars_repo_path stringlengths 3 945 | max_stars_repo_name stringlengths 4 118 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 945 | max_issues_repo_name stringlengths 4 118 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 134k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 945 | max_forks_repo_name stringlengths 4 135 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 1.05M | avg_line_length float64 1 1.03M | max_line_length int64 2 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dfcd55bc957fca5ef90262ee5eef33232228baff | 762 | ts | TypeScript | src/repositories/mysql/authentication.mysql.repository.ts | ithai5/StockManagmentAPI | 99895e91fabd24846b87ec7d05f47bc21bb360a1 | [
"MIT"
] | null | null | null | src/repositories/mysql/authentication.mysql.repository.ts | ithai5/StockManagmentAPI | 99895e91fabd24846b87ec7d05f47bc21bb360a1 | [
"MIT"
] | null | null | null | src/repositories/mysql/authentication.mysql.repository.ts | ithai5/StockManagmentAPI | 99895e91fabd24846b87ec7d05f47bc21bb360a1 | [
"MIT"
] | null | null | null | import { InterfaceAuthentication } from "../interface-authentication.repository";
import { PlayerDto } from "../../models/dto/player.dto";
import { prismaMySql } from "../../database-connection/mysql.database-connection";
import { SignupDto } from "../../models/dto/signup.dto";
export const authenticationMySql: InterfaceAuthentication = {
signupPlayer(signupDto: SignupDto): Promise<PlayerDto> {
return prismaMySql.player.create({ data: signupDto }).catch((reason) => {
throw Error(reason);
});
},
loginPlayer(email: string): Promise<PlayerDto | null> {
return prismaMySql.player
.findUnique({
where: {
email: email,
},
})
.catch((reason) => {
throw Error(reason);
});
},
};
| 31.75 | 82 | 0.639108 |
e51e96650379da19d23b73ca6b7e943b66e5d48a | 2,592 | ts | TypeScript | src/chat/api/chat.gateway.ts | ArmNem/fullstackDev2021-Backend-master | ba6287756af8510b5b431ab5d7d6ae3ed04cfa77 | [
"MIT"
] | null | null | null | src/chat/api/chat.gateway.ts | ArmNem/fullstackDev2021-Backend-master | ba6287756af8510b5b431ab5d7d6ae3ed04cfa77 | [
"MIT"
] | null | null | null | src/chat/api/chat.gateway.ts | ArmNem/fullstackDev2021-Backend-master | ba6287756af8510b5b431ab5d7d6ae3ed04cfa77 | [
"MIT"
] | null | null | null | import {
ConnectedSocket,
MessageBody,
OnGatewayConnection,
OnGatewayDisconnect,
SubscribeMessage,
WebSocketGateway,
WebSocketServer,
} from '@nestjs/websockets';
import { Socket } from 'socket.io';
import { ChatService } from '../core/services/chat.service';
import { WelcomeDto } from './dto/welcome.dto';
import {
IChatService,
IChatServiceProvider,
} from '../core/primary-ports/chat.service.interface';
import { Inject } from '@nestjs/common';
import { JoinChatDto } from './dto/join-chat.dto';
import { ChatClientModule } from '../core/models/chat.client.module';
@WebSocketGateway()
export class ChatGateway implements OnGatewayConnection, OnGatewayDisconnect {
constructor(
@Inject(IChatServiceProvider) private chatService: IChatService,
) {}
@WebSocketServer() server;
@SubscribeMessage('message')
handleChatEvent(
@MessageBody() message: string,
@ConnectedSocket() client: Socket,
): void {
const chatMessage = this.chatService.newMessage(message, client.id);
this.server.emit('newmessages', chatMessage);
}
@SubscribeMessage('typing')
handleTypingEvent(
@MessageBody() typing: boolean,
@ConnectedSocket() client: Socket,
): void {
const chatClient = this.chatService.updateTyping(typing, client.id);
if (chatClient) {
this.server.emit('clientTyping', chatClient);
}
}
@SubscribeMessage('joinchat')
async handleJoinChatEvent(
@MessageBody() joinChatClientDto: JoinChatDto,
@ConnectedSocket() client: Socket,
): Promise<void> {
try {
let chatClient: ChatClientModule = JSON.parse(
JSON.stringify(joinChatClientDto),
);
chatClient = await this.chatService.newClient(chatClient);
const chatClients = await this.chatService.getClients();
const welcome: WelcomeDto = {
clients: chatClients,
messages: this.chatService.getMessages(),
client: chatClient,
};
client.emit('welcome', welcome);
this.server.emit('clients', chatClients);
} catch (e) {
client.error(e.message);
}
}
async handleConnection(client: Socket, ...args: any[]): Promise<any> {
console.log('Client Connect', client.id);
client.emit('allMessages', this.chatService.getMessages());
this.server.emit('clients', await this.chatService.getClients());
}
async handleDisconnect(client: Socket): Promise<any> {
await this.chatService.delete(client.id);
this.server.emit('clients', this.chatService.getClients());
console.log('Client Disconnect', await this.chatService.getClients());
}
}
| 30.857143 | 78 | 0.697531 |
c2c5f61434c2d54f75d71d33466c7cfc20c009f0 | 312 | sql | SQL | www/html/app/addons/affiliate/database/demo_groups_ru.sql | YotpoLtd/vagrant-cscart | 26b74900c77e79b6dfe2d02133d4c3093b558781 | [
"MIT"
] | null | null | null | www/html/app/addons/affiliate/database/demo_groups_ru.sql | YotpoLtd/vagrant-cscart | 26b74900c77e79b6dfe2d02133d4c3093b558781 | [
"MIT"
] | null | null | null | www/html/app/addons/affiliate/database/demo_groups_ru.sql | YotpoLtd/vagrant-cscart | 26b74900c77e79b6dfe2d02133d4c3093b558781 | [
"MIT"
] | null | null | null | REPLACE INTO ?:aff_group_descriptions (group_id, name, lang_code) VALUES ('1', 'Группа категории', 'ru');
REPLACE INTO ?:aff_group_descriptions (group_id, name, lang_code) VALUES ('2', 'Группа продукта', 'ru');
REPLACE INTO ?:aff_group_descriptions (group_id, name, lang_code) VALUES ('3', 'url группа 2', 'ru'); | 104 | 105 | 0.730769 |
1ce61a5405731d664cc5b3678dba17549caad538 | 3,022 | swift | Swift | GitIt/Modules/Support/Search/Search Coordinator/Model/SearchCoordinatorExtensions.swift | loay-ashraf/GitIt | 9ea99b24e80537b99ee41f4ae6b5f6f898828b1e | [
"MIT"
] | 2,292 | 2021-12-18T22:33:53.000Z | 2022-03-01T13:50:25.000Z | GitIt/Modules/Support/Search/Search Coordinator/Model/SearchCoordinatorExtensions.swift | loay-ashraf/GitIt | 9ea99b24e80537b99ee41f4ae6b5f6f898828b1e | [
"MIT"
] | 14 | 2021-12-10T11:51:05.000Z | 2022-02-28T15:45:12.000Z | GitIt/Modules/Support/Search/Search Coordinator/Model/SearchCoordinatorExtensions.swift | loay-ashraf/GitIt | 9ea99b24e80537b99ee41f4ae6b5f6f898828b1e | [
"MIT"
] | null | null | null | //
// SearchCoordinatorExtensions.swift
// GitIt
//
// Created by Loay Ashraf on 01/01/2022.
//
import UIKit
extension SearchCoordinator {
// MARK: - View Helper Methods
func render(_ state: SearchViewState) {
switch state {
case .searching: showResults()
resetNavigationController()
case .idle: hideResults()
resetNavigationController()
}
}
func resetNavigationController() {
navigationController.popToRootViewController(animated: false)
}
func resetControllers() {
searchHistoryController.reset()
searchResultsController.reset()
}
func showResults() {
UIView.transition(with: searchHistoryController.view, duration: 0.3, options: .transitionCrossDissolve, animations: {
self.searchHistoryController.addChild(self.searchResultsController)
self.searchHistoryController.view.addSubview(self.searchResultsController.view)
self.searchResultsController.didMove(toParent: self.searchHistoryController!)
self.searchResultsController.view.frame = self.searchHistoryController.view.frame
}, completion: nil)
}
func hideResults() {
UIView.transition(with: searchHistoryController.view, duration: 0.3, options: .transitionCrossDissolve, animations: {
self.searchResultsController.willMove(toParent: nil)
self.searchResultsController.removeFromParent()
self.searchResultsController.view.removeFromSuperview()
}, completion: nil)
}
}
extension SearchCoordinator: SearchControllerDelegate {
// MARK: - Search Controller Delegate Methods
func didBeginSearchingSession() {
render(.idle)
}
func didEndSearchingSession() {
query = ""
render(.idle)
resetControllers()
}
func willSearch() {
render(.searching)
searchHistoryController.addQuery(with: query)
searchResultsController.search(with: query)
}
func didSearch() {
render(.idle)
resetControllers()
}
}
extension SearchCoordinator: SearchHistoryDelegate {
// MARK: - History Delegate Methods
func reloadQuery(with query: String) {
self.query = query
render(.searching)
searchResultsController.search(with: query)
}
func dismissHistoryKeyboard() {
searchController.searchBar.searchTextField.resignFirstResponder()
}
}
extension SearchCoordinator: SearchResultsDelegate {
// MARK: - Results Delegate Methods
func addObject<T: TableCellViewModel>(with cellViewModel: T) {
let collectionCellViewModel = cellViewModel.collectionCellViewModel()
searchHistoryController.addObject(with: collectionCellViewModel)
}
func dismissResultsKeyboard() {
searchController.searchBar.searchTextField.resignFirstResponder()
}
}
| 27.981481 | 125 | 0.66049 |
25648e5dbf5e12b5db6d5a7fec163e25baa3ff25 | 5,870 | asm | Assembly | Transynther/x86/_processed/AVXALIGN/_un_/i9-9900K_12_0xa0.log_1_463.asm | ljhsiun2/medusa | 67d769b8a2fb42c538f10287abaf0e6dbb463f0c | [
"MIT"
] | 9 | 2020-08-13T19:41:58.000Z | 2022-03-30T12:22:51.000Z | Transynther/x86/_processed/AVXALIGN/_un_/i9-9900K_12_0xa0.log_1_463.asm | ljhsiun2/medusa | 67d769b8a2fb42c538f10287abaf0e6dbb463f0c | [
"MIT"
] | 1 | 2021-04-29T06:29:35.000Z | 2021-05-13T21:02:30.000Z | Transynther/x86/_processed/AVXALIGN/_un_/i9-9900K_12_0xa0.log_1_463.asm | ljhsiun2/medusa | 67d769b8a2fb42c538f10287abaf0e6dbb463f0c | [
"MIT"
] | 3 | 2020-07-14T17:07:07.000Z | 2022-03-21T01:12:22.000Z | .global s_prepare_buffers
s_prepare_buffers:
push %r11
push %r12
push %r15
push %r9
push %rax
push %rbp
push %rcx
lea addresses_WT_ht+0x52cc, %rcx
nop
nop
nop
cmp %r9, %r9
movups (%rcx), %xmm0
vpextrq $0, %xmm0, %r12
nop
nop
nop
and $16032, %rcx
lea addresses_UC_ht+0xb3ec, %r11
nop
nop
nop
and %rbp, %rbp
mov (%r11), %ax
nop
nop
nop
cmp %rbp, %rbp
lea addresses_UC_ht+0xe32c, %rbp
nop
nop
nop
nop
nop
dec %r15
movb (%rbp), %cl
nop
nop
and $52090, %r15
lea addresses_WT_ht+0x1080, %r15
nop
nop
nop
nop
nop
dec %rcx
movb $0x61, (%r15)
nop
nop
cmp %rax, %rax
lea addresses_D_ht+0x163ec, %rbp
nop
nop
nop
add %r15, %r15
mov (%rbp), %r9w
nop
nop
nop
xor $32265, %rbp
lea addresses_normal_ht+0x1d63c, %rcx
clflush (%rcx)
nop
nop
inc %r11
mov $0x6162636465666768, %r12
movq %r12, %xmm1
movups %xmm1, (%rcx)
nop
nop
xor $20664, %rbp
lea addresses_WT_ht+0x6bec, %r9
nop
dec %rax
mov (%r9), %ebp
nop
sub $45183, %rax
lea addresses_WC_ht+0x9fec, %r9
nop
nop
nop
nop
inc %r11
mov (%r9), %rbp
nop
nop
nop
and $59838, %r11
lea addresses_WC_ht+0x1bbec, %rax
xor %r9, %r9
vmovups (%rax), %ymm1
vextracti128 $0, %ymm1, %xmm1
vpextrq $1, %xmm1, %r15
nop
nop
nop
nop
add $45012, %rax
lea addresses_WC_ht+0x14a8c, %rcx
nop
nop
nop
nop
add %r12, %r12
mov $0x6162636465666768, %r15
movq %r15, %xmm4
and $0xffffffffffffffc0, %rcx
vmovntdq %ymm4, (%rcx)
nop
nop
nop
nop
add $58874, %rax
pop %rcx
pop %rbp
pop %rax
pop %r9
pop %r15
pop %r12
pop %r11
ret
.global s_faulty_load
s_faulty_load:
push %r11
push %r12
push %r14
push %r9
push %rax
push %rbx
push %rdx
// Store
lea addresses_US+0x53ec, %rdx
clflush (%rdx)
nop
and %rax, %rax
movb $0x51, (%rdx)
nop
nop
nop
dec %r12
// Store
lea addresses_WT+0x1e6ac, %rbx
clflush (%rbx)
nop
and $34931, %r9
movw $0x5152, (%rbx)
nop
nop
nop
nop
nop
cmp $17574, %r9
// Store
lea addresses_normal+0x3bc4, %rbx
nop
nop
nop
and $22624, %r11
mov $0x5152535455565758, %r12
movq %r12, %xmm7
movups %xmm7, (%rbx)
nop
and $50597, %r9
// Store
lea addresses_WC+0x1c3ec, %r9
nop
nop
nop
sub %rdx, %rdx
mov $0x5152535455565758, %rax
movq %rax, %xmm3
movups %xmm3, (%r9)
nop
nop
nop
nop
inc %r11
// Store
lea addresses_PSE+0x1f284, %rbx
nop
nop
nop
cmp %rdx, %rdx
movl $0x51525354, (%rbx)
nop
cmp $33334, %rbx
// Store
mov $0xbec, %rax
clflush (%rax)
sub $3916, %r14
movw $0x5152, (%rax)
nop
nop
nop
sub %r14, %r14
// Store
lea addresses_A+0x17fec, %r11
nop
nop
nop
sub %r12, %r12
movw $0x5152, (%r11)
nop
nop
nop
nop
nop
sub $1829, %r9
// Store
lea addresses_D+0x17ec, %rax
cmp %r11, %r11
movb $0x51, (%rax)
nop
nop
nop
and %rax, %rax
// Store
mov $0x1843750000000dac, %r14
nop
nop
nop
and %r12, %r12
mov $0x5152535455565758, %rdx
movq %rdx, (%r14)
nop
cmp %r12, %r12
// Store
lea addresses_A+0x71ac, %r9
nop
cmp %rdx, %rdx
mov $0x5152535455565758, %r12
movq %r12, %xmm4
vmovups %ymm4, (%r9)
nop
nop
nop
nop
sub $9972, %rax
// Faulty Load
lea addresses_US+0xcbec, %rax
nop
cmp %rdx, %rdx
mov (%rax), %r9w
lea oracles, %r12
and $0xff, %r9
shlq $12, %r9
mov (%r12,%r9,1), %r9
pop %rdx
pop %rbx
pop %rax
pop %r9
pop %r14
pop %r12
pop %r11
ret
/*
<gen_faulty_load>
[REF]
{'src': {'NT': False, 'same': False, 'congruent': 0, 'type': 'addresses_US', 'AVXalign': False, 'size': 2}, 'OP': 'LOAD'}
{'OP': 'STOR', 'dst': {'NT': False, 'same': False, 'congruent': 11, 'type': 'addresses_US', 'AVXalign': False, 'size': 1}}
{'OP': 'STOR', 'dst': {'NT': False, 'same': False, 'congruent': 5, 'type': 'addresses_WT', 'AVXalign': False, 'size': 2}}
{'OP': 'STOR', 'dst': {'NT': False, 'same': False, 'congruent': 2, 'type': 'addresses_normal', 'AVXalign': False, 'size': 16}}
{'OP': 'STOR', 'dst': {'NT': False, 'same': False, 'congruent': 10, 'type': 'addresses_WC', 'AVXalign': False, 'size': 16}}
{'OP': 'STOR', 'dst': {'NT': False, 'same': False, 'congruent': 3, 'type': 'addresses_PSE', 'AVXalign': False, 'size': 4}}
{'OP': 'STOR', 'dst': {'NT': False, 'same': False, 'congruent': 11, 'type': 'addresses_P', 'AVXalign': False, 'size': 2}}
{'OP': 'STOR', 'dst': {'NT': False, 'same': False, 'congruent': 10, 'type': 'addresses_A', 'AVXalign': False, 'size': 2}}
{'OP': 'STOR', 'dst': {'NT': False, 'same': False, 'congruent': 9, 'type': 'addresses_D', 'AVXalign': False, 'size': 1}}
{'OP': 'STOR', 'dst': {'NT': False, 'same': False, 'congruent': 4, 'type': 'addresses_NC', 'AVXalign': False, 'size': 8}}
{'OP': 'STOR', 'dst': {'NT': False, 'same': False, 'congruent': 6, 'type': 'addresses_A', 'AVXalign': False, 'size': 32}}
[Faulty Load]
{'src': {'NT': True, 'same': True, 'congruent': 0, 'type': 'addresses_US', 'AVXalign': False, 'size': 2}, 'OP': 'LOAD'}
<gen_prepare_buffer>
{'src': {'NT': False, 'same': False, 'congruent': 4, 'type': 'addresses_WT_ht', 'AVXalign': False, 'size': 16}, 'OP': 'LOAD'}
{'src': {'NT': False, 'same': False, 'congruent': 10, 'type': 'addresses_UC_ht', 'AVXalign': False, 'size': 2}, 'OP': 'LOAD'}
{'src': {'NT': False, 'same': False, 'congruent': 5, 'type': 'addresses_UC_ht', 'AVXalign': False, 'size': 1}, 'OP': 'LOAD'}
{'OP': 'STOR', 'dst': {'NT': False, 'same': False, 'congruent': 1, 'type': 'addresses_WT_ht', 'AVXalign': False, 'size': 1}}
{'src': {'NT': False, 'same': False, 'congruent': 11, 'type': 'addresses_D_ht', 'AVXalign': False, 'size': 2}, 'OP': 'LOAD'}
{'OP': 'STOR', 'dst': {'NT': False, 'same': False, 'congruent': 4, 'type': 'addresses_normal_ht', 'AVXalign': False, 'size': 16}}
{'src': {'NT': False, 'same': False, 'congruent': 10, 'type': 'addresses_WT_ht', 'AVXalign': False, 'size': 4}, 'OP': 'LOAD'}
{'src': {'NT': False, 'same': False, 'congruent': 10, 'type': 'addresses_WC_ht', 'AVXalign': False, 'size': 8}, 'OP': 'LOAD'}
{'src': {'NT': False, 'same': False, 'congruent': 11, 'type': 'addresses_WC_ht', 'AVXalign': False, 'size': 32}, 'OP': 'LOAD'}
{'OP': 'STOR', 'dst': {'NT': True, 'same': False, 'congruent': 3, 'type': 'addresses_WC_ht', 'AVXalign': False, 'size': 32}}
{'2a': 1}
2a
*/
| 19.501661 | 129 | 0.63833 |
b9753292b0dbf21a2de32b994e5c10ab32e8e271 | 1,175 | c | C | src/lib9/mingw/time.c | aks2161989/pf9 | d738ecdcbbed54ddf6cfebcb3cf76b1a2ac8fadc | [
"LPL-1.02"
] | 17 | 2019-08-08T18:00:37.000Z | 2021-01-07T11:00:03.000Z | src/lib9/mingw/time.c | aks2161989/pf9 | d738ecdcbbed54ddf6cfebcb3cf76b1a2ac8fadc | [
"LPL-1.02"
] | null | null | null | src/lib9/mingw/time.c | aks2161989/pf9 | d738ecdcbbed54ddf6cfebcb3cf76b1a2ac8fadc | [
"LPL-1.02"
] | 5 | 2019-08-08T17:37:49.000Z | 2019-12-13T10:35:48.000Z | #include <u.h>
#include <mingw32.h>
#include <time.h>
#define NOPLAN9DEFINES
#include <libc.h>
/*
* Definition of gettimeofday by Wu Yongwei, taken from
* see http://mywebpage.netscape.com/yongweiwu/timeval.h.txt
* In the public domain.
*/
#define EPOCHFILETIME (116444736000000000LL)
static int
gettimeofday(struct timeval *tv)
{
FILETIME ft;
LARGE_INTEGER li;
__int64 t;
if (tv) {
GetSystemTimeAsFileTime(&ft);
li.LowPart = ft.dwLowDateTime;
li.HighPart = ft.dwHighDateTime;
t = li.QuadPart; /* In 100-nanosecond intervals */
t -= EPOCHFILETIME; /* Offset to the Epoch time */
t /= 10; /* In microseconds */
tv->tv_sec = (long)(t / 1000000);
tv->tv_usec = (long)(t % 1000000);
}
return 0;
}
long
p9times(long *t)
{
/* BUG */
return -1;
}
double
p9cputime(void)
{
long t[4];
double d;
if(p9times(t) < 0)
return -1.0;
d = (double)t[0]+(double)t[1]+(double)t[2]+(double)t[3];
return d/1000.0;
}
vlong
p9nsec(void)
{
struct timeval tv;
if(gettimeofday(&tv) < 0)
return -1;
return (vlong)tv.tv_sec*1000*1000*1000 + tv.tv_usec*1000;
}
long
p9time(long *tt)
{
long t;
t = time(0);
if(tt)
*tt = t;
return t;
}
| 14.506173 | 60 | 0.645106 |
75e46f8926a7f127c69eeb2025144abdd7ce0d36 | 1,147 | php | PHP | backend/views/shows-download-queue/index.php | golden283219/lm-backoffice | 85ffdc37207e7f34d2915c51ed7349dc6b890de8 | [
"BSD-3-Clause"
] | null | null | null | backend/views/shows-download-queue/index.php | golden283219/lm-backoffice | 85ffdc37207e7f34d2915c51ed7349dc6b890de8 | [
"BSD-3-Clause"
] | null | null | null | backend/views/shows-download-queue/index.php | golden283219/lm-backoffice | 85ffdc37207e7f34d2915c51ed7349dc6b890de8 | [
"BSD-3-Clause"
] | null | null | null | <?php
use yii\helpers\Html;
use yii\grid\GridView;
/* @var $this yii\web\View */
/* @var $searchModel common\models\queue\ShowsSearch */
/* @var $dataProvider yii\data\ActiveDataProvider */
$this->title = 'Shows';
$this->params['breadcrumbs'][] = $this->title;
?>
<div class="shows-index">
<?php // echo $this->render('_search', ['model' => $searchModel]); ?>
<p>
<?php echo Html::a('Create Shows', ['create'], ['class' => 'btn btn-success']) ?>
</p>
<?php echo GridView::widget([
'dataProvider' => $dataProvider,
'filterModel' => $searchModel,
'columns' => [
['class' => 'yii\grid\SerialColumn'],
'id_tvshow',
'title',
'first_air_date',
'imdb_id',
'tmdb_id',
// 'tvmaze_id',
// 'total_episodes',
// 'total_seasons',
// 'episode_duration',
// 'in_production',
// 'status',
// 'date_added',
// 'data:ntext',
// 'original_language',
['class' => 'yii\grid\ActionColumn'],
],
]); ?>
</div>
| 24.404255 | 89 | 0.489974 |
7feadbfa81660307abf786f8203e5feb0dbaef98 | 3,640 | go | Go | gaehttpjsonrpc/gaehttpjsonrpcServer.go | ThePiachu/Go-HTTP-JSON-RPC | 8c3d5d9c928b9551d7080863414185352067af16 | [
"BSD-3-Clause"
] | 26 | 2015-03-15T08:43:36.000Z | 2019-11-08T00:12:55.000Z | gaehttpjsonrpc/gaehttpjsonrpcServer.go | ThePiachu/Go-HTTP-JSON-RPC | 8c3d5d9c928b9551d7080863414185352067af16 | [
"BSD-3-Clause"
] | 1 | 2019-05-15T11:45:59.000Z | 2019-05-15T11:45:59.000Z | gaehttpjsonrpc/gaehttpjsonrpcServer.go | ThePiachu/Go-HTTP-JSON-RPC | 8c3d5d9c928b9551d7080863414185352067af16 | [
"BSD-3-Clause"
] | 12 | 2015-04-01T09:25:44.000Z | 2019-05-29T12:15:48.000Z | package gaehttpjsonrpc
// Copyright 2011-2014 ThePiachu. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
import (
"appengine"
"encoding/json"
"github.com/ThePiachu/Go/Log"
"io/ioutil"
"net/http"
)
func init() {
mainMux.m = map[string]func(http.ResponseWriter, *http.Request, map[string]interface{}) map[string]interface{}{}
}
//multiplexer that keeps track of every function to be called on specific rpc call
type ServeMux struct {
m map[string]func(http.ResponseWriter, *http.Request, map[string]interface{}) map[string]interface{}
defaultFunction func(http.ResponseWriter, *http.Request)
}
//an instance of the multiplexer
var mainMux ServeMux
//a function to register functions to be called for specific rpc calls
func HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request, map[string]interface{}) map[string]interface{}) {
mainMux.m[pattern] = handler
}
//a function to be called if the request is not a HTTP JSON RPC call
func SetDefaultFunc(def func(http.ResponseWriter, *http.Request)) {
mainMux.defaultFunction = def
}
//this is the funciton that should be called in order to answer an rpc call
//should be registered like "http.HandleFunc("/", httpjsonrpc.Handle)"
func Handle(w http.ResponseWriter, r *http.Request) {
//read the body of the request
c := appengine.NewContext(r)
Log.Debugf(c, "HTTP JSON RPC Handle - Request - %v", r)
//JSON RPC commands should be POSTs
if r.Method != "POST" {
if mainMux.defaultFunction != nil {
Log.Debugf(c, "HTTP JSON RPC Handle - Method!=\"POST\"")
mainMux.defaultFunction(w, r)
return
} else {
Log.Warningf(c, "HTTP JSON RPC Handle - Method!=\"POST\"")
return
}
}
//We must check if there is Request Body to read
if r.Body == nil {
if mainMux.defaultFunction != nil {
Log.Debugf(c, "HTTP JSON RPC Handle - Request body is nil")
mainMux.defaultFunction(w, r)
return
} else {
Log.Warningf(c, "HTTP JSON RPC Handle - Request body is nil")
return
}
}
body, err := ioutil.ReadAll(r.Body)
Log.Debugf(c, "Body - %s", body)
if err != nil {
Log.Errorf(c, "ioutil.ReadAll: %v", err)
return
}
request := make(map[string]interface{})
//unmarshal the request
err = json.Unmarshal(body, &request)
if err != nil {
Log.Warningf(c, "HTTP JSON RPC Handle - json.Unmarshal: %v", err)
return
}
//log.Println(request["method"])
//get the corresponding function
function, ok := mainMux.m[request["method"].(string)]
if ok { //if the function exists, it is called
response := function(w, r, request)
//response from the program is encoded
data, err := json.Marshal(response)
if err != nil {
Log.Errorf(c, "HTTP JSON RPC Handle - json.Marshal: %v", err)
return
}
//result is printed to the output
w.Write(data)
} else { //if the function does not exist
Log.Warningf(c, "HTTP JSON RPC Handle - No function to call for", request["method"])
/*
//if you don't want to send an error, send something else:
data, err := json.Marshal(map[string]interface{}{
"result": "OK!",
"error": nil,
"id": request["id"],
})*/
//an error json is created
data, err := json.Marshal(map[string]interface{}{
"result": nil,
"error": map[string]interface{}{
"code": -32601,
"message": "Method not found",
"data": "The called method was not found on the server",
},
"id": request["id"],
})
if err != nil {
Log.Errorf(c, "HTTP JSON RPC Handle - json.Marshal: %v", err)
return
}
//it is printed
w.Write(data)
}
}
| 29.354839 | 130 | 0.671703 |
746803f23bb46cb207ebbe480b3affd98259db0a | 117,871 | h | C | thirdparty/moab/itaps/imesh/iMeshP.h | yumin/SMTK | d280f10c5b70953b2a0196f71832955c7fc75e7f | [
"BSD-3-Clause-Clear"
] | null | null | null | thirdparty/moab/itaps/imesh/iMeshP.h | yumin/SMTK | d280f10c5b70953b2a0196f71832955c7fc75e7f | [
"BSD-3-Clause-Clear"
] | 4 | 2016-11-10T15:49:51.000Z | 2017-02-06T23:24:16.000Z | thirdparty/moab/itaps/imesh/iMeshP.h | yumin/SMTK | d280f10c5b70953b2a0196f71832955c7fc75e7f | [
"BSD-3-Clause-Clear"
] | null | null | null |
#ifndef _ITAPS_iMeshP
#define _ITAPS_iMeshP
#include "imesh_export.h"
#include "iMesh.h"
#include "iMeshP_protos.h"
#include "moab_mpi.h"
#ifdef __cplusplus
extern "C" {
#endif
/** Handles needed in iMeshP */
typedef struct iMeshP_PartitionHandle_Private* iMeshP_PartitionHandle;
typedef struct iMeshP_RequestHandle_Private* iMeshP_RequestHandle;
/* Since we allow overloading of iMesh functions' entity set handles with
* part handles, iMeshP_PartHandle must be defined the same as
* iBase_EntitySetHandle. */
typedef iBase_EntitySetHandle iMeshP_PartHandle;
typedef unsigned iMeshP_Part;
/** Types for classifying entities within a part. */
enum iMeshP_EntStatus
{
iMeshP_INTERNAL, /**< An owned entity that is not on a part boundary. */
iMeshP_BOUNDARY, /**< A shared entity on a part boundary. */
iMeshP_GHOST /**< An entity copy that is not a shared boundary entity. */
};
/** Part ID number indicating information should be returned about all parts. */
#define iMeshP_ALL_PARTS -1
/** \page imeshp iMeshP: ITAPS Parallel Mesh Interface
iMeshP.h -- ITAPS Parallel Mesh Interface
Release 0.1; October 2008
\section ADM Abstract Data Model
- The term "mesh" refers to an abstraction in the data model;
it does not imply a serial or parallel distribution.
- The term "partition" refers to an assignment of a set of entities to
subsets; like a "mesh," it does not imply a serial or parallel
implementation.
- An application may use one or more meshes.
- Partitions can create subsets of entities from one or more meshes.
- Meshes can be subdivided by one or more partitions.
- Partitions contain parts. Parts contain the subsets of entities in the
partition.
\section PAR Parallelism
- A "process" can be thought of as an MPI process. The
number of processes can be considered to be the result of MPI_Comm_size.
The rank of a process can be thought of as the result of MPI_Comm_rank.
We will think in terms of processes rather than processors. Initial
implementations of the parallel interface will likely use MPI terminology
directly; future implementations may accommodate other communication
paradigms and libraries.
- Partitions have communicators associated with them. These communicators
can be thought of as MPI communicators.
- "Global" operations are operations performed with respect to a
partition's communicator.
- "Local" operations are operations performed with respect to a part or
a mesh instance within a process.
- Part A "neighbors" Part B if Part A has copies of entities owned by Part B
and/or if Part B has copies of entities owned by Part A.
\section INT Interfaces
- Each process has one or more "mesh instances." A mesh instance can be
thought of as a mesh database. An implementation should support the
existence of more than one mesh instance per process (e.g., it should
always associate mesh data with a mesh instance). However, we expect
applications would most often use only one mesh instance per process.
- There is one root set per mesh instance.
- Each process may have one or more partition handles.
- A partition assigns entities from one mesh instance to parts.
- Entities in a mesh instance can be partitioned by one or more partitions.
Mesh instances know which partitions they contain.
- Parts are uniquely identified globally by part IDs of type iMeshP_Part.
Local parts can also be accessed by part handles that provide more
direct access to a part.
Functions accepting part handles operate correctly on only local
parts (parts on the calling process); they will return an error
for remote (off-process) parts.
- Generation and management of global IDs for entities
is not included in the iMeshP interface. It can
be provided as a service above the iMeshP interface.
Uniqueness of global IDs is managed at the partition level.
\section PRT Using Parts
- Each part is wholly contained within a process.
- A process may have zero, one or multiple parts.
- For each entity that is copied onto remote parts, the owning part knows
both the remote part ID and remote entity handle of all copies.
- All parts with copies of a boundary entity know the remote part ID
and remote entity handle of all copies of the entity.
- All parts with copies of any entity know the part ID and
entity handle corresponding to the owner of the entity.
- Functions that return entity information for a part, set or mesh
instance return the information for all entities (including copies and
ghosts) in that part, set or mesh instance. Applications can check
whether an entity is owned or a ghost using iMeshP_isEntOwner or
iMeshP_getEntStatus.
- Many iMesh functions that accept an iBase_EntitySetHandle
are also useful in the context of a iMeshP_PartHandle.
These functions are reinterpreted so that they can accept either an
iBase_EntitySetHandle or an iMeshP_PartHandle.
- In particular, entities are added to and removed from local parts via
the same functions that are used to manipulate entity sets.
That is, given a mesh instance, an entity handle, and a part handle,
the entity is added to or removed from the part via calls to
the following functions with the part handle passed as the entity set handle:
- Add entity to part --> iMesh_addEntToSet
- Remove entity from part --> iMesh_rmvEntFromSet
- Add array of entities to part --> iMesh_addEntArrToSet
- Remove array of entities from part --> iMesh_rmvEntArrFromSet
\section CMM Communication
- Each function description includes its communication requirements. The
options are described here:
- COMMUNICATION: Collective -- the function must be called by all
processes in the partition's communicator. (These functions have the
suffix "All" to indicate collective communication is done.)
- COMMUNICATION: Point-to-Point -- communication is used, but the
communication is from one process to only one other process. The
receiving process must issue an appropriate receive call to receive
the message.
- COMMUNICATION: None -- the function does not use communication; only
local operations are performed.
- COMMUNICATION: None++ -- no communication is done; the values
are precomputed by iMeshP_syncPartitionAll or iMeshP_syncMeshAll.
- Non-blocking calls for off-processor mesh-modification return a request
that indicates whether or not the operation has completed. The request
is more than an MPI request; it encapsulates both the MPI information and
the mesh operations that were requested. If non-blocking calls are used,
appropriate calls to iMeshP "wait" or "poll" functions must be used to
handle and satisfy requests.
*/
/*------------------------------------------------------------------------*/
/*------------------------------------------------------------------------*/
/* Partition Functionality */
/*------------------------------------------------------------------------*/
/*------------------------------------------------------------------------*/
/** \brief Create a partition; return its handle.
*
* Given a mesh instance and a communicator,
* return a partition handle for a new partition within the mesh instance
* that uses the communicator.
* In the future, we may have different creation routines for different
* communication systems; once the partition is created, the application
* would not have to worry about the communication system again.
* For now, implementations are MPI based, so MPI communicators are provided.
* For serial use, the communicator may be MPI_COMM_SELF or communicator may
* be NULL.
*
* COMMUNICATION: Collective.
*
* \param instance (In) Mesh instance to contain the partition.
* \param communicator (In) Communicator to be used for parallel
* communication.
* \param partition (Out) The newly created partition.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_createPartitionAll(
iMesh_Instance instance,
MPI_Comm communicator,
iMeshP_PartitionHandle *partition,
int *err);
/** \brief Destroy a partition.
*
* Given a partition handle,
* destroy the partition associated with the handle.
* Note that the partition handle is not invalidated upon return.
*
* COMMUNICATION: Collective.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition to be destroyed.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_destroyPartitionAll(
iMesh_Instance instance,
iMeshP_PartitionHandle partition,
int *err);
/** \brief Return communicator associated with a partition.
*
* Given a partition handle, return the communicator associated with
* it during its creation by iMeshP_createPartitionAll.
*
* COMMUNICATION: None
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param communicator (Out) Communicator associated with the partition.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getPartitionComm(
iMesh_Instance instance,
iMeshP_PartitionHandle partition,
MPI_Comm *communicator,
int *err);
/** \brief Update a partition after parts have been added.
*
* This function gives the implementation an opportunity to locally store info
* about the partition so that queries on the partition can be
* performed without synchronous communication.
* This function must be called after all parts have been added to the
* partition and after changes to the partition (e.g., due to load balancing).
* Values that are precomputed by syncPartitionAll include:
* - the total number of parts in a partition;
* - the mapping between part IDs and processes; and
* - updated remote entity handle information.
*
* COMMUNICATION: Collective.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being updated.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_syncPartitionAll(
iMesh_Instance instance,
iMeshP_PartitionHandle partition,
int *err);
/** \brief Return the number of partitions associated with a mesh instance.
*
* Given a mesh instance, return the number of partition handles
* associated with the mesh instance.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the partitions.
* \param num_partitions (Out) Number of partitions associated with the
* mesh instance.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getNumPartitions(
iMesh_Instance instance,
int *num_partitions,
int *err);
/** \brief Return the partition handles associated with a mesh instance.
*
* Given a mesh instance, return all partition handles
* associated with the mesh instance.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the
* partitions.
* \param partitions (In/Out) Array of partition handles
* associated with the mesh
* instance.
* \param partitions_allocated (In/Out) Allocated size of
* partitions array.
* \param partitions_size (Out) Occupied size of
* partitions array.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getPartitions(
iMesh_Instance instance,
iMeshP_PartitionHandle **partitions,
int *partitions_allocated,
int *partitions_size,
int *err);
/** \brief Return the global number of parts in a partition.
*
* Given a partition handle, return the total number of parts
* in the partition across all processes in the partition's communicator.
*
* COMMUNICATION: None++.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param num_global_part (Out) Global number of parts in the partition.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getNumGlobalParts(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
int *num_global_part,
int *err);
/** \brief Return the local number of parts in a partition.
*
* Given a partition handle, return the number of local (on-process) parts
* in the partition.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param num_local_part (Out) Local (on-process) number of parts in
* the partition.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getNumLocalParts(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
int *num_local_part,
int *err);
/** \brief Return the part handles of local parts in a partition.
*
* Given a partition handle, return the
* part handles for the local (on-process) parts in the partition.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param parts (In/Out) Array of part handles
* for local parts in the partition.
* \param parts_allocated (In/Out) Allocated size of
* parts array.
* \param parts_size (Out) Occupied size of
* parts array.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getLocalParts(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
iMeshP_PartHandle **parts,
int *parts_allocated,
int *parts_size,
int *err);
/** \brief Return the process rank of a given part.
*
* Given a partition handle and a part ID, return the process rank
* (with respect to the partition's communicator) of the
* process that owns the part. The part may be local or remote.
*
* COMMUNICATION: None++.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param part_id (In) Part ID for the part being queried.
* \param rank (Out) Process rank of part_id.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getRankOfPart(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_Part part_id,
int *rank,
int *err);
/** \brief Return the process ranks of given parts.
*
* Given a partition handle and an array of part IDs, return the process ranks
* (with respect to the partition's communicator) of the
* process that owns each part. The parts may be local or remote.
*
* COMMUNICATION: None++.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param part_ids (In) Array of Part IDs for the parts being
* queried.
* \param part_ids_size (In) The number of Part IDs in part_ids.
* \param ranks (In/Out) Array of ranks for the Part Ids in
* part_ids.
* \param ranks_allocated (In/Out) Allocated size of ranks array.
* \param ranks_size (Out) Occupied size of ranks array.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getRankOfPartArr(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_Part *part_ids,
const int part_ids_size,
int **ranks,
int *ranks_allocated,
int *ranks_size,
int *err);
/** \brief Return the number of entities of a given type in a partition.
*
* Given a partition handle and an entity set (possibly the root set),
* return the global number of entities of a
* given entity type in the partition and set. This function may require
* communication and, thus, must be called by all processes in the partition's
* communicator.
*
* COMMUNICATION: Collective.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param entity_set (In) Entity set handle for the entity set
* being queried.
* \param entity_type (In) Requested entity type;
* may be iBase_ALL_TYPES.
* \param num_type (Out) Number of entities of entity_type in
* the partition and entity set.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getNumOfTypeAll(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iBase_EntitySetHandle entity_set,
int entity_type,
int *num_type,
int *err);
/** \brief Return the number of entities of a given topology in a partition.
*
* Given a partition handle and an entity set (possibly the root set),
* return the global number of entities of a
* given entity topology in the partition and set. This function may require
* communication and, thus, must be called by all processes in the partition's
* communicator.
*
* COMMUNICATION: Collective.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param entity_set (In) Entity set handle for the entity set
* being queried; may be the root set.
* \param entity_topology (In) Requested entity topology;
* may be iMesh_ALL_TOPOLOGIES.
* \param num_topo (Out) Number of entities with entity_topology in
* the partition and entity set.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getNumOfTopoAll(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iBase_EntitySetHandle entity_set,
int entity_topology,
int *num_topo,
int *err);
/*------------------------------------------------------------------------*/
/*------------------------------------------------------------------------*/
/* Part Functionality */
/*------------------------------------------------------------------------*/
/*------------------------------------------------------------------------*/
/** \brief Create a new part in a partition.
*
* Given a partition handle, create a new part and add it to the
* partition on the process invoking the creation. Return the part handle
* for the new part.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being updated.
* \param part (Out) The newly created part.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_createPart(
iMesh_Instance instance,
iMeshP_PartitionHandle partition,
iMeshP_PartHandle *part,
int *err);
/** \brief Remove a part from a partition.
*
* Given a partition handle and a part handle, remove the part
* from the partition and destroy the part. Note that the part handle
* is not invalidated by this function.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being updated.
* \param part (In) The part to be removed.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_destroyPart(
iMesh_Instance instance,
iMeshP_PartitionHandle partition,
iMeshP_PartHandle part,
int *err);
/** \brief Obtain a part ID from a part handle.
*
* Given a partition handle and a local part handle, return the part ID.
* If the part handle is not a valid part handle for a local part,
* an error is returned.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param part (In) The part being queried.
* \param part_id (Out) Part ID for part.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getPartIdFromPartHandle(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_PartHandle part,
iMeshP_Part *part_id,
int *err);
/** \brief Obtain part IDs from part handles.
*
* Given a partition handle and an array of local part handles,
* return the part ID for each part handle.
* If any part handle is not a valid part handle for a local part,
* an error is returned.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param parts (In) Array of part handles for the parts
* being queried.
* \param parts_size (In) Number of part handles being queried.
* \param part_ids (In/Out) Array of part IDs associated with the
* parts.
* \param part_ids_allocated (In/Out) Allocated size of part_ids array.
* \param part_ids_size (Out) Occupied size of part_ids array.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getPartIdsFromPartHandlesArr(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_PartHandle *parts,
const int parts_size,
iMeshP_Part **part_ids,
int *part_ids_allocated,
int *part_ids_size,
int *err);
/** \brief Obtain a part handle from a part ID.
*
* Given a partition handle and a part ID, return the part handle
* associated with the part
* if the part is local; otherwise, return an error code.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param part_id (In) Part ID for the part being queried.
* \param part (Out) Part handle associated with part_id.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getPartHandleFromPartId(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
iMeshP_Part part_id,
iMeshP_PartHandle *part,
int *err);
/** \brief Obtain part handles from part IDs.
*
* Given a partition handle and an array of local part IDs,
* return the part handle for each part ID.
* If any part ID is not a valid part ID for a local part,
* an error is returned.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param part_ids (In) Array of part IDs for the parts
* being queried.
* \param part_ids_size (In) Number of part IDs being queried.
* \param parts (In/Out) Array of part handles associated
* with the part_ids.
* \param parts_allocated (In/Out) Allocated size of parts
* array.
* \param parts_size (Out) Occupied size of parts
* array.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getPartHandlesFromPartsIdsArr(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_Part *part_ids,
const int part_ids_size,
iMeshP_PartHandle **parts,
int *parts_allocated,
int *parts_size,
int *err);
/*------------------------------------------------------------------------*/
/* Part Boundaries */
/*------------------------------------------------------------------------*/
/** \brief Return the number of parts that neighbor a given part.
*
* Given a partition handle, a part handle, and an entity type,
* return the number of parts in the partition that neighbor the given part
* (i.e., that (1) have copies of entities of the given entity type owned by
* the given part or (2) own entities of the given entity type that are
* copied on the given part).
*
* COMMUNICATION: None++.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param part (In) The part being queried.
* \param entity_type (In) Entity type of the copied entities;
* may be iBase_ALL_TYPES.
* \param num_part_nbors (Out) Number of parts neighboring the given part.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getNumPartNbors(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_PartHandle part,
int entity_type,
int *num_part_nbors,
int *err);
/** \brief Return the number of parts that neighbor given parts.
*
* Given a partition handle, an array of part handles, and an entity type,
* return the number of parts in the partition that neighbor each of the
* given parts
* (i.e., that (1) have copies of entities of the given entity type owned by
* the given part or (2) own entities of the given entity type that are
* copied on the given part).
*
* COMMUNICATION: None++.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param parts (In) Array of part handles for the
* parts being queried.
* \param parts_size (In) Number of part handles in
* parts.
* \param entity_type (In) Entity type of the copied
* entities;
* may be iBase_ALL_TYPES.
* \param num_part_nbors (In/Out) Array of values specifying the
* number of part neighbors for
* each part in parts.
* \param num_part_nbors_allocated (In/Out) Allocated size of num_part_nbors
* array.
* \param num_part_nbors_size (Out) Occupied size of num_part_nbors
* array.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getNumPartNborsArr(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_PartHandle *parts,
int parts_size,
int entity_type,
int **num_part_nbors,
int *num_part_nbors_allocated,
int *num_part_nbors_size,
int *err);
/** \brief Return the parts that neighbor a given part.
*
* Given a partition handle, a part handle, and an entity type,
* return the part IDs of parts that neighbor the given part
* (i.e., that (1) have copies of entities of the given entity type owned by
* the given part or (2) own entities of the given entity type that are
* copied on the given part).
*
* COMMUNICATION: None++.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param part (In) The part being queried.
* \param entity_type (In) Entity type of the copied
* entities;
* may be iBase_ALL_TYPES.
* \param num_part_nbors (Out) Number of parts neighboring
* the given part.
* \param nbor_part_ids (In/Out) Array of part IDs for
* part neighbors of part.
* \param nbor_part_ids_allocated (In/Out) Allocated size of nbor_part_ids
* array.
* \param nbor_part_ids_size (Out) Occupied size of nbor_part_ids
* array.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getPartNbors(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_PartHandle part,
int entity_type,
int *num_part_nbors,
iMeshP_Part **nbor_part_ids,
int *nbor_part_ids_allocated,
int *nbor_part_ids_size,
int *err);
/** \brief Return the parts that neighbor given parts.
*
* Given a partition handle, an array of part handles, and an entity type,
* return the part IDs of parts that neighbor the given parts
* (i.e., that (1) have copies of entities of the given entity type owned by
* the given part or (2) own entities of the given entity type that are
* copied on the given part).
*
* COMMUNICATION: None++.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param parts (In) The parts being queried.
* \param parts_size (In) The number of parts being queried.
* \param entity_type (In) Entity type of the copied
* entities;
* may be iBase_ALL_TYPES.
* \param num_part_nbors (In/Out) Array of values specifying the
* number of part neighbors for
* each part in parts.
* \param num_part_nbors_allocated (In/Out) Allocated size of num_part_nbors
* array.
* \param num_part_nbors_size (Out) Occupied size of num_part_nbors
* array.
* \param nbor_part_ids (In/Out) Array of part IDs for
* part neighbors of part.
* \param nbor_part_ids_allocated (In/Out) Allocated size of nbor_part_ids
* array.
* \param nbor_part_ids_size (Out) Occupied size of nbor_part_ids
* array.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getPartNborsArr(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_PartHandle *parts,
const int parts_size,
int entity_type,
int **num_part_nbors,
int *num_part_nbors_allocated,
int *num_part_nbors_size,
iMeshP_Part **nbor_part_ids,
int *nbor_part_ids_allocated,
int *nbor_part_ids_size,
int *err);
/** \brief Return the number of entities on a part boundary.
*
* Given a partition handle, a part handle, an entity type and topology, and a
* target part ID, return the number of entities of the given type and/or
* topology on the part boundary shared with the target part.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param part (In) The part being queried.
* \param entity_type (In) Entity type of the boundary entities;
* may be iBase_ALL_TYPES.
* \param entity_topology (In) Entity topology of the boundary entities;
* may be iMesh_ALL_TOPOLOGIES.
* \param target_part_id (In) Part ID with which part is sharing
* the boundary entities; may be
* iMeshP_ALL_PARTS.
* \param num_entities (Out) Number of part boundary entities shared
* by part and target_part_id.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getNumPartBdryEnts(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_PartHandle part,
int entity_type,
int entity_topology,
iMeshP_Part target_part_id,
int *num_entities,
int *err);
/** \brief Return the entity handles of entities on a part boundary.
*
* Given a partition handle, a part handle, an entity type and topology, and a
* target part ID, return the entity handles of entities of the given type
* and/or topology on the part boundary shared with the target part.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param part (In) The part being queried.
* \param entity_type (In) Entity type of the boundary
* entities;
* may be iBase_ALL_TYPES.
* \param entity_topology (In) Entity topology of the boundary
* entities;
* may be iMesh_ALL_TOPOLOGIES.
* \param target_part_id (In) Part ID with which part
* is sharing the boundary entities;
* may be iMeshP_ALL_PARTS.
* \param entities (In/Out) Array of entity handles for
* entities on the part boundary
* between part and
* target_part_id.
* \param entities_allocated (In/Out) Allocated size of entities
* array.
* \param entities_size (Out) Occupied size of entities
* array.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getPartBdryEnts(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_PartHandle part,
int entity_type,
int entity_topology,
iMeshP_Part target_part_id,
iBase_EntityHandle **entities,
int *entities_allocated,
int *entities_size,
int *err);
/** \brief Initialize an iterator over a specified part boundary.
*
* Given a partition handle, a part handle, and a
* target part ID, return an iterator over all entities of a given
* entity type and topology along
* the part boundary shared with the target part.
* Iterator functionality for getNext, reset, and end is
* provided through the regular iMesh iterator functions
* iMesh_getNextEntIter, iMesh_resetEntIter, and iMesh_endEntIter,
* respectively.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param part (In) The part being queried.
* \param entity_type (In) Entity type of the boundary entities;
* may be iBase_ALL_TYPES.
* \param entity_topology (In) Entity topology of the boundary entities;
* may be iMesh_ALL_TOPOLOGIES.
* \param target_part_id (In) Part ID with which part is sharing
* the boundary entities; may be
* iMeshP_ALL_PARTS.
* \param entity_iterator (Out) Iterator returned by the function.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_initPartBdryEntIter(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_PartHandle part,
int entity_type,
int entity_topology,
iMeshP_Part target_part_id,
iBase_EntityIterator* entity_iterator,
int *err);
/** \brief Initialize an array iterator over a specified part boundary.
*
* Given a partition handle, a part handle, and a
* target part ID, return an array iterator over all entities of a given
* entity type and topology along
* the part boundary shared with the target part.
* Iterator functionality for getNext, reset, and end is
* provided through the regular iMesh iterator functions
* iMesh_getNextEntArrIter, iMesh_resetEntArrIter, and iMesh_endEntArrIter,
* respectively.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param part (In) The part being queried.
* \param entity_type (In) Entity type of the boundary entities;
* may be iBase_ALL_TYPES.
* \param entity_topology (In) Entity topology of the boundary entities;
* may be iMesh_ALL_TOPOLOGIES.
* \param array_size (In) Size of chunks of handles returned for
* each value of the iterator.
* \param target_part_id (In) Part ID with which part is sharing
* the boundary entities; may be
* iMeshP_ALL_PARTS.
* \param entity_iterator (Out) Iterator returned by the function.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_initPartBdryEntArrIter(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_PartHandle part,
int entity_type,
int entity_topology,
int array_size,
iMeshP_Part target_part_id,
iBase_EntityArrIterator* entity_iterator,
int *err);
/*------------------------------------------------------------------------*/
/* Parts and Sets */
/*------------------------------------------------------------------------*/
/** \brief Return the number of entities of a given type in both a part and an entity set.
*
* Given a part handle, an entity set handle, and an entity type, return
* the number of entities of the given type that are in BOTH the given
* part AND the given entity set.
* This function is similar to iMesh_getNumOfType, but it also restricts
* the returned data with respect to its existence in the given part.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param part (In) The part being queried.
* \param entity_set (In) Entity set handle for the entity set
* being queried; may be the root set.
* \param entity_type (In) Entity type of the boundary entities;
* may be iBase_ALL_TYPES.
* \param num_type (Out) Number of entities of entity_type in
* both part and entity_set.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getNumOfType(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_PartHandle part,
const iBase_EntitySetHandle entity_set,
int entity_type,
int *num_type,
int *err);
/** \brief Return the number of entities of a given topology in both a part and an entity set.
*
* Given a part handle, an entity set handle, and an entity topology, return
* the number of entities of the given topology that are in BOTH the given
* part AND the given entity set.
* This function is similar to iMesh_getNumOfTopo, but it also restricts
* the returned data with respect to its existence in the given part.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param part (In) The part being queried.
* \param entity_set (In) Entity set handle for the entity set
* being queried; may be the root set.
* \param entity_topology (In) Entity topology of the boundary entities;
* may be iMesh_ALL_TOPOLOGIES.
* \param num_topo (Out) Number of entities of entity_topology in
* both part and entity_set.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getNumOfTopo(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_PartHandle part,
const iBase_EntitySetHandle entity_set,
int entity_topology,
int *num_topo,
int *err);
/**\brief Get indexed representation of mesh or subset of mesh
*
 * Given a part handle, an entity set, and optionally a type or topology,
* for all entities that are in BOTH the part and the entity set, return:
* - The entities in the part and set of the specified type or topology
* - The entities adjacent to those entities with a specified
* type, as a list of unique handles.
* - For each entity in the first list, the adjacent entities,
* specified as indices into the second list.
*
* COMMUNICATION: None.
*
*\param instance (In) Mesh instance containing the
* partition.
*\param partition (In) The partition being queried.
*\param part (In) The part being queried.
*\param entity_set_handle (In) The set being queried
*\param entity_type_requestor (In) If not iBase_ALL_TYPES, act only
* on the subset of entities with
* the specified type.
*\param entity_topology_requestor (In) If not iMesh_ALL_TOPOLOGIES, act
* only on the subset of entities with
* the specified topology.
*\param entity_type_requested (In) The type of the adjacent entities
* to return.
 *\param entity_handles            (In/Out) The handles of the (non-strict)
 *                                          subset of the intersection of the
 *                                          part and entity set, filtered by
 *                                          the optional type and topology
 *                                          arguments.
 *\param adj_entity_handles        (In/Out) The union of the entities of type
 *                                          'entity_type_requested' adjacent
 *                                          to each entity in 'entity_handles'.
*\param adj_entity_indices (In/Out) For each entity in 'entity_handles',
* the adjacent entities of type
* 'entity_type_requested', specified as
* indices into 'adj_entity_handles'.
* The indices are concatenated into a
* single array in the order of the
* entity handles in 'entity_handles'.
*\param offset (In/Out) For each entity in the
* corresponding position in
* 'entity_handles', the position
* in 'adj_entity_indices' at which
* values for that entity are stored.
*/
IMESH_EXPORT
void iMeshP_getAdjEntIndices(iMesh_Instance instance,
iMeshP_PartitionHandle partition,
iMeshP_PartHandle part,
iBase_EntitySetHandle entity_set_handle,
int entity_type_requestor,
int entity_topology_requestor,
int entity_type_requested,
iBase_EntityHandle** entity_handles,
int* entity_handles_allocated,
int* entity_handles_size,
iBase_EntityHandle** adj_entity_handles,
int* adj_entity_handles_allocated,
int* adj_entity_handles_size,
int** adj_entity_indices,
int* adj_entity_indices_allocated,
int* adj_entity_indices_size,
int** offset,
int* offset_allocated,
int* offset_size,
int *err);
 /** \brief Return entities in both a given part and entity set.
*
* Given an entity set handle
* and a part handle, return entity handles for entities
* that are in both the part and the entity set.
* This function is similar to iMesh_getEntities, but it also restricts
* the returned data with respect to its existence in the given part.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param part (In) The part being queried.
* \param entity_set (In) Entity set handle for the
* entity set being queried;
* may be the root set.
* \param entity_type (In) Entity type of the
* entities;
* may be iBase_ALL_TYPES.
* \param entity_topology (In) Entity topology of the
* entities;
* may be iMesh_ALL_TOPOLOGIES.
* \param entities (In/Out) Array of entity handles for
* entities in both part
* and entity_set.
* \param entities_allocated (In/Out) Allocated size of entities.
* \param entities_size (Out) Occupied size of entities.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getEntities(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_PartHandle part,
const iBase_EntitySetHandle entity_set,
int entity_type,
int entity_topology,
iBase_EntityHandle **entities,
int *entities_allocated,
int *entities_size,
int *err);
/** \brief Return entities adjacent to entities in a given part and entity set.
*
* Given an entity set handle
* and a part handle, return entities adjacent (with respect to a given
* entity type and/or topology) to entities
* that are in both the part and the entity set.
* This function is similar to iMesh_getAdjEntities, but it also restricts
* the returned data with respect to its existence in the given part.
* If a non-root entity set is specified, the function also returns
* flags indicating whether each adjacent entity
* is in the entity set; (*in_entity_set)[i]=1 indicates that adjacent entity
* (*adj_entities)[i] is in the specified entity set.
* Array entry offset[i] stores the index of first adjacent entity to
* entity i.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param part (In) The part being queried.
* \param entity_set (In) Entity set handle for the
* entity set being queried;
* may be the root set.
* \param entity_type_requestor (In) Return entities adjacent to
* entities of this type;
* may be iBase_ALL_TYPES.
* \param entity_topology_requestor (In) Return entities adjacent to
* entities of this topology;
* may be iMesh_ALL_TOPOLOGIES.
* \param entity_type_requested (In) Return adjacent entities of
* this type;
* may be iBase_ALL_TYPES.
* \param adj_entities (In/Out) Array of adjacent entity
* handles returned.
* \param adj_entities_allocated (In/Out) Allocated size of
* adj_entities.
* \param adj_entities_size (Out) Occupied size of
* adj_entities.
* \param offset (In/Out) Array of offsets returned.
* \param offset_allocated (In/Out) Allocated size of offset.
* \param offset_size (Out) Occupied size of offset.
* \param in_entity_set (In/Out) Array of flags returned if
* non-root entity set was input;
* (*in_entity_set)[i]=1
* indicates
* (*adj_entities)[i]
* is in the entity set.
* \param in_entity_set_allocated (In/Out) Allocated size of
* in_entity_set.
* \param in_entity_set_size (Out) Occupied size of
* in_entity_set.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getAdjEntities(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_PartHandle part,
const iBase_EntitySetHandle entity_set,
int entity_type_requestor,
int entity_topology_requestor,
int entity_type_requested,
iBase_EntityHandle **adj_entities,
int *adj_entities_allocated,
int *adj_entities_size,
int **offset,
int *offset_allocated,
int *offset_size,
int **in_entity_set,
int *in_entity_set_allocated,
int *in_entity_set_size,
int *err);
/** \brief Create an entity iterator for a given part and entity set.
* Given a local part and an entity set, return an iterator over the
* entities of the requested type and topology that are in both the
* part and the entity set.
* Iterator functionality for getNext, reset, and end is
* provided through the regular iMesh iterator functions
* iMesh_getNextEntIter, iMesh_resetEntIter, and iMesh_endEntIter,
* respectively.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param part (In) The part being queried.
* \param entity_set (In) Entity set handle for the
* entity set being queried.
* \param requested_entity_type (In) Type of entities to include in
* the iterator.
* \param requested_entity_topology (In) Topology of entities to include
* in the iterator.
* \param entity_iterator (Out) Iterator returned from function.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_initEntIter(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_PartHandle part,
const iBase_EntitySetHandle entity_set,
const int requested_entity_type,
const int requested_entity_topology,
iBase_EntityIterator* entity_iterator,
int *err);
/** \brief Create an entity array iterator for a given part and entity set.
* Given a local part and an entity set, return an array iterator over the
* entities of the requested type and topology that are in both the
* part and the entity set.
* Iterator functionality for getNext, reset, and end is
* provided through the regular iMesh iterator functions
* iMesh_getNextEntArrIter, iMesh_resetEntArrIter, and iMesh_endEntArrIter,
* respectively.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param part (In) The part being queried.
* \param entity_set (In) Entity set handle for the
* entity set being queried.
* \param requested_entity_type (In) Type of entities to include in
* the iterator.
* \param requested_entity_topology (In) Topology of entities to include
* in the iterator.
* \param requested_array_size (In) The number of handles returned
* in each value of the iterator.
* \param entArr_iterator (Out) Iterator returned from function.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_initEntArrIter(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_PartHandle part,
const iBase_EntitySetHandle entity_set,
const int requested_entity_type,
const int requested_entity_topology,
const int requested_array_size,
iBase_EntityArrIterator* entArr_iterator,
int *err);
/*------------------------------------------------------------------------*/
/*------------------------------------------------------------------------*/
/* Entity Functionality */
/*------------------------------------------------------------------------*/
/*------------------------------------------------------------------------*/
/** \brief Return the part ID of the part owning an entity.
*
* Given an entity handle and a partition handle, return the part ID
* of the part that owns the entity.
* Return an error code if an entity is not in the partition.
*
* COMMUNICATION: None++.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param entity (In) Entity whose owning part is to be
* returned.
* \param part_id (Out) Part ID of the part owning
* the entity.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getEntOwnerPart(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iBase_EntityHandle entity,
iMeshP_Part *part_id,
int *err);
/** \brief Return the part IDs of the parts owning the given entities.
*
* Given an array of entity handles and a partition handle, return for each
* entity handle the part ID of the part that owns the entity.
* Return an error code if an entity is not in the partition.
*
* COMMUNICATION: None++.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
 * \param entities               (In)     Entities whose owning parts are to
 *                                        be returned.
* \param entities_size (In) Number of entities in
* entities array.
* \param part_ids (Out) Part IDs of the parts owning
* the entities.
* \param part_ids_allocated (In/Out) Allocated size of part_ids array.
* \param part_ids_size (Out) Occupied size of part_ids array.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getEntOwnerPartArr(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iBase_EntityHandle *entities,
const int entities_size,
iMeshP_Part **part_ids,
int *part_ids_allocated,
int *part_ids_size,
int *err);
/** \brief Test for entity ownership with respect to a part.
*
* Given a partition handle, a part handle, and an entity handle, return a
* flag indicating whether the entity is owned by the part.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param part (In) The part being queried.
* \param entity (In) Entity whose ownership is being tested.
* \param is_owner (Out) Flag indicating whether the given part
* is the owner of the given entity.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_isEntOwner(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_PartHandle part,
const iBase_EntityHandle entity,
int *is_owner,
int *err);
/** \brief Test for entity ownership of many entities with respect to a part.
*
* Given a partition handle, a part handle, and an array of entity handles,
* return for each entity handle a flag indicating whether the entity
* is owned by the part.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param part (In) The part being queried.
* \param entities (In) Entities whose ownership is
* being tested.
* \param entities_size (In) Number of entity handles in
* entities.
* \param is_owner (Out) Flag for each entity indicating
* whether the given part is the
* owner of the given entity.
* \param is_owner_allocated (In/Out) Allocated size of is_owner array.
* \param is_owner_size (Out) Occupied size of is_owner array.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_isEntOwnerArr(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_PartHandle part,
const iBase_EntityHandle *entities,
const int entities_size,
int **is_owner,
int *is_owner_allocated,
int *is_owner_size,
int *err);
/** \brief Return entity status (Internal, boundary, ghost).
*
* Given a partition handle, a part handle, and an entity handle, return a
* flag indicating whether the entity is strictly internal, is on a
* part boundary, or is a ghost with respect to the given part.
* The returned value is a member of the iMeshP_EntStatus enumerated type.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param part (In) The part being queried.
* \param entity (In) Entity whose status is being tested.
 * \param par_status                (Out)     Value indicating the status of the
 *                                            entity with respect to the part.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getEntStatus(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_PartHandle part,
const iBase_EntityHandle entity,
int *par_status,
int *err);
/** \brief Return entity status (Internal, boundary, ghost).
*
* Given a partition handle, a part handle, and an array of entity handles,
* return for each entity handle a flag indicating whether the entity is
* strictly internal, is on a part boundary, or is a ghost with respect
* to the given part.
* The returned value is a member of the iMeshP_EntStatus enumerated type.
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param part (In) The part being queried.
* \param entities (In) Entities whose status is
* being tested.
* \param entities_size (In) Number of entity handles in
* entities.
* \param par_status (Out) Value for each entity indicating
* the status of the entity with
* respect to the part.
* \param par_status_allocated (In/Out) Allocated size of par_status array.
* \param par_status_size (Out) Occupied size of par_status array.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getEntStatusArr(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_PartHandle part,
const iBase_EntityHandle *entities,
const int entities_size,
int **par_status, /* enum iMeshP_EntStatus */
int *par_status_allocated,
int *par_status_size,
int *err);
/** \brief Return the number of copies of an entity that exist in the partition.
*
* Given a partition handle and an entity handle, return the number
* of copies of the entity in the partition.
* If the given entity is an owned entity or boundary entity,
* the number of copies will be complete.
* If the given entity is a ghost entity, the number of copies will be two
* (the ghost and its owner).
*
* COMMUNICATION: None++.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param entity (In) Entity whose copy info is requested.
* \param num_copies_ent (Out) Number of copies of the entity that
* exist in the partition.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getNumCopies(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iBase_EntityHandle entity,
int *num_copies_ent,
int *err);
/** \brief Return the part IDs of parts having copies of a given entity.
*
* Given a partition handle and an entity handle, return the part IDs
* of copies of the entity in the partition.
* If the given entity is an owned entity or boundary entity,
* the number of copies considered will be complete.
* If the given entity is a ghost entity, the number of copies considered
* will be two (the ghost and its owner).
*
* COMMUNICATION: None++.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param entity (In) Entity whose copy info
* is requested.
* \param part_ids (Out) Part IDs of parts having copies
* of the given entity.
* \param part_ids_allocated (In/Out) Allocated size of part_ids array.
* \param part_ids_size (Out) Occupied size of part_ids array.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getCopyParts(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iBase_EntityHandle entity,
iMeshP_Part **part_ids,
int *part_ids_allocated,
int *part_ids_size,
int *err);
/** \brief Get (remote) entity handles of copies of a given entity.
*
* Given a partition handle and an entity handle, return (remote) entity
* handles and part IDs of all copies of the entity.
* If the given entity is an owned entity or boundary entity,
* the number of copies considered will be complete.
* If the given entity is a ghost entity, the number of copies considered
* will be two (the ghost and its owner).
*
* COMMUNICATION: None++.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param entity (In) Entity whose copy info
* is requested.
* \param part_ids (Out) Part IDs of parts having copies
* of the given entity.
* \param part_ids_allocated (In/Out) Allocated size of part_ids array.
* \param part_ids_size (Out) Occupied size of part_ids array.
* \param copies (Out) (Remote) entity handles of the
* entity copies.
* \param copies_allocated (In/Out) Allocated size of copies.
* \param copies_size (Out) Occupied size of copies.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getCopies(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iBase_EntityHandle entity,
iMeshP_Part **part_ids,
int *part_ids_allocated,
int *part_ids_size,
iBase_EntityHandle **copies,
int *copies_allocated,
int *copies_size,
int *err);
/** \brief Get the entity handle of a copy of a given entity in a given part.
*
* Given a partition handle, an entity handle and a part ID,
* return the (remote) entity handle of the copy of the entity in that part.
* Return an error if the entity does not exist in the specified part.
*
* COMMUNICATION: None++.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param entity (In) Entity whose copy info
* is requested.
* \param part_id (In) Part ID of part whose copy
* of the given entity is requested.
* \param copy_entity (Out) (Remote) entity handle of the
* entity copy from the given part.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getCopyOnPart(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iBase_EntityHandle entity,
const iMeshP_Part part_id,
iBase_EntityHandle *copy_entity,
int *err);
/** \brief Get the entity handle of a copy of a given entity in its owner part.
*
* Given a partition handle and an entity handle, return the (remote)
* entity handle of the copy of the entity in its owner part.
*
* COMMUNICATION: None++.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param entity (In) Entity whose copy info
* is requested.
* \param owner_part_id (Out) Part ID of the entity's owner part.
* \param owner_entity (Out) (Remote) entity handle of the
* entity copy from the owner part.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_getOwnerCopy(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iBase_EntityHandle entity,
iMeshP_Part *owner_part_id,
iBase_EntityHandle *owner_entity,
int *err);
/*------------------------------------------------------------------------*/
/*------------------------------------------------------------------------*/
/*------- COMMUNICATION ----------*/
/*------------------------------------------------------------------------*/
/*------------------------------------------------------------------------*/
/**\brief Wait for a specific iMeshP request to complete.
*
* Given an iMeshP_RequestHandle, wait for the request to complete.
*
* COMMUNICATION: Blocking point-to-point.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param request (In) iMeshP request for whose completion
* we should wait.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_waitForRequest(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
iMeshP_RequestHandle request,
int *err);
/**\brief Wait for any of the specified iMeshP requests to complete.
*
* Given an array of iMeshP_RequestHandles, wait for any one of the requests
* to complete.
*
* COMMUNICATION: Blocking point-to-point.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param requests (In) iMeshP requests for which we wait
* until one request completes.
* \param requests_size (In) Number of requests in requests.
* \param index (Out) Index of the request that completed.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_waitForAnyRequest(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
iMeshP_RequestHandle *requests,
int requests_size,
int *index,
int *err);
/**\brief Wait for all of the specified iMeshP requests to complete.
*
* Given an array of iMeshP_RequestHandles, wait for all of the requests
* to complete.
*
* COMMUNICATION: Blocking point-to-point.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param requests (In) iMeshP requests for which we wait
* until completion.
* \param requests_size (In) Number of requests in requests.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_waitForAllRequests(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
iMeshP_RequestHandle *requests,
int requests_size,
int *err);
/**\brief Wait for a specific request to complete; return entities received.
*
* Given an iMeshP_RequestHandle, wait for the request to complete. Return
* entities for which information was received.
*
* COMMUNICATION: Blocking point-to-point.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param request (In) iMeshP request for whose completion
* we should wait.
* \param out_entities (Out) Entities for which information was
* received.
* \param out_entities_allocated (In/Out) Allocated size of out_entities.
* \param out_entities_size (Out) Occupied size of out_entities.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_waitForRequestEnt(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
iMeshP_RequestHandle request,
iBase_EntityHandle **out_entities,
int *out_entities_allocated,
int *out_entities_size,
int *err);
/**\brief Test whether a specific request has completed.
*
* Given an iMeshP_RequestHandle, test whether the request has completed.
* This function will not wait until the request completes; it will only
* return the completion status (complete = 1 or 0).
*
* COMMUNICATION: None.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param request (In) iMeshP request for whose completion
* we should test.
* \param completed (Out) Flag indicating whether (1) or
* not (0) the given request has
* completed.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_testRequest(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
iMeshP_RequestHandle request,
int *completed,
int *err);
/** \brief Poll for outstanding requests.
*
* Check for outstanding requests from other parts, handle any requests
* found, and return an array of requests that have been handled. If
* the array has a size allocated already, then the implementation stops
* working when it has generated that many completed requests, even if there
* are more requests waiting.
*
 * COMMUNICATION: point-to-point, non-blocking.
*
* \param instance (In) Mesh instance containing the
* partition.
* \param partition (In) The partition being queried.
* \param requests_completed (Out) Requests that were completed.
* \param requests_completed_allocated (In/Out) Allocated size of
* requests_completed.
* \param requests_completed_size (Out) Occupied size of
* requests_completed.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_pollForRequests(
iMesh_Instance instance,
iMeshP_PartitionHandle partition,
iMeshP_RequestHandle **requests_completed,
int *requests_completed_allocated,
int *requests_completed_size,
int *err);
/*--------------------------------------------------------------------
------- Requests for off-processor mesh modification -------
--------------------------------------------------------------------*/
/** \brief Add entities to on-process and/or off-process parts.
*
* Given a partition and a list of entities, add those entities to the
* target parts. The entities can be added as copies or migrated entirely
* (i.e., change ownership of the entities)
* to the parts. The entities' downward adjacencies are also copied and/or
* migrated as appropriate to support the entities.
* This function is a collective, non-blocking operation
* to be called by all processes in the partition's communicator.
* An iMeshP_RequestHandle is returned; any of the
* iMeshP_wait* functions can be used to block until the request is completed.
*
* COMMUNICATION: Collective. Non-blocking.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) Handle for the partition being queried.
* \param entities (In) Entities to be sent.
* \param entities_size (In) Number of entities to be sent.
* \param target_part_ids (In) Array of size entities_size listing
* the parts to which the entities should
* be sent.
* \param command_code (In) Flag indicating whether to migrate
* the entities or only make copies.
* \param update_ghost (In) Flag indicating whether (1) or not (0)
* ghost copies of the entities should be
* updated with new owner information.
* \param request (Out) iMeshP RequestHandle returned; can be used
* for blocking until this send is complete.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_exchEntArrToPartsAll(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iBase_EntityHandle *entities,
const int entities_size,
const iMeshP_Part *target_part_ids,
int command_code,
int update_ghost,
iMeshP_RequestHandle *request,
int *err);
/** \brief Request in-migration of an entity and its upward adjacencies.
*
* This function is a "pull" migration, where a part requests to become the
* owner of an entity that is owned by another part (so that the part has
* the right to modify the entity). The requested
* entity must be on the part boundary and is identified by a local handle
* (i.e., an entity part-boundary copy). This operation may require multiple
* rounds of communication, and at some times, certain entities may be
* locked (unavailable for local modification) while info about their
* remote copies is still in question. Tags and parallel set membership
* are migrated as well as the appropriate adjacency info.
* An iMeshP request handle is returned.
*
* COMMUNICATION: point-to-point, non-blocking, pull.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param part (In) The part to which the entity is migrated.
* \param local_entity (In) The local entity copy for the entity to be
* migrated.
* \param request (Out) The iMeshP request handle returned.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_migrateEntity(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
iMeshP_PartHandle part,
iBase_EntityHandle local_entity,
iMeshP_RequestHandle *request,
int *err);
/** \brief Update vertex coordinates for vertex copies.
*
* For a given vertex, update its copies with the vertex's coordinates.
* This function assumes that a local vertex's coordinates were updated
* through a call to iMesh_setVtxCoords. This function then updates all
* copies of the vertex with the updated coordinates.
* The communication here is push-and-forget; as such,
* no request handle needs to be returned.
*
* COMMUNICATION: point-to-point, non-blocking, push-and-forget.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param local_vertex (In) The vertex whose copies should be updated.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_updateVtxCoords(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iBase_EntityHandle local_vertex,
int *err);
/** \brief Replace entities on the part boundary.
*
* This function performs changes on the part boundary where the
* calling application can ensure that things are done
* identically on both sides and that the arguments are passed in an order
* that can be matched. (Specifically, matching new entities should appear in
* the same order in the call array.) An example is creation of new
* boundary edges during edge splitting.
* Communication here could be a
* two-way push-and-forget, or some variant on push-and-confirm.
* CHANGES: At Onkar's suggestion, added an offset array (similar to array
* adjacency requests) so that a single call can easily handle coordination
* with multiple entities on part-boundary.
*
* COMMUNICATION: point-to-point, non-blocking, push-and-forget.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param old_entities (In) The entities to be replaced.
* \param old_entities_size (In) The number of entities to be replaced.
* \param new_entities (In) The entities that replace the old entities.
* \param new_entities_size (In) The number of entities in new_entities.
* \param offset (In) Index into new_entities; old_entities[i]
* is replaced by new_entities[offset[i]] to
* new_entities[offset[i+1]-1].
* \param offset_size (In) The number of entries in offset.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_replaceOnPartBdry(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iBase_EntityHandle *old_entities,
const int old_entities_size,
const iBase_EntityHandle *new_entities,
const int new_entities_size,
const int *offset,
const int offset_size,
int *err);
/** \brief Push ghost copies of individual entities onto other parts.
*
* Given an entity and a target part, create a ghost copy of the entity on
* the target part.
*
* Communication here is push-and-confirm (so that the original knows remote
* entity handle of the created ghosts). The closure of a new ghost is pushed
* automatically as part of the underlying communication.
*
* COMMUNICATION: point-to-point, non-blocking, push.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param target_part_id (In) The part to receive the new ghost.
* \param entity_to_copy (In) The entity to be copied in target_part_id.
* \param request (Out) The iMeshP request handle returned.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_addGhostOf(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_Part target_part_id,
iBase_EntityHandle entity_to_copy,
iMeshP_RequestHandle *request,
int *err);
/** \brief Remove ghost copies of individual entities from other parts.
*
* Given an entity and a target part, remove the ghost copy of the entity on
* the target part.
*
* Communication is push-and-forget; as such, no request handle is needed.
* The remote part will clean up the closure of the removed ghost
* as appropriate during deletion.
*
* COMMUNICATION: point-to-point, non-blocking, push-and-forget.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param target_part_id (In) The part to lose the ghost.
* \param copy_to_purge (In) The entity whose ghost is removed from
* target_part_id.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_rmvGhostOf(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
const iMeshP_Part target_part_id,
iBase_EntityHandle copy_to_purge,
int *err);
/** \brief Indicate completion of mesh modification.
*
* Calling this function indicates that the user is finished with mesh
* modification for now. With mesh modification complete, the implementation
* can update ghost, partition, boundary, and other information to
* re-establish a valid distributed mesh. This function waits for all
* message traffic to clear and rebuilds ghost information that was
* allowed to go obsolete during mesh modification.
*
* COMMUNICATION: collective.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_syncMeshAll(
iMesh_Instance instance,
iMeshP_PartitionHandle partition,
int *err);
/*--------------------------------------------------------------------------*/
/* Functions to send Tag data from owning entities to copies. */
/*--------------------------------------------------------------------------*/
/**\brief Synchronously send tag data for given entity types and topologies.
*
* Send tag information for shared entities of specified type and
* topology. The tag data is "pushed" from the owner entities to all copies.
* This version operates on all shared entities of specified type and topology
* (or all types/topologies if iBase_ALL_TYPES/iMesh_ALL_TOPOLOGIES are
* given). This function assumes tag handles given on various
* calling parts are consistent; i.e. they have the same name,
* data type, size, etc. This call blocks until communication is
* completed.
*
* COMMUNICATION: point-to-point, blocking.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param source_tag (In) Tag handle for the sending entities.
* \param dest_tag (In) Tag handle for the receiving entities.
* \param entity_type (In) Tag data is exchanged only for this
* entity type.
* \param entity_topo (In) Tag data is exchanged only for this
* entity topology.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_pushTags(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
iBase_TagHandle source_tag,
iBase_TagHandle dest_tag,
int entity_type,
int entity_topo,
int *err);
/**\brief Synchronously send tag data for individual entities.
*
* Send tag information for the specified entities.
* The tag data is "pushed" from the owner entities to all copies.
* This function assumes tag handles given on various
* calling parts are consistent; i.e. they have the same name,
* data type, size, etc. This call blocks until communication is
* completed.
*
* COMMUNICATION: point-to-point, blocking.
*
* \param instance (In) Mesh instance containing the partition.
* \param partition (In) The partition being queried.
* \param source_tag (In) Tag handle for the sending entities.
* \param dest_tag (In) Tag handle for the receiving entities.
* \param entities (In) Owned entities for which to send data.
* \param entities_size (In) The number of entities for which to send data.
* \param err (Out) Error code.
*/
IMESH_EXPORT
void iMeshP_pushTagsEnt(
iMesh_Instance instance,
const iMeshP_PartitionHandle partition,
iBase_TagHandle source_tag,
iBase_TagHandle dest_tag,
const iBase_EntityHandle *entities,
int entities_size,
int *err);
/**\brief Asynchronously send tag data for given entity types and topologies.
 *
 * Send tag information for shared entities of specified type and
 * topology. The tag data is "pushed" from the owner entities to all copies.
 * This version operates on all shared entities of specified type and topology
 * (or all types/topologies if iBase_ALL_TYPES/iMesh_ALL_TOPOLOGIES are
 * given). This function assumes tag handles given on various
 * calling parts are consistent; i.e. they have the same name,
 * data type, size, etc.
 * This call does not block; applications should call
 * iMeshP_waitForRequest (or a similar wait function)
 * to block until this push is completed.
 *
 * COMMUNICATION: point-to-point, non-blocking.
 *
 * \param instance (In) Mesh instance containing the partition.
 * \param partition (In) The partition being queried.
 * \param source_tag (In) Tag handle for the sending entities.
 * \param dest_tag (In) Tag handle for the receiving entities.
 * \param entity_type (In) Tag data is exchanged only for this
 * entity type.
 * \param entity_topo (In) Tag data is exchanged only for this
 * entity topology.
 * \param request (Out) The iMeshP request handle returned.
 * \param err (Out) Error code.
 */
IMESH_EXPORT
void iMeshP_iPushTags(
            iMesh_Instance instance,
            const iMeshP_PartitionHandle partition,
            iBase_TagHandle source_tag,     /* tag read on the owning parts */
            iBase_TagHandle dest_tag,       /* tag written on the copies */
            int entity_type,                /* may be iBase_ALL_TYPES for all types */
            int entity_topo,                /* may be iMesh_ALL_TOPOLOGIES for all topologies */
            iMeshP_RequestHandle *request,  /* completed via iMeshP_waitForRequest */
            int *err);
/**\brief Asynchronously send tag data for individual entities.
 *
 * Send tag information for the specified entities.
 * The tag data is "pushed" from the owner entities to all copies.
 * This function assumes tag handles given on various
 * calling parts are consistent; i.e. they have the same name,
 * data type, size, etc.
 * This call does not block; applications should call
 * iMeshP_waitForRequest (or a similar wait function)
 * to block until this push is completed.
 *
 * COMMUNICATION: point-to-point, non-blocking.
 *
 * \param instance (In) Mesh instance containing the partition.
 * \param partition (In) The partition being queried.
 * \param source_tag (In) Tag handle for the sending entities.
 * \param dest_tag (In) Tag handle for the receiving entities.
 * \param entities (In) Owned entities for which to send data.
 * \param entities_size (In) The number of entities for which to send data.
 * \param request (Out) The iMeshP request handle returned.
 * \param err (Out) Error code.
 */
IMESH_EXPORT
void iMeshP_iPushTagsEnt(
            iMesh_Instance instance,
            const iMeshP_PartitionHandle partition,
            iBase_TagHandle source_tag,          /* tag read on the owning parts */
            iBase_TagHandle dest_tag,            /* tag written on the copies */
            const iBase_EntityHandle *entities,  /* owned entities whose data is sent */
            int entities_size,
            iMeshP_RequestHandle *request,       /* completed via iMeshP_waitForRequest */
            int *err);
/*------------------------------------------------------------*
* GHOSTING *
*------------------------------------------------------------*/
/** \brief Create ghost entities between parts.
 *
 * Ghost entities are specified similar to 2nd-order adjacencies, i.e.,
 * through a "bridge" dimension. The number of layers is measured from
 * the inter-part interfaces. For example, to get two layers of region
 * entities in the ghost layer, measured from faces on the interface,
 * use ghost_dim=3, bridge_dim=2, and num_layers=2.
 * The number of layers specified is with respect to the global mesh;
 * that is, ghosting may extend beyond a single neighboring processor if the
 * number of layers is high.
 *
 * Ghost information is cached in the partition.
 * The triplet describing a ghosting "rule" (ghost dim, bridge dim, #
 * layers) is stored in the partition; ghosting that became incorrect
 * due to mesh modification or redistribution of mesh entities is
 * re-established using these rules by the end
 * of iMeshP_syncPartitionAll and iMeshP_syncMeshAll.
 * Implementations can choose to keep ghosting consistent throughout
 * mesh modification, but ghosts are not required to be consistent until
 * the end of these two functions.
 * iMeshP_createGhostEntsAll is cumulative; that is, multiple calls can only
 * add more ghosts, not eliminate previous ghosts.
 *
 * NOTE(review): the example above is phrased in terms of entity
 * *dimensions* (ghost_dim, bridge_dim) while the parameters below take
 * entity *types* — confirm the intended dimension-to-type mapping
 * against the full specification.
 *
 * COMMUNICATION: Collective. Blocking.
 *
 * \param instance (In) Mesh instance containing the partition.
 * \param partition (In) The partition in which to create ghosts.
 * \param ghost_type (In) Entity type of entities to be ghosted.
 * \param bridge_type (In) Entity type through which bridge
 * adjacencies are found.
 * \param num_layers (In) Number of layers of ghost entities.
 * \param include_copies (In) Flag indicating whether to create ghosts
 * of non-owned part boundary entities
 * (YES=1, NO=0).
 * \param err (Out) Error code.
 */
IMESH_EXPORT
void iMeshP_createGhostEntsAll(
            iMesh_Instance instance,
            iMeshP_PartitionHandle partition,
            int ghost_type,
            int bridge_type,
            int num_layers,
            int include_copies,
            int *err);
/** \brief Delete all ghost entities between parts.
 *
 * Given a partition, delete all ghost entities in that partition of the mesh.
 *
 * COMMUNICATION: Collective. Blocking.
 *
 * \param instance (In) Mesh instance containing the partition.
 * \param partition (In) The partition from which to delete ghosts.
 * \param err (Out) Error code.
 *
 */
IMESH_EXPORT
void iMeshP_deleteGhostEntsAll(
            iMesh_Instance instance,
            iMeshP_PartitionHandle partition,
            int *err);
/** \brief Return information about all ghosting on a partition.
 *
 * Return the ghosting rules established through calls to
 * iMeshP_createGhostEntsAll.
 * The arrays ghost_type, bridge_type and num_layers are parallel:
 * entry i of each array together describes the i-th registered rule.
 *
 * COMMUNICATION: None.
 *
 * \param instance (In) Mesh instance containing the
 * partition.
 * \param partition (In) The partition to be queried.
 * \param ghost_rules_allocated (In/Out) Allocated size of ghost_type,
 * bridge_type and num_layers.
 * \param ghost_rules_size (Out) Occupied size of ghost_type,
 * bridge_type and num_layers;
 * equal to the number of ghosting
 * rules currently registered in
 * the partition.
 * \param ghost_type (Out) Entity type of ghost entities
 * for each rule.
 * \param bridge_type (Out) Entity type of bridge entities
 * for each rule.
 * \param num_layers (Out) Number of layers of ghosts in each
 * rule.
 * \param err (Out) Error code.
 */
IMESH_EXPORT
void iMeshP_ghostEntInfo(
            const iMesh_Instance instance,
            const iMeshP_PartitionHandle partition,
            int *ghost_rules_allocated,
            int *ghost_rules_size,
            int **ghost_type,
            int **bridge_type,
            int **num_layers,
            int *err);
/*--------------------------------------------------------------------------
FILE I/O
--------------------------------------------------------------------------*/
/* iMeshP file I/O closely aligns with iMesh file I/O. The major
* change is the addition of a iMeshP_PartitionHandle argument to both
* iMeshP_loadAll and iMeshP_saveAll, enabling I/O from parallel processes.
* For now, individual implementations will support different sets of
* options; Tim and Ken will work to unify the options by SC08.
*/
/** \brief Populate a mesh instance and a partition by reading data from files.
 *
 * Before calling iMeshP_loadAll, the application creates both a mesh
 * instance and a partition handle. iMeshP_loadAll then reads the
 * specified file, inserts entities into the mesh instance, constructs
 * parts within the partition, and inserts entities into the parts.
 * Options allow n>=1 files on p processes.
 * Optional capabilities of iMeshP_loadAll include computing an initial
 * partition (e.g., if a serial mesh file without part assignments is read)
 * and creating ghost entities as requested by the application; the
 * availability of these options is implementation dependent.
 *
 * COMMUNICATION: Collective.
 *
 * \param instance (In) Mesh instance to contain the data.
 * \param partition (In) The newly populated partition.
 * \param entity_set (In) Set to which the mesh will be added.
 * \param name (In) File name from which mesh data is read.
 * \param options (In) Implementation-specific options string.
 * \param err (Out) Error code.
 * \param name_len (In) Length of the file name character string.
 * \param options_len (In) Length of the options character string.
 */
IMESH_EXPORT
void iMeshP_loadAll(
            iMesh_Instance instance,
            const iMeshP_PartitionHandle partition,
            const iBase_EntitySetHandle entity_set,
            const char *name,
            const char *options,
            int *err,
            int name_len,
            int options_len);
/** \brief Write data from a mesh instance and a partition to files.
 *
 * iMeshP_saveAll writes mesh and partition data to the specified file.
 * Options allow n>=1 files on p processes.
 *
 * COMMUNICATION: Collective.
 *
 * \param instance (In) Mesh instance containing the partition.
 * \param partition (In) The partition being saved.
 * \param entity_set (In) Set from which data will be saved.
 * \param name (In) File name to which mesh data is written.
 * \param options (In) Implementation-specific options string.
 * \param err (Out) Error code.
 * \param name_len (In) Length of the file name character string.
 * \param options_len (In) Length of the options character string.
 */
IMESH_EXPORT
void iMeshP_saveAll(
            iMesh_Instance instance,
            const iMeshP_PartitionHandle partition,
            const iBase_EntitySetHandle entity_set,
            const char *name,
            const char *options,
            int *err,
            const int name_len,  /* NOTE(review): const here but not in iMeshP_loadAll;
                                    harmless for a by-value parameter, but inconsistent */
            int options_len);
/*
------------------------------------------------
Major Items left to do:
- Support for multiple partitions.
We discussed designating a given partition as
the "active" partition; i.e., the partition that is actually used in
the distribution of mesh data in distributed memory. We were concerned
that when multiple partitions were used, multiple copies of mesh
entities would be needed to fully support multiple partitions at the
same time. Designating one partition as "active" would store data
with respect to only one partition.
- File I/O support.
Need common set of options to allow interoperability.
Support single files, N << P files on P processes, and P files.
Support reading and writing partition information.
Support initial parallel partitioning of serial file data.
Support storing mapping of parts to processes in files.
------------------------------------------------
Minor Items left to do:
- Determine which capabilities need both "getNumX" and "getX" functions.
That is, when would an application need "getNumX" to allocate memory
for "getX" or separately from "getX". When could we use only "getX"
and return numX as a byproduct.
- Determine which functions need "Ent" and "EntArr" versions, or whether
we should adopt only the more general "EntArr" version.
- Determine whether to revise iMeshP_createPartition to make it less MPI
specific. We don't want to require applications to link with MPI if the
implementation doesn't require it. We may define an ITAPS_Comm name
typedef'ed appropriately.
- iMeshP_getOwnerCopy could be achieved by calling iMeshP_getOwnerPart
followed by iMeshP_getCopyOnPart. Do we want to keep iMeshP_getOwnerCopy?
- Need function to receive tag data from part-boundary entities in owner.
Possible options: return the tag data values received directly, or
include a mathematical operation (similar to MPI_SUM). 9/15/08
------------------------------------------------
Comments and resolved questions:
- Applications will build partitions by (1) creating a partition handle
on each process to be included in the partition; (2) adding parts to
the partition handle within the process; (3) populating the parts with
entities, and (4) calling iMeshP_syncPartitionAll to allow the
implementation to compute global data for the partition.
- For now, we will not include an iterator over local (to a
process) parts within a partition. If it is needed, it can be added
later.
- We will not provide capability to move entire parts to new
processes; instead, the new process must create the part in its
partition handle and then receive (perhaps in bulk) the entities to
populate the part. In other words, parts can be added to only a local
partition handle.
- Currently, iMesh doesn't have the functionality to get entities or
entity sets by type and tag in serial. Should it?
Many people said it would be useful; others said it could be costly
(in parallel) or numerically difficult (for floating point values).
This issue is an iMesh issue, not a parallel interface issue, so
for this document, the issue is resolved. The resolution: If
iMesh adopts this capability, we will add it to the
parallel interface.
- We will not include functions that return all entities with
given characteristics within a partition; the memory use of these
functions can be large. Instead, we will return entity information
with respect to parts and/or mesh instances. If the user wants such
info, he should go through the mechanics of gathering it himself so
that he is painfully aware of how much memory he is allocating.
Removed the following global queries:
+ All tag names over the partition;
+ All entities in this partition having a given type, tag and/or
tag name.
+ All entity sets in this partition having a given
type, tag and/or tag name.
- We will not include functions that return information about each
part and/or process in a partition. Such functions limit memory
scalability for large numbers of parts. If the user wants such
info, he should go through the mechanics of gathering it himself so
that he is painfully aware of how much memory he is allocating.
Removed the following global queries:
+ The number of entities in each part of the partition;
+ The number of entity sets in each part of the partition;
+ The number of entities with given type, tag, and/or
tag name in each part of the partition;
+ The number of entity sets with given type, tag,
and/or tag name in each part of the partition;
+ All tag names in each part of the partition;
- For functions that replace a set handle with a part handle, return
all appropriate entities in a part, whether they are owned or are
copies. The application can test for ownership if needed.
- Part assignments computed with respect to a set of
entities induce part assignments to adjacent entities in an
implementation-dependent fashion. That is, if a partition is computed
with respect to regions, queries about ownership of faces and vertices
are valid.
------------------------------------------------
Discussed but unresolved questions:
- We discussed adding functions that give
hints to an implementation about which data mappings the application
will use, allowing the implementation to pre-compute them if it chooses
to. The example discussed was mapping between entities and parts, but
other examples in iMesh may also exist.
- We discussed adding an iterator over entities
with given type/topology in a set or part. We have non-iterator
functionality, but not an iterator.
KDD: Is this true? What is iMesh_initEntIter (and its analogous
KDD: iMeshP_initEntIter)?
- We discussed storing in a partition
information about which "objects" were used in computing the partition.
These objects can be single entities or groups of entities.
KDD: Perhaps this capability should be part of the load-balancing service.
- We discussed designating a given partition as
the "active" partition; i.e., the partition that is actually used in
the distribution of mesh data in distributed memory. We were concerned
that when multiple partitions were used, multiple copies of mesh
entities would be needed to fully support multiple partitions at the
same time. Designating one partition as "active" would store data
with respect to only one partition.
------------------------------------------------
Not-yet-discussed, unresolved questions
Entity questions:
- From Carl: "getTag*Operate: Again, we haven't got this in serial. Does
the existence of such operations imply that we expect to implement
fields as tags? (Because that wasn't what I was assuming about field
implementations at all, personally...) Note that I'm not opposed to
this sort of global reduction operation, I just wonder whether it'll see
use outside of field-like situations. If not, then it should be in
parallel fields, not parallel mesh, and usage for
fields-implemented-as-tags should be handled there."
*/
/*--------------------------------*/
/* NOTES FROM BOOTCAMP MARCH 2008 */
/*--------------------------------*/
/*
- Changed getPartRank to getRankOfPart. (Carl)
- Made sure iMeshP_getNumOfTypeAll and iMeshP_getNumOfTopoAll were
documented as collective operations. (Carl)
- Changed suffix "Par" to "All". (Lori)
- Added iMeshP_testPart() to test status of part handle, returning
LOCAL, REMOTE, or INVALID. (Mark M, Lori).
6/25/08: Removed this function since part handles can no longer be remote.
If an application wants to test the validity of a part handle, it can try
to compute its Part ID.
- Changed iMeshP_addCopyOf and iMeshP_rmvCopyOf back to
iMeshP_addGhostOf and iMeshP_rmvGhostOf. If we wanted to use these
functions for adding boundary copies, we'd have to include a list of
already existing remote copies in the arguments, as well as
communicate with parts already owning copies to let them know a ghost
copy has been made. Actually, this raises an interesting question:
does a boundary copy need to know about all ghost copies of it?
- Change getEntParStatus to getEntStatus. (Lori)
- Changed sendEntArrToPartsPar to exchEntArrToPartsAll. (Lori,Tim)
Parts and Processes:
- Martin argued for consecutive unique Part IDs in addition to or
instead of Part handles. He will send use cases. If we decide to
add them back to the interface, we could compute them in
iMeshP_syncPartitionAll rather than in iMeshP_createPart. That is, an
application couldn't access them until after iMeshP_syncPartitionAll.
6/25/08: On follow-up, Martin couldn't recall why having consecutive
PartIDs was necessary. While we all agree they are conceptually nice,
they are difficult to implement and not really necessary. Part IDs will
be globally unique but not necessarily consecutive.
- Are part handles globally unique? They probably need to be
globally unique in order for them to be useful as remote part
handles. Also, does the process rank need to be encoded in the part
handle in order to map from parts to processes for communication?
6/25/08: DECIDED: We will have globally unique part IDs. Part handles
will be valid for only local parts. Accessing remote parts must be done
via Part IDs.
- If in iMeshP_syncPartitionAll, we computed a mapping from part
handles to integers 0,..., k-1, we could store only ranges of
integers to achieve the part-to-process and process-to-parts mappings;
this would require O(P) storage per process for P processes.
6/5/08: DECIDED: Do not need getPartOnRank or getNumPartOnRank. These
functions were troublesome due to their storage or communication requirements.
We decided to remove them.
- Alternatively, the mapping of all parts to processes can be stored
in O(k) total memory, distributed across processors (e.g., a
distributed data directory) but interrogating the directory requires
communication.
6/5/08: See note above.
- iMeshP_getPartsOnRank created discussion and needs to be resolved.
IMeshP_getPartsOnRank would likely require either O(k) storage per
process for k parts or communication. For other points, please see
Mark M's 3/12/08 email.
6/5/08: See note above.
CreateEnt:
- Carl asked if we should have a version of createEnt that accepts a
part handle. Should this function be used only for creating owned
entities? How do you envision creating part boundary entities when a
parallel mesh is initially loaded?
Ghost entities:
- We currently have a mechanism only for pushing ghosts onto other
parts. Will we want a mechanism for pulling them, too? (E.g., a
part says, "I want ghosts for this entity.")
PartNbor functions:
- Did we agree to remove the entity type from these functions? That
is, do we want to return the part IDs for all parts that have
any copies? The entity type was intended to allow us to get the part
IDs for all parts that have copies of a given type (perhaps
ALL_TYPES).
Functions handling both Parts and Entity Sets:
- Tim said these function names (e.g., iMeshP_getNumOfType,
iMeshP_getAllVtxCoord) are too close to existing iMesh function
names, even though the argument lists would be different. He agreed
to email suggestions for better names.
Copies:
- Functions getNumCopies, getCopies, getCopyParts, and getCopyOnPart
have different behavior for ghost and part-boundary copies. Ghosts
will return only itself and its owner in getCopies; part-boundary
entities will return copies on other parts as well.
- Tim envisions applications (e.g., FETI methods) updating tag data
in their copies that they would like to accumulate back to the
owner. Xiaolin said that he writes in his ghosts, but doesn't send
those values back to the owner. Currently, we have the ability
to send tag data only from owners to ghosts. Tim will look at this issue
and propose a solution.
Communication:
- Although we should think in terms of parts, communication really
occurs with respect to processes. We need to make sure all our
communication routines make sense with respect to both processes and
parts, and perhaps, revise their descriptions. Also, if we send to
parts, the implementation really does need the mapping of parts to
processes.
Entity Owner/Status Queries:
- Should we combine functions getEntOwnerPart and getEntStatus into
one function? Or should we combine functions getOwnerCopy and
getEntOwner into one function? Or should we remove getOwnerCopy and
make applications call getOwnerPart followed by getCopyOnPart?
Reducing storage:
- Mark Miller proposed allowing the user to specify the amount of
copying done by the implementation, depending on applications' needs.
For example, for a static viz application, every boundary entity may not
need to know where all its copies are, so the implementation would not
have to store them. Can the implementations accept a flag advising them how
much copying is needed? If so, the implementations could choose to
optimize storage or ignore the flag.
*/
/*--------------------------------------------------------------------
* SVN File Information
*
* $SVN:Author$
* $SVN:Date$
* $SVN:Revision$
*--------------------------------------------------------------------
*/
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* !defined(_ITAPS_iMeshP) */
| 44.279113 | 95 | 0.595957 |
07d68cc8511f05b2fe3ee4e7ccd0a5574bd8e1cc | 720 | kt | Kotlin | smart-starter/log-spring-boot-starter/src/main/kotlin/com/smart/starter/log/SmartLogAutoConfiguration.kt | ming4762/smart_boot | 399b48a5e40b080c1fa1591102c0e2562f9d9ac3 | [
"MIT"
] | 5 | 2019-08-22T10:29:28.000Z | 2021-01-12T10:46:04.000Z | smart-starter/log-spring-boot-starter/src/main/kotlin/com/smart/starter/log/SmartLogAutoConfiguration.kt | ming4762/smart_boot | 399b48a5e40b080c1fa1591102c0e2562f9d9ac3 | [
"MIT"
] | null | null | null | smart-starter/log-spring-boot-starter/src/main/kotlin/com/smart/starter/log/SmartLogAutoConfiguration.kt | ming4762/smart_boot | 399b48a5e40b080c1fa1591102c0e2562f9d9ac3 | [
"MIT"
] | null | null | null | package com.smart.starter.log
import com.smart.starter.log.aspect.LogAspect
import org.mybatis.spring.annotation.MapperScan
import org.springframework.boot.context.properties.EnableConfigurationProperties
import org.springframework.context.annotation.Bean
import org.springframework.context.annotation.ComponentScan
import org.springframework.context.annotation.Configuration
/**
* 日志配置类
* @author ming
* 2019/6/28 下午4:01
*/
@Configuration
@EnableConfigurationProperties(LogProperties :: class)
@ComponentScan
@MapperScan("com.smart.starter.log.mapper", sqlSessionFactoryRef = "systemSqlSessionFactory")
class SmartLogAutoConfiguration {
/**
* 创建日志切面
*/
@Bean
fun logAspect() = LogAspect()
} | 27.692308 | 93 | 0.793056 |
92dd06906ef65327125bf092407b23de79edbe3c | 365 | h | C | PrivateFrameworks/CorePDF.framework/UIPDFPopupAnnotationView.h | shaojiankui/iOS10-Runtime-Headers | 6b0d842bed0c52c2a7c1464087b3081af7e10c43 | [
"MIT"
] | 36 | 2016-04-20T04:19:04.000Z | 2018-10-08T04:12:25.000Z | PrivateFrameworks/CorePDF.framework/UIPDFPopupAnnotationView.h | shaojiankui/iOS10-Runtime-Headers | 6b0d842bed0c52c2a7c1464087b3081af7e10c43 | [
"MIT"
] | null | null | null | PrivateFrameworks/CorePDF.framework/UIPDFPopupAnnotationView.h | shaojiankui/iOS10-Runtime-Headers | 6b0d842bed0c52c2a7c1464087b3081af7e10c43 | [
"MIT"
] | 10 | 2016-06-16T02:40:44.000Z | 2019-01-15T03:31:45.000Z | /* Generated by RuntimeBrowser
Image: /System/Library/PrivateFrameworks/CorePDF.framework/CorePDF
*/
@interface UIPDFPopupAnnotationView : UIPDFAnnotationView {
UIImage * _image;
}
- (void)dealloc;
- (void)drawRect:(struct CGRect { struct CGPoint { double x_1_1_1; double x_1_1_2; } x1; struct CGSize { double x_2_1_1; double x_2_1_2; } x2; })arg1;
@end
| 28.076923 | 150 | 0.742466 |
65489ab1059af5d3af74f3912af4a5da4c39124a | 383 | py | Python | las1.2.py | Theskill19/sweetpotato | 7cb46c412f400bcd51838db365038a766cf593cd | [
"CC0-1.0"
] | null | null | null | las1.2.py | Theskill19/sweetpotato | 7cb46c412f400bcd51838db365038a766cf593cd | [
"CC0-1.0"
] | null | null | null | las1.2.py | Theskill19/sweetpotato | 7cb46c412f400bcd51838db365038a766cf593cd | [
"CC0-1.0"
] | null | null | null | #2. Пользователь вводит время в секундах.
# Переведите время в часы, минуты и секунды и выведите в формате чч:мм:сс.
# Используйте форматирование строк.
time = int(input("Введите время в секундах "))
hours = time // 3600
minutes = (time - hours * 3600) // 60
seconds = time - (hours * 3600 + minutes * 60)
print(f"Время в формате чч:мм:сс {hours} : {minutes} : {seconds}") | 42.555556 | 75 | 0.681462 |
122193135cc10e4aec11ef8099bd14d369d6285f | 2,316 | h | C | chrome/browser/tab_contents/web_contents_collection.h | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 575 | 2015-06-18T23:58:20.000Z | 2022-03-23T09:32:39.000Z | chrome/browser/tab_contents/web_contents_collection.h | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | chrome/browser/tab_contents/web_contents_collection.h | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 52 | 2015-07-14T10:40:50.000Z | 2022-03-15T01:11:49.000Z | // Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_TAB_CONTENTS_WEB_CONTENTS_COLLECTION_H_
#define CHROME_BROWSER_TAB_CONTENTS_WEB_CONTENTS_COLLECTION_H_
#include <memory>
#include "base/containers/flat_map.h"
#include "content/public/browser/web_contents_observer.h"
namespace content {
class WebContents;
} // namespace content
// Utility class for receiving `WebContentsObserver` callbacks from sets of
// `WebContents`. Manages a set of `WebContentsObserver` which forward their
// callbacks annotated with the WebContents they occurred in to an observer. The
// collection ensures that observer lifetimes are properly handled.
class WebContentsCollection {
public:
class Observer {
public:
// Observer callbacks that will be fired from each web contents being
// watched in `web_contents_observers_`.
virtual void WebContentsDestroyed(content::WebContents* web_contents) {}
virtual void RenderProcessGone(content::WebContents* web_contents,
base::TerminationStatus status) {}
virtual void NavigationEntryCommitted(
content::WebContents* web_contents,
const content::LoadCommittedDetails& load_details) {}
protected:
virtual ~Observer() = default;
};
// `observer` must outlive `this`.
explicit WebContentsCollection(Observer* observer);
~WebContentsCollection();
// Start forwarding `WebContentsObserver` calls from `web_contents` to
// `observer_`.
void StartObserving(content::WebContents* web_contents);
// Stops `observer_` from receiving calls from `web_contents`.
void StopObserving(content::WebContents* web_contents);
private:
class ForwardingWebContentsObserver;
void WebContentsDestroyed(content::WebContents* web_contents);
// Observer which will receive callbacks from any of the `WebContentsObserver`
// in `web_contents_observers_`.
Observer* const observer_;
// Map of observers for the WebContents part of this collection.
base::flat_map<content::WebContents*,
std::unique_ptr<ForwardingWebContentsObserver>>
web_contents_observers_;
};
#endif // CHROME_BROWSER_TAB_CONTENTS_WEB_CONTENTS_COLLECTION_H_
| 35.630769 | 80 | 0.760794 |
3e892c6dba30e1347170e037942418546539585d | 1,232 | h | C | chrome/browser/net/explicitly_allowed_network_ports_policy_handler.h | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | chrome/browser/net/explicitly_allowed_network_ports_policy_handler.h | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | chrome/browser/net/explicitly_allowed_network_ports_policy_handler.h | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | // Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_NET_EXPLICITLY_ALLOWED_NETWORK_PORTS_POLICY_HANDLER_H_
#define CHROME_BROWSER_NET_EXPLICITLY_ALLOWED_NETWORK_PORTS_POLICY_HANDLER_H_
#include "components/policy/core/browser/configuration_policy_handler.h"
namespace policy {
// Checks and converts the strings in
// policy::key::kExplicitlyAllowedNetworkPorts to integers in
// prefs::kExplicityAllowedNetworkPorts. The reason that the policy uses strings
// is that it permits us to document explicitly what values are supported and
// for how long.
class ExplicitlyAllowedNetworkPortsPolicyHandler final
: public ListPolicyHandler {
public:
ExplicitlyAllowedNetworkPortsPolicyHandler();
protected:
// Filters out strings that do not cleanly convert to integers in the port
// range 1 to 65535.
bool CheckListEntry(const base::Value& value) override;
// Converts the values to integers.
void ApplyList(base::Value filtered_list, PrefValueMap* prefs) override;
};
} // namespace policy
#endif // CHROME_BROWSER_NET_EXPLICITLY_ALLOWED_NETWORK_PORTS_POLICY_HANDLER_H_
| 36.235294 | 80 | 0.809253 |
331608bfaa1bbeecfc3dd18a2fde05596fb4e203 | 2,754 | py | Python | medios/diarios/diario.py | miglesias91/dicenlosmedios | 1f8867cd09689006f35447ad8540359d9429b518 | [
"MIT"
] | 1 | 2020-10-20T20:50:51.000Z | 2020-10-20T20:50:51.000Z | medios/diarios/diario.py | miglesias91/dicenlosmedios | 1f8867cd09689006f35447ad8540359d9429b518 | [
"MIT"
] | 8 | 2021-03-19T01:17:28.000Z | 2022-03-02T14:57:48.000Z | medios/diarios/diario.py | miglesias91/dicenlosmedios | 1f8867cd09689006f35447ad8540359d9429b518 | [
"MIT"
] | null | null | null | import dateutil
import yaml
import feedparser as fp
import newspaper as np
from medios.medio import Medio
from medios.diarios.noticia import Noticia
from bd.entidades import Kiosco
class Diario(Medio):
    """Base class for RSS-driven newspaper scrapers.

    Feed URLs and category tags are loaded from
    'medios/diarios/config.yaml', keyed by the paper's tag (etiqueta).
    """

    def __init__(self, etiqueta):
        Medio.__init__(self, etiqueta)
        self.noticias = []        # downloaded Noticia objects
        self.feeds = {}           # category tag -> feed URL
        self.feed_noticias = ""   # optional single "all news" feed URL
        self.categorias = []      # category tags for this paper
        self.configurar()

    def configurar(self):
        """Load this paper's feed/category settings from the YAML config."""
        with open('medios/diarios/config.yaml', 'r') as stream:
            try:
                config = yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                # Bad config file: report it and keep the defaults.  The
                # original code fell through and crashed on the unbound
                # 'config' name below.
                print(exc)
                return
        for diario in config['diarios']:
            if diario['tag'] != self.etiqueta:
                continue
            if 'feed_noticias' in diario:
                self.feed_noticias = diario['feed_noticias']
            if 'categorias' in diario:
                self.categorias = diario['categorias']
            if 'feeds' in diario:
                # Explicit per-category feeds replace the plain category list.
                self.categorias = []
                for feed in diario['feeds']:
                    self.feeds[feed['tag']] = feed['url']
                    self.categorias.append(feed['tag'])

    def leer(self):
        """Download every new article advertised by the configured feeds.

        Articles whose URL is already stored for this paper are skipped.
        """
        kiosco = Kiosco()
        print("leyendo '" + self.etiqueta + "'...")
        for tag, url_feed in self.feeds.items():
            for url_noticia, fecha in self.reconocer_urls_y_fechas_noticias(url_feed=url_feed):
                # Skip articles (by URL) that are already in the store.
                if kiosco.bd.noticias.find(filter={'diario': self.etiqueta, 'url': url_noticia}).count() > 0:
                    continue
                noticia = self.nueva_noticia(url=url_noticia, categoria=tag, diario=self.etiqueta)
                if noticia is None:
                    continue
                if noticia.fecha is None:
                    # Article page had no date: fall back to the feed's date.
                    noticia.fecha = fecha
                self.noticias.append(noticia)

    def limpiar_texto(self, texto):
        """Hook for subclasses to sanitise article text; identity by default."""
        return texto

    def reconocer_urls_y_fechas_noticias(self, url_feed):
        """Return (url, date) tuples for every entry of the given feed."""
        urls_y_fechas = []
        for entrada in fp.parse(url_feed).entries:
            fecha = self.parsear_fecha(entrada)
            urls_y_fechas.append((entrada.link, fecha))
        return urls_y_fechas

    def nueva_noticia(self, url, categoria, diario):
        """Download and parse one article; return a Noticia, or None on failure."""
        articulo = np.Article(url=url, language='es')
        try:
            articulo.download()
            articulo.parse()
        except Exception:
            # Network/parse failures are routine; just drop the article.
            return None
        return Noticia(fecha=articulo.publish_date, url=url, diario=diario,
                       categoria=categoria, titulo=articulo.title,
                       texto=self.limpiar_texto(articulo.text))

    def parsear_fecha(self, entrada):
        """Parse the feed entry's 'published' field into a datetime."""
        return dateutil.parser.parse(entrada.published)
1c45c12a3a030e96ca65ccd9aaa300d1505c26ff | 237 | css | CSS | frontend/PaymentApp/assets/css/insurance.css | LaudaDev/sep-acquirer-web-app | 7bc4735dbb2e9f2e5c13f3f314e7f083ba4fb763 | [
"MIT"
] | null | null | null | frontend/PaymentApp/assets/css/insurance.css | LaudaDev/sep-acquirer-web-app | 7bc4735dbb2e9f2e5c13f3f314e7f083ba4fb763 | [
"MIT"
] | null | null | null | frontend/PaymentApp/assets/css/insurance.css | LaudaDev/sep-acquirer-web-app | 7bc4735dbb2e9f2e5c13f3f314e7f083ba4fb763 | [
"MIT"
] | null | null | null | .form-control:focus {
border-color: #DED9D9;
box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 8px rgba(255, 0, 0, 0.6);
}
input[type="text"] {
border: 1px solid #007fff
}
input[type="password"] {
border: 1px solid #007fff
}
| 21.545455 | 81 | 0.637131 |
1f57cc210dc80a7f07f3fbf0e80656f6a2eb84ee | 5,305 | css | CSS | css/progressbar.css | Hache07/Proyecto_CICC | 0d648fe3866d8b816a02c88b5f01f691995acc6d | [
"MIT"
] | 2 | 2018-11-06T22:57:57.000Z | 2018-11-06T22:57:58.000Z | css/progressbar.css | Hache07/Proyecto_CICC | 0d648fe3866d8b816a02c88b5f01f691995acc6d | [
"MIT"
] | null | null | null | css/progressbar.css | Hache07/Proyecto_CICC | 0d648fe3866d8b816a02c88b5f01f691995acc6d | [
"MIT"
] | null | null | null | @-moz-keyframes pulse {
/* line 4, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
30% {
opacity: .6; }
/* line 5, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
60% {
opacity: 0; }
/* line 6, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
100% {
opacity: .6; } }
@-ms-keyframes pulse {
/* line 9, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
30% {
opacity: .6; }
/* line 10, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
60% {
opacity: 0; }
/* line 11, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
100% {
opacity: .6; } }
@-o-keyframes pulse {
/* line 14, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
30% {
opacity: .6; }
/* line 15, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
60% {
opacity: 0; }
/* line 16, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
100% {
opacity: .6; } }
@-webkit-keyframes pulse {
/* line 19, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
30% {
opacity: .6; }
/* line 20, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
60% {
opacity: 0; }
/* line 21, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
100% {
opacity: .6; } }
@keyframes pulse {
/* line 24, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
30% {
opacity: .6; }
/* line 25, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
60% {
opacity: 0; }
/* line 26, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
100% {
opacity: .6; } }
/* line 30, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
html,
body {
height: 100%; }
/* Wrapper for page content to push down footer */
/* line 36, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
#wrap {
min-height: 100%;
height: auto;
/* Negative indent footer by its height */
margin: 0 auto -60px;
/* Pad bottom by footer height */
padding: 0 0 60px; }
/* line 46, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
#footer {
height: 60px;
background-color: #f5f5f5; }
/* line 52, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
.container {
width: auto;
max-width: 680px;
padding: 0 15px; }
/* line 57, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
.container .credit {
margin: 20px 0; }
/* line 63, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
#progress {
position: fixed;
z-index: 2147483647;
top: 0;
-webkit-border-radius: 1px;
-moz-border-radius: 1px;
-ms-border-radius: 1px;
-o-border-radius: 1px;
border-radius: 1px;
-webkit-transition: width 500ms ease-out, opacity 400ms linear;
-moz-transition: width 500ms ease-out, opacity 400ms linear;
-o-transition: width 500ms ease-out, opacity 400ms linear;
transition: width 500ms ease-out, opacity 400ms linear; }
/* line 73, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
#progress dt,
#progress dd {
position: absolute;
top: 0;
height: 2px;
-webkit-box-shadow: #b91f1f 1px 0 6px 1px;
-moz-box-shadow: #b91f1f 1px 0 6px 1px;
box-shadow: #b91f1f 1px 0 6px 1px;
-webkit-border-radius: 100%;
-moz-border-radius: 100%;
-ms-border-radius: 100%;
-o-border-radius: 100%;
border-radius: 100%; }
/* line 82, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
#progress dt {
opacity: .6;
width: 180px;
right: -80px;
clip: rect(-6px, 90px, 14px, -6px); }
/* line 89, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
#progress dd {
opacity: .6;
width: 20px;
right: 0;
clip: rect(-6px, 22px, 14px, 10px); }
/* line 100, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
#progress.waiting dt,
#progress.waiting dd {
-moz-animation: pulse 2s ease-out 0s infinite;
-ms-animation: pulse 2s ease-out 0s infinite;
-o-animation: pulse 2s ease-out 0s infinite;
-webkit-animation: pulse 2s ease-out 0s infinite;
animation: pulse 2s ease-out 0s infinite; }
/* line 109, /Users/Mikasa/Projects/progressBarJS/source/stylesheets/jquery.progressbar.css.scss */
#progress.done {
filter: progid:DXImageTransform.Microsoft.Alpha(Opacity=0);
opacity: 0; }
| 35.844595 | 103 | 0.658058 |
55ea031db24b817064bdf6c26483fef316eca0e8 | 524 | swift | Swift | Agenda/Componentes/LigacaoTelefonica.swift | ianpab/agenda | 4fa8c18768fac477fb1c4776700eb9cedddc5463 | [
"MIT"
] | null | null | null | Agenda/Componentes/LigacaoTelefonica.swift | ianpab/agenda | 4fa8c18768fac477fb1c4776700eb9cedddc5463 | [
"MIT"
] | null | null | null | Agenda/Componentes/LigacaoTelefonica.swift | ianpab/agenda | 4fa8c18768fac477fb1c4776700eb9cedddc5463 | [
"MIT"
] | null | null | null | //
// LigacaoTelefonica.swift
// Agenda
//
// Created by Ian Pablo on 02/11/19.
// Copyright © 2019 Alura. All rights reserved.
//
import UIKit
/// Helper that places phone calls to students via the `tel://` URL scheme.
class LigacaoTelefonica: NSObject {
    /// Starts a phone call to the selected student.
    ///
    /// Silently does nothing when the student has no phone number or the
    /// device cannot open `tel://` URLs (e.g. iPad or the simulator).
    func fazLigacao(_ alunoSelecionado:Aluno){
        guard let numeroAluno = alunoSelecionado.telefone else { return }
        if let url = URL(string: "tel://\(numeroAluno)"), UIApplication.shared.canOpenURL(url){
            UIApplication.shared.open(url, options: [:], completionHandler: nil)
        }
    }
}
| 24.952381 | 99 | 0.637405 |
9c063a4f361c5416fe91447c82dbf5dcb2ef04a5 | 3,157 | js | JavaScript | src/components/BlobViewer/Repo/useFindRepo.js | mpan-wework/github | 4bffbb1ffadb4684365d1521be992a9dd2c4f45b | [
"MIT"
] | null | null | null | src/components/BlobViewer/Repo/useFindRepo.js | mpan-wework/github | 4bffbb1ffadb4684365d1521be992a9dd2c4f45b | [
"MIT"
] | 6 | 2021-05-11T01:45:56.000Z | 2022-02-26T22:58:31.000Z | src/components/BlobViewer/Repo/useFindRepo.js | mpan-wework/github | 4bffbb1ffadb4684365d1521be992a9dd2c4f45b | [
"MIT"
] | null | null | null | import { useCallback, useEffect, useState } from 'react';
import githubClient from '../../../service/api/github';
import useAsyncMemo from '../../shared/useAsyncMemo';
import useAsyncDebouncedCallback from '../../shared/useAsyncDebounceCallback';
/**
 * React hook driving cascading GitHub owner / repo / branch selectors.
 *
 * Given the authenticated `user`, exposes the current selection plus async
 * option loaders suitable for react-select style components.  Changing the
 * owner resets repo and branch; changing the repo resets the branch.
 * Options are objects of shape { label, value, data } where `data` is the
 * raw API payload.
 */
const useFindRepo = (props) => {
  const { user } = props;
  const [owner, setOwner] = useState(null);
  const [repo, setRepo] = useState(null);
  const [branch, setBranch] = useState(null);

  // Clear the whole selection when the user logs out.
  useEffect(() => {
    if (!user) {
      setOwner(null);
      setRepo(null);
      setBranch(null);
    }
  }, [user]);

  // Default owner options: the user itself plus its organizations.
  const ownerOptions = useAsyncMemo(
    async () => {
      if (!user) {
        return [];
      }
      const orgs = await githubClient.orgs();
      return [user].concat(orgs).map((ownerItem) => ({
        label: ownerItem.login,
        value: ownerItem.login.toLowerCase(),
        data: ownerItem,
      }));
    },
    [user],
    [],
  );

  // Debounced owner search: locally filtered defaults followed by API hits.
  // NOTE(review): an owner present in both lists appears twice — confirm
  // whether the select component dedupes by `value`.
  const loadOwnerOptions = useAsyncDebouncedCallback(
    async (inputValue) => {
      const data = await githubClient.qOwners(inputValue);
      const owners = data.items.map((ownerItem) => ({
        label: ownerItem.login,
        value: ownerItem.login.toLowerCase(),
        data: ownerItem,
      }));
      return ownerOptions
        .filter(
          (ownerItem) =>
            inputValue.trim() === '' ||
            ownerItem.value.indexOf(inputValue.toLowerCase()) > -1,
        )
        .concat(owners);
    },
    [ownerOptions],
  );

  // Selecting an owner invalidates the dependent repo/branch selections.
  const handleOwnerChange = useCallback((value) => {
    setOwner(value);
    setRepo(null);
    setBranch(null);
  }, []);

  // Debounced repo search, scoped to the selected owner (user vs org).
  const loadRepoOptions = useAsyncDebouncedCallback(
    async (inputValue) => {
      if (!owner) {
        return [];
      }
      const scope =
        owner.data.type === 'User'
          ? { user: owner.data.login }
          : { org: owner.data.login };
      const data = await githubClient.qRepos(inputValue, scope);
      return data.items.map((repoItem) => ({
        label: repoItem.name,
        value: repoItem.name,
        data: repoItem,
      }));
    },
    [owner],
  );

  // Selecting a repo invalidates the dependent branch selection.
  const handleRepoChange = useCallback((value) => {
    setRepo(value);
    setBranch(null);
  }, []);

  // All branches of the selected repo, fetched once per owner/repo pair.
  const branchOptions = useAsyncMemo(async () => {
    if (!owner || !repo) {
      return [];
    }
    const branches = await githubClient.branches(
      owner.data.login,
      repo.data.name,
    );
    return branches.map((branchItem) => ({
      label: branchItem.name,
      value: branchItem.name.toLowerCase(),
      data: branchItem,
    }));
  }, [owner, repo]);

  // Branch search is purely a local substring filter (no API call).
  const loadBranchOptions = useCallback(
    async (inputValue) => {
      return branchOptions.filter(
        (branchItem) =>
          inputValue.trim() === '' ||
          branchItem.value.indexOf(inputValue.toLowerCase()) > -1,
      );
    },
    [branchOptions],
  );

  // [state, handlers] tuple, mirroring the useState convention.
  return [
    {
      owner,
      ownerOptions,
      repo,
      branch,
      branchOptions,
    },
    {
      loadOwnerOptions,
      handleOwnerChange,
      loadRepoOptions,
      handleRepoChange,
      loadBranchOptions,
      handleBranchChange: setBranch,
    },
  ];
};

export default useFindRepo;
| 23.043796 | 78 | 0.568578 |
ba2f628893b06040a5c72814ed49f302107d2bfd | 4,146 | kt | Kotlin | plugin-gradle/src/test/java/com/github/autostyle/gradle/ErrorShouldRethrowTest.kt | weisJ/autostyle | 85f539da6d6228517e300b72dd254ff61474b877 | [
"Apache-2.0"
] | 14 | 2019-11-13T05:32:58.000Z | 2021-12-16T09:10:26.000Z | plugin-gradle/src/test/java/com/github/autostyle/gradle/ErrorShouldRethrowTest.kt | weisJ/autostyle | 85f539da6d6228517e300b72dd254ff61474b877 | [
"Apache-2.0"
] | 29 | 2019-11-12T19:44:36.000Z | 2021-12-08T17:56:46.000Z | plugin-gradle/src/test/java/com/github/autostyle/gradle/ErrorShouldRethrowTest.kt | weisJ/autostyle | 85f539da6d6228517e300b72dd254ff61474b877 | [
"Apache-2.0"
] | 1 | 2020-12-11T21:01:35.000Z | 2020-12-11T21:01:35.000Z | /*
* Copyright 2019 Vladimir Sitnikov <sitnikov.vladimir@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.autostyle.gradle
import org.assertj.core.api.Assertions.assertThat
import org.gradle.testkit.runner.BuildResult
import org.gradle.testkit.runner.TaskOutcome
import org.junit.jupiter.api.Test
/** Tests the desired behavior from https://github.com/diffplug/spotless/issues/46. */
/** Tests the desired behavior from https://github.com/diffplug/spotless/issues/46. */
class ErrorShouldRethrowTest : GradleIntegrationTest() {
    /**
     * Writes a build.gradle with a 'misc' format containing two custom steps
     * that throw on the words "fubar" / "foobar" in README.md.
     */
    private fun writeBuild(formatSettings: String = "", enforceCheck: Boolean? = null) {
        // When enforceCheck is unset, emit nothing — the previous code
        // interpolated the literal text "null" into the build script.
        val enforceCheckLine = enforceCheck?.let { "enforceCheck $it" } ?: ""
        val build = """
            plugins {
                id 'com.github.autostyle'
                id 'java'
            }
            autostyle {
                $enforceCheckLine
                format 'misc', {
                    lineEndings 'UNIX'
                    target file('README.md')
                    custom 'no fu', 1, {
                        if (it.toLowerCase(Locale.ROOT).contains('fubar')) {
                            throw new RuntimeException('No fubar!');
                        }
                    }
                    custom 'no foo', 1, {
                        if (it.toLowerCase(Locale.ROOT).contains('foobar')) {
                            throw new RuntimeException('No foobar!');
                        }
                    }
                    $formatSettings
                }
            }
        """.trimIndent()
        setFile("build.gradle").toContent(build)
    }

    /** A README without the banned words passes the check. */
    @Test
    fun passesIfNoException() {
        writeBuild()
        setFile("README.md").toContent("This code is fun.")
        runWithSuccess("> Task :autostyleMiscCheck")
    }

    /** An exception thrown by a custom step must fail the build with its message. */
    @Test
    fun anyExceptionShouldFail() {
        writeBuild()
        setFile("README.md").toContent("This code is fubar.")
        // Expected messages now match the actual step name 'no fu'; the old
        // expectations referenced a nonexistent 'no swearing' step.
        runWithFailure(
            ":autostyleMiscStep 'no fu' found problem in 'README.md':",
            "No fubar!",
            "java.lang.RuntimeException: No fubar!"
        )
    }

    /** With enforceCheck=false, autostyleCheck is not wired into `check`. */
    @Test
    fun unlessEnforceCheckIsFalse() {
        writeBuild(enforceCheck = false)
        setFile("README.md").toContent("This code is fubar.")
        // autostyleCheck is not executed as a part of check since enforceCheck=false
        runWithSuccess(outcome = null)
    }

    private fun runWithSuccess(vararg messages: String, outcome: TaskOutcome? = TaskOutcome.SUCCESS) {
        val result = gradleRunner().withArguments("check").build()
        assertResultAndMessages(result, outcome, *messages)
    }

    private fun runWithFailure(vararg messages: String, outcome: TaskOutcome? = TaskOutcome.FAILED) {
        val result = gradleRunner().withArguments("check").buildAndFail()
        assertResultAndMessages(result, outcome, *messages)
    }

    private fun assertResultAndMessages(
        result: BuildResult,
        outcome: TaskOutcome?,
        vararg messages: String
    ) {
        // NOTE(review): AssertJ's `as`() only takes effect when called
        // *before* the assertion; here it is a no-op. Kept as-is.
        assertThat(result.task(":autostyleMiscProcess")?.outcome).isEqualTo(outcome).`as`("autostyleMiscCheck.outcome")
        // Message verification is currently disabled:
        // val expectedToStartWith =
        //     StringPrinter.buildStringFromLines(*messages).trim { it <= ' ' }
        // val numNewlines = CharMatcher.`is`('\n').countIn(expectedToStartWith)
        // val actualLines = LineEnding.toUnix(result.output).split('\n')
        // val actualStart = actualLines.subList(0, numNewlines + 1).joinToString("\n")
        // assertThat(actualStart).isEqualTo(expectedToStartWith)
        // assertThat(result.tasks(outcome).size + result.tasks(TaskOutcome.UP_TO_DATE).size)
        //     .isEqualTo(result.tasks.size)
    }
}
| 39.485714 | 119 | 0.613603 |
adc3790417cc77be10fb9149b06c5b94c024e19b | 304 | rs | Rust | client-core/src/types.rs | calvinlauco/chain | ef40ea2c44f8e1aca8a36e50ff10172879df3491 | [
"Apache-2.0"
] | 2 | 2020-06-22T17:52:07.000Z | 2020-10-02T14:05:14.000Z | client-core/src/types.rs | chatch/chain | 9ef4e2914688a80bb91c954f27bd2e4aeaede293 | [
"Apache-2.0"
] | 1 | 2021-05-10T18:58:59.000Z | 2021-05-10T18:58:59.000Z | client-core/src/types.rs | tomtau/chain | 52d04727e32d896a61af08afa86763779508ca3a | [
"Apache-2.0"
] | null | null | null | //! Types used in `client-core`
mod address_type;
mod wallet_type;
pub mod transaction_change;
pub use self::address_type::AddressType;
#[doc(inline)]
pub use self::transaction_change::{
BalanceChange, TransactionChange, TransactionInput, TransactionType,
};
pub use self::wallet_type::WalletKind;
| 23.384615 | 72 | 0.776316 |
5b2fdd374e48850b49696ab2f661529fe9b394cc | 82,257 | c | C | sys/geom/journal/g_journal.c | dcui/FreeBSD-9.3_kernel | 39d9caaa6ba320e2f8e910b1f5f01efc24ca4a92 | [
"BSD-3-Clause"
] | 3 | 2015-12-15T00:56:39.000Z | 2018-01-11T01:01:38.000Z | sys/geom/journal/g_journal.c | dcui/FreeBSD-9.3_kernel | 39d9caaa6ba320e2f8e910b1f5f01efc24ca4a92 | [
"BSD-3-Clause"
] | null | null | null | sys/geom/journal/g_journal.c | dcui/FreeBSD-9.3_kernel | 39d9caaa6ba320e2f8e910b1f5f01efc24ca4a92 | [
"BSD-3-Clause"
] | 2 | 2018-01-11T01:01:12.000Z | 2020-11-19T03:07:29.000Z | /*-
* Copyright (c) 2005-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.3/sys/geom/journal/g_journal.c 253415 2013-07-17 10:35:57Z kib $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/eventhandler.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>
#include <sys/sbuf.h>
#ifdef GJ_MEMDEBUG
#include <sys/stack.h>
#include <sys/kdb.h>
#endif
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <geom/geom.h>
#include <geom/journal/g_journal.h>
FEATURE(geom_journal, "GEOM journaling support");
/*
* On-disk journal format:
*
* JH - Journal header
* RH - Record header
*
* %%%%%% ****** +------+ +------+ ****** +------+ %%%%%%
* % JH % * RH * | Data | | Data | ... * RH * | Data | ... % JH % ...
* %%%%%% ****** +------+ +------+ ****** +------+ %%%%%%
*
*/
CTASSERT(sizeof(struct g_journal_header) <= 512);
CTASSERT(sizeof(struct g_journal_record_header) <= 512);
static MALLOC_DEFINE(M_JOURNAL, "journal_data", "GEOM_JOURNAL Data");
static struct mtx g_journal_cache_mtx;
MTX_SYSINIT(g_journal_cache, &g_journal_cache_mtx, "cache usage", MTX_DEF);
const struct g_journal_desc *g_journal_filesystems[] = {
&g_journal_ufs,
NULL
};
SYSCTL_DECL(_kern_geom);
int g_journal_debug = 0;
TUNABLE_INT("kern.geom.journal.debug", &g_journal_debug);
static u_int g_journal_switch_time = 10;
static u_int g_journal_force_switch = 70;
static u_int g_journal_parallel_flushes = 16;
static u_int g_journal_parallel_copies = 16;
static u_int g_journal_accept_immediately = 64;
static u_int g_journal_record_entries = GJ_RECORD_HEADER_NENTRIES;
static u_int g_journal_do_optimize = 1;
static SYSCTL_NODE(_kern_geom, OID_AUTO, journal, CTLFLAG_RW, 0,
"GEOM_JOURNAL stuff");
SYSCTL_INT(_kern_geom_journal, OID_AUTO, debug, CTLFLAG_RW, &g_journal_debug, 0,
"Debug level");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, switch_time, CTLFLAG_RW,
&g_journal_switch_time, 0, "Switch journals every N seconds");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, force_switch, CTLFLAG_RW,
&g_journal_force_switch, 0, "Force switch when journal is N% full");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_flushes, CTLFLAG_RW,
&g_journal_parallel_flushes, 0,
"Number of flush I/O requests to send in parallel");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, accept_immediately, CTLFLAG_RW,
&g_journal_accept_immediately, 0,
"Number of I/O requests accepted immediately");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, parallel_copies, CTLFLAG_RW,
&g_journal_parallel_copies, 0,
"Number of copy I/O requests to send in parallel");
/*
 * Sysctl handler for kern.geom.journal.record_entries: maximum number of
 * entries packed into a single on-disk journal record.
 */
static int
g_journal_record_entries_sysctl(SYSCTL_HANDLER_ARGS)
{
    u_int entries;
    int error;

    entries = g_journal_record_entries;
    error = sysctl_handle_int(oidp, &entries, 0, req);
    /* Read-only access or handler error: nothing more to do. */
    if (error != 0 || req->newptr == NULL)
        return (error);
    /* Clamp to the capacity of the on-disk record header. */
    if (entries < 1 || entries > GJ_RECORD_HEADER_NENTRIES)
        return (EINVAL);
    g_journal_record_entries = entries;
    return (0);
}
SYSCTL_PROC(_kern_geom_journal, OID_AUTO, record_entries,
CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_record_entries_sysctl, "I",
"Maximum number of entires in one journal record");
SYSCTL_UINT(_kern_geom_journal, OID_AUTO, optimize, CTLFLAG_RW,
&g_journal_do_optimize, 0, "Try to combine bios on flush and copy");
static u_int g_journal_cache_used = 0;
static u_int g_journal_cache_limit = 64 * 1024 * 1024;
TUNABLE_INT("kern.geom.journal.cache.limit", &g_journal_cache_limit);
static u_int g_journal_cache_divisor = 2;
TUNABLE_INT("kern.geom.journal.cache.divisor", &g_journal_cache_divisor);
static u_int g_journal_cache_switch = 90;
static u_int g_journal_cache_misses = 0;
static u_int g_journal_cache_alloc_failures = 0;
static u_int g_journal_cache_low = 0;
static SYSCTL_NODE(_kern_geom_journal, OID_AUTO, cache, CTLFLAG_RW, 0,
"GEOM_JOURNAL cache");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, used, CTLFLAG_RD,
&g_journal_cache_used, 0, "Number of allocated bytes");
/*
 * Sysctl handler for kern.geom.journal.cache.limit: hard cap on cache
 * bytes.  Also recomputes the low watermark as a percentage of the limit.
 */
static int
g_journal_cache_limit_sysctl(SYSCTL_HANDLER_ARGS)
{
    u_int limit;
    int error;

    limit = g_journal_cache_limit;
    error = sysctl_handle_int(oidp, &limit, 0, req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    g_journal_cache_limit = limit;
    /* Keep the low watermark a fixed percentage of the new limit. */
    g_journal_cache_low = (limit / 100) * g_journal_cache_switch;
    return (0);
}
SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, limit,
CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_cache_limit_sysctl, "I",
"Maximum number of allocated bytes");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, divisor, CTLFLAG_RDTUN,
&g_journal_cache_divisor, 0,
"(kmem_size / kern.geom.journal.cache.divisor) == cache size");
/*
 * Sysctl handler for kern.geom.journal.cache.switch: percentage of the
 * cache limit at which a journal switch is forced.
 */
static int
g_journal_cache_switch_sysctl(SYSCTL_HANDLER_ARGS)
{
    u_int cswitch;
    int error;

    cswitch = g_journal_cache_switch;
    error = sysctl_handle_int(oidp, &cswitch, 0, req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    /*
     * cswitch is unsigned, so a negative input wraps to a huge value and
     * is rejected here; the former "cswitch < 0" comparison was always
     * false and has been removed.
     */
    if (cswitch > 100)
        return (EINVAL);
    g_journal_cache_switch = cswitch;
    /* Recompute the low watermark from the new percentage. */
    g_journal_cache_low = (g_journal_cache_limit / 100) * cswitch;
    return (0);
}
SYSCTL_PROC(_kern_geom_journal_cache, OID_AUTO, switch,
CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, g_journal_cache_switch_sysctl, "I",
"Force switch when we hit this percent of cache use");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, misses, CTLFLAG_RW,
&g_journal_cache_misses, 0, "Number of cache misses");
SYSCTL_UINT(_kern_geom_journal_cache, OID_AUTO, alloc_failures, CTLFLAG_RW,
&g_journal_cache_alloc_failures, 0, "Memory allocation failures");
static u_long g_journal_stats_bytes_skipped = 0;
static u_long g_journal_stats_combined_ios = 0;
static u_long g_journal_stats_switches = 0;
static u_long g_journal_stats_wait_for_copy = 0;
static u_long g_journal_stats_journal_full = 0;
static u_long g_journal_stats_low_mem = 0;
static SYSCTL_NODE(_kern_geom_journal, OID_AUTO, stats, CTLFLAG_RW, 0,
"GEOM_JOURNAL statistics");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, skipped_bytes, CTLFLAG_RW,
&g_journal_stats_bytes_skipped, 0, "Number of skipped bytes");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, combined_ios, CTLFLAG_RW,
&g_journal_stats_combined_ios, 0, "Number of combined I/O requests");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, switches, CTLFLAG_RW,
&g_journal_stats_switches, 0, "Number of journal switches");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, wait_for_copy, CTLFLAG_RW,
&g_journal_stats_wait_for_copy, 0, "Wait for journal copy on switch");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, journal_full, CTLFLAG_RW,
&g_journal_stats_journal_full, 0,
"Number of times journal was almost full.");
SYSCTL_ULONG(_kern_geom_journal_stats, OID_AUTO, low_mem, CTLFLAG_RW,
&g_journal_stats_low_mem, 0, "Number of times low_mem hook was called.");
static g_taste_t g_journal_taste;
static g_ctl_req_t g_journal_config;
static g_dumpconf_t g_journal_dumpconf;
static g_init_t g_journal_init;
static g_fini_t g_journal_fini;
struct g_class g_journal_class = {
.name = G_JOURNAL_CLASS_NAME,
.version = G_VERSION,
.taste = g_journal_taste,
.ctlreq = g_journal_config,
.dumpconf = g_journal_dumpconf,
.init = g_journal_init,
.fini = g_journal_fini
};
static int g_journal_destroy(struct g_journal_softc *sc);
static void g_journal_metadata_update(struct g_journal_softc *sc);
static void g_journal_switch_wait(struct g_journal_softc *sc);
#define GJ_SWITCHER_WORKING 0
#define GJ_SWITCHER_DIE 1
#define GJ_SWITCHER_DIED 2
static int g_journal_switcher_state = GJ_SWITCHER_WORKING;
static int g_journal_switcher_wokenup = 0;
static int g_journal_sync_requested = 0;
#ifdef GJ_MEMDEBUG
struct meminfo {
size_t mi_size;
struct stack mi_stack;
};
#endif
/*
* We use our own malloc/realloc/free funtions, so we can collect statistics
* and force journal switch when we're running out of cache.
*/
static void *
gj_malloc(size_t size, int flags)
{
    void *p;
#ifdef GJ_MEMDEBUG
    struct meminfo *mi;
#endif

    mtx_lock(&g_journal_cache_mtx);
    /* Crossing the low watermark: wake the switcher to free journal space. */
    if (g_journal_cache_limit > 0 && !g_journal_switcher_wokenup &&
        g_journal_cache_used + size > g_journal_cache_low) {
        GJ_DEBUG(1, "No cache, waking up the switcher.");
        g_journal_switcher_wokenup = 1;
        wakeup(&g_journal_switcher_state);
    }
    /* M_NOWAIT callers fail outright once the hard limit would be exceeded. */
    if ((flags & M_NOWAIT) && g_journal_cache_limit > 0 &&
        g_journal_cache_used + size > g_journal_cache_limit) {
        mtx_unlock(&g_journal_cache_mtx);
        g_journal_cache_alloc_failures++;
        return (NULL);
    }
    g_journal_cache_used += size;
    mtx_unlock(&g_journal_cache_mtx);
    /* From here on we always sleep for memory, so drop M_NOWAIT. */
    flags &= ~M_NOWAIT;
#ifndef GJ_MEMDEBUG
    p = malloc(size, M_JOURNAL, flags | M_WAITOK);
#else
    /* Debug build: prepend a meminfo header recording size and backtrace. */
    mi = malloc(sizeof(*mi) + size, M_JOURNAL, flags | M_WAITOK);
    p = (u_char *)mi + sizeof(*mi);
    mi->mi_size = size;
    stack_save(&mi->mi_stack);
#endif
    return (p);
}
/*
 * Free a gj_malloc()ed buffer and return its bytes to the cache
 * accounting.  The caller must pass the exact allocation size.
 */
static void
gj_free(void *p, size_t size)
{
#ifdef GJ_MEMDEBUG
    struct meminfo *mi;
#endif

    KASSERT(p != NULL, ("p=NULL"));
    KASSERT(size > 0, ("size=0"));
    mtx_lock(&g_journal_cache_mtx);
    KASSERT(g_journal_cache_used >= size, ("Freeing too much?"));
    g_journal_cache_used -= size;
    mtx_unlock(&g_journal_cache_mtx);
#ifdef GJ_MEMDEBUG
    /* Step back to the meminfo header and cross-check the recorded size. */
    mi = p = (void *)((u_char *)p - sizeof(*mi));
    if (mi->mi_size != size) {
        printf("GJOURNAL: Size mismatch! %zu != %zu\n", size,
            mi->mi_size);
        printf("GJOURNAL: Alloc backtrace:\n");
        stack_print(&mi->mi_stack);
        printf("GJOURNAL: Free backtrace:\n");
        kdb_backtrace();
    }
#endif
    free(p, M_JOURNAL);
}
/*
 * Resize a gj_malloc()ed buffer, keeping the cache accounting in sync.
 * The caller must pass the previous allocation size in 'oldsize'.
 */
static void *
gj_realloc(void *p, size_t size, size_t oldsize)
{
    void *np;

#ifndef GJ_MEMDEBUG
    /* Adjust the accounting by the size delta before reallocating. */
    mtx_lock(&g_journal_cache_mtx);
    g_journal_cache_used -= oldsize;
    g_journal_cache_used += size;
    mtx_unlock(&g_journal_cache_mtx);
    np = realloc(p, size, M_JOURNAL, M_WAITOK);
#else
    /* Debug build: alloc+copy+free so the meminfo header stays correct. */
    np = gj_malloc(size, M_WAITOK);
    bcopy(p, np, MIN(oldsize, size));
    gj_free(p, oldsize);
#endif
    return (np);
}
/*
 * Panic if the active journal has overrun the inactive (not yet copied)
 * one, and force a journal switch once the active journal consumes more
 * than g_journal_force_switch percent of the free journal space.
 */
static void
g_journal_check_overflow(struct g_journal_softc *sc)
{
    off_t length, used;

    if ((sc->sc_active.jj_offset < sc->sc_inactive.jj_offset &&
        sc->sc_journal_offset >= sc->sc_inactive.jj_offset) ||
        (sc->sc_active.jj_offset > sc->sc_inactive.jj_offset &&
        sc->sc_journal_offset >= sc->sc_inactive.jj_offset &&
        sc->sc_journal_offset < sc->sc_active.jj_offset)) {
        panic("Journal overflow "
            "(id = %u joffset=%jd active=%jd inactive=%jd)",
            (unsigned)sc->sc_id,
            (intmax_t)sc->sc_journal_offset,
            (intmax_t)sc->sc_active.jj_offset,
            (intmax_t)sc->sc_inactive.jj_offset);
    }
    /*
     * Compute the free span available to the active journal ('length')
     * and how much of it is already written ('used').
     */
    if (sc->sc_active.jj_offset < sc->sc_inactive.jj_offset) {
        length = sc->sc_inactive.jj_offset - sc->sc_active.jj_offset;
        used = sc->sc_journal_offset - sc->sc_active.jj_offset;
    } else {
        /* Active area wraps around the end of the journal space. */
        length = sc->sc_jend - sc->sc_active.jj_offset;
        length += sc->sc_inactive.jj_offset - sc->sc_jstart;
        if (sc->sc_journal_offset >= sc->sc_active.jj_offset)
            used = sc->sc_journal_offset - sc->sc_active.jj_offset;
        else {
            used = sc->sc_jend - sc->sc_active.jj_offset;
            used += sc->sc_journal_offset - sc->sc_jstart;
        }
    }
    /* Already woken up? */
    if (g_journal_switcher_wokenup)
        return;
    /*
     * If the active journal takes more than g_journal_force_switch percent
     * of free journal space, we force journal switch.
     */
    KASSERT(length > 0,
        ("length=%jd used=%jd active=%jd inactive=%jd joffset=%jd",
        (intmax_t)length, (intmax_t)used,
        (intmax_t)sc->sc_active.jj_offset,
        (intmax_t)sc->sc_inactive.jj_offset,
        (intmax_t)sc->sc_journal_offset));
    if ((used * 100) / length > g_journal_force_switch) {
        g_journal_stats_journal_full++;
        GJ_DEBUG(1, "Journal %s %jd%% full, forcing journal switch.",
            sc->sc_name, (used * 100) / length);
        mtx_lock(&g_journal_cache_mtx);
        g_journal_switcher_wokenup = 1;
        wakeup(&g_journal_switcher_state);
        mtx_unlock(&g_journal_cache_mtx);
    }
}
/*
 * GEOM orphan method: one of our providers disappeared, so tear down the
 * journal device.
 */
static void
g_journal_orphan(struct g_consumer *cp)
{
    struct g_journal_softc *sc;
    char name[256];
    int error;

    g_topology_assert();
    sc = cp->geom->softc;
    /* Copy the name first: g_journal_destroy() may free the provider. */
    strlcpy(name, cp->provider->name, sizeof(name));
    GJ_DEBUG(0, "Lost provider %s.", name);
    if (sc == NULL)
        return;
    error = g_journal_destroy(sc);
    if (error == 0)
        GJ_DEBUG(0, "Journal %s destroyed.", name);
    else {
        GJ_DEBUG(0, "Cannot destroy journal %s (error=%d). "
            "Destroy it manually after last close.", sc->sc_name,
            error);
    }
}
/*
 * GEOM access method.  Marks the journal dirty on disk when the first
 * writer opens the provider; refuses new opens while being destroyed.
 */
static int
g_journal_access(struct g_provider *pp, int acr, int acw, int ace)
{
    struct g_journal_softc *sc;
    int dcr, dcw, dce;

    g_topology_assert();
    GJ_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name,
        acr, acw, ace);

    /* Resulting access counts after this request (dcr/dce are unused). */
    dcr = pp->acr + acr;
    dcw = pp->acw + acw;
    dce = pp->ace + ace;

    sc = pp->geom->softc;
    if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY)) {
        /* Device gone or being destroyed: only releases are allowed. */
        if (acr <= 0 && acw <= 0 && ace <= 0)
            return (0);
        else
            return (ENXIO);
    }
    if (pp->acw == 0 && dcw > 0) {
        /* First writer: persist the dirty flag before allowing writes. */
        GJ_DEBUG(1, "Marking %s as dirty.", sc->sc_name);
        sc->sc_flags &= ~GJF_DEVICE_CLEAN;
        g_topology_unlock();
        g_journal_metadata_update(sc);
        g_topology_lock();
    } /* else if (pp->acw == 0 && dcw > 0 && JEMPTY(sc)) {
        GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
        sc->sc_flags |= GJF_DEVICE_CLEAN;
        g_topology_unlock();
        g_journal_metadata_update(sc);
        g_topology_lock();
    } */

    return (0);
}
/* Serialize a journal header into its little-endian on-disk layout. */
static void
g_journal_header_encode(struct g_journal_header *hdr, u_char *data)
{

    bcopy(GJ_HEADER_MAGIC, data, sizeof(GJ_HEADER_MAGIC));
    data += sizeof(GJ_HEADER_MAGIC);
    le32enc(data, hdr->jh_journal_id);
    data += 4;
    le32enc(data, hdr->jh_journal_next_id);
}
/*
 * Deserialize a journal header read from disk and validate its magic.
 * Returns EINVAL when the magic does not match.
 */
static int
g_journal_header_decode(const u_char *data, struct g_journal_header *hdr)
{

    bcopy(data, hdr->jh_magic, sizeof(hdr->jh_magic));
    data += sizeof(hdr->jh_magic);
    if (bcmp(hdr->jh_magic, GJ_HEADER_MAGIC, sizeof(GJ_HEADER_MAGIC)) != 0)
        return (EINVAL);
    hdr->jh_journal_id = le32dec(data);
    data += 4;
    hdr->jh_journal_next_id = le32dec(data);
    return (0);
}
/*
 * Issue BIO_FLUSH to the journal and/or data providers, depending on the
 * sc_bio_flush configuration bits.  No-op when flushing is disabled.
 */
static void
g_journal_flush_cache(struct g_journal_softc *sc)
{
    struct bintime bt;
    int error;

    if (sc->sc_bio_flush == 0)
        return;
    GJ_TIMER_START(1, &bt);
    if (sc->sc_bio_flush & GJ_FLUSH_JOURNAL) {
        error = g_io_flush(sc->sc_jconsumer);
        GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
            sc->sc_jconsumer->provider->name, error);
    }
    if (sc->sc_bio_flush & GJ_FLUSH_DATA) {
        /*
         * TODO: This could be called in parallel with the
         * previous call.
         */
        error = g_io_flush(sc->sc_dconsumer);
        GJ_DEBUG(error == 0 ? 2 : 0, "Flush cache of %s: error=%d.",
            sc->sc_dconsumer->provider->name, error);
    }
    GJ_TIMER_STOP(1, &bt, "Cache flush time");
}
/*
 * Write the journal header at the current journal offset and advance the
 * offset by one sector.  Note: the offset is advanced even when the write
 * fails (see the commented-out "if (error == 0)" below).
 */
static int
g_journal_write_header(struct g_journal_softc *sc)
{
    struct g_journal_header hdr;
    struct g_consumer *cp;
    u_char *buf;
    int error;

    cp = sc->sc_jconsumer;
    buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);

    strlcpy(hdr.jh_magic, GJ_HEADER_MAGIC, sizeof(hdr.jh_magic));
    hdr.jh_journal_id = sc->sc_journal_id;
    hdr.jh_journal_next_id = sc->sc_journal_next_id;
    g_journal_header_encode(&hdr, buf);
    error = g_write_data(cp, sc->sc_journal_offset, buf,
        cp->provider->sectorsize);
    /* if (error == 0) */
    sc->sc_journal_offset += cp->provider->sectorsize;

    gj_free(buf, cp->provider->sectorsize);
    return (error);
}
/*
* Every journal record has a header and data following it.
* Functions below are used to decode the header before storing it to
* little endian and to encode it after reading to system endianess.
*/
/*
 * Encode a record header into its on-disk (little-endian) form.
 * The byte offsets used here must stay in lockstep with
 * g_journal_record_header_decode().
 */
static void
g_journal_record_header_encode(struct g_journal_record_header *hdr,
    u_char *data)
{
	struct g_journal_entry *ent;
	u_int i;

	bcopy(GJ_RECORD_HEADER_MAGIC, data, sizeof(GJ_RECORD_HEADER_MAGIC));
	data += sizeof(GJ_RECORD_HEADER_MAGIC);
	le32enc(data, hdr->jrh_journal_id);
	/*
	 * 4-byte id but an 8-byte advance — presumably 4 bytes of padding
	 * in the on-disk layout; the decoder advances by the same amount.
	 */
	data += 8;
	le16enc(data, hdr->jrh_nentries);
	data += 2;
	/* NOTE(review): advance assumes sizeof(jrh_sum) == 8 — confirm. */
	bcopy(hdr->jrh_sum, data, sizeof(hdr->jrh_sum));
	data += 8;
	/* One (joffset, offset, length) triple per journaled entry. */
	for (i = 0; i < hdr->jrh_nentries; i++) {
		ent = &hdr->jrh_entries[i];
		le64enc(data, ent->je_joffset);
		data += 8;
		le64enc(data, ent->je_offset);
		data += 8;
		le64enc(data, ent->je_length);
		data += 8;
	}
}
/*
 * Decode an on-disk (little-endian) record header into 'hdr'.
 * Returns 0 on success, or EINVAL when the magic does not match or the
 * entry count exceeds GJ_RECORD_HEADER_NENTRIES.  Offsets mirror
 * g_journal_record_header_encode().
 */
static int
g_journal_record_header_decode(const u_char *data,
    struct g_journal_record_header *hdr)
{
	struct g_journal_entry *ent;
	u_int i;

	bcopy(data, hdr->jrh_magic, sizeof(hdr->jrh_magic));
	data += sizeof(hdr->jrh_magic);
	if (strcmp(hdr->jrh_magic, GJ_RECORD_HEADER_MAGIC) != 0)
		return (EINVAL);
	hdr->jrh_journal_id = le32dec(data);
	/* 4-byte id + 4 bytes skipped; matches the encoder's advance. */
	data += 8;
	hdr->jrh_nentries = le16dec(data);
	data += 2;
	/* Bound-check before filling jrh_entries[] below. */
	if (hdr->jrh_nentries > GJ_RECORD_HEADER_NENTRIES)
		return (EINVAL);
	bcopy(data, hdr->jrh_sum, sizeof(hdr->jrh_sum));
	data += 8;
	for (i = 0; i < hdr->jrh_nentries; i++) {
		ent = &hdr->jrh_entries[i];
		ent->je_joffset = le64dec(data);
		data += 8;
		ent->je_offset = le64dec(data);
		data += 8;
		ent->je_length = le64dec(data);
		data += 8;
	}
	return (0);
}
/*
 * Function reads metadata from a provider (via the given consumer), decodes
 * it to system endianness and verifies its correctness.
 */
static int
g_journal_metadata_read(struct g_consumer *cp, struct g_journal_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	/* Open the consumer for reading around the metadata I/O. */
	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	/* Drop the topology lock for the duration of the actual read. */
	g_topology_unlock();
	/* Metadata is stored in last sector. */
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL) {
		GJ_DEBUG(1, "Cannot read metadata from %s (error=%d).",
		    cp->provider->name, error);
		return (error);
	}

	/* Decode metadata. */
	error = journal_metadata_decode(buf, md);
	g_free(buf);
	/* Is this a gjournal provider at all? */
	if (strcmp(md->md_magic, G_JOURNAL_MAGIC) != 0)
		return (EINVAL);
	/*
	 * Are we able to handle this version of metadata?
	 * We only maintain backward compatibility.
	 */
	if (md->md_version > G_JOURNAL_VERSION) {
		GJ_DEBUG(0,
		    "Kernel module is too old to handle metadata from %s.",
		    cp->provider->name);
		return (EINVAL);
	}
	/*
	 * Is checksum correct?
	 * The decode error is deliberately checked only after the magic and
	 * version checks, so non-gjournal providers are reported as EINVAL
	 * rather than as a hash mismatch.
	 */
	if (error != 0) {
		GJ_DEBUG(0, "MD5 metadata hash mismatch for provider %s.",
		    cp->provider->name);
		return (error);
	}
	return (0);
}
/*
* Two functions below are responsible for updating metadata.
* Only metadata on the data provider is updated (we need to update
* information about active journal in there).
*/
/*
 * Completion handler for the metadata write issued by
 * g_journal_metadata_update().  Logs the result and releases the bio and
 * its data buffer.  Also called directly in the synchronous (destroy) path.
 */
static void
g_journal_metadata_done(struct bio *bp)
{

	/*
	 * There is not much we can do on error except informing about it.
	 */
	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "Cannot update metadata (error=%d).",
		    bp->bio_error);
	} else {
		GJ_LOGREQ(2, bp, "Metadata updated.");
	}
	gj_free(bp->bio_data, bp->bio_length);
	g_destroy_bio(bp);
}
/*
 * Build the current metadata from the softc state and write it to the last
 * sector of the data provider.  Asynchronous normally; synchronous (with
 * biowait) when the device is being destroyed.
 */
static void
g_journal_metadata_update(struct g_journal_softc *sc)
{
	struct g_journal_metadata md;
	struct g_consumer *cp;
	struct bio *bp;
	u_char *sector;

	cp = sc->sc_dconsumer;
	sector = gj_malloc(cp->provider->sectorsize, M_WAITOK);

	strlcpy(md.md_magic, G_JOURNAL_MAGIC, sizeof(md.md_magic));
	md.md_version = G_JOURNAL_VERSION;
	md.md_id = sc->sc_id;
	md.md_type = sc->sc_orig_type;
	md.md_jstart = sc->sc_jstart;
	md.md_jend = sc->sc_jend;
	md.md_joffset = sc->sc_inactive.jj_offset;
	md.md_jid = sc->sc_journal_previous_id;
	md.md_flags = 0;
	if (sc->sc_flags & GJF_DEVICE_CLEAN)
		md.md_flags |= GJ_FLAG_CLEAN;

	/* Only hardcoded (per-provider configured) devices store a name. */
	if (sc->sc_flags & GJF_DEVICE_HARDCODED)
		strlcpy(md.md_provider, sc->sc_name, sizeof(md.md_provider));
	else
		bzero(md.md_provider, sizeof(md.md_provider));
	md.md_provsize = cp->provider->mediasize;
	journal_metadata_encode(&md, sector);

	/*
	 * Flush the cache, so we know all data are on disk.
	 * We write here information like "journal is consistent", so we need
	 * to be sure it is. Without BIO_FLUSH here, we can end up in situation
	 * where metadata is stored on disk, but not all data.
	 */
	g_journal_flush_cache(sc);

	bp = g_alloc_bio();
	/* Metadata lives in the last sector of the data provider. */
	bp->bio_offset = cp->provider->mediasize - cp->provider->sectorsize;
	bp->bio_length = cp->provider->sectorsize;
	bp->bio_data = sector;
	bp->bio_cmd = BIO_WRITE;
	if (!(sc->sc_flags & GJF_DEVICE_DESTROY)) {
		bp->bio_done = g_journal_metadata_done;
		g_io_request(bp, cp);
	} else {
		/*
		 * On destroy, wait for the write to complete and invoke the
		 * completion handler manually (it frees the bio and buffer).
		 */
		bp->bio_done = NULL;
		g_io_request(bp, cp);
		biowait(bp, "gjmdu");
		g_journal_metadata_done(bp);
	}

	/*
	 * Be sure metadata reached the disk.
	 */
	g_journal_flush_cache(sc);
}
/*
* This is where the I/O request comes from the GEOM.
*/
/*
 * GEOM start routine: entry point for I/O requests to the journaled
 * provider.  READ/WRITE requests are queued for asynchronous processing
 * (the queue consumer is woken via wakeup(sc)); the GJOURNAL::provider
 * GETATTR is answered inline; everything else is rejected.
 */
static void
g_journal_start(struct bio *bp)
{
	struct g_journal_softc *sc;

	sc = bp->bio_to->geom->softc;
	GJ_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
		mtx_lock(&sc->sc_mtx);
		bioq_insert_tail(&sc->sc_regular_queue, bp);
		wakeup(sc);
		mtx_unlock(&sc->sc_mtx);
		return;
	case BIO_GETATTR:
		if (strcmp(bp->bio_attribute, "GJOURNAL::provider") == 0) {
			/* Report the name of this journaled provider. */
			strlcpy(bp->bio_data, bp->bio_to->name, bp->bio_length);
			bp->bio_completed = strlen(bp->bio_to->name) + 1;
			g_io_deliver(bp, 0);
			return;
		}
		/* FALLTHROUGH */
	case BIO_DELETE:
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
}
/*
 * Standard bio completion handler: defer processing by queuing the
 * finished bio on sc_back_queue and waking up whoever sleeps on 'sc'.
 */
static void
g_journal_std_done(struct bio *bp)
{
	struct g_journal_softc *sc;

	sc = bp->bio_from->geom->softc;
	mtx_lock(&sc->sc_mtx);
	bioq_insert_tail(&sc->sc_back_queue, bp);
	wakeup(sc);
	mtx_unlock(&sc->sc_mtx);
}
/*
 * Allocate a BIO_WRITE bio covering [start, end) at journal offset
 * 'joffset'.  If 'data' is non-NULL it is copied into a freshly allocated
 * buffer ('flags' are the gj_malloc flags, so with M_NOWAIT bio_data may
 * come back NULL); a NULL 'data' leaves bio_data NULL, which callers treat
 * as "must be read back from the journal".
 */
static struct bio *
g_journal_new_bio(off_t start, off_t end, off_t joffset, u_char *data,
    int flags)
{
	struct bio *bp;

	bp = g_alloc_bio();
	bp->bio_offset = start;
	bp->bio_joffset = joffset;
	bp->bio_length = end - start;
	bp->bio_cmd = BIO_WRITE;
	bp->bio_done = g_journal_std_done;
	if (data == NULL)
		bp->bio_data = NULL;
	else {
		bp->bio_data = gj_malloc(bp->bio_length, flags);
		if (bp->bio_data != NULL)
			bcopy(data, bp->bio_data, bp->bio_length);
	}
	return (bp);
}
/*
 * Convenience wrapper: insert an existing bio's range and data into a
 * queue via g_journal_insert().
 */
#define g_journal_insert_bio(head, bp, flags) \
	g_journal_insert((head), (bp)->bio_offset, \
	    (bp)->bio_offset + (bp)->bio_length, (bp)->bio_joffset, \
	    (bp)->bio_data, flags)
/*
* The function below does a lot more than just inserting bio to the queue.
* It keeps the queue sorted by offset and ensures that there are no doubled
* data (it combines bios where ranges overlap).
*
* The function returns the number of bios inserted (as bio can be splitted).
*/
static int
g_journal_insert(struct bio **head, off_t nstart, off_t nend, off_t joffset,
    u_char *data, int flags)
{
	struct bio *nbp, *cbp, *pbp;	/* new / current / previous bio */
	off_t cstart, cend;
	u_char *tmpdata;
	int n;

	GJ_DEBUG(3, "INSERT(%p): (%jd, %jd, %jd)", *head, nstart, nend,
	    joffset);
	n = 0;
	pbp = NULL;
	/*
	 * Walk the (offset-sorted) queue.  Each case below resolves the
	 * overlap between the current bio [cstart, cend) and the new range
	 * [nstart, nend), consuming the overlapping part of the new range
	 * as it goes (nstart/joffset/data advance together).
	 */
	GJQ_FOREACH(*head, cbp) {
		cstart = cbp->bio_offset;
		cend = cbp->bio_offset + cbp->bio_length;

		if (nstart >= cend) {
			/*
			 * Case 1: new range lies entirely after the current
			 * bio — nothing to do, keep walking.
			 *
			 *  +-------------+
			 *  |             |
			 *  |   current   |  +-------------+
			 *  |     bio     |  |             |
			 *  |             |  |     new     |
			 *  +-------------+  |     bio     |
			 *                   |             |
			 *                   +-------------+
			 */
			GJ_DEBUG(3, "INSERT(%p): 1", *head);
		} else if (nend <= cstart) {
			/*
			 * Case 2: new range lies entirely before the current
			 * bio — link a fresh bio in front of it and finish.
			 *
			 *                   +-------------+
			 *                   |             |
			 *  +-------------+  |   current   |
			 *  |             |  |     bio     |
			 *  |     new     |  |             |
			 *  |     bio     |  +-------------+
			 *  |             |
			 *  +-------------+
			 */
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			if (pbp == NULL)
				*head = nbp;
			else
				pbp->bio_next = nbp;
			nbp->bio_next = cbp;
			n++;
			GJ_DEBUG(3, "INSERT(%p): 2 (nbp=%p pbp=%p)", *head, nbp,
			    pbp);
			goto end;
		} else if (nstart <= cstart && nend >= cend) {
			/*
			 * Case 3: new range fully covers the current bio —
			 * overwrite it in place with the overlapping part of
			 * the new data, then continue with what remains
			 * past cend.
			 *
			 *      +-------------+      +-------------+
			 *      | current bio |      | current bio |
			 *  +---+-------------+---+  +-------------+---+
			 *  |   |             |   |  |             |   |
			 *  |   |             |   |  |             |   |
			 *  |   +-------------+   |  +-------------+   |
			 *  |       new bio       |  |     new bio     |
			 *  +---------------------+  +-----------------+
			 *
			 *      +-------------+  +-------------+
			 *      | current bio |  | current bio |
			 *  +---+-------------+  +-------------+
			 *  |   |             |  |             |
			 *  |   |             |  |             |
			 *  |   +-------------+  +-------------+
			 *  |      new bio    |  |   new bio   |
			 *  +-----------------+  +-------------+
			 */
			g_journal_stats_bytes_skipped += cbp->bio_length;
			cbp->bio_offset = nstart;
			cbp->bio_joffset = joffset;
			cbp->bio_length = cend - nstart;
			if (cbp->bio_data != NULL) {
				gj_free(cbp->bio_data, cend - cstart);
				cbp->bio_data = NULL;
			}
			if (data != NULL) {
				/* M_NOWAIT may fail; NULL data means
				 * "re-read from journal" downstream. */
				cbp->bio_data = gj_malloc(cbp->bio_length,
				    flags);
				if (cbp->bio_data != NULL) {
					bcopy(data, cbp->bio_data,
					    cbp->bio_length);
				}
				data += cend - nstart;
			}
			joffset += cend - nstart;
			nstart = cend;
			GJ_DEBUG(3, "INSERT(%p): 3 (cbp=%p)", *head, cbp);
		} else if (nstart > cstart && nend >= cend) {
			/*
			 * Case 4: new range overlaps the tail of the current
			 * bio — truncate the current bio and insert a new one
			 * for the overlap; continue with the rest past cend.
			 *
			 *  +-----------------+  +-------------+
			 *  |   current bio   |  | current bio |
			 *  |   +-------------+  |   +---------+---+
			 *  |   |             |  |   |         |   |
			 *  |   |             |  |   |         |   |
			 *  +---+-------------+  +---+---------+   |
			 *      |   new bio   |      |   new bio   |
			 *      +-------------+      +-------------+
			 */
			g_journal_stats_bytes_skipped += cend - nstart;
			nbp = g_journal_new_bio(nstart, cend, joffset, data,
			    flags);
			nbp->bio_next = cbp->bio_next;
			cbp->bio_next = nbp;
			cbp->bio_length = nstart - cstart;
			if (cbp->bio_data != NULL) {
				cbp->bio_data = gj_realloc(cbp->bio_data,
				    cbp->bio_length, cend - cstart);
			}
			if (data != NULL)
				data += cend - nstart;
			joffset += cend - nstart;
			nstart = cend;
			n++;
			GJ_DEBUG(3, "INSERT(%p): 4 (cbp=%p)", *head, cbp);
		} else if (nstart > cstart && nend < cend) {
			/*
			 * Case 5: new range sits strictly inside the current
			 * bio — split into three: truncated head (cbp), the
			 * new bio, and a tail carved from the old data.  Done.
			 *
			 *  +---------------------+
			 *  |     current bio     |
			 *  |   +-------------+   |
			 *  |   |             |   |
			 *  |   |             |   |
			 *  +---+-------------+---+
			 *      |   new bio   |
			 *      +-------------+
			 */
			g_journal_stats_bytes_skipped += nend - nstart;
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			nbp->bio_next = cbp->bio_next;
			cbp->bio_next = nbp;
			if (cbp->bio_data == NULL)
				tmpdata = NULL;
			else
				tmpdata = cbp->bio_data + nend - cstart;
			nbp = g_journal_new_bio(nend, cend,
			    cbp->bio_joffset + nend - cstart, tmpdata, flags);
			nbp->bio_next = ((struct bio *)cbp->bio_next)->bio_next;
			((struct bio *)cbp->bio_next)->bio_next = nbp;
			cbp->bio_length = nstart - cstart;
			if (cbp->bio_data != NULL) {
				cbp->bio_data = gj_realloc(cbp->bio_data,
				    cbp->bio_length, cend - cstart);
			}
			n += 2;
			GJ_DEBUG(3, "INSERT(%p): 5 (cbp=%p)", *head, cbp);
			goto end;
		} else if (nstart <= cstart && nend < cend) {
			/*
			 * Case 6: new range overlaps the head of the current
			 * bio — insert the new bio before it and shrink the
			 * current bio from the front.  Done.
			 *
			 *  +-----------------+      +-------------+
			 *  |   current bio   |      | current bio |
			 *  +-------------+   |  +---+---------+   |
			 *  |             |   |  |   |         |   |
			 *  |             |   |  |   |         |   |
			 *  +-------------+---+  |   +---------+---+
			 *  |   new bio   |      |   new bio   |
			 *  +-------------+      +-------------+
			 */
			g_journal_stats_bytes_skipped += nend - nstart;
			nbp = g_journal_new_bio(nstart, nend, joffset, data,
			    flags);
			if (pbp == NULL)
				*head = nbp;
			else
				pbp->bio_next = nbp;
			nbp->bio_next = cbp;
			cbp->bio_offset = nend;
			cbp->bio_length = cend - nend;
			cbp->bio_joffset += nend - cstart;
			tmpdata = cbp->bio_data;
			if (tmpdata != NULL) {
				/* Keep only the non-overlapped tail of the
				 * current bio's data. */
				cbp->bio_data = gj_malloc(cbp->bio_length,
				    flags);
				if (cbp->bio_data != NULL) {
					bcopy(tmpdata + nend - cstart,
					    cbp->bio_data, cbp->bio_length);
				}
				gj_free(tmpdata, cend - cstart);
			}
			n++;
			GJ_DEBUG(3, "INSERT(%p): 6 (cbp=%p)", *head, cbp);
			goto end;
		}
		/* New range fully consumed — nothing left to insert. */
		if (nstart == nend)
			goto end;
		pbp = cbp;
	}
	/* Leftover range goes past the end of the queue as a new tail bio. */
	nbp = g_journal_new_bio(nstart, nend, joffset, data, flags);
	if (pbp == NULL)
		*head = nbp;
	else
		pbp->bio_next = nbp;
	nbp->bio_next = NULL;
	n++;
	GJ_DEBUG(3, "INSERT(%p): 8 (nbp=%p pbp=%p)", *head, nbp, pbp);
end:
	if (g_journal_debug >= 3) {
		GJQ_FOREACH(*head, cbp) {
			GJ_DEBUG(3, "ELEMENT: %p (%jd, %jd, %jd, %p)", cbp,
			    (intmax_t)cbp->bio_offset,
			    (intmax_t)cbp->bio_length,
			    (intmax_t)cbp->bio_joffset, cbp->bio_data);
		}
		GJ_DEBUG(3, "INSERT(%p): DONE %d", *head, n);
	}
	return (n);
}
/*
* The function combines neighbour bios trying to squeeze as much data as
* possible into one bio.
*
* The function returns the number of bios combined (negative value).
*/
static int
g_journal_optimize(struct bio *head)
{
	struct bio *cbp, *pbp;
	int n;

	n = 0;
	pbp = NULL;
	GJQ_FOREACH(head, cbp) {
		/* Skip bios which has to be read first. */
		if (cbp->bio_data == NULL) {
			pbp = NULL;
			continue;
		}
		/* There is no previous bio yet. */
		if (pbp == NULL) {
			pbp = cbp;
			continue;
		}
		/* Is this a neighbour bio? */
		if (pbp->bio_offset + pbp->bio_length != cbp->bio_offset) {
			/* Be sure that bios queue is sorted. */
			KASSERT(pbp->bio_offset + pbp->bio_length < cbp->bio_offset,
			    ("poffset=%jd plength=%jd coffset=%jd",
			    (intmax_t)pbp->bio_offset,
			    (intmax_t)pbp->bio_length,
			    (intmax_t)cbp->bio_offset));
			pbp = cbp;
			continue;
		}
		/* Be sure we don't end up with too big bio. */
		if (pbp->bio_length + cbp->bio_length > MAXPHYS) {
			pbp = cbp;
			continue;
		}
		/* Ok, we can join bios. */
		GJ_LOGREQ(4, pbp, "Join: ");
		GJ_LOGREQ(4, cbp, "and: ");
		/* Grow pbp's buffer and append cbp's data to it. */
		pbp->bio_data = gj_realloc(pbp->bio_data,
		    pbp->bio_length + cbp->bio_length, pbp->bio_length);
		bcopy(cbp->bio_data, pbp->bio_data + pbp->bio_length,
		    cbp->bio_length);
		gj_free(cbp->bio_data, cbp->bio_length);
		pbp->bio_length += cbp->bio_length;
		pbp->bio_next = cbp->bio_next;
		g_destroy_bio(cbp);
		/*
		 * Continue the walk from the merged bio so it can absorb
		 * further neighbours (presumably GJQ_FOREACH advances via
		 * cbp->bio_next).
		 */
		cbp = pbp;
		g_journal_stats_combined_ios++;
		/* Return value is the (negative) number of bios removed. */
		n--;
		GJ_LOGREQ(4, pbp, "Got: ");
	}
	return (n);
}
/*
 * TODO: Update comment.
 * These are functions responsible for copying one portion of data from journal
 * to the destination provider.
 * The order goes like this:
 * 1. Read the header, which contains information about data blocks
 *    following it.
 * 2. Read the data blocks from the journal.
 * 3. Write the data blocks on the data provider.
 *
 * g_journal_copy_start()
 * g_journal_copy_done() - got finished write request, logs potential errors.
 */
/*
* When there is no data in cache, this function is used to read it.
*/
/*
 * The cached copy of this journaled block was reclaimed when we were short
 * on memory, so issue a read against the journal provider to fetch the data
 * back before it can be copied to the data provider.
 */
static void
g_journal_read_first(struct g_journal_softc *sc, struct bio *bp)
{
	struct bio *rbp;

	rbp = g_alloc_bio();
	rbp->bio_cmd = BIO_READ;
	rbp->bio_done = g_journal_std_done;
	rbp->bio_parent = bp;
	rbp->bio_cflags = bp->bio_cflags;
	/* The data lives at bp's journal offset, not its data offset. */
	rbp->bio_offset = bp->bio_joffset;
	rbp->bio_length = bp->bio_length;
	rbp->bio_data = gj_malloc(rbp->bio_length, M_WAITOK);
	GJ_LOGREQ(4, rbp, "READ FIRST");
	g_io_request(rbp, sc->sc_jconsumer);
	g_journal_cache_misses++;
}
/*
 * Pull bios from the inactive journal queue (up to
 * g_journal_parallel_copies in flight), optionally merge neighbours, and
 * send them to the data provider.  Bios whose cached data was reclaimed
 * are first read back from the journal via g_journal_read_first().
 */
static void
g_journal_copy_send(struct g_journal_softc *sc)
{
	struct bio *bioq, *bp, *lbp;

	bioq = lbp = NULL;
	mtx_lock(&sc->sc_mtx);
	/* Build a local, order-preserving list under the mutex. */
	for (; sc->sc_copy_in_progress < g_journal_parallel_copies;) {
		bp = GJQ_FIRST(sc->sc_inactive.jj_queue);
		if (bp == NULL)
			break;
		GJQ_REMOVE(sc->sc_inactive.jj_queue, bp);
		sc->sc_copy_in_progress++;
		GJQ_INSERT_AFTER(bioq, bp, lbp);
		lbp = bp;
	}
	mtx_unlock(&sc->sc_mtx);
	if (g_journal_do_optimize)
		/* Merging returns a negative count, freeing in-flight slots. */
		sc->sc_copy_in_progress += g_journal_optimize(bioq);
	while ((bp = GJQ_FIRST(bioq)) != NULL) {
		GJQ_REMOVE(bioq, bp);
		/* Track in-flight copies for g_journal_read() lookups. */
		GJQ_INSERT_HEAD(sc->sc_copy_queue, bp);
		bp->bio_cflags = GJ_BIO_COPY;
		if (bp->bio_data == NULL)
			g_journal_read_first(sc, bp);
		else {
			bp->bio_joffset = 0;
			GJ_LOGREQ(4, bp, "SEND");
			g_io_request(bp, sc->sc_dconsumer);
		}
	}
}
static void
g_journal_copy_start(struct g_journal_softc *sc)
{

	/*
	 * Record in the on-disk metadata that we have started copying
	 * journaled data to the data provider, so that after a power
	 * failure the copy is replayed from the beginning on next boot.
	 */
	if (sc->sc_journal_copying == 0) {
		GJ_DEBUG(1, "Starting copy of journal.");
		sc->sc_journal_copying = 1;
		g_journal_metadata_update(sc);
	}

	g_journal_copy_send(sc);
}
/*
* Data block has been read from the journal provider.
*/
/*
 * Completion of a journal read issued by g_journal_read_first(): forward
 * the data as a write to the data provider.  Returns 1 when the read
 * failed (both bios are destroyed and the in-flight count is dropped),
 * 0 when the follow-up write was issued.
 */
static int
g_journal_copy_read_done(struct bio *bp)
{
	struct g_journal_softc *sc;
	struct g_consumer *cp;
	struct bio *pbp;

	KASSERT(bp->bio_cflags == GJ_BIO_COPY,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));

	sc = bp->bio_from->geom->softc;
	pbp = bp->bio_parent;

	if (bp->bio_error != 0) {
		GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
		    bp->bio_to->name, bp->bio_error);
		/*
		 * We will not be able to deliver WRITE request as well.
		 */
		gj_free(bp->bio_data, bp->bio_length);
		g_destroy_bio(pbp);
		g_destroy_bio(bp);
		sc->sc_copy_in_progress--;
		return (1);
	}
	/* Hand the freshly read buffer to the parent write request. */
	pbp->bio_data = bp->bio_data;
	cp = sc->sc_dconsumer;
	g_io_request(pbp, cp);
	GJ_LOGREQ(4, bp, "READ DONE");
	g_destroy_bio(bp);
	return (0);
}
/*
* Data block has been written to the data provider.
*/
/*
 * Completion of a copy write to the data provider: remove the bio from
 * the copy queue, release it, and mark the journal fully copied once the
 * last in-flight copy finishes.
 */
static void
g_journal_copy_write_done(struct bio *bp)
{
	struct g_journal_softc *sc;

	KASSERT(bp->bio_cflags == GJ_BIO_COPY,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_COPY));

	sc = bp->bio_from->geom->softc;
	sc->sc_copy_in_progress--;

	/* Write errors are only reported; the copy bookkeeping proceeds. */
	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "[copy] Error while writing data (error=%d)",
		    bp->bio_error);
	}
	GJQ_REMOVE(sc->sc_copy_queue, bp);
	gj_free(bp->bio_data, bp->bio_length);
	GJ_LOGREQ(4, bp, "DONE");
	g_destroy_bio(bp);

	if (sc->sc_copy_in_progress == 0) {
		/*
		 * This was the last write request for this journal.
		 */
		GJ_DEBUG(1, "Data has been copied.");
		sc->sc_journal_copying = 0;
	}
}
static void g_journal_flush_done(struct bio *bp);
/*
* Flush one record onto active journal provider.
*/
static void
g_journal_flush(struct g_journal_softc *sc)
{
	struct g_journal_record_header hdr;
	struct g_journal_entry *ent;
	struct g_provider *pp;
	struct bio **bioq;
	struct bio *bp, *fbp, *pbp;
	off_t joffset, size;
	u_char *data, hash[16];
	MD5_CTX ctx;
	u_int i;

	if (sc->sc_current_count == 0)
		return;

	size = 0;
	pp = sc->sc_jprovider;
	GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);
	joffset = sc->sc_journal_offset;

	GJ_DEBUG(2, "Storing %d journal entries on %s at %jd.",
	    sc->sc_current_count, pp->name, (intmax_t)joffset);

	/*
	 * Store 'journal id', so we know to which journal this record belongs.
	 */
	hdr.jrh_journal_id = sc->sc_journal_id;
	/* Could be less than g_journal_record_entries if called due timeout. */
	hdr.jrh_nentries = MIN(sc->sc_current_count, g_journal_record_entries);
	strlcpy(hdr.jrh_magic, GJ_RECORD_HEADER_MAGIC, sizeof(hdr.jrh_magic));

	bioq = &sc->sc_active.jj_queue;
	pbp = sc->sc_flush_queue;

	/*
	 * fbp is the record-header bio.  Its data (the encoded header) is
	 * only filled in at the end, once the checksum is known.
	 * bio_offset == -1 marks it as a header so cache lookups skip it.
	 */
	fbp = g_alloc_bio();
	fbp->bio_parent = NULL;
	fbp->bio_cflags = GJ_BIO_JOURNAL;
	fbp->bio_offset = -1;
	fbp->bio_joffset = joffset;
	fbp->bio_length = pp->sectorsize;
	fbp->bio_cmd = BIO_WRITE;
	fbp->bio_done = g_journal_std_done;
	GJQ_INSERT_AFTER(sc->sc_flush_queue, fbp, pbp);
	pbp = fbp;
	fbp->bio_to = pp;
	GJ_LOGREQ(4, fbp, "FLUSH_OUT");
	joffset += pp->sectorsize;
	sc->sc_flush_count++;
	if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
		MD5Init(&ctx);

	/*
	 * Move up to jrh_nentries bios from the current queue to the flush
	 * queue, recording each in the header and assigning it its place
	 * in the journal (je_joffset).
	 */
	for (i = 0; i < hdr.jrh_nentries; i++) {
		bp = sc->sc_current_queue;
		KASSERT(bp != NULL, ("NULL bp"));
		bp->bio_to = pp;
		GJ_LOGREQ(4, bp, "FLUSHED");
		sc->sc_current_queue = bp->bio_next;
		bp->bio_next = NULL;
		sc->sc_current_count--;

		/* Add to the header. */
		ent = &hdr.jrh_entries[i];
		ent->je_offset = bp->bio_offset;
		ent->je_joffset = joffset;
		ent->je_length = bp->bio_length;
		size += ent->je_length;

		data = bp->bio_data;
		if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
			MD5Update(&ctx, data, ent->je_length);
		/* Reset the bio and rebuild it as a journal write. */
		bzero(bp, sizeof(*bp));
		bp->bio_cflags = GJ_BIO_JOURNAL;
		bp->bio_offset = ent->je_offset;
		bp->bio_joffset = ent->je_joffset;
		bp->bio_length = ent->je_length;
		bp->bio_data = data;
		bp->bio_cmd = BIO_WRITE;
		bp->bio_done = g_journal_std_done;
		GJQ_INSERT_AFTER(sc->sc_flush_queue, bp, pbp);
		pbp = bp;
		bp->bio_to = pp;
		GJ_LOGREQ(4, bp, "FLUSH_OUT");
		joffset += bp->bio_length;
		sc->sc_flush_count++;

		/*
		 * Add request to the active sc_journal_queue queue.
		 * This is our cache. After journal switch we don't have to
		 * read the data from the inactive journal, because we keep
		 * it in memory.
		 */
		g_journal_insert(bioq, ent->je_offset,
		    ent->je_offset + ent->je_length, ent->je_joffset, data,
		    M_NOWAIT);
	}

	/*
	 * After all requests, store valid header.
	 */
	data = gj_malloc(pp->sectorsize, M_WAITOK);
	if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
		MD5Final(hash, &ctx);
		bcopy(hash, hdr.jrh_sum, sizeof(hdr.jrh_sum));
	}
	g_journal_record_header_encode(&hdr, data);
	fbp->bio_data = data;

	sc->sc_journal_offset = joffset;

	g_journal_check_overflow(sc);
}
/*
* Flush request finished.
*/
/*
 * Completion of a journal flush write: log errors, drop the in-flight
 * count, and release the bio with its buffer.
 */
static void
g_journal_flush_done(struct bio *bp)
{
	struct g_journal_softc *sc;
	struct g_consumer *cp;

	KASSERT((bp->bio_cflags & GJ_BIO_MASK) == GJ_BIO_JOURNAL,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_JOURNAL));

	cp = bp->bio_from;
	sc = cp->geom->softc;
	sc->sc_flush_in_progress--;

	/* Errors are only reported here; there is no retry path. */
	if (bp->bio_error != 0) {
		GJ_LOGREQ(0, bp, "[flush] Error while writing data (error=%d)",
		    bp->bio_error);
	}
	gj_free(bp->bio_data, bp->bio_length);
	GJ_LOGREQ(4, bp, "DONE");
	g_destroy_bio(bp);
}
static void g_journal_release_delayed(struct g_journal_softc *sc);
/*
 * Move bios from the flush queue to the active journal provider, keeping
 * at most g_journal_parallel_flushes in flight.  As the flush queue
 * drains, delayed requests are promoted to fill it back up.
 */
static void
g_journal_flush_send(struct g_journal_softc *sc)
{
	struct g_consumer *cp;
	struct bio *bioq, *bp, *lbp;

	cp = sc->sc_jconsumer;
	bioq = lbp = NULL;
	while (sc->sc_flush_in_progress < g_journal_parallel_flushes) {
		/* Send one flush requests to the active journal. */
		bp = GJQ_FIRST(sc->sc_flush_queue);
		if (bp != NULL) {
			GJQ_REMOVE(sc->sc_flush_queue, bp);
			sc->sc_flush_count--;
			/* The journal write targets the journal offset. */
			bp->bio_offset = bp->bio_joffset;
			bp->bio_joffset = 0;
			sc->sc_flush_in_progress++;
			GJQ_INSERT_AFTER(bioq, bp, lbp);
			lbp = bp;
		}
		/* Try to release delayed requests. */
		g_journal_release_delayed(sc);
		/* If there are no requests to flush, leave. */
		if (GJQ_FIRST(sc->sc_flush_queue) == NULL)
			break;
	}
	if (g_journal_do_optimize)
		/* Merging returns a negative count, freeing in-flight slots. */
		sc->sc_flush_in_progress += g_journal_optimize(bioq);
	while ((bp = GJQ_FIRST(bioq)) != NULL) {
		GJQ_REMOVE(bioq, bp);
		GJ_LOGREQ(3, bp, "Flush request send");
		g_io_request(bp, cp);
	}
}
/*
 * Insert a write request into the current queue (merging overlaps and
 * neighbours), acknowledge it to the caller immediately, and trigger a
 * record flush when enough entries accumulated.
 */
static void
g_journal_add_current(struct g_journal_softc *sc, struct bio *bp)
{
	int n;

	GJ_LOGREQ(4, bp, "CURRENT %d", sc->sc_current_count);
	n = g_journal_insert_bio(&sc->sc_current_queue, bp, M_WAITOK);
	sc->sc_current_count += n;
	n = g_journal_optimize(sc->sc_current_queue);
	sc->sc_current_count += n;
	/*
	 * For requests which are added to the current queue we deliver
	 * response immediately.
	 */
	bp->bio_completed = bp->bio_length;
	g_io_deliver(bp, 0);
	if (sc->sc_current_count >= g_journal_record_entries) {
		/*
		 * Let's flush one record onto active journal provider.
		 */
		g_journal_flush(sc);
	}
}
/*
 * Promote delayed requests into the current queue for as long as there is
 * room in the flush queue and delayed requests remain.
 */
static void
g_journal_release_delayed(struct g_journal_softc *sc)
{
	struct bio *bp;

	/* Stop as soon as the flush queue is full again. */
	while (sc->sc_flush_count < g_journal_accept_immediately) {
		bp = bioq_takefirst(&sc->sc_delayed_queue);
		if (bp == NULL)
			break;
		sc->sc_delayed_count--;
		g_journal_add_current(sc, bp);
	}
}
/*
* Add I/O request to the current queue. If we have enough requests for one
* journal record we flush them onto active journal provider.
*/
/*
 * Accept a write request: queue it in the delayed queue when the flush
 * queue is full (or other requests are already delayed, to preserve
 * ordering), otherwise add it to the current queue right away.
 */
static void
g_journal_add_request(struct g_journal_softc *sc, struct bio *bp)
{

	/*
	 * The flush queue is full, we need to delay the request.
	 */
	if (sc->sc_delayed_count > 0 ||
	    sc->sc_flush_count >= g_journal_accept_immediately) {
		GJ_LOGREQ(4, bp, "DELAYED");
		bioq_insert_tail(&sc->sc_delayed_queue, bp);
		sc->sc_delayed_count++;
		return;
	}

	KASSERT(TAILQ_EMPTY(&sc->sc_delayed_queue.queue),
	    ("DELAYED queue not empty."));
	g_journal_add_current(sc, bp);
}
static void g_journal_read_done(struct bio *bp);
/*
* Try to find requested data in cache.
*/
/*
 * Scan a bio queue for data overlapping [ostart, oend) and copy whatever
 * is cached into pbp.  Returns:
 *  - pbp when the whole request was satisfied (already delivered),
 *  - a bio on partial hit (or one with NULL data, meaning the data must
 *    be read from the journal at its bio_joffset),
 *  - NULL when nothing in the queue overlaps.
 * 'sorted' allows an early stop: in an offset-sorted queue, once a bio
 * starts at/after oend no later one can overlap.
 */
static struct bio *
g_journal_read_find(struct bio *head, int sorted, struct bio *pbp, off_t ostart,
    off_t oend)
{
	off_t cstart, cend;
	struct bio *bp;

	GJQ_FOREACH(head, bp) {
		/* bio_offset == -1 marks a record-header bio; skip it. */
		if (bp->bio_offset == -1)
			continue;
		cstart = MAX(ostart, bp->bio_offset);
		cend = MIN(oend, bp->bio_offset + bp->bio_length);
		if (cend <= ostart)
			continue;
		else if (cstart >= oend) {
			if (!sorted)
				continue;
			else {
				bp = NULL;
				break;
			}
		}
		/* Data was reclaimed: caller must read it from the journal. */
		if (bp->bio_data == NULL)
			break;
		GJ_DEBUG(3, "READ(%p): (%jd, %jd) (bp=%p)", head, cstart, cend,
		    bp);
		bcopy(bp->bio_data + cstart - bp->bio_offset,
		    pbp->bio_data + cstart - pbp->bio_offset, cend - cstart);
		pbp->bio_completed += cend - cstart;
		if (pbp->bio_completed == pbp->bio_length) {
			/*
			 * Cool, the whole request was in cache, deliver happy
			 * message.
			 */
			g_io_deliver(pbp, 0);
			return (pbp);
		}
		break;
	}
	return (bp);
}
/*
* Try to find requested data in cache.
*/
/*
 * Like g_journal_read_find(), but for a bioq-style (TAILQ) queue — used
 * for the delayed queue.  Every bio here must still carry its data.
 * Same return convention: pbp on full hit (delivered), a bio on partial
 * hit, NULL when nothing overlaps.
 */
static struct bio *
g_journal_read_queue_find(struct bio_queue *head, struct bio *pbp, off_t ostart,
    off_t oend)
{
	off_t cstart, cend;
	struct bio *bp;

	TAILQ_FOREACH(bp, head, bio_queue) {
		cstart = MAX(ostart, bp->bio_offset);
		cend = MIN(oend, bp->bio_offset + bp->bio_length);
		if (cend <= ostart)
			continue;
		else if (cstart >= oend)
			continue;
		KASSERT(bp->bio_data != NULL,
		    ("%s: bio_data == NULL", __func__));
		GJ_DEBUG(3, "READ(%p): (%jd, %jd) (bp=%p)", head, cstart, cend,
		    bp);
		bcopy(bp->bio_data + cstart - bp->bio_offset,
		    pbp->bio_data + cstart - pbp->bio_offset, cend - cstart);
		pbp->bio_completed += cend - cstart;
		if (pbp->bio_completed == pbp->bio_length) {
			/*
			 * Cool, the whole request was in cache, deliver happy
			 * message.
			 */
			g_io_deliver(pbp, 0);
			return (pbp);
		}
		break;
	}
	/* NULL when the loop ran off the end without an overlap. */
	return (bp);
}
/*
 * This function is used for collecting data on read.
 * The complexity is because parts of the data can be stored in six different
 * places:
 * - in delayed requests
 * - in memory - the data not yet sent to the active journal provider
 * - in requests which are going to be sent to the active journal
 * - in the active journal
 * - in the inactive journal
 * - in the data provider
 */
/*
 * Satisfy a read for [ostart, oend) of pbp, searching the caches from
 * newest to oldest data (delayed, current, flush, active journal,
 * inactive journal, in-flight copies) and falling back to the journal or
 * data provider.  Recurses for the parts of the range a partial hit did
 * not cover.
 */
static void
g_journal_read(struct g_journal_softc *sc, struct bio *pbp, off_t ostart,
    off_t oend)
{
	struct bio *bp, *nbp, *head;
	off_t cstart, cend;
	u_int i, sorted = 0;

	GJ_DEBUG(3, "READ: (%jd, %jd)", ostart, oend);

	cstart = cend = -1;
	bp = NULL;
	head = NULL;
	/* Lookup order: newest data first, so later stages never win
	 * over fresher copies. */
	for (i = 0; i <= 5; i++) {
		switch (i) {
		case 0: /* Delayed requests. */
			head = NULL;
			sorted = 0;
			break;
		case 1: /* Not-yet-send data. */
			head = sc->sc_current_queue;
			sorted = 1;
			break;
		case 2: /* In-flight to the active journal. */
			head = sc->sc_flush_queue;
			sorted = 0;
			break;
		case 3: /* Active journal. */
			head = sc->sc_active.jj_queue;
			sorted = 1;
			break;
		case 4: /* Inactive journal. */
			/*
			 * XXX: Here could be a race with g_journal_lowmem().
			 */
			head = sc->sc_inactive.jj_queue;
			sorted = 1;
			break;
		case 5: /* In-flight to the data provider. */
			head = sc->sc_copy_queue;
			sorted = 0;
			break;
		default:
			panic("gjournal %s: i=%d", __func__, i);
		}
		if (i == 0)
			bp = g_journal_read_queue_find(&sc->sc_delayed_queue.queue, pbp, ostart, oend);
		else
			bp = g_journal_read_find(head, sorted, pbp, ostart, oend);
		if (bp == pbp) { /* Got the whole request. */
			GJ_DEBUG(2, "Got the whole request from %u.", i);
			return;
		} else if (bp != NULL) {
			cstart = MAX(ostart, bp->bio_offset);
			cend = MIN(oend, bp->bio_offset + bp->bio_length);
			GJ_DEBUG(2, "Got part of the request from %u (%jd-%jd).",
			    i, (intmax_t)cstart, (intmax_t)cend);
			break;
		}
	}
	if (bp != NULL) {
		if (bp->bio_data == NULL) {
			/*
			 * Cached copy was reclaimed; read the overlapping
			 * piece straight from the journal provider into the
			 * right slice of the parent's buffer.
			 */
			nbp = g_duplicate_bio(pbp);
			nbp->bio_cflags = GJ_BIO_READ;
			nbp->bio_data =
			    pbp->bio_data + cstart - pbp->bio_offset;
			nbp->bio_offset =
			    bp->bio_joffset + cstart - bp->bio_offset;
			nbp->bio_length = cend - cstart;
			nbp->bio_done = g_journal_read_done;
			g_io_request(nbp, sc->sc_jconsumer);
		}
		/*
		 * If we don't have the whole request yet, call g_journal_read()
		 * recursively.
		 */
		if (ostart < cstart)
			g_journal_read(sc, pbp, ostart, cstart);
		if (oend > cend)
			g_journal_read(sc, pbp, cend, oend);
	} else {
		/*
		 * No data in memory, no data in journal.
		 * Its time for asking data provider.
		 */
		GJ_DEBUG(3, "READ(data): (%jd, %jd)", ostart, oend);
		nbp = g_duplicate_bio(pbp);
		nbp->bio_cflags = GJ_BIO_READ;
		nbp->bio_data = pbp->bio_data + ostart - pbp->bio_offset;
		nbp->bio_offset = ostart;
		nbp->bio_length = oend - ostart;
		nbp->bio_done = g_journal_read_done;
		g_io_request(nbp, sc->sc_dconsumer);
		/* We have the whole request, return here. */
		return;
	}
}
/*
* Function responsible for handling finished READ requests.
* Actually, g_std_done() could be used here, the only difference is that we
* log error.
*/
/*
 * Completion handler for the child reads spawned by g_journal_read().
 * Accumulates completion on the parent and delivers it once every child
 * has come back and the full length is in.
 */
static void
g_journal_read_done(struct bio *bp)
{
	struct bio *pbp;

	KASSERT(bp->bio_cflags == GJ_BIO_READ,
	    ("Invalid bio (%d != %d).", bp->bio_cflags, GJ_BIO_READ));

	pbp = bp->bio_parent;
	pbp->bio_inbed++;
	pbp->bio_completed += bp->bio_length;

	/* Remember the first error; later ones don't overwrite it. */
	if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		GJ_DEBUG(0, "Error while reading data from %s (error=%d).",
		    bp->bio_to->name, bp->bio_error);
	}
	g_destroy_bio(bp);
	if (pbp->bio_children == pbp->bio_inbed &&
	    pbp->bio_completed == pbp->bio_length) {
		/* We're done. */
		g_io_deliver(pbp, 0);
	}
}
/*
 * Deactivate the current journal and activate the next one.
 */
static void
g_journal_switch(struct g_journal_softc *sc)
{
	struct g_provider *pp;

	if (JEMPTY(sc)) {
		/*
		 * Nothing journaled: just mark the device clean in the
		 * metadata when there are no writers left.
		 */
		GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
		pp = LIST_FIRST(&sc->sc_geom->provider);
		if (!(sc->sc_flags & GJF_DEVICE_CLEAN) && pp->acw == 0) {
			sc->sc_flags |= GJF_DEVICE_CLEAN;
			GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
			g_journal_metadata_update(sc);
		}
	} else {
		GJ_DEBUG(3, "Switching journal %s.", sc->sc_geom->name);

		pp = sc->sc_jprovider;

		/* Rotate the journal ids and write a new header. */
		sc->sc_journal_previous_id = sc->sc_journal_id;

		sc->sc_journal_id = sc->sc_journal_next_id;
		sc->sc_journal_next_id = arc4random();

		GJ_VALIDATE_OFFSET(sc->sc_journal_offset, sc);

		g_journal_write_header(sc);

		/* The active journal becomes the inactive one. */
		sc->sc_inactive.jj_offset = sc->sc_active.jj_offset;
		sc->sc_inactive.jj_queue = sc->sc_active.jj_queue;

		/* New active journal starts at the just-written header. */
		sc->sc_active.jj_offset =
		    sc->sc_journal_offset - pp->sectorsize;
		sc->sc_active.jj_queue = NULL;

		/*
		 * Switch is done, start copying data from the (now) inactive
		 * journal to the data provider.
		 */
		g_journal_copy_start(sc);
	}

	mtx_lock(&sc->sc_mtx);
	sc->sc_flags &= ~GJF_DEVICE_SWITCH;
	mtx_unlock(&sc->sc_mtx);
}
/*
 * Initialize a fresh journal: pick random ids, point everything at the
 * start of the journal area and write the first header.  Note the id
 * assignments must precede g_journal_write_header(), which reads them and
 * advances sc_journal_offset.
 */
static void
g_journal_initialize(struct g_journal_softc *sc)
{

	sc->sc_journal_id = arc4random();
	sc->sc_journal_next_id = arc4random();
	sc->sc_journal_previous_id = sc->sc_journal_id;
	sc->sc_journal_offset = sc->sc_jstart;
	sc->sc_inactive.jj_offset = sc->sc_jstart;
	g_journal_write_header(sc);
	sc->sc_active.jj_offset = sc->sc_jstart;
}
/*
 * Mark the underlying file system as dirty by invoking the jd_dirty hook
 * of every registered file-system descriptor on the data consumer.
 */
static void
g_journal_mark_as_dirty(struct g_journal_softc *sc)
{
	const struct g_journal_desc *desc;
	int i;

	GJ_DEBUG(1, "Marking file system %s as dirty.", sc->sc_name);
	i = 0;
	/* g_journal_filesystems[] is NULL-terminated. */
	while ((desc = g_journal_filesystems[i]) != NULL) {
		desc->jd_dirty(sc->sc_dconsumer);
		i++;
	}
}
/*
 * Function reads a record header from the given journal.
 * It is very similar to g_read_data(9), but it doesn't allocate memory for bio
 * and data on every call.
 */
/*
 * Read one sector synchronously from 'cp' at 'offset' into 'data',
 * reusing the caller-supplied bio (the reason it is not g_read_data(9) —
 * no per-call bio/data allocation).  Returns the biowait() error.
 */
static int
g_journal_sync_read(struct g_consumer *cp, struct bio *bp, off_t offset,
    void *data)
{
	int error;

	/* Reset the bio so it can be reused across calls. */
	bzero(bp, sizeof(*bp));
	bp->bio_cmd = BIO_READ;
	bp->bio_done = NULL;
	bp->bio_offset = offset;
	bp->bio_length = cp->provider->sectorsize;
	bp->bio_data = data;
	g_io_request(bp, cp);
	error = biowait(bp, "gjs_read");
	return (error);
}
#if 0
/*
 * Function is called when we start the journal device and we detect that
 * one of the journals was not fully copied.
 * The purpose of this function is to read all record headers from the journal
 * and place them in the inactive queue, so we can start the journal
 * synchronization process and the journal provider itself.
 * The design decision was taken not to synchronize the whole journal here as it
 * can take too much time. Reading headers only and delaying the synchronization
 * process until after the journal provider is started should be the best choice.
 */
#endif
/*
 * Scan the journal attached to 'sc' starting at the current journal
 * offset: verify the journal header, queue every record entry found on
 * the inactive queue and search for the header of the next journal
 * ("termination").  If no valid termination is found the journal is
 * considered broken and is re-initialized from scratch; otherwise the
 * copy process for the inactive journal is started.
 */
static void
g_journal_sync(struct g_journal_softc *sc)
{
	struct g_journal_record_header rhdr;
	struct g_journal_entry *ent;
	struct g_journal_header jhdr;
	struct g_consumer *cp;
	struct bio *bp, *fbp, *tbp;
	off_t joffset, offset;
	u_char *buf, sum[16];
	uint64_t id;
	MD5_CTX ctx;
	int error, found, i;

	found = 0;
	fbp = NULL;
	cp = sc->sc_jconsumer;
	/* One reusable bio and one sector buffer for all header reads. */
	bp = g_alloc_bio();
	buf = gj_malloc(cp->provider->sectorsize, M_WAITOK);
	offset = joffset = sc->sc_inactive.jj_offset = sc->sc_journal_offset;
	GJ_DEBUG(2, "Looking for termination at %jd.", (intmax_t)joffset);
	/*
	 * Read and decode first journal header.
	 */
	error = g_journal_sync_read(cp, bp, offset, buf);
	if (error != 0) {
		GJ_DEBUG(0, "Error while reading journal header from %s.",
		    cp->provider->name);
		goto end;
	}
	error = g_journal_header_decode(buf, &jhdr);
	if (error != 0) {
		GJ_DEBUG(0, "Cannot decode journal header from %s.",
		    cp->provider->name);
		goto end;
	}
	id = sc->sc_journal_id;
	if (jhdr.jh_journal_id != sc->sc_journal_id) {
		GJ_DEBUG(1, "Journal ID mismatch at %jd (0x%08x != 0x%08x).",
		    (intmax_t)offset, (u_int)jhdr.jh_journal_id, (u_int)id);
		goto end;
	}
	offset += cp->provider->sectorsize;
	/* From here on we are looking for the header of the _next_ journal. */
	id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;
	for (;;) {
		/*
		 * If the biggest record won't fit, look for a record header or
		 * journal header from the beginning.
		 */
		GJ_VALIDATE_OFFSET(offset, sc);
		error = g_journal_sync_read(cp, bp, offset, buf);
		if (error != 0) {
			/*
			 * Not good. Having an error while reading header
			 * means, that we cannot read next headers and in
			 * consequence we cannot find termination.
			 */
			GJ_DEBUG(0,
			    "Error while reading record header from %s.",
			    cp->provider->name);
			break;
		}
		error = g_journal_record_header_decode(buf, &rhdr);
		if (error != 0) {
			GJ_DEBUG(2, "Not a record header at %jd (error=%d).",
			    (intmax_t)offset, error);
			/*
			 * This is not a record header.
			 * If we are lucky, this is next journal header.
			 */
			error = g_journal_header_decode(buf, &jhdr);
			if (error != 0) {
				GJ_DEBUG(1, "Not a journal header at %jd (error=%d).",
				    (intmax_t)offset, error);
				/*
				 * Nope, this is not journal header, which
				 * basically means that journal is not
				 * terminated properly.
				 */
				error = ENOENT;
				break;
			}
			/*
			 * Ok. This is header of _some_ journal. Now we need to
			 * verify if this is header of the _next_ journal.
			 */
			if (jhdr.jh_journal_id != id) {
				GJ_DEBUG(1, "Journal ID mismatch at %jd "
				    "(0x%08x != 0x%08x).", (intmax_t)offset,
				    (u_int)jhdr.jh_journal_id, (u_int)id);
				error = ENOENT;
				break;
			}
			/* Found termination. */
			found++;
			GJ_DEBUG(1, "Found termination at %jd (id=0x%08x).",
			    (intmax_t)offset, (u_int)id);
			sc->sc_active.jj_offset = offset;
			sc->sc_journal_offset =
			    offset + cp->provider->sectorsize;
			sc->sc_journal_id = id;
			id = sc->sc_journal_next_id = jhdr.jh_journal_next_id;
			/*
			 * Move all entries collected so far onto the inactive
			 * queue; they belong to the journal just terminated.
			 */
			while ((tbp = fbp) != NULL) {
				fbp = tbp->bio_next;
				GJ_LOGREQ(3, tbp, "Adding request.");
				g_journal_insert_bio(&sc->sc_inactive.jj_queue,
				    tbp, M_WAITOK);
			}
			/* Skip journal's header. */
			offset += cp->provider->sectorsize;
			continue;
		}
		/* Skip record's header. */
		offset += cp->provider->sectorsize;
		/*
		 * Add information about every record entry to the inactive
		 * queue.
		 */
		if (sc->sc_flags & GJF_DEVICE_CHECKSUM)
			MD5Init(&ctx);
		for (i = 0; i < rhdr.jrh_nentries; i++) {
			ent = &rhdr.jrh_entries[i];
			GJ_DEBUG(3, "Insert entry: %jd %jd.",
			    (intmax_t)ent->je_offset, (intmax_t)ent->je_length);
			g_journal_insert(&fbp, ent->je_offset,
			    ent->je_offset + ent->je_length, ent->je_joffset,
			    NULL, M_WAITOK);
			if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
				u_char *buf2;

				/*
				 * TODO: Should use faster function (like
				 *       g_journal_sync_read()).
				 */
				buf2 = g_read_data(cp, offset, ent->je_length,
				    NULL);
				if (buf2 == NULL)
					GJ_DEBUG(0, "Cannot read data at %jd.",
					    (intmax_t)offset);
				else {
					MD5Update(&ctx, buf2, ent->je_length);
					g_free(buf2);
				}
			}
			/* Skip entry's data. */
			offset += ent->je_length;
		}
		if (sc->sc_flags & GJF_DEVICE_CHECKSUM) {
			MD5Final(sum, &ctx);
			if (bcmp(sum, rhdr.jrh_sum, sizeof(rhdr.jrh_sum)) != 0) {
				GJ_DEBUG(0, "MD5 hash mismatch at %jd!",
				    (intmax_t)offset);
			}
		}
	}
end:
	gj_free(bp->bio_data, cp->provider->sectorsize);
	g_destroy_bio(bp);
	/* Remove bios from unterminated journal. */
	while ((tbp = fbp) != NULL) {
		fbp = tbp->bio_next;
		g_destroy_bio(tbp);
	}
	if (found < 1 && joffset > 0) {
		GJ_DEBUG(0, "Journal on %s is broken/corrupted. Initializing.",
		    sc->sc_name);
		/* Drop everything queued so far and start a fresh journal. */
		while ((tbp = sc->sc_inactive.jj_queue) != NULL) {
			sc->sc_inactive.jj_queue = tbp->bio_next;
			g_destroy_bio(tbp);
		}
		g_journal_initialize(sc);
		g_journal_mark_as_dirty(sc);
	} else {
		GJ_DEBUG(0, "Journal %s consistent.", sc->sc_name);
		g_journal_copy_start(sc);
	}
}
/*
 * Wait for requests.
 * If we have requests in the current queue, flush them 3 seconds after the
 * last flush. This way we don't wait forever (or for a journal switch)
 * before storing partially filled records in the journal.
 */
/*
 * Sleep until there is work for the worker thread.  With an empty
 * current queue we simply wait for a wakeup (waking every 3 s to print
 * queue statistics when debugging is on).  With a non-empty current
 * queue we sleep at most until 3 s after 'last_write' so that partially
 * filled records still get flushed.  sc_mtx is held on entry and is
 * released on every return path (via PDROP or an explicit unlock).
 */
static void
g_journal_wait(struct g_journal_softc *sc, time_t last_write)
{
	int error, timeout;

	GJ_DEBUG(3, "%s: enter", __func__);
	if (sc->sc_current_count == 0) {
		if (g_journal_debug < 2)
			msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", 0);
		else {
			/*
			 * If we have debug turned on, show number of elements
			 * in various queues.
			 */
			for (;;) {
				error = msleep(sc, &sc->sc_mtx, PRIBIO,
				    "gj:work", hz * 3);
				if (error == 0) {
					/* Woken up explicitly: stop reporting. */
					mtx_unlock(&sc->sc_mtx);
					break;
				}
				GJ_DEBUG(3, "Report: current count=%d",
				    sc->sc_current_count);
				GJ_DEBUG(3, "Report: flush count=%d",
				    sc->sc_flush_count);
				GJ_DEBUG(3, "Report: flush in progress=%d",
				    sc->sc_flush_in_progress);
				GJ_DEBUG(3, "Report: copy in progress=%d",
				    sc->sc_copy_in_progress);
				GJ_DEBUG(3, "Report: delayed=%d",
				    sc->sc_delayed_count);
			}
		}
		GJ_DEBUG(3, "%s: exit 1", __func__);
		return;
	}
	/*
	 * Flush even not full records every 3 seconds.
	 */
	timeout = (last_write + 3 - time_second) * hz;
	if (timeout <= 0) {
		/* Deadline already passed: flush immediately. */
		mtx_unlock(&sc->sc_mtx);
		g_journal_flush(sc);
		g_journal_flush_send(sc);
		GJ_DEBUG(3, "%s: exit 2", __func__);
		return;
	}
	error = msleep(sc, &sc->sc_mtx, PRIBIO | PDROP, "gj:work", timeout);
	/* EWOULDBLOCK means the 3 s deadline expired: push the partial record. */
	if (error == EWOULDBLOCK)
		g_journal_flush_send(sc);
	GJ_DEBUG(3, "%s: exit 3", __func__);
}
/*
* Worker thread.
*/
/*
 * Main loop of the per-device worker thread: recover the journal,
 * detect BIO_FLUSH support, publish the <name>.journal provider and
 * then service queued bios until the device is destroyed
 * (GJF_DEVICE_DESTROY).
 */
static void
g_journal_worker(void *arg)
{
	struct g_journal_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp;
	struct bio *bp;
	time_t last_write;
	int type;

	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);
	sc = arg;
	type = 0;	/* gcc */
	/*
	 * Recover first: a clean journal is simply re-initialized,
	 * otherwise scan it and queue its entries for copying.
	 */
	if (sc->sc_flags & GJF_DEVICE_CLEAN) {
		GJ_DEBUG(0, "Journal %s clean.", sc->sc_name);
		g_journal_initialize(sc);
	} else {
		g_journal_sync(sc);
	}
	/*
	 * Check if we can use BIO_FLUSH.
	 */
	sc->sc_bio_flush = 0;
	if (g_io_flush(sc->sc_jconsumer) == 0) {
		sc->sc_bio_flush |= GJ_FLUSH_JOURNAL;
		GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
		    sc->sc_jconsumer->provider->name);
	} else {
		GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
		    sc->sc_jconsumer->provider->name);
	}
	/* Data may live on a separate provider; probe it too. */
	if (sc->sc_jconsumer != sc->sc_dconsumer) {
		if (g_io_flush(sc->sc_dconsumer) == 0) {
			sc->sc_bio_flush |= GJ_FLUSH_DATA;
			GJ_DEBUG(1, "BIO_FLUSH supported by %s.",
			    sc->sc_dconsumer->provider->name);
		} else {
			GJ_DEBUG(0, "BIO_FLUSH not supported by %s.",
			    sc->sc_dconsumer->provider->name);
		}
	}
	/* Recovery done: publish the journalled provider. */
	gp = sc->sc_geom;
	g_topology_lock();
	pp = g_new_providerf(gp, "%s.journal", sc->sc_name);
	pp->mediasize = sc->sc_mediasize;
	/*
	 * There could be a problem when data provider and journal providers
	 * have different sectorsize, but such scenario is prevented on journal
	 * creation.
	 */
	pp->sectorsize = sc->sc_sectorsize;
	g_error_provider(pp, 0);
	g_topology_unlock();
	last_write = time_second;
	/* Allow root file system mounting to proceed. */
	if (sc->sc_rootmount != NULL) {
		GJ_DEBUG(1, "root_mount_rel %p", sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}
	for (;;) {
		/* Get first request from the queue. */
		mtx_lock(&sc->sc_mtx);
		bp = bioq_first(&sc->sc_back_queue);
		if (bp != NULL)
			type = (bp->bio_cflags & GJ_BIO_MASK);
		if (bp == NULL) {
			bp = bioq_first(&sc->sc_regular_queue);
			if (bp != NULL)
				type = GJ_BIO_REGULAR;
		}
		if (bp == NULL) {
try_switch:
			/*
			 * Before switching or destroying, drain the current
			 * record and wait for in-flight flush/copy I/O.
			 */
			if ((sc->sc_flags & GJF_DEVICE_SWITCH) ||
			    (sc->sc_flags & GJF_DEVICE_DESTROY)) {
				if (sc->sc_current_count > 0) {
					mtx_unlock(&sc->sc_mtx);
					g_journal_flush(sc);
					g_journal_flush_send(sc);
					continue;
				}
				if (sc->sc_flush_in_progress > 0)
					goto sleep;
				if (sc->sc_copy_in_progress > 0)
					goto sleep;
			}
			if (sc->sc_flags & GJF_DEVICE_SWITCH) {
				mtx_unlock(&sc->sc_mtx);
				g_journal_switch(sc);
				wakeup(&sc->sc_journal_copying);
				continue;
			}
			if (sc->sc_flags & GJF_DEVICE_DESTROY) {
				GJ_DEBUG(1, "Shutting down worker "
				    "thread for %s.", gp->name);
				sc->sc_worker = NULL;
				wakeup(&sc->sc_worker);
				mtx_unlock(&sc->sc_mtx);
				kproc_exit(0);
			}
sleep:
			/* g_journal_wait() releases sc_mtx. */
			g_journal_wait(sc, last_write);
			continue;
		}
		/*
		 * If we're in switch process, we need to delay all new
		 * write requests until its done.
		 */
		if ((sc->sc_flags & GJF_DEVICE_SWITCH) &&
		    type == GJ_BIO_REGULAR && bp->bio_cmd == BIO_WRITE) {
			GJ_LOGREQ(2, bp, "WRITE on SWITCH");
			goto try_switch;
		}
		if (type == GJ_BIO_REGULAR)
			bioq_remove(&sc->sc_regular_queue, bp);
		else
			bioq_remove(&sc->sc_back_queue, bp);
		mtx_unlock(&sc->sc_mtx);
		/* Dispatch the request based on its class and command. */
		switch (type) {
		case GJ_BIO_REGULAR:
			/* Regular request. */
			switch (bp->bio_cmd) {
			case BIO_READ:
				g_journal_read(sc, bp, bp->bio_offset,
				    bp->bio_offset + bp->bio_length);
				break;
			case BIO_WRITE:
				last_write = time_second;
				g_journal_add_request(sc, bp);
				g_journal_flush_send(sc);
				break;
			default:
				panic("Invalid bio_cmd (%d).", bp->bio_cmd);
			}
			break;
		case GJ_BIO_COPY:
			switch (bp->bio_cmd) {
			case BIO_READ:
				if (g_journal_copy_read_done(bp))
					g_journal_copy_send(sc);
				break;
			case BIO_WRITE:
				g_journal_copy_write_done(bp);
				g_journal_copy_send(sc);
				break;
			default:
				panic("Invalid bio_cmd (%d).", bp->bio_cmd);
			}
			break;
		case GJ_BIO_JOURNAL:
			g_journal_flush_done(bp);
			g_journal_flush_send(sc);
			break;
		case GJ_BIO_READ:
		default:
			panic("Invalid bio (%d).", type);
		}
	}
}
/*
 * Deferred GEOM event handler: destroy the journal device passed in
 * 'arg'.  Runs with the topology lock held.
 */
static void
g_journal_destroy_event(void *arg, int flags __unused)
{

	g_topology_assert();
	(void)g_journal_destroy((struct g_journal_softc *)arg);
}
/*
 * Callout handler fired when the second half of a split (data/journal)
 * device did not show up in time.  Schedules destruction of the
 * incomplete journal device via a deferred GEOM event.
 */
static void
g_journal_timeout(void *arg)
{
	struct g_journal_softc *sc = arg;

	GJ_DEBUG(0, "Timeout. Journal %s cannot be completed.",
	    sc->sc_geom->name);
	g_post_event(g_journal_destroy_event, sc, M_NOWAIT, NULL);
}
/*
 * Create a journal geom for the metadata 'md' found on provider 'pp',
 * or attach 'pp' as the missing half of an existing, incomplete
 * journal device with the same ID.  Returns the geom on success, or
 * NULL if the metadata is invalid / already configured / access fails.
 */
static struct g_geom *
g_journal_create(struct g_class *mp, struct g_provider *pp,
    const struct g_journal_metadata *md)
{
	struct g_journal_softc *sc;
	struct g_geom *gp;
	struct g_consumer *cp;
	int error;

	sc = NULL;	/* gcc */

	g_topology_assert();
	/*
	 * There are two possibilities:
	 * 1. Data and both journals are on the same provider.
	 * 2. Data and journals are all on separated providers.
	 */
	/* Look for journal device with the same ID. */
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_id == md->md_id)
			break;
	}
	if (gp == NULL)
		sc = NULL;
	else if (sc != NULL && (sc->sc_type & md->md_type) != 0) {
		/* This part (data or journal) is already attached. */
		GJ_DEBUG(1, "Journal device %u already configured.", sc->sc_id);
		return (NULL);
	}
	if (md->md_type == 0 || (md->md_type & ~GJ_TYPE_COMPLETE) != 0) {
		GJ_DEBUG(0, "Invalid type on %s.", pp->name);
		return (NULL);
	}
	if (md->md_type & GJ_TYPE_DATA) {
		GJ_DEBUG(0, "Journal %u: %s contains data.", md->md_id,
		    pp->name);
	}
	if (md->md_type & GJ_TYPE_JOURNAL) {
		GJ_DEBUG(0, "Journal %u: %s contains journal.", md->md_id,
		    pp->name);
	}
	if (sc == NULL) {
		/* Action geom. */
		sc = malloc(sizeof(*sc), M_JOURNAL, M_WAITOK | M_ZERO);
		sc->sc_id = md->md_id;
		sc->sc_type = 0;
		sc->sc_flags = 0;
		sc->sc_worker = NULL;

		gp = g_new_geomf(mp, "gjournal %u", sc->sc_id);
		gp->start = g_journal_start;
		gp->orphan = g_journal_orphan;
		gp->access = g_journal_access;
		gp->softc = sc;
		gp->flags |= G_GEOM_VOLATILE_BIO;
		sc->sc_geom = gp;

		mtx_init(&sc->sc_mtx, "gjournal", NULL, MTX_DEF);

		bioq_init(&sc->sc_back_queue);
		bioq_init(&sc->sc_regular_queue);
		bioq_init(&sc->sc_delayed_queue);
		sc->sc_delayed_count = 0;
		sc->sc_current_queue = NULL;
		sc->sc_current_count = 0;
		sc->sc_flush_queue = NULL;
		sc->sc_flush_count = 0;
		sc->sc_flush_in_progress = 0;
		sc->sc_copy_queue = NULL;
		sc->sc_copy_in_progress = 0;
		sc->sc_inactive.jj_queue = NULL;
		sc->sc_active.jj_queue = NULL;

		/* Hold root mounting until recovery in the worker finishes. */
		sc->sc_rootmount = root_mount_hold("GJOURNAL");
		GJ_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);

		callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
		if (md->md_type != GJ_TYPE_COMPLETE) {
			/*
			 * Journal and data are on separate providers.
			 * At this point we have only one of them.
			 * We setup a timeout in case the other part will not
			 * appear, so we won't wait forever.
			 */
			callout_reset(&sc->sc_callout, 5 * hz,
			    g_journal_timeout, sc);
		}
	}

	/* Remember type of the data provider. */
	if (md->md_type & GJ_TYPE_DATA)
		sc->sc_orig_type = md->md_type;
	sc->sc_type |= md->md_type;
	cp = NULL;

	if (md->md_type & GJ_TYPE_DATA) {
		if (md->md_flags & GJ_FLAG_CLEAN)
			sc->sc_flags |= GJF_DEVICE_CLEAN;
		if (md->md_flags & GJ_FLAG_CHECKSUM)
			sc->sc_flags |= GJF_DEVICE_CHECKSUM;
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
		    pp->name, error));
		error = g_access(cp, 1, 1, 1);
		if (error != 0) {
			GJ_DEBUG(0, "Cannot access %s (error=%d).", pp->name,
			    error);
			g_journal_destroy(sc);
			return (NULL);
		}
		sc->sc_dconsumer = cp;
		/* The last sector of the data provider holds the metadata. */
		sc->sc_mediasize = pp->mediasize - pp->sectorsize;
		sc->sc_sectorsize = pp->sectorsize;
		sc->sc_jstart = md->md_jstart;
		sc->sc_jend = md->md_jend;
		if (md->md_provider[0] != '\0')
			sc->sc_flags |= GJF_DEVICE_HARDCODED;
		sc->sc_journal_offset = md->md_joffset;
		sc->sc_journal_id = md->md_jid;
		sc->sc_journal_previous_id = md->md_jid;
	}
	if (md->md_type & GJ_TYPE_JOURNAL) {
		if (cp == NULL) {
			cp = g_new_consumer(gp);
			error = g_attach(cp, pp);
			KASSERT(error == 0, ("Cannot attach to %s (error=%d).",
			    pp->name, error));
			error = g_access(cp, 1, 1, 1);
			if (error != 0) {
				GJ_DEBUG(0, "Cannot access %s (error=%d).",
				    pp->name, error);
				g_journal_destroy(sc);
				return (NULL);
			}
		} else {
			/*
			 * Journal is on the same provider as data, which means
			 * that data provider ends where journal starts.
			 */
			sc->sc_mediasize = md->md_jstart;
		}
		sc->sc_jconsumer = cp;
	}

	if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE) {
		/* Journal is not complete yet. */
		return (gp);
	} else {
		/* Journal complete, cancel timeout. */
		callout_drain(&sc->sc_callout);
	}

	error = kproc_create(g_journal_worker, sc, &sc->sc_worker, 0, 0,
	    "g_journal %s", sc->sc_name);
	if (error != 0) {
		GJ_DEBUG(0, "Cannot create worker thread for %s.journal.",
		    sc->sc_name);
		g_journal_destroy(sc);
		return (NULL);
	}

	return (gp);
}
/*
 * Deferred GEOM event handler: detach and free the consumer passed in
 * 'arg'.  Runs with the topology lock held.
 */
static void
g_journal_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *consumer = arg;

	g_topology_assert();
	g_detach(consumer);
	g_destroy_consumer(consumer);
}
/*
 * Tear down journal device 'sc': flush and switch the journal one last
 * time, stop the worker thread, mark the on-disk metadata clean and
 * destroy the geom.  Fails with EBUSY if the provider is still open.
 * Called with the topology lock held; drops and re-acquires it.
 */
static int
g_journal_destroy(struct g_journal_softc *sc)
{
	struct g_geom *gp;
	struct g_provider *pp;
	struct g_consumer *cp;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);

	gp = sc->sc_geom;
	pp = LIST_FIRST(&gp->provider);
	if (pp != NULL) {
		if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) {
			GJ_DEBUG(1, "Device %s is still open (r%dw%de%d).",
			    pp->name, pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
		g_error_provider(pp, ENXIO);
		/* Flush remaining records and terminate the active journal. */
		g_journal_flush(sc);
		g_journal_flush_send(sc);
		g_journal_switch(sc);
	}

	sc->sc_flags |= (GJF_DEVICE_DESTROY | GJF_DEVICE_CLEAN);

	g_topology_unlock();

	if (sc->sc_rootmount != NULL) {
		GJ_DEBUG(1, "root_mount_rel %p", sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}

	callout_drain(&sc->sc_callout);
	/* Wake the worker and wait until it acknowledges shutdown. */
	mtx_lock(&sc->sc_mtx);
	wakeup(sc);
	while (sc->sc_worker != NULL)
		msleep(&sc->sc_worker, &sc->sc_mtx, PRIBIO, "gj:destroy", 0);
	mtx_unlock(&sc->sc_mtx);

	if (pp != NULL) {
		GJ_DEBUG(1, "Marking %s as clean.", sc->sc_name);
		g_journal_metadata_update(sc);
		g_topology_lock();
		pp->flags |= G_PF_WITHER;
		g_orphan_provider(pp, ENXIO);
	} else {
		g_topology_lock();
	}
	mtx_destroy(&sc->sc_mtx);

	if (sc->sc_current_count != 0) {
		GJ_DEBUG(0, "Warning! Number of current requests %d.",
		    sc->sc_current_count);
	}

	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (cp->acr + cp->acw + cp->ace > 0)
			g_access(cp, -1, -1, -1);
		/*
		 * We keep all consumers open for writing, so if I'll detach
		 * and destroy consumer here, I'll get providers for taste, so
		 * journal will be started again.
		 * Sending an event here, prevents this from happening.
		 */
		g_post_event(g_journal_destroy_consumer, cp, M_WAITOK, NULL);
	}
	gp->softc = NULL;
	g_wither_geom(gp, ENXIO);
	free(sc, M_JOURNAL);
	return (0);
}
/*
 * Orphan method for the temporary geom used while tasting; tasting
 * never sleeps with the consumer attached, so this must never run.
 * NOTE(review): KASSERT() compiles to a no-op in kernels built without
 * INVARIANTS, in which case this function would silently return.
 */
static void
g_journal_taste_orphan(struct g_consumer *cp)
{

	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
	    cp->provider->name));
}
/*
 * GEOM taste method: probe provider 'pp' for gjournal metadata and, if
 * valid metadata for this provider is found, create (or complete) the
 * corresponding journal geom.  Returns the new geom, or NULL if the
 * provider carries no usable gjournal metadata.
 */
static struct g_geom *
g_journal_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_journal_metadata md;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_topology_assert();
	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	GJ_DEBUG(2, "Tasting %s.", pp->name);
	/* Never taste providers published by our own class. */
	if (pp->geom->class == mp)
		return (NULL);
	/* Use a throw-away geom/consumer pair just to read the metadata. */
	gp = g_new_geomf(mp, "journal:taste");
	/* This orphan function should be never called. */
	gp->orphan = g_journal_taste_orphan;
	cp = g_new_consumer(gp);
	/* NOTE(review): g_attach() return value is ignored here — confirm
	 * it cannot fail for a freshly created consumer in this context. */
	g_attach(cp, pp);
	error = g_journal_metadata_read(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);
	/*
	 * If the metadata hardcodes a provider name it must match this
	 * provider, and a recorded provider size must match as well.
	 */
	if (md.md_provider[0] != '\0' &&
	    !g_compare_names(md.md_provider, pp->name))
		return (NULL);
	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
		return (NULL);
	if (g_journal_debug >= 2)
		journal_metadata_dump(&md);
	gp = g_journal_create(mp, pp, &md);
	return (gp);
}
/*
 * Look up a fully configured journal device by name.  'name' may be
 * given with or without the "/dev/" prefix and may match either the
 * softc name or the name of the exported provider.  Returns NULL when
 * no matching, complete, non-dying device exists.
 */
static struct g_journal_softc *
g_journal_find_device(struct g_class *mp, const char *name)
{
	struct g_journal_softc *sc;
	struct g_geom *gp;
	struct g_provider *pp;

	/* Accept both "foo" and "/dev/foo". */
	if (strncmp(name, "/dev/", 5) == 0)
		name += 5;
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		/* Skip devices that are going away or still incomplete. */
		if (sc->sc_flags & GJF_DEVICE_DESTROY)
			continue;
		if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
			continue;
		if (strcmp(sc->sc_name, name) == 0)
			return (sc);
		pp = LIST_FIRST(&gp->provider);
		if (pp != NULL && strcmp(pp->name, name) == 0)
			return (sc);
	}
	return (NULL);
}
/*
 * Handle the "destroy"/"stop" gctl verbs: destroy every journal device
 * named in the request arguments, reporting an error and stopping at
 * the first failure.
 */
static void
g_journal_ctl_destroy(struct gctl_req *req, struct g_class *mp)
{
	struct g_journal_softc *sc;
	const char *devname;
	char argname[16];
	int *nargs;
	int error, idx;

	g_topology_assert();
	nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
	if (nargs == NULL) {
		gctl_error(req, "No '%s' argument.", "nargs");
		return;
	}
	if (*nargs <= 0) {
		gctl_error(req, "Missing device(s).");
		return;
	}
	for (idx = 0; idx < *nargs; idx++) {
		/* Arguments are passed as "arg0", "arg1", ... */
		snprintf(argname, sizeof(argname), "arg%d", idx);
		devname = gctl_get_asciiparam(req, argname);
		if (devname == NULL) {
			gctl_error(req, "No 'arg%d' argument.", idx);
			return;
		}
		sc = g_journal_find_device(mp, devname);
		if (sc == NULL) {
			gctl_error(req, "No such device: %s.", devname);
			return;
		}
		error = g_journal_destroy(sc);
		if (error != 0) {
			gctl_error(req, "Cannot destroy device %s (error=%d).",
			    LIST_FIRST(&sc->sc_geom->provider)->name, error);
			return;
		}
	}
}
/*
 * Handle the "sync" gctl verb: wake the switcher thread and wait,
 * with the topology lock temporarily dropped, until the requested
 * journal switch has completed (sync_requested drops back to zero).
 */
static void
g_journal_ctl_sync(struct gctl_req *req __unused, struct g_class *mp __unused)
{

	g_topology_assert();
	g_topology_unlock();
	g_journal_sync_requested++;
	wakeup(&g_journal_switcher_state);
	while (g_journal_sync_requested > 0)
		tsleep(&g_journal_sync_requested, PRIBIO, "j:sreq", hz / 2);
	g_topology_lock();
}
/*
 * gctl entry point: validate the userland/kernel interface version and
 * dispatch to the handler for the requested verb ("destroy"/"stop" or
 * "sync").
 */
static void
g_journal_config(struct gctl_req *req, struct g_class *mp, const char *verb)
{
	uint32_t *version;

	g_topology_assert();
	version = gctl_get_paraml(req, "version", sizeof(*version));
	if (version == NULL) {
		gctl_error(req, "No '%s' argument.", "version");
		return;
	}
	if (*version != G_JOURNAL_VERSION) {
		gctl_error(req, "Userland and kernel parts are out of sync.");
		return;
	}
	if (strcmp(verb, "destroy") == 0 || strcmp(verb, "stop") == 0)
		g_journal_ctl_destroy(req, mp);
	else if (strcmp(verb, "sync") == 0)
		g_journal_ctl_sync(req, mp);
	else
		gctl_error(req, "Unknown verb.");
}
/*
 * Dump the XML configuration fragment for a journal geom: the role of
 * each consumer (Data and/or Journal, plus the journal extent) and the
 * device ID at geom level.
 */
static void
g_journal_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_journal_softc *sc;

	g_topology_assert();

	sc = gp->softc;
	if (sc == NULL)
		return;
	if (pp != NULL) {
		/* Nothing here. */
	} else if (cp != NULL) {
		sbuf_printf(sb, "%s<Role>", indent);
		if (cp == sc->sc_dconsumer)
			sbuf_printf(sb, "Data");
		if (cp == sc->sc_jconsumer) {
			/* Emit a separator when the consumer plays both roles. */
			if (cp == sc->sc_dconsumer)
				sbuf_printf(sb, ",");
			sbuf_printf(sb, "Journal");
		}
		sbuf_printf(sb, "</Role>\n");
		if (cp == sc->sc_jconsumer) {
			sbuf_printf(sb, "<Jstart>%jd</Jstart>\n",
			    (intmax_t)sc->sc_jstart);
			sbuf_printf(sb, "<Jend>%jd</Jend>\n",
			    (intmax_t)sc->sc_jend);
		}
	} else {
		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
	}
}
/* Registered in g_journal_init(); deregistered in g_journal_fini(). */
static eventhandler_tag g_journal_event_shutdown = NULL;
static eventhandler_tag g_journal_event_lowmem = NULL;
/*
 * shutdown_post_sync event handler: cleanly destroy every journal
 * device so that the journals are marked clean before the system goes
 * down.  Skipped entirely when shutting down after a panic.
 */
static void
g_journal_shutdown(void *arg, int howto __unused)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;

	if (panicstr != NULL)
		return;
	mp = arg;
	DROP_GIANT();
	g_topology_lock();
	/* SAFE variant: g_journal_destroy() removes gp from the list. */
	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
		if (gp->softc == NULL)
			continue;
		GJ_DEBUG(0, "Shutting down geom %s.", gp->name);
		g_journal_destroy(gp->softc);
	}
	g_topology_unlock();
	PICKUP_GIANT();
}
/*
* Free cached requests from inactive queue in case of low memory.
* We free GJ_FREE_AT_ONCE elements at once.
*/
#define GJ_FREE_AT_ONCE 4
/*
 * vm_lowmem event handler: walk all journal devices and release cached
 * data buffers from bios on their inactive queues, freeing at most
 * GJ_FREE_AT_ONCE buffers in total.
 */
static void
g_journal_lowmem(void *arg, int howto __unused)
{
	struct g_journal_softc *sc;
	struct g_class *mp;
	struct g_geom *gp;
	struct bio *bp;
	u_int nfree = GJ_FREE_AT_ONCE;

	g_journal_stats_low_mem++;
	mp = arg;
	DROP_GIANT();
	g_topology_lock();
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL || (sc->sc_flags & GJF_DEVICE_DESTROY))
			continue;
		mtx_lock(&sc->sc_mtx);
		/*
		 * NOTE(review): nfree is decremented for every bio examined,
		 * not only for those whose data is actually freed — confirm
		 * this is the intended accounting.
		 */
		for (bp = sc->sc_inactive.jj_queue; nfree > 0 && bp != NULL;
		    nfree--, bp = bp->bio_next) {
			/*
			 * This is safe to free the bio_data, because:
			 * 1. If bio_data is NULL it will be read from the
			 *    inactive journal.
			 * 2. If bp is sent down, it is first removed from the
			 *    inactive queue, so it's impossible to free the
			 *    data from under in-flight bio.
			 * On the other hand, freeing elements from the active
			 * queue, is not safe.
			 */
			if (bp->bio_data != NULL) {
				GJ_DEBUG(2, "Freeing data from %s.",
				    sc->sc_name);
				gj_free(bp->bio_data, bp->bio_length);
				bp->bio_data = NULL;
			}
		}
		mtx_unlock(&sc->sc_mtx);
		/* Quota exhausted; stop scanning further devices. */
		if (nfree == 0)
			break;
	}
	g_topology_unlock();
	PICKUP_GIANT();
}
static void g_journal_switcher(void *arg);
/*
 * Class init method: derive the journal cache limits from the kernel
 * memory size, register shutdown and low-memory event handlers and
 * start the global switcher thread.
 */
static void
g_journal_init(struct g_class *mp)
{
	int error;

	/* Pick a conservative value if provided value sucks. */
	if (g_journal_cache_divisor <= 0 ||
	    (vm_kmem_size / g_journal_cache_divisor == 0)) {
		g_journal_cache_divisor = 5;
	}
	if (g_journal_cache_limit > 0) {
		/* Recompute the limit and the low watermark from vm_kmem_size. */
		g_journal_cache_limit = vm_kmem_size / g_journal_cache_divisor;
		g_journal_cache_low =
		    (g_journal_cache_limit / 100) * g_journal_cache_switch;
	}
	g_journal_event_shutdown = EVENTHANDLER_REGISTER(shutdown_post_sync,
	    g_journal_shutdown, mp, EVENTHANDLER_PRI_FIRST);
	if (g_journal_event_shutdown == NULL)
		GJ_DEBUG(0, "Warning! Cannot register shutdown event.");
	g_journal_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
	    g_journal_lowmem, mp, EVENTHANDLER_PRI_FIRST);
	if (g_journal_event_lowmem == NULL)
		GJ_DEBUG(0, "Warning! Cannot register lowmem event.");
	error = kproc_create(g_journal_switcher, mp, NULL, 0, 0,
	    "g_journal switcher");
	/* NOTE(review): 'error' is only checked when INVARIANTS is enabled. */
	KASSERT(error == 0, ("Cannot create switcher thread."));
}
/*
 * Class fini method: deregister the event handlers, then tell the
 * switcher thread to exit and wait until it confirms its death.
 */
static void
g_journal_fini(struct g_class *mp)
{

	if (g_journal_event_shutdown != NULL) {
		EVENTHANDLER_DEREGISTER(shutdown_post_sync,
		    g_journal_event_shutdown);
	}
	if (g_journal_event_lowmem != NULL)
		EVENTHANDLER_DEREGISTER(vm_lowmem, g_journal_event_lowmem);
	g_journal_switcher_state = GJ_SWITCHER_DIE;
	wakeup(&g_journal_switcher_state);
	/* Poll until the switcher acknowledges by setting GJ_SWITCHER_DIED. */
	while (g_journal_switcher_state != GJ_SWITCHER_DIED)
		tsleep(&g_journal_switcher_state, PRIBIO, "jfini:wait", hz / 5);
	GJ_DEBUG(1, "Switcher died.");
}
DECLARE_GEOM_CLASS(g_journal_class, g_journal);
/*
 * Look up the file-system descriptor matching 'fstype' in the
 * NULL-terminated g_journal_filesystems[] table.  Returns NULL when
 * the file system type is not supported.
 */
static const struct g_journal_desc *
g_journal_find_desc(const char *fstype)
{
	const struct g_journal_desc *desc;
	int i;

	for (i = 0; (desc = g_journal_filesystems[i]) != NULL; i++) {
		if (strcmp(desc->jd_fstype, fstype) == 0)
			break;
	}
	return (desc);
}
/*
 * Request a journal switch on 'sc' and sleep until the worker thread
 * completes it (clears GJF_DEVICE_SWITCH).  Must be called with sc_mtx
 * held; the lock is held again on return.
 */
static void
g_journal_switch_wait(struct g_journal_softc *sc)
{
	struct bintime bt;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	if (g_journal_debug >= 2) {
		if (sc->sc_flush_in_progress > 0) {
			GJ_DEBUG(2, "%d requests flushing.",
			    sc->sc_flush_in_progress);
		}
		if (sc->sc_copy_in_progress > 0) {
			GJ_DEBUG(2, "%d requests copying.",
			    sc->sc_copy_in_progress);
		}
		if (sc->sc_flush_count > 0) {
			GJ_DEBUG(2, "%d requests to flush.",
			    sc->sc_flush_count);
		}
		if (sc->sc_delayed_count > 0) {
			GJ_DEBUG(2, "%d requests delayed.",
			    sc->sc_delayed_count);
		}
	}
	g_journal_stats_switches++;
	if (sc->sc_copy_in_progress > 0)
		g_journal_stats_wait_for_copy++;
	GJ_TIMER_START(1, &bt);
	/* Flip from "switch pending" to "switch in progress" and wake worker. */
	sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
	sc->sc_flags |= GJF_DEVICE_SWITCH;
	wakeup(sc);
	while (sc->sc_flags & GJF_DEVICE_SWITCH) {
		msleep(&sc->sc_journal_copying, &sc->sc_mtx, PRIBIO,
		    "gj:switch", 0);
	}
	GJ_TIMER_STOP(1, &bt, "Switch time of %s", sc->sc_name);
}
/*
 * Perform a journal switch for every eligible journal device: mark all
 * complete, non-dying devices as "before switch", then for each mounted
 * read-write file system backed by a journal device sync it, suspend
 * writes, mark it clean and switch the journal; finally switch any
 * remaining marked devices that have no corresponding mount.
 */
static void
g_journal_do_switch(struct g_class *classp)
{
	struct g_journal_softc *sc;
	const struct g_journal_desc *desc;
	struct g_geom *gp;
	struct mount *mp;
	struct bintime bt;
	char *mountpoint;
	int error, save, vfslocked;

	DROP_GIANT();
	g_topology_lock();
	/* Phase 1: flag all eligible devices as awaiting a switch. */
	LIST_FOREACH(gp, &classp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_flags & GJF_DEVICE_DESTROY)
			continue;
		if ((sc->sc_type & GJ_TYPE_COMPLETE) != GJ_TYPE_COMPLETE)
			continue;
		mtx_lock(&sc->sc_mtx);
		sc->sc_flags |= GJF_DEVICE_BEFORE_SWITCH;
		mtx_unlock(&sc->sc_mtx);
	}
	g_topology_unlock();
	PICKUP_GIANT();

	/* Phase 2: handle devices that back a mounted file system. */
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_gjprovider == NULL)
			continue;
		if (mp->mnt_flag & MNT_RDONLY)
			continue;
		desc = g_journal_find_desc(mp->mnt_stat.f_fstypename);
		if (desc == NULL)
			continue;
		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
			continue;
		/* mtx_unlock(&mountlist_mtx) was done inside vfs_busy() */

		DROP_GIANT();
		g_topology_lock();
		sc = g_journal_find_device(classp, mp->mnt_gjprovider);
		g_topology_unlock();
		PICKUP_GIANT();

		if (sc == NULL) {
			GJ_DEBUG(0, "Cannot find journal geom for %s.",
			    mp->mnt_gjprovider);
			goto next;
		} else if (JEMPTY(sc)) {
			/* Nothing journalled since the last switch. */
			mtx_lock(&sc->sc_mtx);
			sc->sc_flags &= ~GJF_DEVICE_BEFORE_SWITCH;
			mtx_unlock(&sc->sc_mtx);
			GJ_DEBUG(3, "No need for %s switch.", sc->sc_name);
			goto next;
		}

		mountpoint = mp->mnt_stat.f_mntonname;

		vfslocked = VFS_LOCK_GIANT(mp);

		error = vn_start_write(NULL, &mp, V_WAIT);
		if (error != 0) {
			VFS_UNLOCK_GIANT(vfslocked);
			GJ_DEBUG(0, "vn_start_write(%s) failed (error=%d).",
			    mountpoint, error);
			goto next;
		}
		/* Sync the file system before suspending it. */
		save = curthread_pflags_set(TDP_SYNCIO);

		GJ_TIMER_START(1, &bt);
		vfs_msync(mp, MNT_NOWAIT);
		GJ_TIMER_STOP(1, &bt, "Msync time of %s", mountpoint);

		GJ_TIMER_START(1, &bt);
		error = VFS_SYNC(mp, MNT_NOWAIT);
		if (error == 0)
			GJ_TIMER_STOP(1, &bt, "Sync time of %s", mountpoint);
		else {
			GJ_DEBUG(0, "Cannot sync file system %s (error=%d).",
			    mountpoint, error);
		}

		curthread_pflags_restore(save);

		vn_finished_write(mp);

		if (error != 0) {
			VFS_UNLOCK_GIANT(vfslocked);
			goto next;
		}

		/*
		 * Send BIO_FLUSH before freezing the file system, so it can be
		 * faster after the freeze.
		 */
		GJ_TIMER_START(1, &bt);
		g_journal_flush_cache(sc);
		GJ_TIMER_STOP(1, &bt, "BIO_FLUSH time of %s", sc->sc_name);

		GJ_TIMER_START(1, &bt);
		error = vfs_write_suspend(mp);
		VFS_UNLOCK_GIANT(vfslocked);
		GJ_TIMER_STOP(1, &bt, "Suspend time of %s", mountpoint);
		if (error != 0) {
			GJ_DEBUG(0, "Cannot suspend file system %s (error=%d).",
			    mountpoint, error);
			goto next;
		}

		/* Mark the file system clean while it is frozen. */
		error = desc->jd_clean(mp);
		if (error != 0)
			goto next;

		mtx_lock(&sc->sc_mtx);
		g_journal_switch_wait(sc);
		mtx_unlock(&sc->sc_mtx);

		vfs_write_resume(mp);
next:
		mtx_lock(&mountlist_mtx);
		vfs_unbusy(mp);
	}
	mtx_unlock(&mountlist_mtx);

	/*
	 * Phase 3: switch devices which are still flagged, i.e. those with
	 * no mounted (or no syncable) file system on top.
	 */
	sc = NULL;
	for (;;) {
		DROP_GIANT();
		g_topology_lock();
		LIST_FOREACH(gp, &g_journal_class.geom, geom) {
			sc = gp->softc;
			if (sc == NULL)
				continue;
			mtx_lock(&sc->sc_mtx);
			if ((sc->sc_type & GJ_TYPE_COMPLETE) == GJ_TYPE_COMPLETE &&
			    !(sc->sc_flags & GJF_DEVICE_DESTROY) &&
			    (sc->sc_flags & GJF_DEVICE_BEFORE_SWITCH)) {
				/* Leave the loop with sc_mtx still held. */
				break;
			}
			mtx_unlock(&sc->sc_mtx);
			sc = NULL;
		}
		g_topology_unlock();
		PICKUP_GIANT();
		if (sc == NULL)
			break;
		mtx_assert(&sc->sc_mtx, MA_OWNED);
		g_journal_switch_wait(sc);
		mtx_unlock(&sc->sc_mtx);
	}
}
/*
* TODO: Switcher thread should be started on first geom creation and killed on
* last geom destruction.
*/
/*
 * Global switcher thread: periodically (every g_journal_switch_time
 * seconds), on an explicit wakeup (cache pressure or a "sync" request),
 * perform a journal switch across all devices; exits when
 * g_journal_fini() sets the state to GJ_SWITCHER_DIE.
 */
static void
g_journal_switcher(void *arg)
{
	struct g_class *mp;
	struct bintime bt;
	int error;

	mp = arg;
	curthread->td_pflags |= TDP_NORUNNINGBUF;
	for (;;) {
		g_journal_switcher_wokenup = 0;
		error = tsleep(&g_journal_switcher_state, PRIBIO, "jsw:wait",
		    g_journal_switch_time * hz);
		if (g_journal_switcher_state == GJ_SWITCHER_DIE) {
			/* Acknowledge shutdown request from g_journal_fini(). */
			g_journal_switcher_state = GJ_SWITCHER_DIED;
			GJ_DEBUG(1, "Switcher exiting.");
			wakeup(&g_journal_switcher_state);
			kproc_exit(0);
		}
		/* error == 0 means an explicit wakeup, not the periodic timeout. */
		if (error == 0 && g_journal_sync_requested == 0) {
			GJ_DEBUG(1, "Out of cache, force switch (used=%u "
			    "limit=%u).", g_journal_cache_used,
			    g_journal_cache_limit);
		}
		GJ_TIMER_START(1, &bt);
		g_journal_do_switch(mp);
		GJ_TIMER_STOP(1, &bt, "Entire switch time");
		/* Release any threads blocked in g_journal_ctl_sync(). */
		if (g_journal_sync_requested > 0) {
			g_journal_sync_requested = 0;
			wakeup(&g_journal_sync_requested);
		}
	}
}
| 26.898954 | 96 | 0.656418 |
ddb4e0b3b0f8248805aa06b8212ef44631776af6 | 711 | php | PHP | web_api/app.php | cerad/users | 6f8dec1e8ce49efacc6ff1903aa2af53804f804f | [
"MIT"
] | null | null | null | web_api/app.php | cerad/users | 6f8dec1e8ce49efacc6ff1903aa2af53804f804f | [
"MIT"
] | null | null | null | web_api/app.php | cerad/users | 6f8dec1e8ce49efacc6ff1903aa2af53804f804f | [
"MIT"
] | null | null | null | <?php
error_reporting(E_ALL);
require __DIR__ . '/../vendor/autoload.php';
use Cerad\Component\HttpKernel\KernelApp;
use Cerad\Component\HttpMessage\Request;
use Cerad\Component\DependencyInjection\Container;
use Cerad\Module\UserModule\UserParameters;
use Cerad\Module\UserModule\UserServices;
use Cerad\Module\UserModule\UserRoutes;
/**
 * Application kernel for the user-management API.
 *
 * Extends the base KernelApp and registers the user module's
 * parameters, services and routes into the DI container.
 * NOTE(review): the registration order (parameters, services, routes)
 * presumably matters — confirm before reordering.
 */
class UserApp extends KernelApp
{
    protected function registerServices(Container $container)
    {
        parent::registerServices($container);

        new UserParameters($container);
        new UserServices ($container);
        new UserRoutes ($container);
    }
}
// Front controller: build the application, handle the current HTTP
// request (from PHP superglobals) and emit the response.
$app = new UserApp();
$request = new Request($_SERVER);
$response = $app->handle($request);
$response->send();
| 21.545455 | 59 | 0.749648 |
d59fae290c476eb321269bf0d4f1884811db5e56 | 352 | lua | Lua | premake5.lua | dfnzhc/Hola | d0fd46e9966baafd03c994e49efaa3dd8057309e | [
"MIT"
] | null | null | null | premake5.lua | dfnzhc/Hola | d0fd46e9966baafd03c994e49efaa3dd8057309e | [
"MIT"
] | null | null | null | premake5.lua | dfnzhc/Hola | d0fd46e9966baafd03c994e49efaa3dd8057309e | [
"MIT"
] | null | null | null | include "Dependencies.lua"
workspace "Hola"
configurations { "Debug", "Release" }
architecture "x64"
outputDir = "%{cfg.buildcfg}/%{prj.name}"
-- group "Dependencies"
-- include "Ciao/Deps/glad"
-- include "Ciao/Deps/imgui"
-- group ""
include "src/Hola_Common"
include "src/Hola_DX12"
include "src/Sample_DX12"
include "lib/imgui" | 19.555556 | 41 | 0.681818 |
16e0fe3c4eeb82844d3c8ad045573e2d3b11d913 | 882 | ts | TypeScript | src/app/shared/condition.ts | Serverchip/serverchip-web-app | f77a14165fb85e6d72cbe0598fcd68c2c9e3c7f0 | [
"Apache-2.0"
] | null | null | null | src/app/shared/condition.ts | Serverchip/serverchip-web-app | f77a14165fb85e6d72cbe0598fcd68c2c9e3c7f0 | [
"Apache-2.0"
] | null | null | null | src/app/shared/condition.ts | Serverchip/serverchip-web-app | f77a14165fb85e6d72cbe0598fcd68c2c9e3c7f0 | [
"Apache-2.0"
] | null | null | null | export abstract class Condition {
name: string;
conditionType: string;
constructor(name: string, type: string) {
this.name = name;
this.conditionType = type;
}
}
/**
 * Condition restricted to a set of days and a daily start/end time
 * window.
 */
export class DayHourCondition extends Condition {
  /** Days plus the daily time window during which the condition holds. */
  day_hour: {
    days: string;
    hour: { start: string; end: string };
  };

  constructor(
    name: string,
    conditionType: string,
    day_hour: { days: string; hour: { start: string, end: string } }
  ) {
    super(name, conditionType);
    this.day_hour = day_hour;
  }
}
/**
 * Condition tied to the state of a numbered hardware input port.
 */
export class InputPortCondition extends Condition {
  /** Port number and the state it must be in for the condition to hold. */
  input_port: {
    number: number;
    state: boolean;
  };

  constructor(
    name: string,
    conditionType: string,
    input_port: { number: number; state: boolean }
  ) {
    super(name, conditionType);
    this.input_port = input_port;
  }
}
| 23.210526 | 120 | 0.599773 |
fd62a26e70c295a9ea845774772736fa8119f444 | 3,541 | h | C | include/iris/sensor-api/iris/dist500/DoorState.h | LESA-RPI/irma_ground_truth | 94037de5c7877544738d3569f11f900ba919227e | [
"BSL-1.0"
] | 1 | 2018-06-18T18:08:38.000Z | 2018-06-18T18:08:38.000Z | include/iris/sensor-api/iris/dist500/DoorState.h | duffym4/tof_control | 418714fb29900ce7fdd4ba0bed9105b0c3d623a7 | [
"BSL-1.0"
] | null | null | null | include/iris/sensor-api/iris/dist500/DoorState.h | duffym4/tof_control | 418714fb29900ce7fdd4ba0bed9105b0c3d623a7 | [
"BSL-1.0"
] | 2 | 2018-11-21T17:15:24.000Z | 2019-09-29T02:46:30.000Z | // ***************************************************************************
// * _ _ ____ _ ____ ___ *
// * (_)_ __(_)___ / ___| ___ _ __ ___ ___ _ __ / \ | _ \_ _| *
// * | | '__| / __| \___ \ / _ \ '_ \/ __|/ _ \| '__| / _ \ | |_) | | *
// * | | | | \__ \ ___) | __/ | | \__ \ (_) | | / ___ \| __/| | *
// * |_|_| |_|___/ |____/ \___|_| |_|___/\___/|_| /_/ \_\_| |___| *
// * *
// * Copyright (c) 2010 by iris-GmbH, Berlin All rights reserved *
// * *
// ***************************************************************************
// ---------------------------------------------------------------------------
// Please refer to LICENSE.TXT for more information on copyright and licensing
// terms with respect to this library and its source codes.
// ---------------------------------------------------------------------------
#ifndef DIST500_DOORSTATE_H
#define DIST500_DOORSTATE_H
// iris includes
#include "iris/Common.h"
#include "iris/dist500/PictureBoundData.h"
namespace iris {
namespace dist500 {
/**
* Encapsulates a door state as a class.
* @see DoorController::getDoorState
*/
class SENSORAPI DoorState : public PictureBoundData {

protected:

    /**
     * Holds the door number
     */
    unsigned short door;

    /**
     * Holds the function area
     */
    unsigned short functionArea;

    /**
     * Holds the left wing's opening percentage
     */
    unsigned char left;

    /**
     * Holds the right wing's opening percentage
     */
    unsigned char right;

public:

    /**
     * Constructor
     *
     * @param pictureNumber Number of the picture this state is bound to
     * @param timestamp Timestamp associated with the picture/state
     * @param door Door number
     * @param functionArea Function area
     * @param left Left wing's opening percentage
     * @param right Right wing's opening percentage
     */
    DoorState(unsigned long pictureNumber, unsigned long long timestamp, unsigned short door, unsigned short functionArea, unsigned char left, unsigned char right);

    /*DoorState(const DoorState& other);*/

    /**
     * No-arg constructor (needed for Qt signalling)
     */
    DoorState();

    /**
     * Returns the door number
     *
     * @return Door number
     */
    unsigned short getDoor(void) const;

    /**
     * Returns the function area
     *
     * @return Function area
     */
    unsigned short getFunctionArea(void) const;

    /**
     * Returns the left wing's opening percentage
     *
     * @return Left wing's opening percentage
     */
    unsigned char getLeft(void) const;

    /**
     * Returns the right wing's opening percentage
     *
     * @return Right wing's opening percentage
     */
    unsigned char getRight(void) const;

    /**
     * @brief setLeft
     * @param state New opening percentage for the left wing
     */
    void setLeft(unsigned char state);

    /**
     * @brief setRight
     * @param state New opening percentage for the right wing
     */
    void setRight(unsigned char state);

    /**
     * @brief setFunctionArea
     * @param fa New function area
     * @see SensorInfo::getFunctionArea
     */
    void setFunctionArea(unsigned short fa);

    /**
     * @brief setDoor
     * @param doorNum New door number
     * @see SensorInfo::getDoorNumber
     */
    void setDoor(unsigned short doorNum);

}; // class DoorState
} // namespace dist500
} // namespace iris
#endif // DIST500_DOORSTATE_H
| 25.65942 | 165 | 0.473313 |
56efa75f5030d44c588bb78e078e1b30aad06c25 | 2,993 | ts | TypeScript | src/app/manager/manager-list-item/manager-list-item.component.ts | makimenko/vect | 93ce5c59b830cde4c49b824b70ddb61fbac76ec8 | [
"MIT"
] | 10 | 2021-02-23T08:42:00.000Z | 2022-02-28T01:30:17.000Z | src/app/manager/manager-list-item/manager-list-item.component.ts | makimenko/vect | 93ce5c59b830cde4c49b824b70ddb61fbac76ec8 | [
"MIT"
] | 2 | 2021-02-26T20:50:17.000Z | 2021-08-15T17:46:44.000Z | src/app/manager/manager-list-item/manager-list-item.component.ts | makimenko/vect | 93ce5c59b830cde4c49b824b70ddb61fbac76ec8 | [
"MIT"
] | 2 | 2021-02-24T13:34:23.000Z | 2021-03-06T11:33:35.000Z | import {Component, EventEmitter, Input, OnInit, Output} from '@angular/core';
import {DiagramItem} from '../../data-access/model/diagram-item.model';
import {DiagramService} from '../../data-access/service/diagram.service';
import {MatDialog} from '@angular/material/dialog';
import {ConfirmDialogComponent} from '../../general/confirm-dialog/confirm-dialog.component';
import {Router} from '@angular/router';
import {animate, state, style, transition, trigger} from '@angular/animations';
// Visual states of a list item. The string values double as state names of
// the 'itemState' animation trigger declared in the component below.
enum State {
  active = 'active',
  warn = 'warn',
  deleted = 'deleted',
  clicked = 'clicked'
}
/**
 * List entry for a single diagram: animates state transitions, opens the
 * editor on click and performs a (dialog-confirmed) deletion.
 */
@Component({
  selector: 'app-manager-list-item',
  templateUrl: './manager-list-item.component.html',
  styleUrls: ['./manager-list-item.component.scss'],
  animations: [
    // Fade the item in on :enter; the :leave counterpart is intentionally
    // kept commented out.
    trigger('enterLeaveTrigger', [
      transition(':enter', [
        style({opacity: 0}),
        animate('200ms', style({opacity: 1})),
      ]) /*,
      transition(':leave', [
        animate('200ms', style({opacity: 0}))
      ] ) */
    ]),
    // Drives the visual feedback for the State enum above: red tint while a
    // delete is pending ('warn'), fade-out on 'deleted', quick shrink pulse
    // on 'clicked'.
    trigger('itemState', [
      state(State.warn, style({
        background: 'red',
        opacity: 0.2
      })),
      state(State.deleted, style({
        background: 'red',
        opacity: 0
      })),
      transition('* -> ' + State.deleted, [
        animate('0ms', style({opacity: 1, background: 'red'})),
        animate('600ms', style({opacity: 0}))
      ]),
      transition('* -> ' + State.clicked, [
        animate('70ms', style({transform: 'scale(0.9)'}))
      ])
    ])
  ]
})
export class ManagerListItemComponent implements OnInit {

  /** Diagram represented by this list entry. */
  @Input() item: DiagramItem;
  /** Emits true while the delete request is in flight, then false. */
  @Output() loadingEvent = new EventEmitter<boolean>();
  /** Asks the parent to reload the diagram list after a deletion. */
  @Output() reloadRequired = new EventEmitter<void>();

  /** Current visual state; bound to the 'itemState' animation trigger. */
  itemState = State.active;

  constructor(
    protected diagramService: DiagramService,
    protected dialog: MatDialog,
    private router: Router
  ) {
  }

  ngOnInit(): void {
  }

  /**
   * Deletes the diagram after user confirmation.
   * Immediately switches to the 'warn' state for visual feedback, then either
   * performs the delete (animating to 'deleted' and notifying the parent) or
   * reverts to 'active' when the dialog is dismissed.
   */
  public async doDelete(event: any, id: string): Promise<void> {
    this.itemState = State.warn;
    // Keep the click from bubbling up and triggering open() as well.
    event.stopPropagation();
    const confirmDialogRef = this.dialog.open(ConfirmDialogComponent, {
      data: {
        message: 'Delete diagram?'
      }
    });
    confirmDialogRef.afterClosed().subscribe(async result => {
      if (result) {
        this.itemState = State.deleted;
        this.loadingEvent.emit(true);
        await this.diagramService.delete(id);
        this.loadingEvent.emit(false);
        // Request to refresh list of diagrams
        this.reloadRequired.emit();
      } else {
        // Dialog cancelled: restore the normal appearance.
        this.itemState = State.active;
      }
    }
    );
  }

  /**
   * Opens the editor for the given diagram. The navigation is delayed by
   * 80ms so the 70ms 'clicked' shrink animation can play first.
   */
  public open(event: any, id: string): void {
    event.stopPropagation();
    this.itemState = State.clicked;
    setTimeout(() => {
      this.router.navigate(['/editor', id]);
    }, 80);
  }
}
| 27.971963 | 93 | 0.615102 |
dd540f79ba514c8330c098b284a6473469eed5ba | 2,285 | go | Go | deepfence_agent/tools/apache/scope/probe/process/walker_darwin.go | tuapuikia/ThreatMapper | 22c473e133e2a57a402f27a12d44e1787a2895cc | [
"Apache-2.0"
] | 1,281 | 2020-04-08T17:07:21.000Z | 2022-03-31T11:22:16.000Z | deepfence_agent/tools/apache/scope/probe/process/walker_darwin.go | tuapuikia/ThreatMapper | 22c473e133e2a57a402f27a12d44e1787a2895cc | [
"Apache-2.0"
] | 180 | 2020-04-06T15:40:16.000Z | 2022-03-31T02:19:34.000Z | probe/process/walker_darwin.go | Pradeepkumarbk/scope11 | 0d87f2b54fe8f291fec0d13ccda5d9db3c91c273 | [
"Apache-2.0"
] | 148 | 2020-04-08T21:38:39.000Z | 2022-03-30T18:04:50.000Z | package process
import (
"fmt"
"os/exec"
"strconv"
"strings"
)
// NewWalker returns a Darwin (lsof-based) walker.
// Both parameters are ignored on Darwin; this implementation shells out to
// lsof instead of reading a proc filesystem (presumably they are the proc
// root and a flag used by other platforms' walkers — verify against the
// Linux variant).
func NewWalker(_ string, _ bool) Walker {
	return &walker{}
}

// walker is the stateless Darwin implementation of the Walker interface.
type walker struct{}

const (
	lsofBinary    = "lsof"
	lsofFields    = "cn" // parseLSOF() depends on the order
	netstatBinary = "netstat" // NOTE(review): not referenced in this part of the file — confirm it is still needed
)
// These functions copied from procspy.

// IsProcInAccept returns true if the process has at least one thread
// blocked on the accept() system call. This is not implemented on Darwin,
// so it always reports false.
func IsProcInAccept(procRoot, pid string) bool {
	// Dropped the unused named return value (ret) from the original; the
	// function unconditionally returns false on this platform.
	return false
}
// Walk runs lsof over all Internet files and invokes f once per parsed
// connection-owning process. The second argument to f is always the zero
// Process, because the lsof output carries no parent-process information.
func (walker) Walk(f func(Process, Process)) error {
	output, err := exec.Command(
		lsofBinary,
		"-i", // only Internet files
		"-n", "-P", // no number resolving
		"-w", // no warnings
		"-F", lsofFields, // \n based output of only the fields we want.
	).CombinedOutput()
	if err != nil {
		return err
	}
	processes, err := parseLSOF(string(output))
	if err != nil {
		return err
	}
	for _, process := range processes {
		f(process, Process{})
	}
	return nil
}
// parseLSOF converts `lsof -F cn` output into a map keyed by local address,
// carrying the owning process for every established connection. Listen-only
// entries (no "->" remote part) are skipped.
func parseLSOF(output string) (map[string]Process, error) {
	byLocalAddr := map[string]Process{}
	var current Process
	for _, line := range strings.Split(output, "\n") {
		if len(line) <= 1 {
			continue
		}
		// First byte is the field tag, the rest is its value.
		field, value := line[0], line[1:]
		switch field {
		case 'p':
			pid, err := strconv.Atoi(value)
			if err != nil {
				return nil, fmt.Errorf("invalid 'p' field in lsof output: %#v", value)
			}
			current.PID = pid
		case 'c':
			current.Name = value
		case 'n':
			// 'n' is the last field, with '-F cn'
			// format examples:
			// "192.168.2.111:44013->54.229.241.196:80"
			// "[2003:45:2b57:8900:1869:2947:f942:aba7]:55711->[2a00:1450:4008:c01::11]:443"
			// "*:111" <- a listen
			parts := strings.SplitN(value, "->", 2)
			if len(parts) != 2 {
				// Listen entry: no remote side, nothing to record.
				continue
			}
			byLocalAddr[parts[0]] = Process{PID: current.PID, Name: current.Name}
		default:
			return nil, fmt.Errorf("unexpected lsof field: %c in %#v", field, value)
		}
	}
	return byLocalAddr, nil
}
// GetDeltaTotalJiffies returns 0 - darwin doesn't have jiffies.
// (Jiffies are a Linux scheduler-tick concept; callers receive zeros and a
// nil error — presumably so CPU-usage math simply yields zero on macOS.
// Verify callers tolerate this.)
func GetDeltaTotalJiffies() (uint64, float64, error) {
	return 0, 0.0, nil
}
| 21.35514 | 83 | 0.629322 |
b0c7c8dce6c70d8ff2dee0d4d341ee24189b8fc0 | 1,339 | rs | Rust | benchmarking/benches/my_benchmark.rs | RenWild/FabChess | f86e9e23663e923144d04da962bc8edbc43e5e76 | [
"BSD-3-Clause"
] | 29 | 2019-08-06T15:08:28.000Z | 2022-01-29T19:42:40.000Z | benchmarking/benches/my_benchmark.rs | RenWild/FabChess | f86e9e23663e923144d04da962bc8edbc43e5e76 | [
"BSD-3-Clause"
] | 11 | 2019-07-26T12:16:03.000Z | 2020-08-04T09:18:19.000Z | benchmarking/benches/my_benchmark.rs | RenWild/FabChess | f86e9e23663e923144d04da962bc8edbc43e5e76 | [
"BSD-3-Clause"
] | 4 | 2020-03-28T17:38:37.000Z | 2020-09-26T19:04:50.000Z | use benchmarking::*;
use core_sdk::evaluation::eval_game_state;
use core_sdk::move_generation::makemove::make_move;
use core_sdk::move_generation::movegen::{self, MoveList};
use criterion::{criterion_group, criterion_main, Criterion};
/// Criterion benchmark: static evaluation over the benchmarking positions.
/// The positions are loaded once, outside the timed closure.
pub fn evaluation_bench(c: &mut Criterion) {
    let states = load_benchmarking_positions();
    c.bench_function("evaluation", |b| {
        b.iter(|| {
            // Accumulate the evals and return the sum so the measured calls
            // stay observable and cannot be optimized away as dead code.
            let mut sum = 0;
            for i in 0..BENCHMARKING_POSITIONS_AMOUNT {
                sum += eval_game_state(&states[i]).final_eval as isize;
            }
            sum
        })
    });
}
/// Criterion benchmark: move generation plus make_move over the
/// benchmarking positions. The move list buffer is reused across iterations.
pub fn generate_moves_bench(c: &mut Criterion) {
    let states = load_benchmarking_positions();
    let mut movelist = MoveList::default();
    c.bench_function("movegen", |b| {
        b.iter(|| {
            let mut sum = 0;
            for i in 0..BENCHMARKING_POSITIONS_AMOUNT {
                movegen::generate_moves(&states[i], false, &mut movelist);
                sum += movelist.move_list.len();
                for mv in movelist.move_list.iter() {
                    let g = make_move(&states[i], mv.0);
                    // Fold part of the resulting hash into the checksum so
                    // make_move stays observable to the optimizer.
                    sum += (g.get_hash() & 0xFF) as usize;
                }
            }
            sum
        })
    });
}

// Register both benchmarks with Criterion's standard harness entry point.
criterion_group!(benches, evaluation_bench, generate_moves_bench);
criterion_main!(benches);
| 33.475 | 74 | 0.583271 |
172b9d768d27a335511c2d65fc595b960244dd90 | 252 | kt | Kotlin | src/main/kotlin/org/webscene/server/template/HtmlTemplate.kt | webscene/webscene-server | 59a047c5d098ef8bb031eb5b94a9a4fe3e39149c | [
"Apache-2.0"
] | null | null | null | src/main/kotlin/org/webscene/server/template/HtmlTemplate.kt | webscene/webscene-server | 59a047c5d098ef8bb031eb5b94a9a4fe3e39149c | [
"Apache-2.0"
] | null | null | null | src/main/kotlin/org/webscene/server/template/HtmlTemplate.kt | webscene/webscene-server | 59a047c5d098ef8bb031eb5b94a9a4fe3e39149c | [
"Apache-2.0"
] | null | null | null | package org.webscene.server.template
import org.webscene.server.html.HtmlTag
/**
 * Base for an HTML template.
 * @author Nick Apperley
 */
interface HtmlTemplate {
    /** The root HTML element to use for the template (null until a root tag is assigned). */
    var content: HtmlTag?
}
3ee9c2fc255fdba09c8ef09b00947ddd3ea2f2f3 | 10,407 | h | C | WickedEngine/wiGraphicsDevice_DX11.h | Athomield/WickedEngine | 92d58b8e0834df480b91d31d2558897a4e2a8b0e | [
"Zlib",
"MIT"
] | 1 | 2019-10-22T09:25:16.000Z | 2019-10-22T09:25:16.000Z | WickedEngine/wiGraphicsDevice_DX11.h | vfx-fuhao/WickedEngine | c2b6f443e913918753464fc6686f8c6da2de034f | [
"Zlib",
"MIT"
] | null | null | null | WickedEngine/wiGraphicsDevice_DX11.h | vfx-fuhao/WickedEngine | c2b6f443e913918753464fc6686f8c6da2de034f | [
"Zlib",
"MIT"
] | 1 | 2019-11-07T13:54:15.000Z | 2019-11-07T13:54:15.000Z | #pragma once
#include "CommonInclude.h"
#include "wiGraphicsDevice.h"
#include "wiWindowRegistration.h"
#include "wiContainers.h"
#include <d3d11_3.h>
#include <DXGI1_3.h>
#include <atomic>
namespace wiGraphics
{
	// Direct3D 11 implementation of the GraphicsDevice abstraction.
	// Most mutable state is tracked per command list (arrays indexed by
	// CommandList, sized COMMANDLIST_COUNT).
	class GraphicsDevice_DX11 : public GraphicsDevice
	{
	private:
		// Core device / swapchain objects:
		ID3D11Device* device = nullptr;
		D3D_DRIVER_TYPE driverType;
		D3D_FEATURE_LEVEL featureLevel;
		IDXGISwapChain1* swapChain = nullptr;
		ID3D11RenderTargetView* renderTargetView = nullptr;
		ID3D11Texture2D* backBuffer = nullptr;
		ID3D11DeviceContext* immediateContext = nullptr;
		// One device context, recorded command list and debug-annotation
		// interface per command-list slot:
		ID3D11DeviceContext* deviceContexts[COMMANDLIST_COUNT] = {};
		ID3D11CommandList* commandLists[COMMANDLIST_COUNT] = {};
		ID3DUserDefinedAnnotation* userDefinedAnnotations[COMMANDLIST_COUNT] = {};
		UINT stencilRef[COMMANDLIST_COUNT];
		XMFLOAT4 blendFactor[COMMANDLIST_COUNT];

		// Last-bound pipeline objects per command list — presumably cached to
		// skip redundant state changes (confirm in the .cpp):
		ID3D11VertexShader* prev_vs[COMMANDLIST_COUNT] = {};
		ID3D11PixelShader* prev_ps[COMMANDLIST_COUNT] = {};
		ID3D11HullShader* prev_hs[COMMANDLIST_COUNT] = {};
		ID3D11DomainShader* prev_ds[COMMANDLIST_COUNT] = {};
		ID3D11GeometryShader* prev_gs[COMMANDLIST_COUNT] = {};
		ID3D11ComputeShader* prev_cs[COMMANDLIST_COUNT] = {};
		XMFLOAT4 prev_blendfactor[COMMANDLIST_COUNT] = {};
		UINT prev_samplemask[COMMANDLIST_COUNT] = {};
		ID3D11BlendState* prev_bs[COMMANDLIST_COUNT] = {};
		ID3D11RasterizerState* prev_rs[COMMANDLIST_COUNT] = {};
		UINT prev_stencilRef[COMMANDLIST_COUNT] = {};
		ID3D11DepthStencilState* prev_dss[COMMANDLIST_COUNT] = {};
		ID3D11InputLayout* prev_il[COMMANDLIST_COUNT] = {};
		PRIMITIVETOPOLOGY prev_pt[COMMANDLIST_COUNT] = {};

		// UAVs bound for the raster pipeline, pending until validated:
		ID3D11UnorderedAccessView* raster_uavs[COMMANDLIST_COUNT][8] = {};
		uint8_t raster_uavs_slot[COMMANDLIST_COUNT] = {};
		uint8_t raster_uavs_count[COMMANDLIST_COUNT] = {};
		void validate_raster_uavs(CommandList cmd);

		// Per-command-list transient allocator backing AllocateGPU():
		struct GPUAllocator
		{
			GPUBuffer buffer;
			size_t byteOffset = 0;
			uint64_t residentFrame = 0;
			bool dirty = false;
		} frame_allocators[COMMANDLIST_COUNT];
		void commit_allocations(CommandList cmd);

		void CreateBackBufferResources();

		// Command-list bookkeeping (thread-safe ring buffers hand slots out
		// to worker threads):
		std::atomic<uint8_t> commandlist_count{ 0 };
		wiContainers::ThreadSafeRingBuffer<CommandList, COMMANDLIST_COUNT> free_commandlists;
		wiContainers::ThreadSafeRingBuffer<CommandList, COMMANDLIST_COUNT> active_commandlists;

	public:
		GraphicsDevice_DX11(wiWindowRegistration::window_type window, bool fullscreen = false, bool debuglayer = false);
		virtual ~GraphicsDevice_DX11();

		// Resource creation:
		HRESULT CreateBuffer(const GPUBufferDesc *pDesc, const SubresourceData* pInitialData, GPUBuffer *pBuffer) override;
		HRESULT CreateTexture1D(const TextureDesc* pDesc, const SubresourceData *pInitialData, Texture1D *pTexture1D) override;
		HRESULT CreateTexture2D(const TextureDesc* pDesc, const SubresourceData *pInitialData, Texture2D *pTexture2D) override;
		HRESULT CreateTexture3D(const TextureDesc* pDesc, const SubresourceData *pInitialData, Texture3D *pTexture3D) override;
		HRESULT CreateInputLayout(const VertexLayoutDesc *pInputElementDescs, UINT NumElements, const ShaderByteCode* shaderCode, VertexLayout *pInputLayout) override;
		HRESULT CreateVertexShader(const void *pShaderBytecode, SIZE_T BytecodeLength, VertexShader *pVertexShader) override;
		HRESULT CreatePixelShader(const void *pShaderBytecode, SIZE_T BytecodeLength, PixelShader *pPixelShader) override;
		HRESULT CreateGeometryShader(const void *pShaderBytecode, SIZE_T BytecodeLength, GeometryShader *pGeometryShader) override;
		HRESULT CreateHullShader(const void *pShaderBytecode, SIZE_T BytecodeLength, HullShader *pHullShader) override;
		HRESULT CreateDomainShader(const void *pShaderBytecode, SIZE_T BytecodeLength, DomainShader *pDomainShader) override;
		HRESULT CreateComputeShader(const void *pShaderBytecode, SIZE_T BytecodeLength, ComputeShader *pComputeShader) override;
		HRESULT CreateBlendState(const BlendStateDesc *pBlendStateDesc, BlendState *pBlendState) override;
		HRESULT CreateDepthStencilState(const DepthStencilStateDesc *pDepthStencilStateDesc, DepthStencilState *pDepthStencilState) override;
		HRESULT CreateRasterizerState(const RasterizerStateDesc *pRasterizerStateDesc, RasterizerState *pRasterizerState) override;
		HRESULT CreateSamplerState(const SamplerDesc *pSamplerDesc, Sampler *pSamplerState) override;
		HRESULT CreateQuery(const GPUQueryDesc *pDesc, GPUQuery *pQuery) override;
		HRESULT CreatePipelineState(const PipelineStateDesc* pDesc, PipelineState* pso) override;

		int CreateSubresource(Texture* texture, SUBRESOURCE_TYPE type, UINT firstSlice, UINT sliceCount, UINT firstMip, UINT mipCount) override;

		// Resource destruction (mirrors the Create* set):
		void DestroyResource(GPUResource* pResource) override;
		void DestroyBuffer(GPUBuffer *pBuffer) override;
		void DestroyTexture1D(Texture1D *pTexture1D) override;
		void DestroyTexture2D(Texture2D *pTexture2D) override;
		void DestroyTexture3D(Texture3D *pTexture3D) override;
		void DestroyInputLayout(VertexLayout *pInputLayout) override;
		void DestroyVertexShader(VertexShader *pVertexShader) override;
		void DestroyPixelShader(PixelShader *pPixelShader) override;
		void DestroyGeometryShader(GeometryShader *pGeometryShader) override;
		void DestroyHullShader(HullShader *pHullShader) override;
		void DestroyDomainShader(DomainShader *pDomainShader) override;
		void DestroyComputeShader(ComputeShader *pComputeShader) override;
		void DestroyBlendState(BlendState *pBlendState) override;
		void DestroyDepthStencilState(DepthStencilState *pDepthStencilState) override;
		void DestroyRasterizerState(RasterizerState *pRasterizerState) override;
		void DestroySamplerState(Sampler *pSamplerState) override;
		void DestroyQuery(GPUQuery *pQuery) override;
		void DestroyPipelineState(PipelineState* pso) override;

		bool DownloadResource(const GPUResource* resourceToDownload, const GPUResource* resourceDest, void* dataDest) override;

		void SetName(GPUResource* pResource, const std::string& name) override;

		// Frame lifecycle:
		void PresentBegin(CommandList cmd) override;
		void PresentEnd(CommandList cmd) override;

		void WaitForGPU() override;

		virtual CommandList BeginCommandList() override;

		void SetResolution(int width, int height) override;

		Texture2D GetBackBuffer() override;

		///////////////Thread-sensitive////////////////////////

		// State binding:
		void BindScissorRects(UINT numRects, const Rect* rects, CommandList cmd) override;
		void BindViewports(UINT NumViewports, const ViewPort *pViewports, CommandList cmd) override;
		void BindRenderTargets(UINT NumViews, const Texture2D* const *ppRenderTargets, const Texture2D* depthStencilTexture, CommandList cmd, int subresource = -1) override;
		void ClearRenderTarget(const Texture* pTexture, const FLOAT ColorRGBA[4], CommandList cmd, int subresource = -1) override;
		void ClearDepthStencil(const Texture2D* pTexture, UINT ClearFlags, FLOAT Depth, UINT8 Stencil, CommandList cmd, int subresource = -1) override;
		void BindResource(SHADERSTAGE stage, const GPUResource* resource, UINT slot, CommandList cmd, int subresource = -1) override;
		void BindResources(SHADERSTAGE stage, const GPUResource *const* resources, UINT slot, UINT count, CommandList cmd) override;
		void BindUAV(SHADERSTAGE stage, const GPUResource* resource, UINT slot, CommandList cmd, int subresource = -1) override;
		void BindUAVs(SHADERSTAGE stage, const GPUResource *const* resources, UINT slot, UINT count, CommandList cmd) override;
		void UnbindResources(UINT slot, UINT num, CommandList cmd) override;
		void UnbindUAVs(UINT slot, UINT num, CommandList cmd) override;
		void BindSampler(SHADERSTAGE stage, const Sampler* sampler, UINT slot, CommandList cmd) override;
		void BindConstantBuffer(SHADERSTAGE stage, const GPUBuffer* buffer, UINT slot, CommandList cmd) override;
		void BindVertexBuffers(const GPUBuffer *const* vertexBuffers, UINT slot, UINT count, const UINT* strides, const UINT* offsets, CommandList cmd) override;
		void BindIndexBuffer(const GPUBuffer* indexBuffer, const INDEXBUFFER_FORMAT format, UINT offset, CommandList cmd) override;
		void BindStencilRef(UINT value, CommandList cmd) override;
		void BindBlendFactor(float r, float g, float b, float a, CommandList cmd) override;
		void BindPipelineState(const PipelineState* pso, CommandList cmd) override;
		void BindComputeShader(const ComputeShader* cs, CommandList cmd) override;

		// Draw / dispatch:
		void Draw(UINT vertexCount, UINT startVertexLocation, CommandList cmd) override;
		void DrawIndexed(UINT indexCount, UINT startIndexLocation, UINT baseVertexLocation, CommandList cmd) override;
		void DrawInstanced(UINT vertexCount, UINT instanceCount, UINT startVertexLocation, UINT startInstanceLocation, CommandList cmd) override;
		void DrawIndexedInstanced(UINT indexCount, UINT instanceCount, UINT startIndexLocation, UINT baseVertexLocation, UINT startInstanceLocation, CommandList cmd) override;
		void DrawInstancedIndirect(const GPUBuffer* args, UINT args_offset, CommandList cmd) override;
		void DrawIndexedInstancedIndirect(const GPUBuffer* args, UINT args_offset, CommandList cmd) override;
		void Dispatch(UINT threadGroupCountX, UINT threadGroupCountY, UINT threadGroupCountZ, CommandList cmd) override;
		void DispatchIndirect(const GPUBuffer* args, UINT args_offset, CommandList cmd) override;

		// Copies and updates:
		void CopyTexture2D(const Texture2D* pDst, const Texture2D* pSrc, CommandList cmd) override;
		void CopyTexture2D_Region(const Texture2D* pDst, UINT dstMip, UINT dstX, UINT dstY, const Texture2D* pSrc, UINT srcMip, CommandList cmd) override;
		void MSAAResolve(const Texture2D* pDst, const Texture2D* pSrc, CommandList cmd) override;
		void UpdateBuffer(const GPUBuffer* buffer, const void* data, CommandList cmd, int dataSize = -1) override;

		// GPU queries:
		void QueryBegin(const GPUQuery *query, CommandList cmd) override;
		void QueryEnd(const GPUQuery *query, CommandList cmd) override;
		bool QueryRead(const GPUQuery* query, GPUQueryResult* result) override;

		// Barriers are no-ops on DX11 (the driver tracks hazards itself):
		void UAVBarrier(const GPUResource *const* uavs, UINT NumBarriers, CommandList cmd) override {};
		void TransitionBarrier(const GPUResource *const* resources, UINT NumBarriers, RESOURCE_STATES stateBefore, RESOURCE_STATES stateAfter, CommandList cmd) override {};

		GPUAllocation AllocateGPU(size_t dataSize, CommandList cmd) override;

		// Debug event markers (forwarded to the annotation interfaces):
		void EventBegin(const std::string& name, CommandList cmd) override;
		void EventEnd(CommandList cmd) override;
		void SetMarker(const std::string& name, CommandList cmd) override;
	};
}
| 60.505814 | 169 | 0.806476 |
43a8d6036b56ba83b16a3ad4607d0bea9fe513bf | 4,532 | swift | Swift | jHelper GUI/Utils/ManageControls.swift | kennethdean2010/jHelper-GUI | 5b71446769cc8fd97ddf0bf3f2c6125e9c3cc151 | [
"BSD-3-Clause"
] | 1 | 2022-02-21T05:13:02.000Z | 2022-02-21T05:13:02.000Z | jHelper GUI/Utils/ManageControls.swift | meshyamsundar/jHelper-GUI | 5b71446769cc8fd97ddf0bf3f2c6125e9c3cc151 | [
"BSD-3-Clause"
] | null | null | null | jHelper GUI/Utils/ManageControls.swift | meshyamsundar/jHelper-GUI | 5b71446769cc8fd97ddf0bf3f2c6125e9c3cc151 | [
"BSD-3-Clause"
] | 1 | 2019-12-11T16:33:58.000Z | 2019-12-11T16:33:58.000Z | //
// ManageControls.swift
// JAMF Helper GUI
//
// Created by Jordan Wisniewski on 9/29/15.
//
// Copyright (C) 2016, JAMF Software, LLC All rights reserved.
//
// THIS SOFTWARE IS PROVIDED BY JAMF SOFTWARE, LLC "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JAMF SOFTWARE, LLC BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
import Cocoa
/// Static helpers that enable or disable AppKit controls based on the state
/// of the controls they depend on (text entered, popup selection made, ...).
class ManageControls: NSObject {

    /// Enables `button` only while `dependentPopUp` has a non-placeholder selection.
    public static func button(_ button: NSButton, dependentPopUp: NSPopUpButton) {
        button.isEnabled = dependentPopUp.indexOfSelectedItem > 0
    }

    /// Enables `button` only while every text field in `dependentTextArray` has content.
    public static func button(_ button: NSButton, dependentTextArray: [NSTextField]) {
        let allFilled = !dependentTextArray.contains { $0.stringValue.isEmpty }
        button.isEnabled = allFilled
    }

    /// Enables the popup while the text field has content; otherwise disables
    /// it and resets the selection to the first item.
    public static func popUpButton(_ popUpButton: NSPopUpButton, dependentText: NSTextField) {
        let hasText = dependentText.stringValue.characters.count > 0
        popUpButton.isEnabled = hasText
        if !hasText {
            popUpButton.selectItem(at: 0)
        }
    }

    /// Enables the popup while the other popup has a selection; otherwise
    /// disables it and resets the selection to the first item.
    public static func popUpButton(_ popUpButton: NSPopUpButton, dependentPopUp: NSPopUpButton) {
        let hasSelection = dependentPopUp.indexOfSelectedItem > 0
        popUpButton.isEnabled = hasSelection
        if !hasSelection {
            popUpButton.selectItem(at: 0)
        }
    }

    /// Enables the text field while the popup has a selection; otherwise
    /// clears its contents and disables it.
    public static func textField(_ textField: NSTextField, dependentPopUp: NSPopUpButton) {
        let hasSelection = dependentPopUp.indexOfSelectedItem > 0
        if !hasSelection {
            textField.stringValue = ""
        }
        textField.isEnabled = hasSelection
    }

    /// Enables all segments while the text field has content.
    public static func segmentedControl(_ segmentedControl: NSSegmentedControl, dependentText: NSTextField) {
        enableSegmentedControl(control: segmentedControl, enable: dependentText.stringValue.characters.count > 0)
    }

    /// Enables all segments while the popup has a selection.
    public static func segmentedControl(_ segmentedControl: NSSegmentedControl, dependentPopUp: NSPopUpButton) {
        enableSegmentedControl(control: segmentedControl, enable: dependentPopUp.indexOfSelectedItem > 0)
    }

    /// Enables all segments while the text field has content AND the popup is
    /// either on the given index (when `popUpIndex` is non-nil) or on any
    /// non-placeholder selection (when it is nil).
    public static func segmentedControl(_ segmentedControl: NSSegmentedControl, dependentPopUp: NSPopUpButton, popUpIndex: Int?, dependentText: NSTextField) {
        let hasText = dependentText.stringValue.characters.count > 0
        let popUpMatches: Bool
        if let index = popUpIndex {
            popUpMatches = dependentPopUp.indexOfSelectedItem == index
        } else {
            popUpMatches = dependentPopUp.indexOfSelectedItem > 0
        }
        enableSegmentedControl(control: segmentedControl, enable: popUpMatches && hasText)
    }

    /// Applies `enable` to every segment; on disable it also clears each
    /// segment's selection.
    private static func enableSegmentedControl(control: NSSegmentedControl, enable: Bool) {
        for segment in 0 ..< control.segmentCount {
            control.setEnabled(enable, forSegment: segment)
            if !enable {
                control.setSelected(enable, forSegment: segment)
            }
        }
    }
}
| 40.106195 | 158 | 0.651809 |
2f68f5cced4c13c6a838fe879e7b5050f1c47b7b | 934 | php | PHP | resources/views/layout/layout.blade.php | IKetutMahaWiragawa/Project01 | 5d65ec0440e2c19a85b65fd1bae0f710f0a99d4f | [
"MIT"
] | null | null | null | resources/views/layout/layout.blade.php | IKetutMahaWiragawa/Project01 | 5d65ec0440e2c19a85b65fd1bae0f710f0a99d4f | [
"MIT"
] | null | null | null | resources/views/layout/layout.blade.php | IKetutMahaWiragawa/Project01 | 5d65ec0440e2c19a85b65fd1bae0f710f0a99d4f | [
"MIT"
] | null | null | null | <!DOCTYPE html>
<html >
<head>
<title>Latihan View 04</title>
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/3.4.1/css/bootstrap.min.css">
</head>
<body>
<nav class="navbar navbar-inverse">
<div class="container-fluid">
<div class="navbar-header">
<a class="navbar-brand" href="#">WebSiteName</a>
</div>
<ul class="nav navbar-nav">
<li class="active"><a href="#">Home</a></li>
<li><a href="#">Page 1</a></li>
<li><a href="#">Page 2</a></li>
<li><a href="#">Page 3</a></li>
</ul>
</div>
</nav>
<div class="container-fluid">
<div class="row">
@yield('content')
</div>
</div>
<script type="text/javascript" src="https://stackpath.bootstrapcdn.com/bootstrap/3.4.1/js/bootstrap.min.js"></script>
</body>
</html> | 32.206897 | 117 | 0.514989 |
bcaf2407ef57a2b64424a92f3623c4a8595c387b | 1,678 | js | JavaScript | static/js/mainpageCtrl.js | pulpocoders/pulpoforms-django | 60d268faa492ba8256cc32b3108d6a27dabcd40f | [
"Apache-2.0"
] | 45 | 2015-07-30T21:52:00.000Z | 2020-03-25T16:53:34.000Z | static/js/mainpageCtrl.js | pulpocoders/pulpo-forms-django | 60d268faa492ba8256cc32b3108d6a27dabcd40f | [
"Apache-2.0"
] | 5 | 2016-10-18T12:17:54.000Z | 2017-11-09T10:39:34.000Z | static/js/mainpageCtrl.js | pulpocoders/pulpo-forms-django | 60d268faa492ba8256cc32b3108d6a27dabcd40f | [
"Apache-2.0"
] | 13 | 2015-08-01T01:57:35.000Z | 2022-03-28T21:14:02.000Z | 'use strict';
(function () {
var app = angular.module('dynamicFormsFrameworkAdmin');
/*
* This controller handles the logic to display the list of forms
*/
app.controller('MainPageCtrl', ['$scope','$http','$location',
function ($scope, $http, $location) {
var mainPage = this;
mainPage.formSlugParam = ($location.search()).form;
mainPage.versionIdParam = ($location.search()).ver;
mainPage.orders = [
{name: 'Id', value: 'id'},
{name: 'Owner', value: 'owner'},
{name: 'Title', value: 'title'},
];
mainPage.selectascdsc = function(ascdsc){
mainPage.ascdsc = ascdsc;
};
mainPage.url = function(){
var parser = $location.absUrl();
var arr = parser.split('/');
var crit = arr[arr.length - 3];
var sent = arr[arr.length - 2];
return ([crit, sent]);
};
mainPage.actualOrder = function(){
if (mainPage.url()[0] == 'owner'){
return mainPage.orders[1];
} else if (mainPage.url()[0] == 'title'){
return mainPage.orders[2];
} else {
return mainPage.orders[0];
}
};
if (mainPage.url()[1] == 'dsc'){
mainPage.selectascdsc('dsc');
mainPage.actualascdsc = 'DSC';
} else {
mainPage.selectascdsc('asc');
mainPage.actualascdsc = 'ASC';
}
mainPage.getOrderUrl = function(){
return urlBase+mainPage.myOrder.value+'/'+mainPage.ascdsc;
};
}]);
})();
| 28.931034 | 70 | 0.494041 |
c2d80ddc442868b858c4c9897a832a0b45a4bc70 | 1,293 | sql | SQL | migrations/20211113195822_updated_blockchain_project_relation/migration.sql | andresg747/baldur-app | 93406c2f5b89067e6fd2aab3f0d68f413076b8d2 | [
"0BSD"
] | null | null | null | migrations/20211113195822_updated_blockchain_project_relation/migration.sql | andresg747/baldur-app | 93406c2f5b89067e6fd2aab3f0d68f413076b8d2 | [
"0BSD"
] | null | null | null | migrations/20211113195822_updated_blockchain_project_relation/migration.sql | andresg747/baldur-app | 93406c2f5b89067e6fd2aab3f0d68f413076b8d2 | [
"0BSD"
] | null | null | null | /*
Warnings:
- You are about to drop the column `projectId` on the `Blockchain` table. All the data in the column will be lost.
- Added the required column `tokenId` to the `Blockchain` table without a default value. This is not possible if the table is not empty.
*/
-- DropForeignKey
ALTER TABLE `Blockchain` DROP FOREIGN KEY `Blockchain_projectId_fkey`;
-- AlterTable
ALTER TABLE `Blockchain` DROP COLUMN `projectId`,
ADD COLUMN `tokenId` VARCHAR(191) NOT NULL;
-- AlterTable
ALTER TABLE `Project` ADD COLUMN `bannerUrl` VARCHAR(191) NULL;
-- CreateTable
CREATE TABLE `_BlockchainToProject` (
`A` VARCHAR(191) NOT NULL,
`B` VARCHAR(191) NOT NULL,
UNIQUE INDEX `_BlockchainToProject_AB_unique`(`A`, `B`),
INDEX `_BlockchainToProject_B_index`(`B`)
) DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
-- AddForeignKey
ALTER TABLE `Blockchain` ADD CONSTRAINT `Blockchain_tokenId_fkey` FOREIGN KEY (`tokenId`) REFERENCES `Token`(`id`) ON DELETE RESTRICT ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE `_BlockchainToProject` ADD FOREIGN KEY (`A`) REFERENCES `Blockchain`(`id`) ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE `_BlockchainToProject` ADD FOREIGN KEY (`B`) REFERENCES `Project`(`id`) ON DELETE CASCADE ON UPDATE CASCADE;
| 36.942857 | 152 | 0.74942 |
490301996f235103083f9f733d639e25da1a8a52 | 1,478 | py | Python | test/test_compression.py | Peter42/iasi | fc799d542c2bb80c3f559bc2f9e833ac330a5506 | [
"MIT"
] | null | null | null | test/test_compression.py | Peter42/iasi | fc799d542c2bb80c3f559bc2f9e833ac330a5506 | [
"MIT"
] | 3 | 2019-05-02T12:49:21.000Z | 2019-06-12T09:11:00.000Z | test/test_compression.py | Peter42/iasi | fc799d542c2bb80c3f559bc2f9e833ac330a5506 | [
"MIT"
] | 1 | 2019-10-18T21:33:33.000Z | 2019-10-18T21:33:33.000Z | import datetime
import unittest
import luigi
import numpy as np
from netCDF4 import Dataset
from iasi.compression import (CompressDataset, CompressDateRange,
DecompressDataset)
class TestCompression(unittest.TestCase):
def test_dataset_compression(self):
task = CompressDataset(
file='test/resources/MOTIV-single-event.nc',
dst='/tmp/iasi',
force=True,
threshold=0.01,
log_file=False
)
assert luigi.build([task], local_scheduler=True)
with Dataset(task.output().path) as nc:
state = nc['state']
subgroups = state.groups.keys()
self.assertListEqual(
list(subgroups), ['GHG', 'HNO3', 'Tatm', 'Tskin', 'WV'])
def test_dataset_decompression(self):
task = DecompressDataset(
file='test/resources/MOTIV-single-event.nc',
dst='/tmp/iasi',
force=True,
log_file=False,
compress_upstream=True
)
success = luigi.build([task], local_scheduler=True)
self.assertTrue(success)
class TestDateInterval(unittest.TestCase):
def test_date_range(self):
# end date is not inclusive
interval = luigi.date_interval.Custom.parse('2016-06-01-2016-06-30')
task = CompressDateRange(date_interval=interval, dst='/tmp/iasi', src='test/resources')
luigi.build([task], local_scheduler=True)
| 30.791667 | 95 | 0.614344 |
4eea2154a162f9a0d00415dea97ed6890cd3846b | 5,474 | kt | Kotlin | src/main/kotlin/se/raneland/ffbe/ui/MainWindow.kt | Raniz85/ffbe-grinder | f20288bfd0ac3b148508b35f56747446c7acbaa9 | [
"MIT"
] | 1 | 2017-10-04T13:56:05.000Z | 2017-10-04T13:56:05.000Z | src/main/kotlin/se/raneland/ffbe/ui/MainWindow.kt | Raniz85/ffbe-grinder | f20288bfd0ac3b148508b35f56747446c7acbaa9 | [
"MIT"
] | 5 | 2017-05-09T13:15:22.000Z | 2017-05-09T13:39:36.000Z | src/main/kotlin/se/raneland/ffbe/ui/MainWindow.kt | Raniz85/ffbe-grinder | f20288bfd0ac3b148508b35f56747446c7acbaa9 | [
"MIT"
] | null | null | null | /*
* Copyright (c) 2017, Daniel Raniz Raneland
*/
package se.raneland.ffbe.ui
import ch.qos.logback.classic.Logger
import net.miginfocom.swing.MigLayout
import org.slf4j.LoggerFactory
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.context.ConfigurableApplicationContext
import org.springframework.stereotype.Component
import se.raneland.ffbe.service.DeviceController
import se.raneland.ffbe.service.Point
import se.raneland.ffbe.state.StateMachine
import se.vidstige.jadb.JadbDevice
import java.awt.Dimension
import java.awt.event.MouseAdapter
import java.awt.event.MouseEvent
import java.awt.event.WindowAdapter
import java.awt.event.WindowEvent
import java.util.concurrent.Executors
import javax.swing.JButton
import javax.swing.JComboBox
import javax.swing.JFrame
import javax.swing.JScrollPane
import javax.swing.JTextArea
/**
* @author Raniz
* @since 2017-01-14.
*/
@Component
class MainWindow // All UI construction happens in the init block below.
@Autowired constructor(val deviceController: DeviceController, context: ConfigurableApplicationContext) : JFrame("Final Fantasy Brave Exvius Controller") {
    // Currently running state machine, or null when stopped.
    var machine: StateMachine? = null
    // UI components
    val screenshot: ImagePanel
    val deviceList: JComboBox<Device>
    val logScroll: JScrollPane
    val logView: JTextArea
    val counterView: JTextArea
    val startButton: JButton
    val stopButton: JButton
    // Logback appender mirroring log output into logView; replaced on each start.
    var logAppender: TextViewAppender? = null
    // Single-threaded executor: all device/state-machine mutations are
    // serialized here instead of running on the Swing event thread.
    val actionExecutor = Executors.newSingleThreadExecutor()
    init {
        preferredSize = Dimension(900, 850)
        // Closing the window shuts down the Spring context and the JVM.
        addWindowListener(object: WindowAdapter() {
            override fun windowClosing(e: WindowEvent?) {
                context.close()
                System.exit(0)
            }
        })
        layout = MigLayout("fill", "[grow 0, shrink 0][fill][fill]", "[][][fill, grow 3][fill, grow 1]")
        // Live device screenshot; a click is translated into a relative
        // (0..1, 0..1) coordinate and forwarded as a tap on the device.
        screenshot = ImagePanel()
        screenshot.preferredSize = Dimension(450, 800)
        screenshot.addMouseListener(object : MouseAdapter() {
            override fun mouseClicked(e: MouseEvent) {
                val x = e.x / screenshot.width.toDouble()
                val y = e.y / screenshot.height.toDouble()
                actionExecutor.submit {
                    deviceController.tap(Point(x, y))
                }
            }
        })
        add(screenshot, "cell 0 0 1 4")
        // Read-only view of the state machine's counters.
        counterView = JTextArea()
        counterView.lineWrap = true
        counterView.isEditable = false
        add(counterView, "cell 1 3 2 1")
        // ADB device picker; selecting an entry switches the active device.
        deviceList = JComboBox()
        deviceController.devices.forEach {
            deviceList.addItem(Device(it))
        }
        deviceList.addActionListener {
            val item = deviceList.selectedItem
            if (item is Device) {
                actionExecutor.submit {
                    deviceController.currentDevice = item.device
                }
            }
        }
        add(deviceList, "cell 1 0 2 1")
        startButton = JButton("Start")
        stopButton = JButton("stop")
        stopButton.isEnabled = false
        // Start: stop any running machine, let the user pick a state graph,
        // then build a new machine whose counter updates feed counterView.
        startButton.addActionListener {
            actionExecutor.submit {
                var runningMachine = machine
                if (runningMachine != null) {
                    runningMachine.stop()
                }
                val stateGraph = GraphSelector(this).select()
                this.machine = stateGraph?.let {
                    startLogging()
                    StateMachine(deviceController, it.initialState).also {
                        it.addListener { state ->
                            counterView.text = state.counters.map { entry -> "${entry.key}: ${entry.value}" }.joinToString("\n")
                        }
                    }
                }
                // Buttons reflect whether a machine is now running
                // (selection may have been cancelled -> machine == null).
                startButton.isEnabled = this.machine == null
                stopButton.isEnabled = this.machine != null
            }
        }
        add(startButton, "cell 1 1")
        // Stop: halt and discard the current machine, re-enable Start.
        stopButton.addActionListener {
            actionExecutor.submit {
                var runningMachine = machine
                if (runningMachine != null) {
                    runningMachine.stop()
                }
                this.machine = null
                startButton.isEnabled = true
                stopButton.isEnabled = false
            }
        }
        add(stopButton, "cell 2 1")
        // Scrolling log output (wrapped, so no horizontal scrollbar).
        logView = JTextArea()
        logView.lineWrap = true
        logView.isEditable = false
        logScroll = JScrollPane(logView)
        logScroll.horizontalScrollBarPolicy = JScrollPane.HORIZONTAL_SCROLLBAR_NEVER
        add(logScroll, "cell 1 2 2 1")
        pack()
        // Every captured screenshot updates the preview and, when a machine
        // is running, is queued for state detection.
        deviceController.addScreenshotListener {
            screenshot.image = it
            machine?.enqueue(it)
        }
        deviceController.collectScreenshots = true
        if (deviceController.devices.size > 0) {
            deviceList.selectedIndex = 0
        }
    }
    // (Re)attach a fresh appender on the root logger that writes into
    // logView, detaching and stopping any previously installed one.
    private fun startLogging() {
        val rootLogger = LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME) as Logger
        val runningAppender = logAppender
        if (runningAppender != null) {
            runningAppender.stop()
            rootLogger.detachAppender(runningAppender)
        }
        val appender = TextViewAppender(logView)
        rootLogger.addAppender(appender)
        appender.start()
        logAppender = appender
    }
    // Combo-box entry wrapping a JadbDevice; rendered by its serial number.
    class Device(val device: JadbDevice) {
        override fun toString() = device.serial
    }
}
| 32.011696 | 155 | 0.607234 |
4b841fb18595e15d819be19191d7c5af5e199372 | 149 | sql | SQL | src/test/resources/test_data/seed-approved-licences.sql | ministryofjustice/create-and-vary-a-licence-api | 95b546f6e62e6abe76ac342ea6bb3f2f76a144b0 | [
"MIT"
] | 1 | 2021-08-09T13:08:38.000Z | 2021-08-09T13:08:38.000Z | src/test/resources/test_data/seed-approved-licences.sql | ministryofjustice/create-and-vary-a-licence-api | 95b546f6e62e6abe76ac342ea6bb3f2f76a144b0 | [
"MIT"
] | 15 | 2021-07-27T12:46:38.000Z | 2022-02-24T07:30:59.000Z | src/test/resources/test_data/seed-approved-licences.sql | ministryofjustice/create-and-vary-a-licence-api | 95b546f6e62e6abe76ac342ea6bb3f2f76a144b0 | [
"MIT"
] | null | null | null | insert into licence (
id,
com_staff_id,
type_code,
status_code
) values
(1,1,'AP','APPROVED'),
(2,1,'AP','APPROVED'),
(3,1,'AP','APPROVED');
| 14.9 | 22 | 0.624161 |
26313df65297b72e49b40e9880f48d2ccb3cf734 | 1,572 | java | Java | src/ua/org/slovo/securesms/contacts/avatars/GeneratedContactPhoto.java | varkon/MilChat | c68b8046f8767ffc7aea48e3801188bfde2501e6 | [
"Apache-2.0"
] | 1 | 2018-04-15T13:30:15.000Z | 2018-04-15T13:30:15.000Z | src/ua/org/slovo/securesms/contacts/avatars/GeneratedContactPhoto.java | varkon/MilChat | c68b8046f8767ffc7aea48e3801188bfde2501e6 | [
"Apache-2.0"
] | null | null | null | src/ua/org/slovo/securesms/contacts/avatars/GeneratedContactPhoto.java | varkon/MilChat | c68b8046f8767ffc7aea48e3801188bfde2501e6 | [
"Apache-2.0"
] | null | null | null | package ua.org.slovo.securesms.contacts.avatars;
import android.content.Context;
import android.graphics.Color;
import android.graphics.drawable.Drawable;
import android.support.annotation.NonNull;
import android.support.v4.content.ContextCompat;
import com.amulyakhare.textdrawable.TextDrawable;
import ua.org.slovo.securesms.R;
/**
 * Fallback contact photo: a round, colored drawable showing the first
 * character of the contact's name (or '#' when no usable character exists).
 */
public class GeneratedContactPhoto implements ContactPhoto {

  private final String name;

  GeneratedContactPhoto(@NonNull String name) {
    this.name = name;
  }

  @Override
  public Drawable asDrawable(Context context, int color) {
    return asDrawable(context, color, false);
  }

  /**
   * Builds the round text drawable.
   *
   * @param color    background color (or text color when {@code inverted})
   * @param inverted swap text/background colors (white background)
   */
  @Override
  public Drawable asDrawable(Context context, int color, boolean inverted) {
    int targetSize = context.getResources().getDimensionPixelSize(R.dimen.contact_photo_target_size);

    return TextDrawable.builder()
                       .beginConfig()
                       .width(targetSize)
                       .height(targetSize)
                       .textColor(inverted ? color : Color.WHITE)
                       .endConfig()
                       .buildRound(getCharacter(name), inverted ? Color.WHITE : color);
  }

  // Returns the first displayable character of the name, stripping any
  // leading characters that are not letters, digits, punctuation or symbols.
  private String getCharacter(String name) {
    String cleanedName = name.replaceFirst("[^\\p{L}\\p{Nd}\\p{P}\\p{S}]+", "");
    if (cleanedName.isEmpty()) {
      return "#";
    } else {
      // Use code points rather than charAt(0): supplementary characters
      // (emoji, rare CJK) occupy two chars, and charAt(0) would return an
      // unpaired surrogate that renders as a broken glyph.
      return new String(Character.toChars(cleanedName.codePointAt(0)));
    }
  }

  @Override
  public Drawable asCallCard(Context context) {
    return ContextCompat.getDrawable(context, R.drawable.ic_contact_picture_large);
  }
}
| 29.111111 | 101 | 0.676209 |
53ae3d723c75d0781ac84df992e37a7cc926aeb3 | 3,186 | java | Java | DataMining/src/salp/Individual.java | HadiAwad/TestCasePrioritization | b00fe953c57087c896f24c15531685e0063ea96d | [
"Apache-2.0"
] | null | null | null | DataMining/src/salp/Individual.java | HadiAwad/TestCasePrioritization | b00fe953c57087c896f24c15531685e0063ea96d | [
"Apache-2.0"
] | null | null | null | DataMining/src/salp/Individual.java | HadiAwad/TestCasePrioritization | b00fe953c57087c896f24c15531685e0063ea96d | [
"Apache-2.0"
] | null | null | null | package salp;
import Catalano.Core.IntRange;
import utils.IObjectiveFunction;
import java.util.*;
/**
* Represents individual in the population.
* @author Diego Catalano
*/
public class Individual implements Comparable<Individual>, Cloneable {
private int[] location;
private double fitness;
public static int generateRandom(int min, int max){
return new Random().nextInt(max-min+1) +min;
}
public static int[] UniformRandom(List<IntRange> ranges){
Random rand = new Random();
int[] r = new int[ranges.size()];
Set<Integer> uniqueNumbers = new HashSet<>();
for (int i = 0; i < r.length; i++) {
IntRange range = ranges.get(i);
int generatedNumber = -1;
do{
generatedNumber = generateRandom(range.getMin(),range.getMax());
}while (uniqueNumbers.contains(generatedNumber));
r[i] = generatedNumber;
uniqueNumbers.add(generatedNumber);
}
return r;
}
public static List<Individual> CreatePopulation(int populationSize, List<IntRange> boundConstraints, IObjectiveFunction function){
List<Individual> population = new ArrayList<>(populationSize);
for (int i = 0; i < populationSize; i++) {
int[] location = UniformRandom(boundConstraints);
double fitness = function.Compute(location);
population.add(new Individual(location, fitness));
}
return population;
}
/**
* Get location in the space.
* @return Location.
*/
public int[] getLocation() {
return location;
}
/**
* Get location in the space.
* @param index Index.
* @return Value.
*/
public double getLocation(int index){
return location[index];
}
/**
* Set location in the space.
* @param location Location.
*/
public void setLocation(int[] location) {
this.location = location;
}
/**
* Set location in the space.
* @param index Index.
* @param location Location.
*/
public void setLocation(int index, int location){
this.location[index] = location;
}
/**
* Get fitness.
* @return Fitness.
*/
public double getFitness() {
return fitness;
}
/**
* Set fitness.
* @param fitness Fitness.
*/
public void setFitness(double fitness) {
this.fitness = fitness;
}
/**
* Initialize a new instance of the Individual class.
* @param location Location.
*/
public Individual(int[] location){
this(location, Double.NaN);
}
/**
* Initialize a new instance of the Individual class.
* @param location Location.
* @param fitness Fitness.
*/
public Individual(int[] location, double fitness) {
this.location = location;
this.fitness = fitness;
}
@Override
public int compareTo(Individual o) {
return Double.compare(fitness, o.getFitness());
}
public Individual getClone(){
return new Individual(Arrays.copyOf(location, location.length), fitness);
}
} | 24.697674 | 134 | 0.596359 |
28ef6fb11bb68bb25bf2b834ce2aea31f0e01743 | 3,426 | sql | SQL | imgsmlr--1.0.sql | BlosmLLC/imgsmlr | c484a15cc4ad254b0cc6cd7dc4820ad6472fcfac | [
"PostgreSQL"
] | 207 | 2015-08-08T07:09:47.000Z | 2022-03-11T07:59:36.000Z | imgsmlr--1.0.sql | RekGRpth/imgsmlr | c484a15cc4ad254b0cc6cd7dc4820ad6472fcfac | [
"PostgreSQL"
] | 12 | 2017-02-23T07:38:09.000Z | 2021-12-09T02:06:00.000Z | imgsmlr--1.0.sql | RekGRpth/imgsmlr | c484a15cc4ad254b0cc6cd7dc4820ad6472fcfac | [
"PostgreSQL"
] | 40 | 2015-11-30T08:15:02.000Z | 2021-12-17T04:39:20.000Z | /* imgsmlr/imgsmlr--1.0.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION imgsmlr" to load this file. \quit
--
-- PostgreSQL code for IMGSMLR.
--
-- I/O functions and definition of the variable-length "pattern" type
-- (image represented as a set of wavelet-like patterns).
CREATE FUNCTION pattern_in(cstring)
	RETURNS pattern
	AS 'MODULE_PATHNAME'
	LANGUAGE C IMMUTABLE STRICT;
CREATE FUNCTION pattern_out(pattern)
	RETURNS cstring
	AS 'MODULE_PATHNAME'
	LANGUAGE C IMMUTABLE STRICT;
CREATE TYPE pattern (
	INTERNALLENGTH = -1,
	INPUT = pattern_in,
	OUTPUT = pattern_out,
	STORAGE = extended
);
-- I/O functions and definition of the fixed-size (64-byte) "signature"
-- type, a compact derivative of pattern used for indexing.
CREATE FUNCTION signature_in(cstring)
	RETURNS signature
	AS 'MODULE_PATHNAME'
	LANGUAGE C IMMUTABLE STRICT;
CREATE FUNCTION signature_out(signature)
	RETURNS cstring
	AS 'MODULE_PATHNAME'
	LANGUAGE C IMMUTABLE STRICT;
CREATE TYPE signature (
	INTERNALLENGTH = 64,
	INPUT = signature_in,
	OUTPUT = signature_out,
	ALIGNMENT = float
);
-- Converters: raw image bytes -> pattern, and pattern -> signature.
CREATE FUNCTION jpeg2pattern(bytea)
	RETURNS pattern
	AS 'MODULE_PATHNAME'
	LANGUAGE C IMMUTABLE STRICT;
CREATE FUNCTION png2pattern(bytea)
	RETURNS pattern
	AS 'MODULE_PATHNAME'
	LANGUAGE C IMMUTABLE STRICT;
CREATE FUNCTION gif2pattern(bytea)
	RETURNS pattern
	AS 'MODULE_PATHNAME'
	LANGUAGE C IMMUTABLE STRICT;
CREATE FUNCTION pattern2signature(pattern)
	RETURNS signature
	AS 'MODULE_PATHNAME'
	LANGUAGE C IMMUTABLE STRICT;
-- Distance functions and the <-> operators built on them.
CREATE FUNCTION pattern_distance(pattern, pattern)
	RETURNS float4
	AS 'MODULE_PATHNAME'
	LANGUAGE C IMMUTABLE STRICT;
CREATE FUNCTION signature_distance(signature, signature)
	RETURNS float4
	AS 'MODULE_PATHNAME'
	LANGUAGE C IMMUTABLE STRICT;
CREATE OPERATOR <-> (
	LEFTARG = pattern,
	RIGHTARG = pattern,
	PROCEDURE = pattern_distance
);
CREATE OPERATOR <-> (
	LEFTARG = signature,
	RIGHTARG = signature,
	PROCEDURE = signature_distance
);
CREATE FUNCTION shuffle_pattern(pattern)
	RETURNS pattern
	AS 'MODULE_PATHNAME'
	LANGUAGE C IMMUTABLE STRICT;
-- GiST support functions for indexing signatures.
CREATE FUNCTION signature_consistent(internal,signature,int,oid,internal)
	RETURNS bool
	AS 'MODULE_PATHNAME'
	LANGUAGE C IMMUTABLE STRICT;
CREATE FUNCTION signature_compress(internal)
	RETURNS internal
	AS 'MODULE_PATHNAME'
	LANGUAGE C IMMUTABLE STRICT;
CREATE FUNCTION signature_decompress(internal)
	RETURNS internal
	AS 'MODULE_PATHNAME'
	LANGUAGE C IMMUTABLE STRICT;
CREATE FUNCTION signature_penalty(internal,internal,internal)
	RETURNS internal
	AS 'MODULE_PATHNAME'
	LANGUAGE C IMMUTABLE STRICT;
CREATE FUNCTION signature_picksplit(internal, internal)
	RETURNS internal
	AS 'MODULE_PATHNAME'
	LANGUAGE C IMMUTABLE STRICT;
CREATE FUNCTION signature_union(internal, internal)
	RETURNS bytea
	AS 'MODULE_PATHNAME'
	LANGUAGE C IMMUTABLE STRICT;
CREATE FUNCTION signature_same(bytea, bytea, internal)
	RETURNS internal
	AS 'MODULE_PATHNAME'
	LANGUAGE C IMMUTABLE STRICT;
CREATE FUNCTION signature_gist_distance(internal, text, int, oid)
	RETURNS float8
	AS 'MODULE_PATHNAME'
	LANGUAGE C IMMUTABLE STRICT;
-- Default GiST operator class: enables ORDER BY sig <-> query (KNN search).
CREATE OPERATOR CLASS gist_signature_ops
DEFAULT FOR TYPE signature USING gist AS
	OPERATOR	1	<-> FOR ORDER BY pg_catalog.float_ops,
	FUNCTION	1	signature_consistent (internal, signature, int, oid, internal),
	FUNCTION	2	signature_union (internal, internal),
	FUNCTION	3	signature_compress (internal),
	FUNCTION	4	signature_decompress (internal),
	FUNCTION	5	signature_penalty (internal, internal, internal),
	FUNCTION	6	signature_picksplit (internal, internal),
	FUNCTION	7	signature_same (bytea, bytea, internal),
	FUNCTION	8	signature_gist_distance (internal, text, int, oid),
	STORAGE		bytea;
| 23.958042 | 75 | 0.807647 |
fd84a9e96d6b74b816ea2fc8dcd41e5011a31c8a | 8,662 | c | C | iothub_client/src/iothubtransportmqtt_websockets.c | sinipelto/azure-iot-sdk-c | 3854b643cac5943f4b2950a435369460c3371285 | [
"MIT"
] | 1 | 2019-09-25T19:46:07.000Z | 2019-09-25T19:46:07.000Z | iothub_client/src/iothubtransportmqtt_websockets.c | sinipelto/azure-iot-sdk-c | 3854b643cac5943f4b2950a435369460c3371285 | [
"MIT"
] | 2 | 2021-07-22T08:56:51.000Z | 2021-07-30T19:07:30.000Z | iothub_client/src/iothubtransportmqtt_websockets.c | sinipelto/azure-iot-sdk-c | 3854b643cac5943f4b2950a435369460c3371285 | [
"MIT"
] | null | null | null | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
#include <stdlib.h>
#include "azure_c_shared_utility/xio.h"
#include "azure_c_shared_utility/wsio.h"
#include "azure_c_shared_utility/tlsio.h"
#include "azure_c_shared_utility/platform.h"
#include "azure_c_shared_utility/http_proxy_io.h"
#include "iothubtransportmqtt_websockets.h"
#include "internal/iothubtransport_mqtt_common.h"
/* Builds the layered IO stack for MQTT over WebSockets:
 *   wsio  ->  tlsio  ->  (optional) http_proxy_io
 * Returns a wsio XIO handle, or NULL if the wsio interface is unavailable.
 * The config structs are stack locals whose addresses are passed into
 * xio_create; presumably xio_create copies them — TODO confirm. */
static XIO_HANDLE getWebSocketsIOTransport(const char* fully_qualified_name, const MQTT_TRANSPORT_PROXY_OPTIONS* mqtt_transport_proxy_options)
{
    XIO_HANDLE result;
    const IO_INTERFACE_DESCRIPTION* io_interface_description = wsio_get_interface_description();
    TLSIO_CONFIG tls_io_config;
    HTTP_PROXY_IO_CONFIG http_proxy_io_config;

    if (io_interface_description == NULL)
    {
        LogError("Failure constructing the provider interface");
        result = NULL;
    }
    else
    {
        /* WebSocket endpoint: wss://<hub>/$iothub/websocket, MQTT subprotocol. */
        WSIO_CONFIG ws_io_config;
        ws_io_config.hostname = fully_qualified_name;
        ws_io_config.port = 443;
        ws_io_config.protocol = "MQTT";
        ws_io_config.resource_name = "/$iothub/websocket";
        ws_io_config.underlying_io_interface = platform_get_default_tlsio();
        if (ws_io_config.underlying_io_interface == NULL)
        {
            /* No TLS available on this platform: wsio runs with no underlying IO. */
            ws_io_config.underlying_io_parameters = NULL;
        }
        else
        {
            ws_io_config.underlying_io_parameters = &tls_io_config;
            tls_io_config.hostname = fully_qualified_name;
            tls_io_config.port = 443;
            tls_io_config.invoke_on_send_complete_callback_for_fragments = false;
            if (mqtt_transport_proxy_options != NULL)
            {
                /* Tunnel TLS through an HTTP CONNECT proxy. */
                tls_io_config.underlying_io_interface = http_proxy_io_get_interface_description();
                if (tls_io_config.underlying_io_interface == NULL)
                {
                    tls_io_config.underlying_io_parameters = NULL;
                }
                else
                {
                    tls_io_config.underlying_io_parameters = &http_proxy_io_config;
                    http_proxy_io_config.proxy_hostname = mqtt_transport_proxy_options->host_address;
                    http_proxy_io_config.proxy_port = mqtt_transport_proxy_options->port;
                    http_proxy_io_config.username = mqtt_transport_proxy_options->username;
                    http_proxy_io_config.password = mqtt_transport_proxy_options->password;
                    http_proxy_io_config.hostname = fully_qualified_name;
                    http_proxy_io_config.port = 443;
                }
            }
            else
            {
                /* No proxy: tlsio uses its platform-default socket IO. */
                tls_io_config.underlying_io_interface = NULL;
                tls_io_config.underlying_io_parameters = NULL;
            }
        }
        result = xio_create(io_interface_description, &ws_io_config);
    }
    return result;
}
/* Thin adapter layer: each entry point below forwards to the shared
 * implementation in iothubtransport_mqtt_common, parameterized with the
 * WebSocket transport factory above. */
static TRANSPORT_LL_HANDLE IoTHubTransportMqtt_WS_Create(const IOTHUBTRANSPORT_CONFIG* config, TRANSPORT_CALLBACKS_INFO* cb_info, void* ctx)
{
    return IoTHubTransport_MQTT_Common_Create(config, getWebSocketsIOTransport, cb_info, ctx);
}
static void IoTHubTransportMqtt_WS_Destroy(TRANSPORT_LL_HANDLE handle)
{
    IoTHubTransport_MQTT_Common_Destroy(handle);
}
static int IoTHubTransportMqtt_WS_Subscribe(IOTHUB_DEVICE_HANDLE handle)
{
    return IoTHubTransport_MQTT_Common_Subscribe(handle);
}
static void IoTHubTransportMqtt_WS_Unsubscribe(IOTHUB_DEVICE_HANDLE handle)
{
    IoTHubTransport_MQTT_Common_Unsubscribe(handle);
}
static int IoTHubTransportMqtt_WS_Subscribe_DeviceMethod(IOTHUB_DEVICE_HANDLE handle)
{
    return IoTHubTransport_MQTT_Common_Subscribe_DeviceMethod(handle);
}
static void IoTHubTransportMqtt_WS_Unsubscribe_DeviceMethod(IOTHUB_DEVICE_HANDLE handle)
{
    IoTHubTransport_MQTT_Common_Unsubscribe_DeviceMethod(handle);
}
static int IoTHubTransportMqtt_WS_DeviceMethod_Response(IOTHUB_DEVICE_HANDLE handle, METHOD_HANDLE methodId, const unsigned char* response, size_t response_size, int status_response)
{
    return IoTHubTransport_MQTT_Common_DeviceMethod_Response(handle, methodId, response, response_size, status_response);
}
static int IoTHubTransportMqtt_WS_Subscribe_DeviceTwin(IOTHUB_DEVICE_HANDLE handle)
{
    return IoTHubTransport_MQTT_Common_Subscribe_DeviceTwin(handle);
}
static void IoTHubTransportMqtt_WS_Unsubscribe_DeviceTwin(IOTHUB_DEVICE_HANDLE handle)
{
    IoTHubTransport_MQTT_Common_Unsubscribe_DeviceTwin(handle);
}
static IOTHUB_CLIENT_RESULT IoTHubTransportMqtt_WS_GetTwinAsync(IOTHUB_DEVICE_HANDLE handle, IOTHUB_CLIENT_DEVICE_TWIN_CALLBACK completionCallback, void* callbackContext)
{
    return IoTHubTransport_MQTT_Common_GetTwinAsync(handle, completionCallback, callbackContext);
}
static IOTHUB_PROCESS_ITEM_RESULT IoTHubTransportMqtt_WS_ProcessItem(TRANSPORT_LL_HANDLE handle, IOTHUB_IDENTITY_TYPE item_type, IOTHUB_IDENTITY_INFO* iothub_item)
{
    return IoTHubTransport_MQTT_Common_ProcessItem(handle, item_type, iothub_item);
}
static void IoTHubTransportMqtt_WS_DoWork(TRANSPORT_LL_HANDLE handle)
{
    IoTHubTransport_MQTT_Common_DoWork(handle);
}
static IOTHUB_CLIENT_RESULT IoTHubTransportMqtt_WS_GetSendStatus(IOTHUB_DEVICE_HANDLE handle, IOTHUB_CLIENT_STATUS *iotHubClientStatus)
{
    return IoTHubTransport_MQTT_Common_GetSendStatus(handle, iotHubClientStatus);
}
static IOTHUB_CLIENT_RESULT IoTHubTransportMqtt_WS_SetOption(TRANSPORT_LL_HANDLE handle, const char* option, const void* value)
{
    return IoTHubTransport_MQTT_Common_SetOption(handle, option, value);
}
static IOTHUB_DEVICE_HANDLE IoTHubTransportMqtt_WS_Register(TRANSPORT_LL_HANDLE handle, const IOTHUB_DEVICE_CONFIG* device, PDLIST_ENTRY waitingToSend)
{
    return IoTHubTransport_MQTT_Common_Register(handle, device, waitingToSend);
}
static void IoTHubTransportMqtt_WS_Unregister(IOTHUB_DEVICE_HANDLE deviceHandle)
{
    IoTHubTransport_MQTT_Common_Unregister(deviceHandle);
}
static STRING_HANDLE IoTHubTransportMqtt_WS_GetHostname(TRANSPORT_LL_HANDLE handle)
{
    return IoTHubTransport_MQTT_Common_GetHostname(handle);
}
static int IoTHubTransportMqtt_WS_SetRetryPolicy(TRANSPORT_LL_HANDLE handle, IOTHUB_CLIENT_RETRY_POLICY retryPolicy, size_t retryTimeoutLimitinSeconds)
{
    return IoTHubTransport_MQTT_Common_SetRetryPolicy(handle, retryPolicy, retryTimeoutLimitinSeconds);
}
static IOTHUB_CLIENT_RESULT IoTHubTransportMqtt_WS_SendMessageDisposition(IOTHUB_DEVICE_HANDLE handle, IOTHUB_MESSAGE_HANDLE messageHandle, IOTHUBMESSAGE_DISPOSITION_RESULT disposition)
{
    return IoTHubTransport_MQTT_Common_SendMessageDisposition(handle, messageHandle, disposition);
}
/* Input-queue (message routing) subscription is not implemented for this
 * transport; the stubs below log and report failure. */
static int IoTHubTransportMqtt_WS_Subscribe_InputQueue(IOTHUB_DEVICE_HANDLE handle)
{
    (void)handle;
    LogError("IoTHubTransportMqtt_WS_Subscribe_InputQueue not implemented\n");
    return MU_FAILURE;
}
static void IoTHubTransportMqtt_WS_Unsubscribe_InputQueue(IOTHUB_DEVICE_HANDLE handle)
{
    LogError("IoTHubTransportMqtt_WS_Unsubscribe_InputQueue not implemented\n");
    (void)handle;
}
static int IotHubTransportMqtt_WS_SetCallbackContext(TRANSPORT_LL_HANDLE handle, void* ctx)
{
    return IoTHubTransport_MQTT_SetCallbackContext(handle, ctx);
}
static int IotHubTransportMqtt_WS_GetSupportedPlatformInfo(TRANSPORT_LL_HANDLE handle, PLATFORM_INFO_OPTION* info)
{
    return IoTHubTransport_MQTT_GetSupportedPlatformInfo(handle, info);
}
/* Transport vtable handed to the IoT Hub client core. The initializer is
 * positional: entries MUST stay in the exact member order declared by
 * TRANSPORT_PROVIDER. */
static TRANSPORT_PROVIDER thisTransportProvider_WebSocketsOverTls = {
    IoTHubTransportMqtt_WS_SendMessageDisposition,
    IoTHubTransportMqtt_WS_Subscribe_DeviceMethod,
    IoTHubTransportMqtt_WS_Unsubscribe_DeviceMethod,
    IoTHubTransportMqtt_WS_DeviceMethod_Response,
    IoTHubTransportMqtt_WS_Subscribe_DeviceTwin,
    IoTHubTransportMqtt_WS_Unsubscribe_DeviceTwin,
    IoTHubTransportMqtt_WS_ProcessItem,
    IoTHubTransportMqtt_WS_GetHostname,
    IoTHubTransportMqtt_WS_SetOption,
    IoTHubTransportMqtt_WS_Create,
    IoTHubTransportMqtt_WS_Destroy,
    IoTHubTransportMqtt_WS_Register,
    IoTHubTransportMqtt_WS_Unregister,
    IoTHubTransportMqtt_WS_Subscribe,
    IoTHubTransportMqtt_WS_Unsubscribe,
    IoTHubTransportMqtt_WS_DoWork,
    IoTHubTransportMqtt_WS_SetRetryPolicy,
    IoTHubTransportMqtt_WS_GetSendStatus,
    IoTHubTransportMqtt_WS_Subscribe_InputQueue,
    IoTHubTransportMqtt_WS_Unsubscribe_InputQueue,
    IotHubTransportMqtt_WS_SetCallbackContext,
    IoTHubTransportMqtt_WS_GetTwinAsync,
    IotHubTransportMqtt_WS_GetSupportedPlatformInfo
};

/* Public factory: returns the MQTT-over-WebSockets transport provider. */
const TRANSPORT_PROVIDER* MQTT_WebSocket_Protocol(void)
{
    return &thisTransportProvider_WebSocketsOverTls;
}
| 37.991228 | 185 | 0.796814 |
f5488435d64262c7dd9f44906615d66334d279d8 | 634 | css | CSS | data/usercss/83072.user.css | 33kk/uso-archive | 2c4962d1d507ff0eaec6dcca555efc531b37a9b4 | [
"MIT"
] | 118 | 2020-08-28T19:59:28.000Z | 2022-03-26T16:28:40.000Z | data/usercss/83072.user.css | 33kk/uso-archive | 2c4962d1d507ff0eaec6dcca555efc531b37a9b4 | [
"MIT"
] | 38 | 2020-09-02T01:08:45.000Z | 2022-01-23T02:47:24.000Z | data/usercss/83072.user.css | 33kk/uso-archive | 2c4962d1d507ff0eaec6dcca555efc531b37a9b4 | [
"MIT"
] | 21 | 2020-08-19T01:12:43.000Z | 2022-03-15T21:55:17.000Z | /* ==UserStyle==
@name Hoechboners Likes Button
@namespace USO Archive
@author baxtersaurus
@description `A likes button for the discriminating Tyler Hoechlin fan.`
@version 20130214.9.6
@license NO-REDISTRIBUTION
@preprocessor uso
==/UserStyle== */
.controls_section li .likes:before {
position: absolute !important;
height: 30px !important;
line-height: 28px !important;
width: 100px !important;
font: inherit !important;
}
a.likes div.hide_overflow{
visibility: hidden !important;
}
.controls_section li .likes:before {
content: "Hoechboners" !important;
} | 25.36 | 75 | 0.682965 |
b2df4c7100c8722718818621cd9edd2daa3814a9 | 20,300 | rs | Rust | src/join.rs | Darksonn/pasts | a4a86f6959f8323da17faecdd6fbd377cd04a168 | [
"Zlib",
"Apache-2.0"
] | 1 | 2021-06-19T00:53:04.000Z | 2021-06-19T00:53:04.000Z | src/join.rs | Darksonn/pasts | a4a86f6959f8323da17faecdd6fbd377cd04a168 | [
"Zlib",
"Apache-2.0"
] | null | null | null | src/join.rs | Darksonn/pasts | a4a86f6959f8323da17faecdd6fbd377cd04a168 | [
"Zlib",
"Apache-2.0"
] | null | null | null | // Pasts
//
// Copyright (c) 2019-2020 Jeron Aldaron Lau
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// https://apache.org/licenses/LICENSE-2.0>, or the Zlib License, <LICENSE-ZLIB
// or http://opensource.org/licenses/Zlib>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use core::future::Future;
use core::pin::Pin;
use core::task::{Context, Poll};
use std::mem::MaybeUninit;
/// Trait for joining a tuple of futures into a single future.
///
/// The type parameter `Z` is the concrete future type returned by
/// [`join()`](Join::join); each tuple arity has its own implementation.
#[allow(single_use_lifetimes)]
pub trait Join<'a, Z> {
    /// Poll multiple futures concurrently, and return a tuple of returned
    /// values from each future.
    ///
    /// Futures that are ready first will be executed first. This makes
    /// `(a, b).join().await` faster than the alternative `(a.await, b.await)`.
    ///
    /// ```rust
    /// #![forbid(unsafe_code)]
    ///
    /// use pasts::prelude::*;
    ///
    /// async fn one() -> i32 {
    ///     42
    /// }
    ///
    /// async fn two() -> char {
    ///     'a'
    /// }
    ///
    /// async fn example() {
    ///     // Joined await on the two futures.
    ///     let ret = (one(), two()).join().await;
    ///     assert_eq!(ret, (42, 'a'));
    /// }
    ///
    /// pasts::ThreadInterrupt::block_on(example());
    /// ```
    fn join(&'a mut self) -> Z;
}
// unsafe: For pinning projections, MaybeUninit return tuple
#[allow(
unsafe_code,
missing_debug_implementations,
missing_copy_implementations
)]
mod tuple {
use super::*;
    // 0-Tuple
    // Degenerate case: joining no futures completes immediately with `()`.
    pub struct Join0();
    impl Future for Join0 {
        type Output = ();
        fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
            Poll::Ready(())
        }
    }
    impl<'a> Join<'a, Join0> for () {
        fn join(&'a mut self) -> Join0 {
            Join0()
        }
    }
    // 1-Tuple
    // Wraps a single future; the output is re-wrapped in a 1-tuple.
    pub struct Join1<'b, T, A: Future<Output = T>>(&'b mut (A,));
    impl<T, A: Future<Output = T>> Future for Join1<'_, T, A> {
        type Output = (T,);
        fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<(T,)> {
            // unsafe: structural pin projection into the borrowed tuple;
            // the future is never moved out of it.
            match unsafe { self.map_unchecked_mut(|s| &mut s.0 .0) }.poll(cx) {
                Poll::Pending => Poll::Pending,
                Poll::Ready(out) => Poll::Ready((out,)),
            }
        }
    }
    impl<'a, T, A: 'a + Future<Output = T>> Join<'a, Join1<'a, T, A>> for (A,) {
        fn join(&'a mut self) -> Join1<'a, T, A> {
            Join1(self)
        }
    }
// 2-Tuple
pub struct Join2<'b, T, A: Future<Output = T>, U, B: Future<Output = U>>(
&'b mut (A, B),
(bool, bool),
MaybeUninit<(T, U)>,
);
impl<T, A, U, B> Future for Join2<'_, T, A, U, B>
where
A: Future<Output = T>,
B: Future<Output = U>,
{
type Output = (T, U);
fn poll(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<(T, U)> {
let mut complete = true;
if self.1 .0 {
let f =
unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.0 .0) };
if let Poll::Ready(out) = f.poll(cx) {
unsafe {
(*self.as_mut().get_unchecked_mut().2.as_mut_ptr()).0 =
out
}
unsafe { self.as_mut().get_unchecked_mut().1 .0 = false }
} else {
complete = false;
}
}
if self.1 .1 {
let f =
unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.0 .1) };
if let Poll::Ready(out) = f.poll(cx) {
unsafe {
(*self.as_mut().get_unchecked_mut().2.as_mut_ptr()).1 =
out
}
unsafe { self.as_mut().get_unchecked_mut().1 .1 = false }
} else {
complete = false;
}
}
if complete {
Poll::Ready(unsafe { std::ptr::read(self.2.as_ptr()) })
} else {
Poll::Pending
}
}
}
impl<'a, T, A, U, B> Join<'a, Join2<'a, T, A, U, B>> for (A, B)
where
A: 'a + Future<Output = T>,
B: 'a + Future<Output = U>,
{
fn join(&'a mut self) -> Join2<'a, T, A, U, B> {
Join2(self, (true, true), MaybeUninit::uninit())
}
}
    // 3-Tuple
    // Same structure as Join2 scaled to three slots: .1 holds the
    // per-future "still pending" flags, .2 the partially-written output.
    // NOTE(review): the `(*...as_mut_ptr()).N = out` assignments below drop
    // the previous (uninitialized) field value, which is undefined behavior
    // when the output type implements Drop — std::ptr::write should be used
    // instead. Also, outputs already written are leaked if this future is
    // dropped before all three complete (MaybeUninit never drops contents).
    pub struct Join3<
        'b,
        T,
        A: Future<Output = T>,
        U,
        B: Future<Output = U>,
        V,
        C: Future<Output = V>,
    >(
        &'b mut (A, B, C),
        (bool, bool, bool),
        MaybeUninit<(T, U, V)>,
    );
    impl<T, A, U, B, V, C> Future for Join3<'_, T, A, U, B, V, C>
    where
        A: Future<Output = T>,
        B: Future<Output = U>,
        C: Future<Output = V>,
    {
        type Output = (T, U, V);
        fn poll(
            mut self: Pin<&mut Self>,
            cx: &mut Context<'_>,
        ) -> Poll<(T, U, V)> {
            let mut complete = true;
            if self.1 .0 {
                let f =
                    unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.0 .0) };
                if let Poll::Ready(out) = f.poll(cx) {
                    unsafe {
                        (*self.as_mut().get_unchecked_mut().2.as_mut_ptr()).0 =
                            out
                    }
                    unsafe { self.as_mut().get_unchecked_mut().1 .0 = false }
                } else {
                    complete = false;
                }
            }
            if self.1 .1 {
                let f =
                    unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.0 .1) };
                if let Poll::Ready(out) = f.poll(cx) {
                    unsafe {
                        (*self.as_mut().get_unchecked_mut().2.as_mut_ptr()).1 =
                            out
                    }
                    unsafe { self.as_mut().get_unchecked_mut().1 .1 = false }
                } else {
                    complete = false;
                }
            }
            if self.1 .2 {
                let f =
                    unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.0 .2) };
                if let Poll::Ready(out) = f.poll(cx) {
                    unsafe {
                        (*self.as_mut().get_unchecked_mut().2.as_mut_ptr()).2 =
                            out
                    }
                    unsafe { self.as_mut().get_unchecked_mut().1 .2 = false }
                } else {
                    complete = false;
                }
            }
            if complete {
                // unsafe: all flags are false, so all fields are initialized.
                Poll::Ready(unsafe { std::ptr::read(self.2.as_ptr()) })
            } else {
                Poll::Pending
            }
        }
    }
    impl<'a, T, A, U, B, V, C> Join<'a, Join3<'a, T, A, U, B, V, C>> for (A, B, C)
    where
        A: 'a + Future<Output = T>,
        B: 'a + Future<Output = U>,
        C: 'a + Future<Output = V>,
    {
        fn join(&'a mut self) -> Join3<'a, T, A, U, B, V, C> {
            Join3(self, (true, true, true), MaybeUninit::uninit())
        }
    }
    // 4-Tuple
    // Same structure as the smaller joins scaled to four slots.
    // NOTE(review): as with the other arities, the raw-pointer assignments
    // below drop the previous uninitialized field value (UB for Drop types;
    // std::ptr::write should be used), and already-written outputs leak if
    // the join future is dropped before completion.
    pub struct Join4<
        'b,
        T,
        A: Future<Output = T>,
        U,
        B: Future<Output = U>,
        V,
        C: Future<Output = V>,
        W,
        D: Future<Output = W>,
    >(
        &'b mut (A, B, C, D),
        (bool, bool, bool, bool),
        MaybeUninit<(T, U, V, W)>,
    );
    impl<T, A, U, B, V, C, W, D> Future for Join4<'_, T, A, U, B, V, C, W, D>
    where
        A: Future<Output = T>,
        B: Future<Output = U>,
        C: Future<Output = V>,
        D: Future<Output = W>,
    {
        type Output = (T, U, V, W);
        fn poll(
            mut self: Pin<&mut Self>,
            cx: &mut Context<'_>,
        ) -> Poll<(T, U, V, W)> {
            let mut complete = true;
            if self.1 .0 {
                let f =
                    unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.0 .0) };
                if let Poll::Ready(out) = f.poll(cx) {
                    unsafe {
                        (*self.as_mut().get_unchecked_mut().2.as_mut_ptr()).0 =
                            out
                    }
                    unsafe { self.as_mut().get_unchecked_mut().1 .0 = false }
                } else {
                    complete = false;
                }
            }
            if self.1 .1 {
                let f =
                    unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.0 .1) };
                if let Poll::Ready(out) = f.poll(cx) {
                    unsafe {
                        (*self.as_mut().get_unchecked_mut().2.as_mut_ptr()).1 =
                            out
                    }
                    unsafe { self.as_mut().get_unchecked_mut().1 .1 = false }
                } else {
                    complete = false;
                }
            }
            if self.1 .2 {
                let f =
                    unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.0 .2) };
                if let Poll::Ready(out) = f.poll(cx) {
                    unsafe {
                        (*self.as_mut().get_unchecked_mut().2.as_mut_ptr()).2 =
                            out
                    }
                    unsafe { self.as_mut().get_unchecked_mut().1 .2 = false }
                } else {
                    complete = false;
                }
            }
            if self.1 .3 {
                let f =
                    unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.0 .3) };
                if let Poll::Ready(out) = f.poll(cx) {
                    unsafe {
                        (*self.as_mut().get_unchecked_mut().2.as_mut_ptr()).3 =
                            out
                    }
                    unsafe { self.as_mut().get_unchecked_mut().1 .3 = false }
                } else {
                    complete = false;
                }
            }
            if complete {
                // unsafe: all flags are false, so all fields are initialized.
                Poll::Ready(unsafe { std::ptr::read(self.2.as_ptr()) })
            } else {
                Poll::Pending
            }
        }
    }
    impl<'a, T, A, U, B, V, C, W, D> Join<'a, Join4<'a, T, A, U, B, V, C, W, D>>
        for (A, B, C, D)
    where
        A: 'a + Future<Output = T>,
        B: 'a + Future<Output = U>,
        C: 'a + Future<Output = V>,
        D: 'a + Future<Output = W>,
    {
        fn join(&'a mut self) -> Join4<'a, T, A, U, B, V, C, W, D> {
            Join4(self, (true, true, true, true), MaybeUninit::uninit())
        }
    }
// 5-Tuple
pub struct Join5<
'b,
T,
A: Future<Output = T>,
U,
B: Future<Output = U>,
V,
C: Future<Output = V>,
W,
D: Future<Output = W>,
X,
E: Future<Output = X>,
>(
&'b mut (A, B, C, D, E),
(bool, bool, bool, bool, bool),
MaybeUninit<(T, U, V, W, X)>,
);
impl<T, A, U, B, V, C, W, D, X, E> Future
for Join5<'_, T, A, U, B, V, C, W, D, X, E>
where
A: Future<Output = T>,
B: Future<Output = U>,
C: Future<Output = V>,
D: Future<Output = W>,
E: Future<Output = X>,
{
type Output = (T, U, V, W, X);
fn poll(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<(T, U, V, W, X)> {
let mut complete = true;
if self.1 .0 {
let f =
unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.0 .0) };
if let Poll::Ready(out) = f.poll(cx) {
unsafe {
(*self.as_mut().get_unchecked_mut().2.as_mut_ptr()).0 =
out
}
unsafe { self.as_mut().get_unchecked_mut().1 .0 = false }
} else {
complete = false;
}
}
if self.1 .1 {
let f =
unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.0 .1) };
if let Poll::Ready(out) = f.poll(cx) {
unsafe {
(*self.as_mut().get_unchecked_mut().2.as_mut_ptr()).1 =
out
}
unsafe { self.as_mut().get_unchecked_mut().1 .1 = false }
} else {
complete = false;
}
}
if self.1 .2 {
let f =
unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.0 .2) };
if let Poll::Ready(out) = f.poll(cx) {
unsafe {
(*self.as_mut().get_unchecked_mut().2.as_mut_ptr()).2 =
out
}
unsafe { self.as_mut().get_unchecked_mut().1 .2 = false }
} else {
complete = false;
}
}
if self.1 .3 {
let f =
unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.0 .3) };
if let Poll::Ready(out) = f.poll(cx) {
unsafe {
(*self.as_mut().get_unchecked_mut().2.as_mut_ptr()).3 =
out
}
unsafe { self.as_mut().get_unchecked_mut().1 .3 = false }
} else {
complete = false;
}
}
if self.1 .4 {
let f =
unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.0 .4) };
if let Poll::Ready(out) = f.poll(cx) {
unsafe {
(*self.as_mut().get_unchecked_mut().2.as_mut_ptr()).4 =
out
}
unsafe { self.as_mut().get_unchecked_mut().1 .4 = false }
} else {
complete = false;
}
}
if complete {
Poll::Ready(unsafe { std::ptr::read(self.2.as_ptr()) })
} else {
Poll::Pending
}
}
}
impl<'a, T, A, U, B, V, C, W, D, X, E>
Join<'a, Join5<'a, T, A, U, B, V, C, W, D, X, E>> for (A, B, C, D, E)
where
A: 'a + Future<Output = T>,
B: 'a + Future<Output = U>,
C: 'a + Future<Output = V>,
D: 'a + Future<Output = W>,
E: 'a + Future<Output = X>,
{
fn join(&'a mut self) -> Join5<'a, T, A, U, B, V, C, W, D, X, E> {
Join5(self, (true, true, true, true, true), MaybeUninit::uninit())
}
}
// 6-Tuple
pub struct Join6<
'b,
T,
A: Future<Output = T>,
U,
B: Future<Output = U>,
V,
C: Future<Output = V>,
W,
D: Future<Output = W>,
X,
E: Future<Output = X>,
Y,
F: Future<Output = Y>,
>(
&'b mut (A, B, C, D, E, F),
(bool, bool, bool, bool, bool, bool),
MaybeUninit<(T, U, V, W, X, Y)>,
);
impl<T, A, U, B, V, C, W, D, X, E, Y, F> Future
for Join6<'_, T, A, U, B, V, C, W, D, X, E, Y, F>
where
A: Future<Output = T>,
B: Future<Output = U>,
C: Future<Output = V>,
D: Future<Output = W>,
E: Future<Output = X>,
F: Future<Output = Y>,
{
type Output = (T, U, V, W, X, Y);
fn poll(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<(T, U, V, W, X, Y)> {
let mut complete = true;
if self.1 .0 {
let f =
unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.0 .0) };
if let Poll::Ready(out) = f.poll(cx) {
unsafe {
(*self.as_mut().get_unchecked_mut().2.as_mut_ptr()).0 =
out
}
unsafe { self.as_mut().get_unchecked_mut().1 .0 = false }
} else {
complete = false;
}
}
if self.1 .1 {
let f =
unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.0 .1) };
if let Poll::Ready(out) = f.poll(cx) {
unsafe {
(*self.as_mut().get_unchecked_mut().2.as_mut_ptr()).1 =
out
}
unsafe { self.as_mut().get_unchecked_mut().1 .1 = false }
} else {
complete = false;
}
}
if self.1 .2 {
let f =
unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.0 .2) };
if let Poll::Ready(out) = f.poll(cx) {
unsafe {
(*self.as_mut().get_unchecked_mut().2.as_mut_ptr()).2 =
out
}
unsafe { self.as_mut().get_unchecked_mut().1 .2 = false }
} else {
complete = false;
}
}
if self.1 .3 {
let f =
unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.0 .3) };
if let Poll::Ready(out) = f.poll(cx) {
unsafe {
(*self.as_mut().get_unchecked_mut().2.as_mut_ptr()).3 =
out
}
unsafe { self.as_mut().get_unchecked_mut().1 .3 = false }
} else {
complete = false;
}
}
if self.1 .4 {
let f =
unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.0 .4) };
if let Poll::Ready(out) = f.poll(cx) {
unsafe {
(*self.as_mut().get_unchecked_mut().2.as_mut_ptr()).4 =
out
}
unsafe { self.as_mut().get_unchecked_mut().1 .4 = false }
} else {
complete = false;
}
}
if self.1 .5 {
let f =
unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.0 .5) };
if let Poll::Ready(out) = f.poll(cx) {
unsafe {
(*self.as_mut().get_unchecked_mut().2.as_mut_ptr()).5 =
out
}
unsafe { self.as_mut().get_unchecked_mut().1 .5 = false }
} else {
complete = false;
}
}
if complete {
Poll::Ready(unsafe { std::ptr::read(self.2.as_ptr()) })
} else {
Poll::Pending
}
}
}
impl<'a, T, A, U, B, V, C, W, D, X, E, Y, F>
Join<'a, Join6<'a, T, A, U, B, V, C, W, D, X, E, Y, F>>
for (A, B, C, D, E, F)
where
A: 'a + Future<Output = T>,
B: 'a + Future<Output = U>,
C: 'a + Future<Output = V>,
D: 'a + Future<Output = W>,
E: 'a + Future<Output = X>,
F: 'a + Future<Output = Y>,
{
fn join(&'a mut self) -> Join6<'a, T, A, U, B, V, C, W, D, X, E, Y, F> {
Join6(
self,
(true, true, true, true, true, true),
MaybeUninit::uninit(),
)
}
}
}
#[cfg(test)]
mod test {
use crate::prelude::*;
#[test]
fn join6() {
let future = async {
(
async { 1i32 },
async { 'a' },
async { 4.0f32 },
async { "boi" },
async { [4i32, 6i32] },
async { (2i32, 'a') },
)
.join()
.await
};
assert_eq!(
crate::ThreadInterrupt::block_on(future),
(1, 'a', 4.0, "boi", [4, 6], (2, 'a'))
);
}
}
| 33.169935 | 82 | 0.383153 |
e75593e53b4c6077f8214bf85889bd132d94752b | 1,336 | js | JavaScript | server.js | devtsp/crud-clubs-API | fe5417eadfcc9f78a6e0e496b8788778f9f90222 | [
"MIT"
] | null | null | null | server.js | devtsp/crud-clubs-API | fe5417eadfcc9f78a6e0e496b8788778f9f90222 | [
"MIT"
] | null | null | null | server.js | devtsp/crud-clubs-API | fe5417eadfcc9f78a6e0e496b8788778f9f90222 | [
"MIT"
] | null | null | null | const {
createClub,
deleteClub,
getAllClubs,
getClub,
editClub,
} = require('./club_controller.js');
const fs = require('fs');
const cors = require('cors');
const express = require('express');
const PORT = 8080;
const app = express();
const multer = require('multer');
const upload = multer({ dest: 'public/uploads/img' });
app.use(cors());
app.use(express.static('public'));
app.get('/index', (req, res) => {
const clubs = getAllClubs();
const namesAndIds = [...clubs].map(club => {
return {
id: club.id,
name: club.name,
crest: club.crest,
colors: club.colors,
};
});
res.status(200).json(namesAndIds);
});
app.post('/', upload.single('crest'), (req, res) => {
const posted = createClub(req);
res.status(200).json(posted);
});
app.get('/:id', (req, res) => {
const gotten = getClub(req);
res.status(200).json(gotten);
});
app.delete('/:id', (req, res) => {
const deleted = deleteClub(req);
res.status(200).json(deleted);
});
app.post('/edit/:id', upload.single('crest'), (req, res) => {
console.log(req.method, req.url);
const edited = editClub(req);
console.log(edited);
res.json(edited);
});
app.get('*', (req, res) => {
res.status(404).json({ message: 'The resource does not exists.' });
});
app.listen(process.env.PORT || PORT);
console.log(`Listening on http://localhost:${PORT}`);
| 21.206349 | 68 | 0.631737 |
5c3879984695b3dc1d069ad6c00021e30de44eee | 1,344 | h | C | Source/Utility/utility.h | nanhasa/Blocker | e34ce35ea02468a0bf6bb65900c9209b2fe54df0 | [
"MIT"
] | null | null | null | Source/Utility/utility.h | nanhasa/Blocker | e34ce35ea02468a0bf6bb65900c9209b2fe54df0 | [
"MIT"
] | null | null | null | Source/Utility/utility.h | nanhasa/Blocker | e34ce35ea02468a0bf6bb65900c9209b2fe54df0 | [
"MIT"
] | null | null | null | #pragma once
#include <sstream>
#include <string>
namespace utility {
/**
* \brief timeSinceEpoch
* \return
*/
long timeSinceEpoch();
/**
* \brief Used to get timestamp in milliseconds, thread safe
* \return Current time in milliseconds
*/
int timestampMs();
/**
* \brief deltaTimeSec
* \param timestamp
* \return Elapsed seconds since the parameter value
*/
float deltaTimeSec(int timestamp);
/**
* \brief deltaTimeMs
* \param timestamp
* \return Elapsed milliseconds since the parameter value
*/
int deltaTimeMs(int timestamp);
// Utility function to return hex format of a number
template<typename T>
std::string toHex(T&& num)
{
std::stringstream ss;
ss << "0x" << std::hex << std::forward<T>(num);
return ss.str();
}
// Utility function to return dec format of a number
template<typename T>
std::string toDec(T&& num)
{
std::stringstream ss;
ss << std::dec << std::forward<T>(num);
return ss.str();
}
// Utility function to turn number to string
// Returns empty string if parameter is not integral or floating point
template<typename T>
std::string toStr(T num)
{
#pragma warning(suppress: 4127)
if (!std::is_integral<T>::value && !std::is_floating_point<T>::value)
return std::string();
return std::to_string(std::forward<T>(num));
}
} // namespace utility | 21.333333 | 71 | 0.677083 |
70dd4d84b5974f144f5cf81041af6de668bf6474 | 221 | h | C | Runtime/RuntimeOverview/ViewController.h | onzxgway/ZXGSeniorKit | 9cea3eb2cfb7970473bd8d359b137e53683688ad | [
"MIT"
] | null | null | null | Runtime/RuntimeOverview/ViewController.h | onzxgway/ZXGSeniorKit | 9cea3eb2cfb7970473bd8d359b137e53683688ad | [
"MIT"
] | null | null | null | Runtime/RuntimeOverview/ViewController.h | onzxgway/ZXGSeniorKit | 9cea3eb2cfb7970473bd8d359b137e53683688ad | [
"MIT"
] | null | null | null | //
// ViewController.h
// RuntimeOverview
//
// Created by 朱献国 on 2019/5/24.
// Copyright © 2019年 朱献国. All rights reserved.
//
#import <UIKit/UIKit.h>
@interface RuntimeOverviewController : UIViewController
@end
| 13.8125 | 55 | 0.701357 |
d41f48c7508e3487c012340a8cead55b670753ea | 181 | rs | Rust | src/components/local_player.rs | olefasting/amethyst-2d-platformer | 3212712e26a8d36ac05e4a086588a88a6f8f53d7 | [
"MIT"
] | 2 | 2020-05-10T12:26:35.000Z | 2022-03-10T08:02:57.000Z | src/components/local_player.rs | olefasting/amethyst-2d-platformer | 3212712e26a8d36ac05e4a086588a88a6f8f53d7 | [
"MIT"
] | null | null | null | src/components/local_player.rs | olefasting/amethyst-2d-platformer | 3212712e26a8d36ac05e4a086588a88a6f8f53d7 | [
"MIT"
] | null | null | null | use amethyst::ecs::{Component, NullStorage};
#[derive(Debug, Default, Copy, Clone)]
pub struct LocalPlayer;
impl Component for LocalPlayer {
type Storage = NullStorage<Self>;
}
| 20.111111 | 44 | 0.740331 |
d8a7ee3434f152da916caa303ee37c4fb17a99ac | 5,043 | swift | Swift | sync-ios-app/SignatureViewController.swift | yaalisri/RHMAP | a220e44d48a00bc9ff2243bb58102161097e7f21 | [
"Apache-2.0"
] | null | null | null | sync-ios-app/SignatureViewController.swift | yaalisri/RHMAP | a220e44d48a00bc9ff2243bb58102161097e7f21 | [
"Apache-2.0"
] | null | null | null | sync-ios-app/SignatureViewController.swift | yaalisri/RHMAP | a220e44d48a00bc9ff2243bb58102161097e7f21 | [
"Apache-2.0"
] | null | null | null | //
// SignatureViewController.swift
// sync-ios-app
//
// Created by Vidhya Sri on 11/25/16.
// Copyright © 2016 FeedHenry. All rights reserved.
//
import UIKit
import SwiftSignatureView
public class SignatureViewController: UIViewController, UIAlertViewDelegate, SwiftSignatureViewDelegate {
public var item: ShoppingItem!
@IBOutlet weak var signatureImageView: UIImageView!
@IBOutlet weak var signatureView: SwiftSignatureView!
override public func viewDidLoad() {
super.viewDidLoad()
if(item.status == "closed"){
signatureImageView.hidden = false
signatureView.hidden = false
signatureView.userInteractionEnabled = false
let dataDecoded:NSData = NSData(base64EncodedString: item.digitalsign!, options: NSDataBase64DecodingOptions.IgnoreUnknownCharacters)!
signatureImageView.image = UIImage(data:dataDecoded,scale:1.0)
}else{
if(NSUserDefaults.standardUserDefaults().objectForKey(item.ticketid!) != nil ){
signatureImageView.hidden = false
signatureView.hidden = false
signatureView.userInteractionEnabled = false
let dataDecoded:NSData = NSData(base64EncodedString: NSUserDefaults.standardUserDefaults().objectForKey(item.ticketid!) as! String!, options: NSDataBase64DecodingOptions.IgnoreUnknownCharacters)!
signatureImageView.image = UIImage(data:dataDecoded,scale:1.0)
let anotherButton : UIBarButtonItem = UIBarButtonItem(title:"Clear", style:UIBarButtonItemStyle.Plain, target: self, action:#selector(SignatureViewController.ClearButtonClicked(_:)))
self.navigationItem.rightBarButtonItem = anotherButton
}else{
let anotherButton : UIBarButtonItem = UIBarButtonItem(title:"", style:UIBarButtonItemStyle.Plain, target: self, action:#selector(SignatureViewController.SaveButtonClicked(_:)))
anotherButton.image = UIImage(named: "Save.png")
self.navigationItem.rightBarButtonItem = anotherButton
signatureImageView.hidden = true
signatureView.hidden = false
signatureView.userInteractionEnabled = true
}
}
self.signatureView.delegate = self
// Do any additional setup after loading the view.
}
func SaveButtonClicked(sender:AnyObject){
let image : UIImage = signatureView!.signature!
let imageData : NSData = UIImagePNGRepresentation(image)!
print("imageData \(imageData)")
// let avatar64 = imageData.base64EncodedStringWithOptions(NSDataBase64EncodingOptions(rawValue: 0));
let avatar64:String = imageData.base64EncodedStringWithOptions(.Encoding64CharacterLineLength)
// let datastring = NSString(data:imageData, encoding:NSUTF8StringEncoding)
// if let datastring = NSString(data:imageData, encoding:NSUTF8StringEncoding){
NSUserDefaults.standardUserDefaults().setObject(avatar64, forKey: item.ticketid!)
NSUserDefaults.standardUserDefaults().synchronize()
// }
let showAlert : UIAlertView = UIAlertView.init(title: "", message: "Signature Saved", delegate: self, cancelButtonTitle:nil, otherButtonTitles:"OK")
showAlert.tag = 10
showAlert.show()
}
func ClearButtonClicked(sender:AnyObject){
let anotherButton : UIBarButtonItem = UIBarButtonItem(title:"", style:UIBarButtonItemStyle.Plain, target: self, action:#selector(SignatureViewController.SaveButtonClicked(_:)))
anotherButton.image = UIImage(named: "Save.png")
self.navigationItem.rightBarButtonItem = anotherButton
signatureImageView.hidden = true
signatureView.hidden = false
signatureView.userInteractionEnabled = true
}
override public func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
// MARK: - AlertView Delegate
public func alertView(View: UIAlertView, clickedButtonAtIndex buttonIndex: Int){
if View.tag == 10 {
self.navigationController?.popViewControllerAnimated(true)
}
}
//MARK: Delegate
public func swiftSignatureViewDidTapInside(view: SwiftSignatureView) {
// print("Did tap inside")
}
public func swiftSignatureViewDidPanInside(view: SwiftSignatureView) {
// print("Did pan inside")
}
/*
// MARK: - Navigation
// In a storyboard-based application, you will often want to do a little preparation before navigation
override func prepareForSegue(segue: UIStoryboardSegue, sender: AnyObject?) {
// Get the new view controller using segue.destinationViewController.
// Pass the selected object to the new view controller.
}
*/
}
| 40.344 | 211 | 0.673805 |
01068bb8b711c2abe138200e335c20cad0063c7f | 2,496 | lua | Lua | prods/wtf-atabimp/heightmap.lua | GitoriousLispBackup/praxis | a5c1a806c00c6589a3603a29cc9773a704821ebd | [
"MIT"
] | 103 | 2015-01-08T14:04:30.000Z | 2022-02-04T02:57:14.000Z | prods/wtf-atabimp/heightmap.lua | GitoriousLispBackup/praxis | a5c1a806c00c6589a3603a29cc9773a704821ebd | [
"MIT"
] | 6 | 2015-03-01T13:42:32.000Z | 2020-06-28T18:39:41.000Z | prods/wtf-atabimp/heightmap.lua | GitoriousLispBackup/praxis | a5c1a806c00c6589a3603a29cc9773a704821ebd | [
"MIT"
] | 10 | 2015-03-03T00:09:55.000Z | 2021-08-31T19:10:01.000Z | -- Name: heightmap.lua
--heit = {}
t = 0
t_spd = 0.02
heightmap = {}
cellsize = 10
camCellPosI = 5
camCellPosJ = 5
camAngle = 0
camAngleSpeed = 4
camOrbitCenter = { x = 5 * cellsize, y = 5 * cellsize }
camOrbitRadius = 50
camOrbitHeight = 10
-- make a guy that runs and jumps over these platforms
function heit.init()
camAngle = 0
camAngleSpeed = 4
camOrbitCenter = { x = 5 * cellsize, y = 5 * cellsize }
camOrbitRadius = 50
camOrbitHeight = 10
end
function heit.update()
t = t + t_spd
for i=1,10 do
for j=1,10 do
heightmap[i][j] = 20*math.sin(t * i * 0.1) + 20*math.sin(t * j * 0.1)
end
end
camAngle = camAngle + camAngleSpeed * math.pi / 180
camPos = { x = camOrbitCenter.x + camOrbitRadius * math.cos(camAngle), y = camOrbitCenter.y + camOrbitRadius * math.sin(camAngle) }
ahead = { x = camPos.x + -10 * math.sin(camAngle), y = camPos.y + 10 * math.cos(camAngle) }
camOrbitRadius = math.sin(camAngle * 0.25) * 15 + 20
setCamPos(camPos.x, camOrbitHeight, camPos.y)
lookAt(ahead.x, camOrbitHeight, ahead.y)
--setCamPos(getcellposition(camCellPosI,camCellPosJ))
--setCamPos(i * cellsize, heightmap[i][j]+10, j * cellsize)
-- shiftcellcam()
end
function heit.render()
beginTriGL()
renderheightmap()
endGL()
end
function createheightmap()
heightmap = {}
for i=1,10 do
heightmap[i] = {}
for j=1,10 do
heightmap[i][j] = 20
end
end
end
function rendercell(i,j)
colorGL(20 * i, 20 * j, 0, 255)
vectorGL(i * cellsize, heightmap[i][j], j * cellsize)
vectorGL((i+1) * cellsize, heightmap[i][j], j * cellsize)
vectorGL(i * cellsize, heightmap[i][j], (j+1) * cellsize)
vectorGL((i+1) * cellsize, heightmap[i][j], j * cellsize)
vectorGL((i+1) * cellsize, heightmap[i][j], (j+1) * cellsize)
vectorGL(i * cellsize, heightmap[i][j], (j+1) * cellsize)
end
function renderheightmap()
for i = 1,10 do
for j = 1,10 do
rendercell(i,j)
end
end
end
function getcellposition(i,j)
return i * cellsize + (cellsize * 0.5), heightmap[i][j]+10, j * cellsize + (cellsize * 0.5)
end
function shiftcellcam()
camCellPosI = camCellPosI + 1
if camCellPosI > 10 then
camCellPosI = 1
camCellPosJ = camCellPosJ + 1
if camCellPosJ > 10 then
camCellPosJ = 1
end
end
end
createheightmap()
| 23.327103 | 135 | 0.598157 |
e521504b2eaedb8dc65a4e8248e700b11a38bc4d | 770 | kt | Kotlin | app/src/main/java/com/vikasmaurya/corona/api/ApiClient.kt | vikas972/CoronaApp | fb49f837c53a6d3b8640b15f02b17cdba0f3e2ee | [
"Apache-2.0"
] | null | null | null | app/src/main/java/com/vikasmaurya/corona/api/ApiClient.kt | vikas972/CoronaApp | fb49f837c53a6d3b8640b15f02b17cdba0f3e2ee | [
"Apache-2.0"
] | null | null | null | app/src/main/java/com/vikasmaurya/corona/api/ApiClient.kt | vikas972/CoronaApp | fb49f837c53a6d3b8640b15f02b17cdba0f3e2ee | [
"Apache-2.0"
] | null | null | null | package com.appdid.topautocare.api
import com.google.gson.GsonBuilder
import okhttp3.OkHttpClient
import retrofit2.Retrofit
import retrofit2.converter.gson.GsonConverterFactory
object ApiClient {
private val BASE_URL = "https://corona.lmao.ninja/v2/"
private var retrofit: Retrofit? = null
var client = OkHttpClient()
var gson = GsonBuilder()
.setLenient()
.create()
val apiClient: Retrofit
get() {
if (retrofit == null) {
retrofit = Retrofit.Builder()
.baseUrl(BASE_URL)
.client(client)
.addConverterFactory(GsonConverterFactory.create(gson))
.build()
}
return retrofit!!
}
}
| 22.647059 | 75 | 0.585714 |
5deb4db9d7b01d629f7e49074993945cecd58ebf | 393 | h | C | Other/Headers/MMEmoticonEncryptUtil.h | XWJACK/WeChatPlugin-MacOS | 4241ddb10ccce9484fcf6d5bd51a6afa3b446632 | [
"MIT"
] | 2 | 2019-01-11T02:02:55.000Z | 2020-04-23T02:42:01.000Z | Other/Headers/MMEmoticonEncryptUtil.h | XWJACK/WeChatPlugin-MacOS | 4241ddb10ccce9484fcf6d5bd51a6afa3b446632 | [
"MIT"
] | null | null | null | Other/Headers/MMEmoticonEncryptUtil.h | XWJACK/WeChatPlugin-MacOS | 4241ddb10ccce9484fcf6d5bd51a6afa3b446632 | [
"MIT"
] | 1 | 2021-01-09T14:54:27.000Z | 2021-01-09T14:54:27.000Z | //
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Sep 17 2017 16:24:48).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by Steve Nygard.
//
#import <objc/NSObject.h>
@interface MMEmoticonEncryptUtil : NSObject
{
}
+ (unsigned char)isEncryptWithHeadbyte:(int)arg1;
+ (id)emoticonDecrypt:(id)arg1;
+ (unsigned char)emoticonEncrypt:(id)arg1;
@end
| 20.684211 | 90 | 0.70229 |
418b3ffcbb7cc0d4dffd580930fee706e9c1290f | 13,031 | c | C | src/io.c | MikeLankamp/osldr | 34c2620d924a1a9cd774d6d924d5f004a3611b97 | [
"MIT"
] | 4 | 2020-06-01T06:45:49.000Z | 2022-01-06T17:35:45.000Z | src/io.c | MikeLankamp/osldr | 34c2620d924a1a9cd774d6d924d5f004a3611b97 | [
"MIT"
] | 1 | 2021-05-13T01:13:14.000Z | 2021-05-13T21:08:14.000Z | src/io.c | MikeLankamp/osldr | 34c2620d924a1a9cd774d6d924d5f004a3611b97 | [
"MIT"
] | null | null | null | #include <drive.h>
#include <errno.h>
#include <io.h>
#include <stdlib.h>
#include <string.h>
#define MAX_READ_TRY 8 /* Try to a read a sector this many times at most */
/* Registered Filesystems */
BOOL FatMount( DEVICE* Device );
BOOL RawMount( DEVICE* Device ); /* Do not add this in the list */
typedef BOOL (*FSMOUNTFUNC)(DEVICE* pdi);
/* Alter this when adding or removing a filesystem to OSLDR */
static FSMOUNTFUNC FsMountFunctions[] = {
FatMount,
NULL
};
static DEVICE* Devices;
static ULONG BootDevice;
VOID IoInitialize( ULONG device )
{
BootDevice = device;
Devices = NULL;
}
static CHAR* ParseDevice( CHAR *path, ULONG* Device )
{
CHAR* endptr;
ULONG Partition = 0xFF;
if (*path == '/')
{
/* We have no device, use boot device */
*Device = BootDevice;
return path;
}
/* Parse drive type */
if (((path[0] != 'f') && (path[0] != 'h')) || (path[1] != 'd'))
{
/* We don't have "fd" or "hd" */
errno = ENODEV;
return NULL;
}
/* Parse drive number */
*Device = strtoul( &path[2], &endptr, 10 );
if ((endptr == &path[2]) || (*Device > 0x7F))
{
/* No or invalid drive number */
if (errno == EZERO)
{
errno = ENODEV;
}
return NULL;
}
if (path[0] == 'h')
{
/* Its a hard drive, parse optional partition number */
path = endptr;
*Device |= 0x80;
if (*path++ == ',')
{
/* Partition number present */
Partition = strtoul( path, &endptr, 10 );
if ((endptr == path) || (Partition > 254))
{
/* No or invalid partition number */
if (errno == EZERO)
{
errno = ENOPART;
}
return NULL;
}
/* FIXME: When we support BSD partitions, put code here */
}
}
if ((*endptr != '/') && (*endptr != '\0'))
{
errno = ENOENT;
return NULL;
}
/* Change to Multiboot compliant representation */
*Device = (*Device << 24) | (Partition << 16) | 0xFFFF;
return endptr;
}
#define PART_UNUSED 0x00
#define PART_EXTENDED1 0x05
#define PART_EXTENDED2 0x0F
#define PART_BOOTABLE 0x80
#define IS_EXTENDED(p) (((p)->Type == PART_EXTENDED1) || ((p)->Type == PART_EXTENDED2))
#define IS_UNUSED(p) ((p)->Type == PART_UNUSED)
typedef struct _PARTITION
{
UCHAR Status;
UCHAR StartCHS[3];
UCHAR Type;
UCHAR EndCHS[3];
ULONG StartLBA;
ULONG Size;
} PACKED PARTITION;
static DEVICE* OpenDevice( ULONG Device, BOOL MountFS )
{
UCHAR Drive = (Device >> 24) & 0xFF;
UCHAR Part1 = (Device >> 16) & 0xFF;
UCHAR Part2 = (Device >> 8) & 0xFF;
UCHAR Part3 = (Device >> 0) & 0xFF;
ULONG Start;
ULONGLONG Size;
DEVICE* pdev;
INT i;
/* Check if the device was opened before */
for (pdev = Devices; pdev != NULL; pdev = pdev->Next)
{
if ((pdev->DeviceId == Device) && (pdev->hasFileSystem == MountFS))
{
return pdev;
}
}
if ((Part2 != 0xFF) || (Part3 != 0xFF))
{
/* BSD partitions and who-knows-what-else are not supported */
errno = ENOPART;
return NULL;
}
DRIVE_INFO* pdi = GetDriveParameters( Drive );
if (pdi == NULL)
{
/* Couldn't get drive parameters */
errno = EIO;
return NULL;
}
if (Part1 != 0xFF)
{
/* We have to find the wanted partition */
PARTITION* parts;
PARTITION* part = NULL;
/* Read MBR */
UCHAR* MBR = malloc( pdi->nBytesPerSector );
if (MBR == NULL)
{
errno = ENOMEM;
return NULL;
}
if (ReadDrive( Drive, 0, 1, MBR ) != 1)
{
free( MBR );
errno = EIO;
return NULL;
}
/* Find partition */
parts = (PARTITION*)(MBR + 0x1BE);
for (i = 0; i < 4; i++)
{
if (!IS_UNUSED(&parts[i]))
{
if (IS_EXTENDED(&parts[i]))
{
/* Extended partition */
if (Part1 == i)
{
/* We want to read the extended partition, we can't do that */
break;
}
if (Part1 >= 4)
{
if (part != NULL)
{
/* Two extended partition entries, table's invalid */
break;
}
part = &parts[i];
}
}
else if (Part1 == i)
{
/* This is simply the partition that we want */
part = &parts[i];
}
}
}
if ((i < 4) || (part == NULL))
{
/* Something went wrong or the partition could not be found */
errno = ENOPART;
free( MBR );
return NULL;
}
if (Part1 >= 4)
{
/* Now we have to traverse the extended partition list */
for (i = 4; part != NULL; i++)
{
if (ReadDrive( Drive, part->StartLBA, 1, MBR ) != 1)
{
free( MBR );
errno = EIO;
return NULL;
}
part = NULL;
parts = (PARTITION*)(MBR + 0x1BE);
if (IS_EXTENDED(&parts[0]))
{
/* First partition entry isn't allowed to be extended */
break;
}
if (Part1 == i)
{
/* We've found the partition we want */
part = &parts[0];
break;
}
if ((!IS_UNUSED(&parts[1])) && (IS_EXTENDED(&parts[1])))
{
/* Go to next list item */
part = &parts[1];
}
} while (part != NULL);
if (part == NULL)
{
/* We couldn't find the partition */
free( MBR );
errno = ENOPART;
return NULL;
}
}
/* Now part points to the partition entry we want */
Start = part->StartLBA;
Size = part->Size;
free( MBR );
if (Start + Size > pdi->nTotalSectors)
{
/* Invalid partition */
errno = ENOPART;
return NULL;
}
}
else
{
/* The entire drive is the partition */
Start = 0;
Size = pdi->nTotalSectors;
}
/* Create the device object */
pdev = malloc( sizeof(DEVICE) );
if (pdev == NULL)
{
errno = ENOMEM;
return NULL;
}
/* Initialize object */
pdev->DeviceId = Device;
pdev->StartSector = Start;
pdev->nSectors = Size;
pdev->hasFileSystem = MountFS;
pdev->Cache.nItems = 0;
pdev->Cache.Time = 0;
/* Mount device */
if (MountFS)
{
for (i = 0; FsMountFunctions[i] != NULL; i++)
{
if (FsMountFunctions[i]( pdev ))
{
break;
}
}
if (FsMountFunctions[i] == NULL)
{
free( pdev );
errno = ENOFSYS;
return NULL;
}
}
else
{
/* Raw device */
RawMount( pdev );
}
/* Add to linked list */
pdev->Next = Devices;
Devices = pdev;
return pdev;
}
static VOID* ReadCache( CACHE* Cache, ULONGLONG Tag )
{
ULONG i;
/* Search cache */
for (i = 0; i < Cache->nItems; i++)
{
if (Cache->Items[i].Tag == Tag)
{
/* Update access time */
Cache->Items[i].Time = Cache->Time++;
/* Return data */
return Cache->Items[i].Data;
}
}
/* Entry not cached */
return NULL;
}
static VOID WriteCache( CACHE* Cache, ULONGLONG Tag, VOID* Data )
{
ULONG i = 0;
if (Cache->nItems == CACHE_SIZE)
{
/* Cache is full, replace least recently used */
ULONG j;
/* Find and free Least Recently Used */
for (j = 1; j < CACHE_SIZE; j++)
{
if (Cache->Items[j].Time < Cache->Items[i].Time)
{
i = j;
}
}
free( Cache->Items[i].Data );
}
else
{
i = Cache->nItems++;
}
Cache->Items[i].Tag = Tag;
Cache->Items[i].Time = Cache->Time++;
Cache->Items[i].Data = Data;
}
ULONGLONG ReadSector( DEVICE* Device, ULONGLONG Sector, ULONGLONG nSectors, VOID* Buffer )
{
ULONGLONG Read = 0;
UCHAR Drive = Device->DeviceId >> 24;
DRIVE_INFO* pdi = GetDriveParameters( Drive );
if (pdi == NULL)
{
errno = EIO;
return 0;
}
if (Sector + nSectors > Device->nSectors)
{
/* Invalid sector and/or count */
errno = EIO;
return 0;
}
while (nSectors > 0)
{
VOID* tmpbuf;
ULONGLONG count = nSectors;
/* See if the current sector is cached */
tmpbuf = ReadCache( &Device->Cache, Sector );
if (tmpbuf != NULL)
{
/* Yes, copy contents */
memcpy( Buffer, tmpbuf, pdi->nBytesPerSector );
count = 1;
}
else
{
/* No, read sectors from disk and cache */
INT i;
ULONGLONG end = Sector + 1;
/* See how many consecutive sectors aren't cached */
for (count = 1; end < Sector + nSectors; end++, count++)
{
if (ReadCache( &Device->Cache, end ) != NULL)
{
break;
}
}
/* Allocate as much sectors as possible */
do
{
tmpbuf = malloc( count * pdi->nBytesPerSector );
if (tmpbuf != NULL)
{
break;
}
/* We can't allocate this much, try half as much */
count /= 2;
} while (count > 1);
if (tmpbuf == NULL)
{
/* Not enough memory for even one sector */
errno = ENOMEM;
break;
}
for (i = 0; i < MAX_READ_TRY; i++)
{
if (ReadDrive( Drive, Device->StartSector + Sector, count, tmpbuf ) == count)
{
break;
}
ResetDrive( Drive );
}
if (i == MAX_READ_TRY)
{
/* The read failed */
errno = EIO;
free( tmpbuf );
break;
}
/* Write to cache */
for (i = 0; i < count; i++)
{
VOID* secbuf = malloc( pdi->nBytesPerSector );
if (secbuf != NULL)
{
/* Only write to cache if we can allocate memory */
memcpy( secbuf, (CHAR*)tmpbuf + i * pdi->nBytesPerSector, pdi->nBytesPerSector );
WriteCache( &Device->Cache, Sector + i, secbuf );
}
}
/* Copy read data to user buffer */
memcpy( Buffer, tmpbuf, count * pdi->nBytesPerSector );
free( tmpbuf );
}
/* Adjust values */
nSectors -= count;
Sector += count;
Read += count;
Buffer = (CHAR*)Buffer + (count * pdi->nBytesPerSector);
}
return Read;
}
BOOL IsDevice( FILE* file )
{
return file->IsDevice;
}
FILE* OpenFile( CHAR* Path )
{
ULONG DeviceId;
DEVICE* pdev;
FILE* File;
Path = ParseDevice( Path, &DeviceId );
if (Path == NULL)
{
return NULL;
}
pdev = OpenDevice( DeviceId, (*Path != '\0') );
if (pdev == NULL)
{
return NULL;
}
File = pdev->OpenFile( pdev, Path );
if (File != NULL)
{
File->Device = pdev;
File->IsDevice = (*Path == '\0');
}
return File;
}
ULONGLONG ReadFile( FILE* File, VOID* Buffer, ULONGLONG nBytes )
{
return File->Device->ReadFile( File, Buffer, nBytes );
}
ULONGLONG GetFileSize( FILE* File )
{
return File->Device->GetFileSize( File );
}
BOOL SetFilePointer( FILE* File, LONGLONG Offset, INT From)
{
return File->Device->SetFilePointer( File, Offset, From );
}
ULONGLONG GetFilePointer( FILE* File )
{
return File->Device->GetFilePointer( File );
}
VOID CloseFile( FILE* File )
{
File->Device->CloseFile( File );
free( File );
}
| 23.954044 | 101 | 0.44632 |
0f7c52ff5a25811625fdc8e1860f6280f964fb52 | 1,100 | kt | Kotlin | LottieSample/src/main/kotlin/com/airbnb/lottie/samples/ViewPagerDemoActivity.kt | zicen/lottie-android | 7a44df1de7652e6007a0cad5457d5be8dc6950be | [
"Apache-2.0"
] | null | null | null | LottieSample/src/main/kotlin/com/airbnb/lottie/samples/ViewPagerDemoActivity.kt | zicen/lottie-android | 7a44df1de7652e6007a0cad5457d5be8dc6950be | [
"Apache-2.0"
] | null | null | null | LottieSample/src/main/kotlin/com/airbnb/lottie/samples/ViewPagerDemoActivity.kt | zicen/lottie-android | 7a44df1de7652e6007a0cad5457d5be8dc6950be | [
"Apache-2.0"
] | null | null | null | package com.airbnb.lottie.samples
import android.content.Context
import android.content.Intent
import android.graphics.Color
import android.os.Bundle
import android.util.Log
import android.view.View
import android.view.ViewGroup
import android.widget.LinearLayout
import androidx.appcompat.app.AppCompatActivity
import com.opensource.svgaplayer.SVGAImageView
import com.opensource.svgaplayer.SVGAParser
import com.opensource.svgaplayer.SVGAParser.ParseCompletion
import com.opensource.svgaplayer.SVGAVideoEntity
import kotlinx.android.synthetic.main.activity_custom_view_pager.*
import kotlinx.android.synthetic.main.activity_svga.*
// Demo activity hosting a custom ViewPager layout; the adapter wiring is
// still a TODO (see the commented line in onCreate).
class ViewPagerDemoActivity : AppCompatActivity() {
    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_custom_view_pager)
        // viewpager.adapter
    }
    companion object {
        const val TAG = "SVGADemoActivity"
        // Builds a launch Intent for this activity.
        // NOTE(review): 'args' is currently unused — presumably intended as an
        // Intent extra; confirm with callers before removing or wiring it.
        fun intent(context: Context, args: String): Intent {
            return Intent(context, ViewPagerDemoActivity::class.java)
        }
    }
}
| 30.555556 | 69 | 0.785455 |
16d19fe97bf65f58b14ea80187cae528ac32a9d9 | 97 | ts | TypeScript | projects/dvl-fw-core/src/lib/util/index.ts | cns-iu/make-a-vis | c79cff26c908a667f8c137291bbce488780bd8dd | [
"MIT"
] | 6 | 2019-02-12T23:37:36.000Z | 2021-07-24T03:22:49.000Z | projects/dvl-fw-core/src/lib/util/index.ts | cns-iu/make-a-vis | c79cff26c908a667f8c137291bbce488780bd8dd | [
"MIT"
] | 242 | 2018-07-27T16:44:03.000Z | 2022-02-02T18:11:56.000Z | projects/dvl-fw-core/src/lib/util/index.ts | cns-iu/make-a-vis | c79cff26c908a667f8c137291bbce488780bd8dd | [
"MIT"
] | null | null | null | export * from './graphic-symbol-data';
export { ProjectSerializer } from './project-serializer';
| 32.333333 | 57 | 0.731959 |
39f869bb7a2cdb153b0201322348cc713d7adb4f | 878 | java | Java | HelloWorldSpringGRPC/src/main/java/com/hashimati/grpc/hello/HelloServiceChannel.java | hashimati/GRPC-Spring | 8258f16812829541bcefd996d2f9c1a56cd34cdc | [
"Unlicense"
] | null | null | null | HelloWorldSpringGRPC/src/main/java/com/hashimati/grpc/hello/HelloServiceChannel.java | hashimati/GRPC-Spring | 8258f16812829541bcefd996d2f9c1a56cd34cdc | [
"Unlicense"
] | null | null | null | HelloWorldSpringGRPC/src/main/java/com/hashimati/grpc/hello/HelloServiceChannel.java | hashimati/GRPC-Spring | 8258f16812829541bcefd996d2f9c1a56cd34cdc | [
"Unlicense"
] | null | null | null | package com.hashimati.grpc.hello;
import com.hashimati.io.HelloRequest;
import com.hashimati.io.HelloResponse;
import com.hashimati.io.HelloServiceGrpc;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Component;
import org.springframework.stereotype.Service;
@Component
public class HelloServiceChannel {
public HelloResponse sayHello(HelloRequest message){
ManagedChannel channel = ManagedChannelBuilder
.forAddress("localhost", 8080)
.usePlaintext()
.build();
HelloServiceGrpc.HelloServiceBlockingStub stub
= HelloServiceGrpc.newBlockingStub(channel);
HelloResponse response = stub.sayHello(message);
return response;
}
}
| 30.275862 | 61 | 0.710706 |
ddf557eaf74ce52748f0c54f05c2765d90852f83 | 667 | h | C | plugins/lua/src/lua_lib.h | IllidanS4/YALP-MTA | f7f898c2fac01548bfdbec80f14cdd156b6e9965 | [
"MIT"
] | 1 | 2020-11-08T02:00:26.000Z | 2020-11-08T02:00:26.000Z | plugins/lua/src/lua_lib.h | IllidanS4/YALP-MTA | f7f898c2fac01548bfdbec80f14cdd156b6e9965 | [
"MIT"
] | null | null | null | plugins/lua/src/lua_lib.h | IllidanS4/YALP-MTA | f7f898c2fac01548bfdbec80f14cdd156b6e9965 | [
"MIT"
] | null | null | null | #ifndef LUA_LIB_H_INCLUDED
#define LUA_LIB_H_INCLUDED
#include "lua_api.h"
#include <type_traits>
#if defined(_WIN32) || defined(__CYGWIN__)
#ifdef __GNUC__
#define LUA_EXPORT extern "C" __attribute__ ((dllexport))
#else
#define LUA_EXPORT extern "C" __declspec(dllexport)
#endif
#else
#if __GNUC__ >= 4
#define LUA_EXPORT extern "C" __attribute__ ((visibility ("default")))
#else
#define LUA_EXPORT extern "C"
#endif
#endif
LUA_EXPORT typename std::remove_pointer<lua_CFunction>::type luaopen_YALP;
LUA_EXPORT typename std::remove_pointer<lua_CFunction>::type luaopen_MTA;
LUA_EXPORT typename std::remove_pointer<lua_CFunction>::type luaopen_YALP_MTA;
#endif
| 24.703704 | 78 | 0.790105 |
290ae5aa7a3251d1428add1b795c463d0cf72c1c | 336 | kt | Kotlin | domain/src/main/java/com.ihorvitruk.telegramclient.domain/interactor/NetworkInteractor.kt | ihorvitruk/Telegram-Client | 0e80cc5706ef6921b2594d5de25f9ea8a04f406b | [
"Apache-2.0"
] | 26 | 2018-01-31T20:33:35.000Z | 2022-03-02T02:37:33.000Z | domain/src/main/java/com.ihorvitruk.telegramclient.domain/interactor/NetworkInteractor.kt | ihorvitruk/Telegram-Client | 0e80cc5706ef6921b2594d5de25f9ea8a04f406b | [
"Apache-2.0"
] | null | null | null | domain/src/main/java/com.ihorvitruk.telegramclient.domain/interactor/NetworkInteractor.kt | ihorvitruk/Telegram-Client | 0e80cc5706ef6921b2594d5de25f9ea8a04f406b | [
"Apache-2.0"
] | 12 | 2018-01-05T18:36:15.000Z | 2021-04-19T14:53:08.000Z | package com.ihorvitruk.telegramclient.domain.interactor
import com.ihorvitruk.telegramclient.domain.repository.INetworkRepository
import javax.inject.Inject
/** Use-case wrapper exposing the repository's network-connectivity check. */
class NetworkInteractor @Inject constructor(private val networkRepository: INetworkRepository) {
    /** Delegates directly to [INetworkRepository.checkNetworkConnection]. */
    fun checkNetworkConnection() =
            networkRepository.checkNetworkConnection()
}
187b0c8f99116d43f82b206b996b6f879bc9588f | 193 | sql | SQL | Store.Infra/Scripts/Procedures/spReturnCustomer.sql | douglasrg1/Store | 9e0d09fd629882659082bd8b23becb0e76d935c3 | [
"MIT"
] | null | null | null | Store.Infra/Scripts/Procedures/spReturnCustomer.sql | douglasrg1/Store | 9e0d09fd629882659082bd8b23becb0e76d935c3 | [
"MIT"
] | null | null | null | Store.Infra/Scripts/Procedures/spReturnCustomer.sql | douglasrg1/Store | 9e0d09fd629882659082bd8b23becb0e76d935c3 | [
"MIT"
] | null | null | null | create procedure spReturnCustomer
@Id UNIQUEIDENTIFIER
as
select
[Id],
concat([FirstName],' ',[LastName]) AS [Name],
[Document],
[Email]
from
[Customer]
where
@Id = [Id] | 16.083333 | 49 | 0.632124 |
71eeffb8dfff0bcdcd74b62536f5647bf4e8691c | 9,390 | swift | Swift | Teste1/TripCaptureModules/DetectActivityModule.swift | motiv-woorti-app/woorti-ios | 10ccd7a09f0b0272e8fad7031d87db38ca96c3cb | [
"Apache-2.0"
] | null | null | null | Teste1/TripCaptureModules/DetectActivityModule.swift | motiv-woorti-app/woorti-ios | 10ccd7a09f0b0272e8fad7031d87db38ca96c3cb | [
"Apache-2.0"
] | null | null | null | Teste1/TripCaptureModules/DetectActivityModule.swift | motiv-woorti-app/woorti-ios | 10ccd7a09f0b0272e8fad7031d87db38ca96c3cb | [
"Apache-2.0"
] | null | null | null | // (C) 2017-2020 - The Woorti app is a research (non-commercial) application that was
// developed in the context of the European research project MoTiV (motivproject.eu). The
// code was developed by partner INESC-ID with contributions in graphics design by partner
// TIS. The Woorti app development was one of the outcomes of a Work Package of the MoTiV
// project.
// The Woorti app was originally intended as a tool to support data collection regarding
// mobility patterns from city and country-wide campaigns and provide the data and user
// management to campaign managers.
// The Woorti app development followed an agile approach taking into account ongoing
// feedback of partners and testing users while continuing under development. This has
// been carried out as an iterative process deploying new app versions. Along the
// timeline, various previously unforeseen requirements were identified, some requirements
// Were revised, there were requests for modifications, extensions, or new aspects in
// functionality or interaction as found useful or interesting to campaign managers and
// other project partners. Most stemmed naturally from the very usage and ongoing testing
// of the Woorti app. Hence, code and data structures were successively revised in a
// way not only to accommodate this but, also importantly, to maintain compatibility with
// the functionality, data and data structures of previous versions of the app, as new
// version roll-out was never done from scratch.
// The code developed for the Woorti app is made available as open source, namely to
// contribute to further research in the area of the MoTiV project, and the app also makes
// use of open source components as detailed in the Woorti app license.
// This project has received funding from the European Union’s Horizon 2020 research and
// innovation programme under grant agreement No. 770145.
// This file is part of the Woorti app referred to as SOFTWARE.
import Foundation
import CoreMotion
/// Wraps CoreMotion activity detection: starts/stops continuous activity
/// updates, optionally polls on a timer, and forwards validated activities to
/// UserInfo / PowerManagementModule. Every `numOccurences`-th callback also
/// wakes the GPS, and every `numOccurrencesSurveys`-th triggers survey
/// notifications.
class DetectActivityModule {
    //MARK:properties
    static public var viewControllerSnapped: ViewController? //used to notify interface of changes
    static private var actMan: CMMotionActivityManager?
    static private var lastDateSearched = Date()
    static public let ActivityTimeInterval = 10*60 //seconds
    public static var MockActivities = false // true for testing purposes only
    // Counters driving the periodic GPS / survey side effects in myHandler.
    public static var numOccurences = 70
    public static var numOccurencesMax = 10000
    public static var actNumOccurences = 0
    public static var numOccurrencesSurveys = 2000
    //MARK:functions
    //used to start the activity detection service (no-op when mocking or already running)
    static func startAD() {
        if !MockActivities {
            if actMan == nil {
                DetectActivityModule.startContinuousAD()
            }
        }
    }
    //Start activity detection with real time detection
    private static func startContinuousAD(){
        //Activity request
        if !MockActivities {
            if CMMotionActivityManager.isActivityAvailable(){
                actMan = CMMotionActivityManager()
                let backgroungOQ: OperationQueue = OperationQueue()
                backgroungOQ.qualityOfService = .background
                let motionHandler: CMMotionActivityHandler = myHandler(activity:)
                actMan!.startActivityUpdates(to: backgroungOQ, withHandler: motionHandler)
            }
        }
    }
    //Start activity detection on a timer and retrieve all activities from ActivityTimeInterval seconds ago variable
    // NOTE(review): the Timer target is a throwaway DetectActivityModule()
    // instance whose @objc ActivityTimer method is invoked on each fire.
    private static func startADonTimer(){
        //Activity requests on timer
        if !MockActivities {
            if CMMotionActivityManager.isActivityAvailable(){
                if (DetectActivityModule.actMan==nil) {
                    DetectActivityModule.actMan = CMMotionActivityManager()
                }
                //Query handler operation queue
                let backgroungOQ: OperationQueue = OperationQueue()
                backgroungOQ.qualityOfService = .utility
                let now=Date()
                // Query window starts one hour before now.
                let dateComponents: DateComponents = {
                    var dateComponents = DateComponents()
                    dateComponents.setValue(-1, for: .hour)
                    return dateComponents
                }()
                guard let startDate = NSCalendar.current.date(byAdding: dateComponents, to: now) else { return }
                DetectActivityModule.actMan!.queryActivityStarting(from: startDate, to: now, to: backgroungOQ){ activities, error in
                    DetectActivityModule.handleMultipleActivities(activities: activities, error: error)
                }
                DetectActivityModule.lastDateSearched=now
                Timer.scheduledTimer(timeInterval: TimeInterval(DetectActivityModule.ActivityTimeInterval), target: DetectActivityModule(), selector: #selector(ActivityTimer), userInfo: nil, repeats: true)
            }
        }
    }
    //used to stop activity detection
    static func stopAD(){
        actMan?.stopActivityUpdates()
        actMan=nil
    }
    // Timer callback: fetch all activities since the last query window.
    @objc func ActivityTimer(){
        if (DetectActivityModule.actMan==nil) {
            DetectActivityModule.actMan = CMMotionActivityManager()
        }
        let backgroungOQ: OperationQueue = OperationQueue()
        backgroungOQ.qualityOfService = .utility
        let now=Date()
        DetectActivityModule.actMan!.queryActivityStarting(from: DetectActivityModule.lastDateSearched, to: now, to: backgroungOQ){ activities, error in
            DetectActivityModule.handleMultipleActivities(activities: activities, error: error)
        }
        DetectActivityModule.lastDateSearched=now
    }
    // Fans a query result out to the single-activity handler; errors are ignored.
    static private func handleMultipleActivities(activities:[CMMotionActivity]?, error:Error?) ->Void{
        if (activities != nil) {
            for activity in activities! {
                myHandler(activity: activity)
            }
        }
    }
    //handler for the detected activity: bumps the wrap-around counter, fires
    //the periodic survey/GPS side effects, then forwards valid activities.
    static private func myHandler(activity:CMMotionActivity?) -> Void {
        actNumOccurences += 1
        actNumOccurences %= numOccurencesMax
        if actNumOccurences % numOccurrencesSurveys == 0 {
            print("NotifyingDatedSurveys")
            //get the list of survey if changed
            MotivRequestManager.getInstance().UpdateMySurveys()
            NotificationEngine.getInstance().notifyDatedSurveys()
            MotivUser.getInstance()?.trySendNextNotificationFromElsewhere()
            NotificationEngine.getInstance().notifyOnceTripSurveys() // Temp func to fire all once surveys if should already have fired
        }
        if actNumOccurences % numOccurences == 0 {
            print("Activating GPS due to periodic num occurences")
            PowerManagementModule.TurnGpsOn()
        }
        print("actNumOccurences: \(actNumOccurences)")
        if(activity==nil){
            return
        } else if validateActivity(activity: activity!) {
            //send new activity to userInfo
            UserInfo.newActivity(activity: activity!)
            PowerManagementModule.processnewActivity(activity: activity!)
            // print("Got Activity: " + printActivity(activity: activity!))
        }
    }
    // An activity is accepted if at least one of CoreMotion's type flags is set.
    static private func validateActivity(activity: CMMotionActivity) -> Bool {
        return activity.stationary || activity.walking || activity.running || activity.automotive || activity.cycling || activity.unknown
    }
    // Debug helper: space-separated list of the set activity flags.
    static private func printActivity(activity: CMMotionActivity) -> String {
        var responseText = ""
        if activity.walking {
            responseText.append("walking ")
        }
        if activity.stationary {
            responseText.append("stationary ")
        }
        if activity.automotive {
            responseText.append("automotive ")
        }
        if activity.cycling {
            responseText.append("cycling ")
        }
        if activity.running {
            responseText.append("running ")
        }
        if activity.unknown {
            responseText.append("unknown ")
        }
        return responseText
    }
    //MockHandler: injects a synthetic activity when MockActivities is enabled.
    static public func mockActivity(activity: String){
        if DetectActivityModule.MockActivities {
            let mockActivity = MockActivity(modeOfTransport: activity, Confidence: 2)
            UserInfo.newMockActivity(activity: mockActivity)
        }
    }
    //    //MARK: Accelerometer Funcions
    //    static func startAccelerometerDetection() {
    //        if !MockActivities {
    //            if motMan == nil {
    //                startAD()
    //            }
    //            if let manager = motMan {
    //                manager.update
    //            }
    //        }
    //    }
}
/// Synthetic activity used in place of a CMMotionActivity when the module
/// runs with MockActivities enabled. StartDate is stamped at creation time.
class MockActivity {
    var modeOfTransport: String = ""
    var confidenceLevel: Int = 0
    var StartDate = Date()

    /// - Parameters:
    ///   - modeOfTransport: transport-mode label for this fake activity
    ///   - Confidence: confidence level to report
    public init(modeOfTransport: String, Confidence: Int) {
        self.modeOfTransport = modeOfTransport
        self.confidenceLevel = Confidence
    }
}
| 38.641975 | 205 | 0.640043 |
5fa8591bcd4e78196ec292c82b37247aa09e7ce1 | 2,536 | c | C | Plugins/Nektar/Veclib/math/xvrand.c | mathstuf/ParaView | e867e280545ada10c4ed137f6a966d9d2f3db4cb | [
"Apache-2.0"
] | 1 | 2020-05-21T20:20:59.000Z | 2020-05-21T20:20:59.000Z | Plugins/Nektar/Veclib/math/xvrand.c | mathstuf/ParaView | e867e280545ada10c4ed137f6a966d9d2f3db4cb | [
"Apache-2.0"
] | null | null | null | Plugins/Nektar/Veclib/math/xvrand.c | mathstuf/ParaView | e867e280545ada10c4ed137f6a966d9d2f3db4cb | [
"Apache-2.0"
] | 5 | 2016-04-14T13:42:37.000Z | 2021-05-22T04:59:42.000Z | /*
* Random number generation
*/
#include <stdio.h>
#include <math.h>
#include <assert.h>
#ifndef NULL
#define NULL (0L)
#endif
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
static int iseed;
/* Prototypes */
void InitializeRandom (int);
double Random (double, double);
double GetGaussian (double);
double GetLorentz (double);
void Get2Gaussians (double*, double*, double);
double ran1 (int*);
/* ---------------------------------------------------------------------- */
/* Fills n entries of the strided vector x (stride incx) with uniform
 * deviates in [0,1). */
void dvrand (int n, double *x, const int incx)
{
  int i;
  for (i = 0; i < n; i++, x += incx)
    *x = Random (0., 1.);
  return;
}
/* Convenience wrapper: one uniform deviate in [0,1). */
double drand(void) { return Random (0.,1.); }
/* ---------------------------------------------------------------------- */
/* Uniform deviate in [low, high), driven by the module-level seed and the
 * ran1 generator below. Asserts the result stays within the bounds. */
double Random(double low, double high)
{
  double d_r = ran1(&iseed);
  d_r = d_r * (high-low) + low;
  assert(d_r>=low );
  assert(d_r<=high);
  return d_r;
}
/* Gaussian deviate with standard deviation sigma via the Box-Muller
 * transform (polar angle theta, radius from -2*sigma^2*ln(x)).
 * NOTE(review): if Random ever returns exactly 0, log(0) yields -inf and the
 * result is infinite — confirm ran1's range excludes 0 if this matters. */
double GetGaussian(double sigma)
{
  double theta = Random(0.0,2*M_PI);
  double x = Random(0.0,1.0);
  double r = sqrt( -2.0*sigma*sigma*log(x) );
  return r * cos(theta);
}
/* Lorentz (Cauchy) deviate with the given width, via inverse-CDF sampling:
 * tan of a uniform angle in (-pi/2, pi/2). */
double GetLorentz(double width)
{
  double x = Random(-M_PI/2,M_PI/2);
  return width * tan(x);
}
/* Produces two independent Gaussian deviates (std dev sigma) from a single
 * Box-Muller draw, writing them through g1 and g2 (both must be non-NULL). */
void Get2Gaussians(double *g1, double *g2, double sigma)
{
  double theta = Random(0.0,2*M_PI);
  double x = Random(0.0,1.0);
  double r = sqrt( -2.0*sigma*sigma*log(x));
  assert(g1!=NULL);
  assert(g2!=NULL);
  *g1 = r*cos(theta);
  *g2 = r*sin(theta);
}
/* Seeds the generator: a negative flag selects a time-based seed, otherwise
 * flag itself is used. A first ran1 call warms up the generator's tables.
 * Fix: time() was called without <time.h> (implicit declaration, undefined
 * behavior on platforms where time_t is wider than int) — the include is
 * added to this file's header block, and the narrowing is made explicit. */
void InitializeRandom(int flag)
{
  if ( flag < 0 )
    iseed = (int) time(NULL);
  else
    iseed = flag;
  (void) ran1(&iseed);
}
/* Constants for the three combined linear congruential generators used by
 * ran1 (moduli M*, multipliers IA*, increments IC*, reciprocals RM*). */
#define M1 259200
#define IA1 7141
#define IC1 54773
#define RM1 (1.0/M1)
#define M2 134456
#define IA2 8121
#define IC2 28411
#define RM2 (1.0/M2)
#define M3 243000
#define IA3 4561
#define IC3 51349
/* Portable uniform deviate in [0,1): three LCGs combined with a 97-entry
 * shuffle table (the classic Numerical Recipes "ran1"). A negative *idum, or
 * the very first call, (re)initializes the table; *idum is then set to 1. */
double ran1(int *idum)
{
  static long ix1,ix2,ix3;
  static double r[98];
  double temp;
  static int iff=0; /* one-time initialization flag */
  int j;
  if (*idum < 0 || iff == 0) {
    iff=1;
    /* Seed the first generator from *idum, derive the other two from it,
     * then fill the shuffle table. */
    ix1=(IC1-(*idum)) % M1;
    ix1=(IA1*ix1+IC1) % M1;
    ix2=ix1 % M2;
    ix1=(IA1*ix1+IC1) % M1;
    ix3=ix1 % M3;
    for (j=1;j<=97;j++) {
      ix1=(IA1*ix1+IC1) % M1;
      ix2=(IA2*ix2+IC2) % M2;
      r[j]=(ix1+ix2*RM2)*RM1;
    }
    *idum=1;
  }
  /* Advance all three generators; the third only selects the table slot. */
  ix1=(IA1*ix1+IC1) % M1;
  ix2=(IA2*ix2+IC2) % M2;
  ix3=(IA3*ix3+IC3) % M3;
  j =1 + ((97*ix3)/M3);
  temp=r[j];
  r[j]=(ix1+ix2*RM2)*RM1; /* refill the slot just consumed */
  return temp;
}
#undef M1
#undef IA1
#undef IC1
#undef RM1
#undef M2
#undef IA2
#undef IC2
#undef RM2
#undef M3
#undef IA3
#undef IC3
| 17.489655 | 76 | 0.556782 |
38b03905216996ea22ad40561dc1b68493b52a31 | 2,009 | c | C | atividades/at02/mini-calculadora.c | studTon/inf029-evertondasilva | 901b90fb9c52b9659a05bd399aa219cf3869b333 | [
"MIT"
] | 1 | 2021-02-24T23:54:06.000Z | 2021-02-24T23:54:06.000Z | atividades/at02/mini-calculadora.c | studTon/INF029-EvertondaSilva | 901b90fb9c52b9659a05bd399aa219cf3869b333 | [
"MIT"
] | 16 | 2021-04-15T14:05:17.000Z | 2021-07-02T19:17:35.000Z | atividades/at02/mini-calculadora.c | studTon/inf029-evertondasilva | 901b90fb9c52b9659a05bd399aa219cf3869b333 | [
"MIT"
] | null | null | null | #include <stdio.h>
//Calculator operations: addition of two real numbers.
float somar(float numA, float numB)
{
    float soma = numA + numB;
    return soma;
}
//Subtraction: returns numA minus numB.
float subtrair(float numA, float numB)
{
    float diferenca = numA - numB;
    return diferenca;
}
//Multiplication: returns the product of numA and numB.
float multiplicar(float numA, float numB)
{
    float produto = numA * numB;
    return produto;
}
//Division: returns numA / numB. Division by zero is NOT guarded here; the
//caller (main) checks the divisor before calling.
float dividir(float numA, float numB)
{
    float quociente = numA / numB;
    return quociente;
}
//Main program body: reads two operands and an operation code in a loop,
//printing the result, until the user enters 0 (quit) or any code outside 1-4.
int main()
{
    int escolha = 1;
    float numeroA, numeroB, resultado;
    while(escolha >= 1 && escolha <= 4)
    {
        printf("Digite o primeiro numero: ");
        scanf("%f", &numeroA);
        printf("Digite o segundo numero: ");
        scanf("%f", &numeroB);
        printf("\nEscolha uma das operacoes:");
        printf("\n0 - Sair\n1 - Somar\n2 - Subtrair\n3 - Multiplicar\n4 - Dividir\n");
        printf("Sua escolha: ");
        scanf("%d", &escolha);
        if(escolha == 0)
        {
            printf("\nSair\n");
            break;
        }
        switch(escolha)
        {
            //Unreachable: choice 0 is already handled by the break above.
            case 0: printf("\nSair\n"); break;
            case 1: {
                resultado = somar(numeroA, numeroB);
                printf("\nResultado: %.3f", resultado);
            }break;
            case 2: {
                resultado = subtrair(numeroA, numeroB);
                printf("\nResultado: %.3f", resultado);
            }break;
            case 3: {
                resultado = multiplicar(numeroA, numeroB);
                printf("\nResultado: %.3f", resultado);
            }break;
            case 4: {
                //Guard division by zero before delegating to dividir().
                if(numeroB != 0)
                {
                    resultado = dividir(numeroA, numeroB);
                    printf("\nResultado: %.3f", resultado);
                }
                else
                {
                    printf("\nNao pode dividir por zero!");
                }
            }break;
            //Any other code prints a hint; the while condition then exits.
            default: {
                printf("Digite uma das 5 alternativas");
            }break;
        }
        printf("\n\n");
    }
    return 0;
}
50fe3fdc5b3b6c9c6750ef53f2b8e18bb7e2e47b | 3,652 | go | Go | ws/broker.go | aurawing/auramq-ws | 071a6e872d30fc4998f8f934a5fdec294543833d | [
"Apache-2.0"
] | null | null | null | ws/broker.go | aurawing/auramq-ws | 071a6e872d30fc4998f8f934a5fdec294543833d | [
"Apache-2.0"
] | null | null | null | ws/broker.go | aurawing/auramq-ws | 071a6e872d30fc4998f8f934a5fdec294543833d | [
"Apache-2.0"
] | null | null | null | package ws
import (
"log"
"net/http"
"github.com/aurawing/auramq"
"github.com/aurawing/auramq/msg"
"github.com/golang/protobuf/proto"
"github.com/gorilla/websocket"
)
//Broker websocket broker
type Broker struct {
server *http.Server
router *auramq.Router
addr string
auth bool
authFunc func([]byte) bool
readBufferSize int
writeBufferSize int
subscriberBufferSize int
pingWait int
readWait int
writeWait int
}
// NewBroker creates a new websocket broker. Any size/timeout argument passed
// as zero is replaced by its default (buffer sizes in bytes, waits in seconds).
func NewBroker(router *auramq.Router, addr string, auth bool, authFunc func([]byte) bool, subscriberBufferSize, readBufferSize, writeBufferSize, pingWait, readWait, writeWait int) auramq.Broker {
	// orDefault substitutes fallback whenever the caller supplied the zero value.
	orDefault := func(value, fallback int) int {
		if value == 0 {
			return fallback
		}
		return value
	}
	return &Broker{
		router:               router,
		addr:                 addr,
		auth:                 auth,
		authFunc:             authFunc,
		readBufferSize:       orDefault(readBufferSize, 4096),
		writeBufferSize:      orDefault(writeBufferSize, 4096),
		subscriberBufferSize: orDefault(subscriberBufferSize, 1024),
		pingWait:             orDefault(pingWait, 30),
		readWait:             orDefault(readWait, 60),
		writeWait:            orDefault(writeWait, 10),
	}
}
//Run start websocket broker
func (broker *Broker) Run() {
var upgrader = websocket.Upgrader{
ReadBufferSize: broker.readBufferSize,
WriteBufferSize: broker.writeBufferSize,
EnableCompression: true,
CheckOrigin: func(r *http.Request) bool {
return true
},
}
srv := &http.Server{Addr: broker.addr}
http.HandleFunc("/ws", func(w http.ResponseWriter, r *http.Request) {
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
log.Println("subscribe error:", err)
return
}
if broker.NeedAuth() {
_, b, err := conn.ReadMessage()
if err != nil {
log.Println("read auth message failed:", err)
// conn.WriteMessage(websocket.CloseMessage, []byte{})
conn.Close()
return
}
// if msgType != websocket.BinaryMessage {
// log.Println("auth message should be binary format")
// return
// }
if !broker.Auth(b) {
log.Println("auth failed")
// conn.WriteMessage(websocket.CloseMessage, []byte{})
conn.Close()
return
}
}
_, b, err := conn.ReadMessage()
if err != nil {
log.Printf("error when read topics for subscribing: %s\n", err)
// conn.WriteMessage(websocket.CloseMessage, []byte{})
conn.Close()
return
}
subscribeMsg := new(msg.SubscribeMsg)
err = proto.Unmarshal(b, subscribeMsg)
if err != nil {
log.Printf("error when decode topics for subscribing: %s\n", err)
conn.Close()
return
}
subscriber := NewWsSubscriber(broker.router, conn, broker.subscriberBufferSize, broker.pingWait, broker.readWait, broker.writeWait)
subscriber.Run()
broker.router.Register(subscriber, subscribeMsg.Topics)
})
go func() {
if err := srv.ListenAndServe(); err != nil {
log.Printf("httpserver: ListenAndServe() error: %s", err)
}
}()
broker.server = srv
}
//NeedAuth reports whether clients must authenticate before subscribing.
func (broker *Broker) NeedAuth() bool {
	return broker.auth
}
//Auth authenticates a subscriber by delegating the raw auth frame to the
//broker's configured authFunc.
func (broker *Broker) Auth(authMsg []byte) bool {
	return broker.authFunc(authMsg)
}
//Close gracefully shuts down the HTTP server and then closes the router.
//Fix: Shutdown was called with a nil context — http.Server.Shutdown polls
//ctx.Done(), so a nil context panics; context.Background() never cancels,
//preserving the intended "wait until drained" behavior.
func (broker *Broker) Close() {
	if err := broker.server.Shutdown(context.Background()); err != nil {
		log.Printf("httpserver: Shutdown() error: %s", err)
	}
	broker.router.Close()
}
| 25.538462 | 195 | 0.650329 |
908dff4d2569e28ef9e0402585297af47fa5a697 | 685 | kt | Kotlin | src/main/kotlin/com/opt2code/core/obj/PrevV_.kt | alexn0/core | faed6269e08a35a521b8a1c631f4cd3d30b1ec95 | [
"BSD-2-Clause"
] | null | null | null | src/main/kotlin/com/opt2code/core/obj/PrevV_.kt | alexn0/core | faed6269e08a35a521b8a1c631f4cd3d30b1ec95 | [
"BSD-2-Clause"
] | null | null | null | src/main/kotlin/com/opt2code/core/obj/PrevV_.kt | alexn0/core | faed6269e08a35a521b8a1c631f4cd3d30b1ec95 | [
"BSD-2-Clause"
] | null | null | null | /**
* Copyright (C) 2019-present alexn0
* All rights reserved.
*/
package com.opt2code.core.obj
// Pair-like object value holding a source s and target t. PreObjectV behavior
// is supplied by delegation to o (built from the given Data by default).
open class PrevV_<S: ObjectV, T: ObjectV> private constructor(
        override val s: S,
        override val t: T,
        data: Data,
        o: PreObjectV = PreObjectV_.preObjectV(data)
): PreObjectV by o, PrevV<S, T> {
    // Rebuilds this value with the same s/t but a copy of the data carrying
    // the given substitute flag (data_ comes from the PreObjectV delegate).
    override fun new_(substitute: Boolean): PrevV<S, T> = PrevV_(s, t, data_.copy(substitute = substitute))
    // The constituent elements, in (s, t) order.
    override fun elements(): List<ObjectV> {
        return arrayListOf(s, t)
    }
    companion object {
        // Public factory; defaults to ordered Data since the constructor is private.
        fun <S: ObjectV, T: ObjectV> prevV(s: S, t: T, data: Data = Data.data(ordering = true)): PrevV<S, T> = PrevV_<S, T>(s, t, data)
    }
}
c267bccd3a5bb25f9459721aefc4b4d576cebb0f | 2,467 | kt | Kotlin | common/src/main/java/co/railgun/common/StringUtil.kt | ProjectRailgun/Spica | 394f3303fc261289a82e7a39dd77a9e413ca27ed | [
"MIT"
] | null | null | null | common/src/main/java/co/railgun/common/StringUtil.kt | ProjectRailgun/Spica | 394f3303fc261289a82e7a39dd77a9e413ca27ed | [
"MIT"
] | null | null | null | common/src/main/java/co/railgun/common/StringUtil.kt | ProjectRailgun/Spica | 394f3303fc261289a82e7a39dd77a9e413ca27ed | [
"MIT"
] | null | null | null | package co.railgun.common
import co.railgun.common.model.Bangumi
import co.railgun.common.model.Episode
import co.railgun.common.model.EpisodeDetail
import java.text.SimpleDateFormat
import java.util.*
import java.util.concurrent.TimeUnit
/**
* Created by roya on 2017/7/20.
*/
// String helpers for display: weekday names, mm:ss formatting, and
// locale-aware title selection between Chinese and original names.
object StringUtil {
    // NOTE(review): SimpleDateFormat is not thread-safe; fine if all callers
    // are on the main thread — confirm.
    private var dayFormatter = SimpleDateFormat("EEEE", Locale.getDefault())
    private const val oneDay = 86400000
    // Maps a day index to a localized weekday name by formatting an epoch
    // offset. The extra 3-day shift appears to align the Unix epoch (a
    // Thursday) with the expected index-0 weekday — TODO confirm mapping.
    fun dayOfWeek(day: Int): String {
        return dayFormatter.format(day * oneDay + 3 * oneDay)
    }
    // Left-pads a 1-character numeric string with a leading zero.
    private fun addPadding(string: String): String {
        return (if (string.length < 2) "0" else "") + string
    }
    // Formats a duration in milliseconds as zero-padded "MM:SS".
    fun microsecondFormat(ms: Long): String =
            addPadding("" + TimeUnit.MILLISECONDS.toMinutes(ms)) +
                    ":" +
                    addPadding(
                            "" + (TimeUnit.MILLISECONDS.toSeconds(ms) -
                                    TimeUnit.MINUTES.toSeconds(TimeUnit.MILLISECONDS.toMinutes(ms)))
                    )
    // Preferred display name: Chinese name under a Chinese locale, original
    // otherwise, falling back to whichever is non-empty.
    fun getName(bangumi: Bangumi): String = when {
        isDisplayLanguageChinese -> when {
            bangumi.name_cn.isNotEmpty() -> bangumi.name_cn
            else -> bangumi.name
        }
        else -> when {
            bangumi.name.isNotEmpty() -> bangumi.name
            else -> bangumi.name_cn
        }
    }
    // Same preference logic for episodes (expressed via isEmpty rather than
    // isNotEmpty, but equivalent in effect).
    fun getName(episode: Episode): String = when {
        isDisplayLanguageChinese -> when {
            episode.name_cn.isEmpty() -> episode.name
            else -> episode.name_cn
        }
        else -> when {
            episode.name.isEmpty() -> episode.name_cn
            else -> episode.name
        }
    }
    // Same preference logic for episode detail records.
    fun getName(episodeDetail: EpisodeDetail): String = when {
        isDisplayLanguageChinese -> when {
            episodeDetail.name_cn.isEmpty() -> episodeDetail.name
            else -> episodeDetail.name_cn
        }
        else -> when {
            episodeDetail.name.isEmpty() -> episodeDetail.name_cn
            else -> episodeDetail.name
        }
    }
    // Subtitle is the opposite language of getName: the name NOT chosen as
    // the title, falling back to the other when empty.
    fun subTitle(bangumi: Bangumi): String = when {
        isDisplayLanguageChinese -> when {
            bangumi.name.isNotEmpty() -> bangumi.name
            else -> bangumi.name_cn
        }
        else -> when {
            bangumi.name_cn.isNotEmpty() -> bangumi.name_cn
            else -> bangumi.name
        }
    }
    // True when the device's display language is Chinese.
    private val isDisplayLanguageChinese: Boolean
        get() = Locale.getDefault().displayLanguage == Locale.CHINESE.displayLanguage
}
| 30.45679 | 92 | 0.593028 |
df9507808c840025d31a6e46207ab4b626e715be | 146 | ts | TypeScript | src/ts/util/anyWindow.ts | kkysen/Polybius | ee63653fbaafa6719413b04020e678e8f6be6963 | [
"MIT"
] | 1 | 2018-07-17T16:26:17.000Z | 2018-07-17T16:26:17.000Z | src/ts/util/anyWindow.ts | kkhan01/Polybius | ee63653fbaafa6719413b04020e678e8f6be6963 | [
"MIT"
] | null | null | null | src/ts/util/anyWindow.ts | kkhan01/Polybius | ee63653fbaafa6719413b04020e678e8f6be6963 | [
"MIT"
] | 2 | 2018-11-15T23:33:05.000Z | 2019-02-16T01:54:04.000Z | export const anyWindow: any = window;
export const globals = function(o: Object): void {
Object.assign(anyWindow, o);
};
globals({globals}); | 20.857143 | 50 | 0.69863 |
3e95318e93734e49f717343482776d7b3ffbf97f | 607 | h | C | YJGoogleMapDemo/Pods/YJUIKit/YJUIKit/YJWebViewController/ViewModels/YJWebViewViewModel/YJWebViewViewModel.h | MOyejin/YJGoogleMap | abb78f4ba8e0b6d99c2fa030aa476935ce4f0a1e | [
"MIT"
] | 1 | 2020-12-01T05:08:38.000Z | 2020-12-01T05:08:38.000Z | YJGoogleMapDemo/Pods/YJUIKit/YJUIKit/YJWebViewController/ViewModels/YJWebViewViewModel/YJWebViewViewModel.h | MOyejin/YJGoogleMap | abb78f4ba8e0b6d99c2fa030aa476935ce4f0a1e | [
"MIT"
] | null | null | null | YJGoogleMapDemo/Pods/YJUIKit/YJUIKit/YJWebViewController/ViewModels/YJWebViewViewModel/YJWebViewViewModel.h | MOyejin/YJGoogleMap | abb78f4ba8e0b6d99c2fa030aa476935ce4f0a1e | [
"MIT"
] | null | null | null | //
// YJWebViewViewModel.h
// YJFoundationDemo
//
// Created by Moyejin668 on 2020/1/16.
// Copyright © 2020 Moyejin. All rights reserved.
//
#import <Foundation/Foundation.h>
#import "YJWebViewController.h"
NS_ASSUME_NONNULL_BEGIN
/// View model bound to a YJWebViewController.
@interface YJWebViewViewModel : NSObject
/**
 The read-only YJWebViewController this view model is bound to (weak to avoid
 a retain cycle with the owning controller)
 */
@property (nonatomic, weak, readonly) YJWebViewController *yj_webViewController;
/**
 Initializes a YJWebViewViewModel bound to the given controller
 @param controller the YJWebViewController to bind
 @return an initialized YJWebViewViewModel
 */
- (instancetype)initWebViewModelWithController:(YJWebViewController *)controller;
@end
NS_ASSUME_NONNULL_END
| 18.393939 | 81 | 0.782537 |
12afa6e9b367d151876d3528075930f48af99b06 | 2,181 | h | C | Source/VTKExtensions/Client/vtkSMSceneContourSourceProxy.h | developkits/cmb | caaf9cd7ffe0b7c1ac3be9edbce0f9430068d2cb | [
"BSD-3-Clause"
] | null | null | null | Source/VTKExtensions/Client/vtkSMSceneContourSourceProxy.h | developkits/cmb | caaf9cd7ffe0b7c1ac3be9edbce0f9430068d2cb | [
"BSD-3-Clause"
] | null | null | null | Source/VTKExtensions/Client/vtkSMSceneContourSourceProxy.h | developkits/cmb | caaf9cd7ffe0b7c1ac3be9edbce0f9430068d2cb | [
"BSD-3-Clause"
] | null | null | null | //=========================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//=========================================================================
// .NAME vtkSMSceneContourSourceProxy - "data-centric" proxy for VTK source on a server
// .SECTION Description
// vtkSMSceneContourSourceProxy adds a CopyData method to the vtkSMSourceProxy API
// to give a "data-centric" behaviour; the output data of the input
// vtkSMSourceProxy (to CopyData) is copied by the VTK object managed
// by the vtkSMSceneContourSourceProxy.
// .SECTION See Also
// vtkSMSourceProxy vtkSMNewWidgetRepresentationProxy
#ifndef __vtkSMSceneContourSourceProxy_h
#define __vtkSMSceneContourSourceProxy_h
#include "cmbSystemConfig.h"
#include "vtkCMBClientModule.h" // For export macro
#include "vtkSMSourceProxy.h"
class vtkSMNewWidgetRepresentationProxy;
class VTKCMBCLIENT_EXPORT vtkSMSceneContourSourceProxy : public vtkSMSourceProxy
{
public:
  static vtkSMSceneContourSourceProxy* New();
  vtkTypeMacro(vtkSMSceneContourSourceProxy, vtkSMSourceProxy);
  void PrintSelf(ostream& os, vtkIndent indent) override;
  // Description:
  // Copies data from a widget proxy to object represented by this
  // source proxy object.
  void CopyData(vtkSMNewWidgetRepresentationProxy* widgetProxy);
  // Description:
  // Copies the data this proxy output to the input of the widget;
  // 'closed' is set to indicate whether the contour is closed.
  void EditData(vtkSMNewWidgetRepresentationProxy* widgetProxy, bool& closed);
  // Description:
  // Copies the data from a source proxy using the GetOutput method.
  void ExtractContour(vtkSMSourceProxy* sourceProxy);
  //BTX
protected:
  vtkSMSceneContourSourceProxy();
  ~vtkSMSceneContourSourceProxy() override;
private:
  vtkSMSceneContourSourceProxy(const vtkSMSceneContourSourceProxy&); // Not implemented
  void operator=(const vtkSMSceneContourSourceProxy&); // Not implemented
  //ETX
};
#endif
| 36.35 | 87 | 0.731316 |
f072f90f05851bf118abac79a95ef4ec1e8b1416 | 614 | js | JavaScript | test/polyfill.js | zoubin/tick-node | 739a74bf8e600fb2a56682bece3d5e9f29ce691f | [
"MIT"
] | 16 | 2016-01-08T14:56:38.000Z | 2022-02-10T11:57:57.000Z | test/polyfill.js | zoubin/debug-nexttick | 739a74bf8e600fb2a56682bece3d5e9f29ce691f | [
"MIT"
] | null | null | null | test/polyfill.js | zoubin/debug-nexttick | 739a74bf8e600fb2a56682bece3d5e9f29ce691f | [
"MIT"
] | 2 | 2017-05-23T07:51:29.000Z | 2017-05-23T15:58:25.000Z | var test = require('tap').test
var debugNextTick = require('..')
// Re-installs the process.nextTick polyfill with logging disabled.
function polyfill() {
  debugNextTick.polyfill({ log: false })
}
// tap test: the polyfill's _tick counter stays undefined until NODE_DEBUG
// enables it, then increments per tick; re-polyfilling must be idempotent.
test('polyfill', function (t) {
  t.plan(4)
  polyfill()
  t.equal(process.nextTick._tick, undefined, 'no debug')
  process.nextTick(function () {
    // Enable debug mode and install again: counter starts at 0.
    process.env.NODE_DEBUG = 'nexttick'
    polyfill()
    t.equal(process.nextTick._tick, 0, 'initialized')
    process.nextTick(function () {
      t.equal(process.nextTick._tick, 1, 'first tick')
      var nextTick = process.nextTick
      // Installing a second time must not replace the wrapped function.
      polyfill()
      t.equal(process.nextTick, nextTick, 'polyfill twice')
    })
  })
})
| 20.466667 | 59 | 0.648208 |
bcbd1ba80f48975fa39944d102632a469ade212b | 10,298 | js | JavaScript | server/src/main/resources/eclairjs/ml/classification/DecisionTreeClassifier.js | ckadner/eclairjs | e354253af9a92cb868783060feb6fb7a6ee12403 | [
"Apache-2.0"
] | 138 | 2016-10-10T22:18:46.000Z | 2022-01-20T18:47:52.000Z | server/src/main/resources/eclairjs/ml/classification/DecisionTreeClassifier.js | ckadner/eclairjs | e354253af9a92cb868783060feb6fb7a6ee12403 | [
"Apache-2.0"
] | 27 | 2016-10-12T19:58:45.000Z | 2022-02-16T13:55:39.000Z | server/src/main/resources/eclairjs/ml/classification/DecisionTreeClassifier.js | ckadner/eclairjs | e354253af9a92cb868783060feb6fb7a6ee12403 | [
"Apache-2.0"
] | 25 | 2016-09-30T18:08:58.000Z | 2020-07-07T03:05:53.000Z | /*
* Copyright 2016 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
(function () {
var PipelineStage = require(EclairJS_Globals.NAMESPACE + '/ml/PipelineStage');
var Logger = require(EclairJS_Globals.NAMESPACE + '/Logger');
var Utils = require(EclairJS_Globals.NAMESPACE + '/Utils');
/**
* @classdesc
* [Decision tree]{@link http://en.wikipedia.org/wiki/Decision_tree_learning} learning algorithm
* for classification.
* It supports both binary and multiclass labels, as well as both continuous and categorical
* features.
* @class
* @extends module:eclairjs/ml.PipelineStage
* @memberof module:eclairjs/ml/classification
* @param {string} [uid]
*/
var DecisionTreeClassifier = function(uid) {
  this.logger = Logger.getLogger("ml.classification.DecisionTreeClassifier_js");
  var jvmObject;
  // `uid` may be an already-constructed Java object to wrap, or a uid string.
  if (uid) {
    if (uid instanceof org.apache.spark.ml.classification.DecisionTreeClassifier) {
      // Wrap the existing Java object as-is.
      jvmObject = uid;
    } else {
      // Treat the argument as a uid string for a fresh Java object.
      jvmObject = new org.apache.spark.ml.classification.DecisionTreeClassifier(uid);
    }
  } else {
    // No argument: let Spark generate the uid.
    jvmObject = new org.apache.spark.ml.classification.DecisionTreeClassifier();
  }
  PipelineStage.call(this, jvmObject);
};
// Classic prototypal inheritance from PipelineStage.
DecisionTreeClassifier.prototype = Object.create(PipelineStage.prototype);
DecisionTreeClassifier.prototype.constructor = DecisionTreeClassifier;
/**
* Accessor for supported impurities: entropy, gini
* @returns {string[]}
*/
DecisionTreeClassifier.prototype.supportedImpurities = function() {
return this.getJavaObject().supportedImpurities();
};
/**
* An immutable unique ID for the object and its derivatives.
* @returns {string}
*/
DecisionTreeClassifier.prototype.uid = function () {
return this.getJavaObject().uid();
};
/**
* @param {integer} value
* @returns {module:eclairjs/ml/classification.DecisionTreeClassifier}
*/
DecisionTreeClassifier.prototype.setMaxDepth = function(value) {
var javaObject = this.getJavaObject().setMaxDepth(value);
return new DecisionTreeClassifier(javaObject);
};
/**
* @param {integer} value
* @returns {module:eclairjs/ml/classification.DecisionTreeClassifier}
*/
DecisionTreeClassifier.prototype.setMaxBins = function(value) {
var javaObject = this.getJavaObject().setMaxBins(value);
return new DecisionTreeClassifier(javaObject);
};
/**
* @param {integer} value
* @returns {module:eclairjs/ml/classification.DecisionTreeClassifier}
*/
DecisionTreeClassifier.prototype.setMinInstancesPerNode = function(value) {
var javaObject = this.getJavaObject().setMinInstancesPerNode(value);
return new DecisionTreeClassifier(javaObject);
};
/**
* @param {float} value
* @returns {module:eclairjs/ml/classification.DecisionTreeClassifier}
*/
DecisionTreeClassifier.prototype.setMinInfoGain = function(value) {
var javaObject = this.getJavaObject().setMinInfoGain(value);
return new DecisionTreeClassifier(javaObject);
};
/**
* @param {integer} value
* @returns {module:eclairjs/ml/classification.DecisionTreeClassifier}
*/
DecisionTreeClassifier.prototype.setMaxMemoryInMB = function(value) {
var javaObject = this.getJavaObject().setMaxMemoryInMB(value);
return new DecisionTreeClassifier(javaObject);
};
/**
* @param {boolean} value
* @returns {module:eclairjs/ml/classification.DecisionTreeClassifier}
*/
DecisionTreeClassifier.prototype.setCacheNodeIds = function(value) {
var javaObject = this.getJavaObject().setCacheNodeIds(value);
return new DecisionTreeClassifier(javaObject);
};
/**
* @param {integer} value
* @returns {module:eclairjs/ml/classification.DecisionTreeClassifier}
*/
DecisionTreeClassifier.prototype.setCheckpointInterval = function(value) {
var javaObject = this.getJavaObject().setCheckpointInterval(value);
return new DecisionTreeClassifier(javaObject);
};
/**
* @param {string} value
* @returns {module:eclairjs/ml/classification.DecisionTreeClassifier}
*/
DecisionTreeClassifier.prototype.setImpurity = function(value) {
var javaObject = this.getJavaObject().setImpurity(value);
return new DecisionTreeClassifier(javaObject);
};
/**
* @param {integer} value
* @returns {module:eclairjs/ml/classification.DecisionTreeClassifier}
*/
DecisionTreeClassifier.prototype.setSeed = function(value) {
var javaObject = this.getJavaObject().setSeed(value);
return new DecisionTreeClassifier(javaObject);
};
/**
* @param {module:eclairjs/ml/param.ParamMap} extra
* @returns {module:eclairjs/ml/classification.DecisionTreeClassifier}
*/
DecisionTreeClassifier.prototype.copy = function(extra) {
var extra_uw = Utils.unwrapObject(extra);
var javaObject = this.getJavaObject().copy(extra_uw);
return new DecisionTreeClassifier(javaObject);
};
/**
* Validates and transforms the input schema with the provided param map.
* @param {module:eclairjs/sql/types.StructType} schema
* @param {boolean} fitting whether this is in fitting
* @param {module:eclairjs/sql/types.DataType} featuresDataType SQL DataType for FeaturesType.
* E.g., {@link module:eclairjs/sql/types.VectorUDT}for vector features
* @returns {module:eclairjs/sql/types.StructType}
*/
DecisionTreeClassifier.prototype.validateAndTransformSchema = function (schema, fitting, featuresDataType) {
var schema_uw = Utils.unwrapObject(schema);
var featuresDataType_uw = Utils.unwrapObject(featuresDataType);
var javaObject = this.getJavaObject().validateAndTransformSchema(schema_uw, fitting, featuresDataType_uw);
return Utils.javaToJs(javaObject);
};
/**
* Param for raw prediction (a.k.a. confidence) column name.
* @returns {module:eclairjs/ml/param.Param}
*/
DecisionTreeClassifier.prototype.rawPredictionCol = function() {
var javaObject = this.getJavaObject().rawPredictionCol();
return Utils.javaToJs(javaObject);
};
/**
* @returns {string}
*/
DecisionTreeClassifier.prototype.getRawPredictionCol = function() {
return this.getJavaObject().getRawPredictionCol();
};
/**
* Param for label column name.
* @returns {module:eclairjs/ml/param.Param}
*/
DecisionTreeClassifier.prototype.labelCol = function() {
var javaObject = this.getJavaObject().labelCol();
return Utils.javaToJs(javaObject);
};
/**
* @returns {string}
*/
DecisionTreeClassifier.prototype.getLabelCol = function() {
return this.getJavaObject().getLabelCol();
};
/**
* @param {string} value
* @returns {module:eclairjs/ml/classification.DecisionTreeClassifier} value
*/
DecisionTreeClassifier.prototype.setLabelCol = function(value) {
return Utils.javaToJs(this.getJavaObject().setLabelCol(value));
};
/**
* Param for features column name.
* @returns {module:eclairjs/ml/param.Param}
*/
DecisionTreeClassifier.prototype.featuresCol = function() {
var javaObject = this.getJavaObject().featuresCol();
return Utils.javaToJs(javaObject);
};
/**
* @returns {string}
*/
DecisionTreeClassifier.prototype.getFeaturesCol = function() {
return this.getJavaObject().getFeaturesCol();
};
/**
* @param {string} value
* @returns {module:eclairjs/ml/classification.DecisionTreeClassifier} value
*/
DecisionTreeClassifier.prototype.setFeaturesCol = function(value) {
return Utils.javaToJs(this.getJavaObject().setFeaturesCol(value));
};
/**
* Param for prediction column name.
* @returns {module:eclairjs/ml/param.Param}
*/
DecisionTreeClassifier.prototype.predictionCol = function() {
var javaObject = this.getJavaObject().predictionCol();
return Utils.javaToJs(javaObject);
};
/**
* @returns {string}
*/
DecisionTreeClassifier.prototype.getPredictionCol = function() {
return this.getJavaObject().getPredictionCol();
};
/**
* @param {string} value
* @returns {module:eclairjs/ml/classification.DecisionTreeClassifier} value
*/
DecisionTreeClassifier.prototype.setPredictionCol = function(value) {
return Utils.javaToJs(this.getJavaObject().setPredictionCol(value));
};
//
// static methods
//
/**
* @param {string} path
* @returns {module:eclairjs/ml/classification.DecisionTreeClassifier}
*/
DecisionTreeClassifier.load = function(path) {
var javaObject = org.apache.spark.ml.classification.DecisionTreeClassifier.load(path);
return new DecisionTreeClassifier(javaObject);
};
module.exports = DecisionTreeClassifier;
})(); | 35.510345 | 114 | 0.637308 |
15f995974c06eba5637d7f77bf679de3b45bef89 | 2,011 | rb | Ruby | lib/jekyll/zettel/tags.rb | mgerzabek/jekyll-zettel | 7f3b47ba382cb51c3dca81c4af89efd615bf3615 | [
"MIT"
] | 1 | 2022-01-19T09:42:53.000Z | 2022-01-19T09:42:53.000Z | lib/jekyll/zettel/tags.rb | mgerzabek/jekyll-zettel | 7f3b47ba382cb51c3dca81c4af89efd615bf3615 | [
"MIT"
] | null | null | null | lib/jekyll/zettel/tags.rb | mgerzabek/jekyll-zettel | 7f3b47ba382cb51c3dca81c4af89efd615bf3615 | [
"MIT"
] | null | null | null | module Jekyll
module Zettel
# Generate tags.json from page front matter
#
# Jekyll generator that scans every "glosse" page and fills three site-wide
# catalogs, each serialized via write_catalog (provided by the Jekyll::Zettel
# mixin):
#   tags       -- page slug => tag metadata from the page front matter
#   aliases    -- tag name (and any declared aliases) => owning page
#   tag2glosse -- tag name => slugs of all pages carrying that tag
class Tags < Jekyll::Generator
  include Jekyll::Zettel

  # Matches glosse index pages; the directory name is captured as the slug.
  SLUG_FORMAT = %r{glosse/(?<slug>.*)/index.(?<ext>html|md)}i.freeze

  attr_reader :site

  # Jekyll entry point: reset the catalogs, fill them, write them out.
  def generate(site)
    @site = site
    @site.data['aliases'] = {}
    @site.data['tags'] = {}
    @site.data['tag2glosse'] = {}
    register
    write_catalog 'aliases'
    write_catalog 'tags'
    write_catalog 'tag2glosse'
  end

  # Registers every page whose path matches SLUG_FORMAT. Order matters:
  # register_tag caches the slug on the page data, which register_tags and
  # register_aliases read afterwards.
  def register
    @site.pages.each do |page|
      next unless SLUG_FORMAT.match?(page.path.to_s)
      register_tag(page)
      register_tags(page)
      register_aliases(page)
    end
  end

  # Records the page's tag metadata under its slug and caches the slug on
  # the page itself (side effect relied upon by the other register_* calls).
  def register_tag(doc)
    parts = doc.path.to_s.match(SLUG_FORMAT)
    @site.data['tags'][parts[:slug]] = {
      'slug' => parts[:slug],
      'tag' => doc.data['tag'] || 'Missing @tag',
      'title' => doc.data['title'] || 'Missing @title',
      'description' => doc.data['description'] || 'Missing @description',
      'tags' => doc.data['tags']
    }
    doc.data['slug'] = parts[:slug]
  end

  # Maps the page's tag name -- and each entry of the optional 'aliases'
  # front-matter list -- to the page's slug, tag and description.
  def register_aliases(doc)
    @site.data['aliases'][doc.data['tag']] = {
      'slug' => doc.data['slug'],
      'tag' => doc.data['tag'],
      'description' => doc.data['description']
    }
    return unless doc.data.key?('aliases')
    doc.data['aliases'].each do |item|
      @site.data['aliases'][item] = {
        'slug' => doc.data['slug'],
        'tag' => doc.data['tag'],
        'description' => doc.data['description']
      }
    end
  end

  # Appends the page's slug to the slug list of every tag it carries.
  def register_tags(doc)
    return unless doc.data.key?('tags')
    doc.data['tags'].each do |tag|
      @site.data['tag2glosse'][tag] = [] unless @site.data['tag2glosse'].key?(tag)
      @site.data['tag2glosse'][tag] << doc.data['slug']
    end
  end
end
end
end
| 26.460526 | 86 | 0.521631 |
5fe2f4d04dfcb1cf2f2a1787fd8dbfe8e90eaf05 | 912 | h | C | include/pressswitch.h | chris-hunt-98/sokoban-3D | a27b1678b02566d19287b2aee080feb3f1ae66e9 | [
"MIT"
] | null | null | null | include/pressswitch.h | chris-hunt-98/sokoban-3D | a27b1678b02566d19287b2aee080feb3f1ae66e9 | [
"MIT"
] | null | null | null | include/pressswitch.h | chris-hunt-98/sokoban-3D | a27b1678b02566d19287b2aee080feb3f1ae66e9 | [
"MIT"
] | null | null | null | #ifndef PRESSSWITCH_H
#define PRESSSWITCH_H
#include "objectmodifier.h"
#include "switch.h"
class MapFileI;
class MapFileO;
class RoomMap;
class DeltaFrame;
class GraphicsManager;
// Switch modifier attached to a game object. Judging by the name and the
// should_toggle hook it reacts to objects being placed on / removed from it;
// NOTE(review): confirm the exact trigger rules against the implementation.
class PressSwitch: public Switch {
public:
    PressSwitch(GameObject* parent, int color, bool persistent, bool active);
    virtual ~PressSwitch();
    // Display name and modifier type code used for serialization dispatch.
    std::string name();
    ModCode mod_code();
    // Map-file persistence.
    void serialize(MapFileO& file);
    static void deserialize(MapFileI&, RoomMap*, GameObject*);
    // Invoked during move processing so the switch can react to map changes.
    void map_callback(RoomMap*, DeltaFrame*, MoveProcessor*);
    void check_send_signal(RoomMap*, DeltaFrame*);
    bool should_toggle(RoomMap*);
    // Hooks run when the parent object is placed on / taken off the map.
    void setup_on_put(RoomMap*);
    void cleanup_on_take(RoomMap*);
    void draw(GraphicsManager*, FPoint3);
    std::unique_ptr<ObjectModifier> duplicate(GameObject*, RoomMap*, DeltaFrame*);
private:
    int color_;  // color this switch is keyed to
    friend class ModifierTab;  // editor UI accesses color_ directly
};
#endif // PRESSSWITCH_H
| 21.209302 | 82 | 0.726974 |
e2dabd1f4b52550e7949b855cf6d5ab24213540d | 989 | kt | Kotlin | app/src/main/java/com/github/satoshun/example/main/ViewPropertyAnimatorFragment.kt | satoshun-android-example/DependencyTracker | ecf829f817ac033ae065dc579303ffc52faa1417 | [
"Apache-2.0"
] | null | null | null | app/src/main/java/com/github/satoshun/example/main/ViewPropertyAnimatorFragment.kt | satoshun-android-example/DependencyTracker | ecf829f817ac033ae065dc579303ffc52faa1417 | [
"Apache-2.0"
] | null | null | null | app/src/main/java/com/github/satoshun/example/main/ViewPropertyAnimatorFragment.kt | satoshun-android-example/DependencyTracker | ecf829f817ac033ae065dc579303ffc52faa1417 | [
"Apache-2.0"
] | null | null | null | package com.github.satoshun.example.main
import android.os.Bundle
import android.view.View
import android.view.animation.OvershootInterpolator
import androidx.fragment.app.Fragment
import androidx.lifecycle.lifecycleScope
import com.github.satoshun.example.R
import com.github.satoshun.example.databinding.PropertyValuesHolderFragBinding
import kotlinx.coroutines.delay
import kotlinx.coroutines.launch
/**
 * Demonstrates the ViewPropertyAnimator API: every two seconds the title view
 * is snapped to half its horizontal scale and animated back to full scale
 * with an overshoot interpolator.
 */
class ViewPropertyAnimatorFragment : Fragment(R.layout.property_values_holder_frag) {

  // Bound in onViewCreated; only valid while the fragment's view exists.
  private lateinit var binding: PropertyValuesHolderFragBinding

  override fun onViewCreated(view: View, savedInstanceState: Bundle?) {
    super.onViewCreated(view, savedInstanceState)
    binding = PropertyValuesHolderFragBinding.bind(view)
    // lifecycleScope cancels this coroutine when the lifecycle is destroyed,
    // so the endless loop does not outlive the fragment.
    lifecycleScope.launch {
      while (true) {
        delay(2000)
        // Snap to half width, then animate back to full width.
        binding.title.scaleX = 0.5f
        binding.title.animate()
          .scaleX(1.0f)
          .setInterpolator(OvershootInterpolator())
          .start()
      }
    }
  }
}
| 29.969697 | 85 | 0.763397 |
6b6bec13459e1b3f4d16f78f85d285650d38eac0 | 586 | h | C | nrf51-marine-display/src/config.h | chacal/arduino | 6b77e59138a36d0627ab6f529e6f2ea7e65bcdfd | [
"Apache-2.0"
] | 4 | 2016-12-10T13:20:52.000Z | 2019-10-25T19:47:44.000Z | nrf51-marine-display/src/config.h | chacal/arduino | 6b77e59138a36d0627ab6f529e6f2ea7e65bcdfd | [
"Apache-2.0"
] | null | null | null | nrf51-marine-display/src/config.h | chacal/arduino | 6b77e59138a36d0627ab6f529e6f2ea7e65bcdfd | [
"Apache-2.0"
] | 1 | 2019-05-03T17:31:38.000Z | 2019-05-03T17:31:38.000Z | #pragma once
// Maximum encoded size of one display command (PB_ prefix suggests a
// protobuf encoding -- confirm against the message definitions).
#define PB_CMD_MAX_SIZE 35 // Enough to transfer 20 character string
// Maximum number of commands in one command sequence.
#define PB_CMD_SEQ_MAX_LENGTH 5
// A BLE message can carry a full command sequence at maximum command size.
#define MAX_BLE_MESSAGE_SIZE (PB_CMD_MAX_SIZE * PB_CMD_SEQ_MAX_LENGTH)
// One app-scheduler slot per queued command plus one spare.
#define APP_SCHEDULER_QUEUE_SIZE (PB_CMD_SEQ_MAX_LENGTH + 1)
#define APP_SCHEDULER_EVENT_SIZE sizeof(DisplayCommand)
#define DISPLAY_LIST_LENGTH 10
// The required GATT attribute table size depends on the SoftDevice BLE API
// version in use.
#if (NRF_SD_BLE_API_VERSION == 2)
#define GATT_ATTR_TABLE_SIZE 420 // Minimum needed - found by trial & error
#else
#define GATT_ATTR_TABLE_SIZE 433 // Minimum needed - found by trial & error
#endif
| 34.470588 | 81 | 0.745734 |
8581b12a1ade18f5f071e59e20aa535bd5eacf89 | 403 | js | JavaScript | jsonp/320581.js | c-tsy/data_location | f7db57bf8cfec889b7d90f5d30b3a062490b2125 | [
"MIT"
] | null | null | null | jsonp/320581.js | c-tsy/data_location | f7db57bf8cfec889b7d90f5d30b3a062490b2125 | [
"MIT"
] | null | null | null | jsonp/320581.js | c-tsy/data_location | f7db57bf8cfec889b7d90f5d30b3a062490b2125 | [
"MIT"
] | null | null | null | if(_area_jsonp_320581){_area_jsonp_320581({"320581001":"虞山街道","320581002":"常福街道","320581003":"琴川街道","320581004":"莫城街道","320581005":"碧溪街道","320581006":"东南街道","320581101":"梅李镇","320581102":"海虞镇","320581104":"古里镇","320581105":"沙家浜镇","320581106":"支塘镇","320581107":"董浜镇","320581110":"辛庄镇","320581111":"尚湖镇","320581401":"常熟经济技术开发区","320581402":"常熟高新技术产业开发区","320581406":"常熟昆承湖管理委员会","320581407":"常熟国家大学科技园"})} | 403 | 403 | 0.71464 |
228aae4223ab38c1f780ec6b9758909108f360ea | 272 | asm | Assembly | dev_os/boot_simpleos_print.asm | sdtm66/OS_dev | 947bed3dead0f59e3e71811c57f46f05ae5b0db9 | [
"MIT"
] | null | null | null | dev_os/boot_simpleos_print.asm | sdtm66/OS_dev | 947bed3dead0f59e3e71811c57f46f05ae5b0db9 | [
"MIT"
] | null | null | null | dev_os/boot_simpleos_print.asm | sdtm66/OS_dev | 947bed3dead0f59e3e71811c57f46f05ae5b0db9 | [
"MIT"
] | null | null | null | print:
pusha              ; save all general-purpose registers (ax is clobbered below)
start:
mov al,[bx]        ; load next byte of the zero-terminated string at [bx]
cmp al,0
je done            ; a 0 byte terminates the string
mov ah,0x0e        ; BIOS int 10h "teletype output" function
int 0x10           ; print the character in al
add bx,1           ; advance the string pointer
jmp start
done:
popa               ; restore registers
ret
; Prints a newline (line feed then carriage return) via BIOS teletype output.
print_nl:
pusha
mov ah,0x0e        ; BIOS int 10h "teletype output" function
mov al,0x0a        ; line feed
int 0x10
mov al,0x0d        ; carriage return
int 0x10
popa
ret | 12.363636 | 25 | 0.551471 |
77c9858e01db8c858985bb8fef70f77803afc0e8 | 1,859 | kt | Kotlin | android/src/main/kotlin/com/tomasznajda/ktx/android/LogExt.kt | tomasznajda/useful-ktx | 5efbd11492dbcc634c34990d8dcb90e9fd07ce17 | [
"Apache-2.0"
] | 19 | 2018-10-29T20:02:17.000Z | 2020-10-22T08:42:40.000Z | android/src/main/kotlin/com/tomasznajda/ktx/android/LogExt.kt | tomasznajda/useful-ktx | 5efbd11492dbcc634c34990d8dcb90e9fd07ce17 | [
"Apache-2.0"
] | 1 | 2019-02-20T03:15:14.000Z | 2019-02-28T08:05:57.000Z | android/src/main/kotlin/com/tomasznajda/ktx/android/LogExt.kt | tomasznajda/useful-ktx | 5efbd11492dbcc634c34990d8dcb90e9fd07ce17 | [
"Apache-2.0"
] | 3 | 2019-02-03T14:55:54.000Z | 2021-01-17T19:48:38.000Z | package com.tomasznajda.ktx.android
import android.util.Log
/** Logs [msg] and an optional [e] at ASSERT ("what a terrible failure") level. */
fun logwtf(tag: String, msg: String = "", e: Throwable? = null) =
    if (e == null) Log.wtf(tag, msg) else Log.wtf(tag, msg, e)

/** Logs [msg] and an optional [e] at ERROR level. */
fun loge(tag: String, msg: String = "", e: Throwable? = null) =
    if (e == null) Log.e(tag, msg) else Log.e(tag, msg, e)

/** Logs [msg] and an optional [e] at WARN level. */
fun logw(tag: String, msg: String = "", e: Throwable? = null) =
    if (e == null) Log.w(tag, msg) else Log.w(tag, msg, e)

/** Logs [msg] and an optional [e] at INFO level. */
fun logi(tag: String, msg: String = "", e: Throwable? = null) =
    if (e == null) Log.i(tag, msg) else Log.i(tag, msg, e)

/** Logs [msg] and an optional [e] at DEBUG level. */
fun logd(tag: String, msg: String = "", e: Throwable? = null) =
    if (e == null) Log.d(tag, msg) else Log.d(tag, msg, e)

/** Logs [msg] and an optional [e] at VERBOSE level. */
fun logv(tag: String, msg: String = "", e: Throwable? = null) =
    if (e == null) Log.v(tag, msg) else Log.v(tag, msg, e)

/** Logs the receiver's toString(), optionally transformed by [format], at ERROR level. */
fun Any.loge(tag: String, format: (String) -> String = { it }) = Log.e(tag, format(toString()))

/** Logs the receiver's toString(), optionally transformed by [format], at WARN level. */
fun Any.logw(tag: String, format: (String) -> String = { it }) = Log.w(tag, format(toString()))

/** Logs the receiver's toString(), optionally transformed by [format], at INFO level. */
fun Any.logi(tag: String, format: (String) -> String = { it }) = Log.i(tag, format(toString()))

/** Logs the receiver's toString(), optionally transformed by [format], at DEBUG level. */
fun Any.logd(tag: String, format: (String) -> String = { it }) = Log.d(tag, format(toString()))

/** Logs the receiver's toString(), optionally transformed by [format], at VERBOSE level. */
fun Any.logv(tag: String, format: (String) -> String = { it }) = Log.v(tag, format(toString()))

/** Prints the receiver's toString(), optionally transformed by [format], to stdout. */
fun Any.println(format: (String) -> String = { it }) = println(format(toString()))

/** Logs this throwable with an optional [msg] at ASSERT level. */
fun Throwable.logwtf(tag: String, msg: String = "") = logwtf(tag = tag, msg = msg, e = this)

/** Logs this throwable with an optional [msg] at ERROR level. */
fun Throwable.loge(tag: String, msg: String = "") = loge(tag = tag, msg = msg, e = this)

/** Logs this throwable with an optional [msg] at WARN level. */
fun Throwable.logw(tag: String, msg: String = "") = logw(tag = tag, msg = msg, e = this)

/** Logs this throwable with an optional [msg] at INFO level. */
fun Throwable.logi(tag: String, msg: String = "") = logi(tag = tag, msg = msg, e = this)

/** Logs this throwable with an optional [msg] at DEBUG level. */
fun Throwable.logd(tag: String, msg: String = "") = logd(tag = tag, msg = msg, e = this)

/** Logs this throwable with an optional [msg] at VERBOSE level. */
fun Throwable.logv(tag: String, msg: String = "") = logv(tag = tag, msg = msg, e = this)
| 39.553191 | 102 | 0.604088 |
1252a4556b72ded51698ac1683a59518d00e4724 | 1,438 | h | C | Zhirnov/HW_6/format.h | mtrempoltsev/msu_cpp_autumn_2018 | 9272511ddfaa78332cfabda071b5fa3a9aee79cf | [
"MIT"
] | 16 | 2018-09-27T13:59:59.000Z | 2019-10-01T21:33:40.000Z | Zhirnov/HW_6/format.h | mtrempoltsev/msu_cpp_autumn_2018 | 9272511ddfaa78332cfabda071b5fa3a9aee79cf | [
"MIT"
] | 2 | 2018-10-17T20:56:15.000Z | 2018-10-24T00:02:42.000Z | Zhirnov/HW_6/format.h | mtrempoltsev/msu_cpp_autumn_2018 | 9272511ddfaa78332cfabda071b5fa3a9aee79cf | [
"MIT"
] | 22 | 2018-09-27T14:00:16.000Z | 2019-12-17T19:44:33.000Z | #pragma once
#include <string>
#include <sstream>
#include <vector>
#include <stdexcept>
#include <stdlib.h>
// Converts any streamable value to its string representation via operator<<.
template <class T>
std::string to_string(T&& arg) {
    std::ostringstream os;
    os << arg;
    return os.str();
}

// Formats `str` by substituting "{k}" placeholders with the k-th argument
// (0-based). Throws std::runtime_error("error") for malformed input:
// an empty "{}", a non-digit inside braces, an unterminated '{', a bare '}',
// or an index that is out of range of the supplied arguments.
//
// Fixes over the previous version: the loop index and the placeholder index
// are std::size_t (no signed/unsigned comparison, no int overflow for long
// digit runs), isdigit is called on an unsigned char (calling it on a
// negative plain char is undefined behavior), and a trailing '{' no longer
// peeks at str[str.length()].
template<class... ArgsT>
std::string format(const std::string& str, ArgsT&&... args) {
    std::vector<std::string> strArgs{to_string(std::forward<ArgsT>(args))...};
    std::ostringstream result;
    for (std::size_t i = 0; i < str.length(); ++i) {
        if (str[i] == '{') {
            std::size_t index = 0;
            ++i;
            if (i < str.length() && str[i] == '}') {
                throw std::runtime_error("error");  // "{}" without an index
            }
            bool closed = false;
            while (i < str.length()) {
                if (str[i] == '}') {
                    closed = true;
                    break;
                }
                if (!std::isdigit(static_cast<unsigned char>(str[i]))) {
                    throw std::runtime_error("error");  // non-digit in braces
                }
                index = index * 10 + static_cast<std::size_t>(str[i] - '0');
                ++i;
            }
            if (!closed) {
                throw std::runtime_error("error");  // unterminated '{'
            }
            if (index >= strArgs.size()) {
                throw std::runtime_error("error");  // index out of range
            }
            result << strArgs[index];
        } else if (str[i] == '}') {
            throw std::runtime_error("error");  // '}' without matching '{'
        } else {
            result << str[i];
        }
    }
    return result.str();
}
| 23.966667 | 78 | 0.40751 |
8561862c7fc79d6ad2020ae37a5cf870e0e07686 | 744 | js | JavaScript | routes/api/thought-routes.js | davidjaguilar104/nosql-social-network-api | 90a0725b55abe1ef7d36e9f23dbe41dd9a1566d4 | [
"MIT"
] | null | null | null | routes/api/thought-routes.js | davidjaguilar104/nosql-social-network-api | 90a0725b55abe1ef7d36e9f23dbe41dd9a1566d4 | [
"MIT"
] | null | null | null | routes/api/thought-routes.js | davidjaguilar104/nosql-social-network-api | 90a0725b55abe1ef7d36e9f23dbe41dd9a1566d4 | [
"MIT"
] | null | null | null | const router = require("express").Router();
const {
  getAllThoughts,
  getThoughtById,
  createThought,
  updateThought,
  deleteThought,
  addReaction,
  deleteReaction
} = require("../../controllers/thought-controller");

// GET / -- get all thoughts
router.route("/").get(getAllThoughts);

// POST /:userId -- post a thought for the given user
router.route("/:userId").post(createThought);

// GET, PUT, DELETE /:id -- read, update or delete a single thought by id
router.route("/:id").get(getThoughtById).put(updateThought).delete(deleteThought);

// POST /:thoughtId/reactions -- post a reaction to a thought
router.route("/:thoughtId/reactions").post(addReaction)

// DELETE /:thoughtId/reactions/:reactionId -- delete a reaction from a thought
router.route("/:thoughtId/reactions/:reactionId").delete(deleteReaction);
module.exports = router; | 27.555556 | 82 | 0.728495 |
ef6bf844941c26f00f6ff37a215778dbe073f503 | 436 | asm | Assembly | oeis/101/A101563.asm | neoneye/loda-programs | 84790877f8e6c2e821b183d2e334d612045d29c0 | [
"Apache-2.0"
] | 11 | 2021-08-22T19:44:55.000Z | 2022-03-20T16:47:57.000Z | oeis/101/A101563.asm | neoneye/loda-programs | 84790877f8e6c2e821b183d2e334d612045d29c0 | [
"Apache-2.0"
] | 9 | 2021-08-29T13:15:54.000Z | 2022-03-09T19:52:31.000Z | oeis/101/A101563.asm | neoneye/loda-programs | 84790877f8e6c2e821b183d2e334d612045d29c0 | [
"Apache-2.0"
] | 3 | 2021-08-22T20:56:47.000Z | 2021-09-29T06:26:12.000Z | ; A101563: a(n)=(-1)^n*coefficient of x^n in sum{k>=1,x^(k-1)/(1+10x^k)}.
; Submitted by Christian Krause
; 1,9,101,1009,10001,99909,1000001,10001009,100000101,999990009,10000000001,100000100909,1000000000001,9999999000009,100000000010101,1000000010001009,10000000000000001,99999999900099909
; NOTE(review): machine-generated LODA program for OEIS A101563 ($0 is the
; input, $1 accumulates the result). Verify any edit against the LODA
; language specification -- the instruction semantics are not x86.
add $0,1
mov $2,$0
lpb $0
div $1,-1
mul $1,10
mov $3,$2
dif $3,$0
sub $0,1
cmp $3,$2
cmp $3,0
add $1,$3
lpe
add $1,1
gcd $0,$1
| 22.947368 | 185 | 0.690367 |
c369b39bf440c0c7a46955a79d1ae62aa7a7399f | 435 | go | Go | models/person.go | cy422396350/cygin | b7f4a67eddbc8292b3c21efd270c209b5ae0af26 | [
"Apache-2.0"
] | null | null | null | models/person.go | cy422396350/cygin | b7f4a67eddbc8292b3c21efd270c209b5ae0af26 | [
"Apache-2.0"
] | null | null | null | models/person.go | cy422396350/cygin | b7f4a67eddbc8292b3c21efd270c209b5ae0af26 | [
"Apache-2.0"
] | null | null | null | package models
import (
redis2 "github.com/chasex/redis-go-cluster"
"github.com/cy422396350/cygin/redis"
"log"
)
// HandlePerson abstracts read access to person-related Redis keys.
type HandlePerson interface {
	GetKey(key string) (name string, err error)
}

// Person carries the basic account fields used by this package.
type Person struct {
	Id       int
	Name     string
	Password string
}

// GetKey reads the string value stored under key from the Redis cluster.
// NOTE(review): log.Fatal exits the whole process when Redis returns an
// error, so the err return value can never reach a caller on failure --
// consider returning the error instead of terminating.
func (p *Person) GetKey(key string) (name string, err error) {
	name, err = redis2.String(redis.RedisCy.Do("GET", key))
	if err != nil {
		log.Fatal(err)
	}
	return
}
| 16.730769 | 62 | 0.685057 |
d1d882cf21a5d2c9d2f5c9a91883eba10adaeee7 | 2,465 | kt | Kotlin | app/src/main/java/com/czech/muvies/models/PersonMovies.kt | IkemNwodo/Muvies | 7f1af79d5da40fe3cc608dd45bb70226887e1922 | [
"MIT"
] | 4 | 2020-05-18T19:06:13.000Z | 2021-06-02T20:25:24.000Z | app/src/main/java/com/czech/muvies/models/PersonMovies.kt | segunfrancis/Muvies | 82c9a919ce8f7558f69beea01676dc8e48ccd36e | [
"MIT"
] | null | null | null | app/src/main/java/com/czech/muvies/models/PersonMovies.kt | segunfrancis/Muvies | 82c9a919ce8f7558f69beea01676dc8e48ccd36e | [
"MIT"
] | 3 | 2021-05-22T07:27:06.000Z | 2021-09-23T19:41:20.000Z | package com.czech.muvies.models
import android.os.Parcelable
import com.google.gson.annotations.SerializedName
import kotlinx.android.parcel.Parcelize
import kotlinx.android.parcel.RawValue
@Parcelize
data class PersonMovies(
val cast: List<Cast?>? = null,
val crew: List<Crew?>? = null,
val id: Int? = null
): Parcelable {
/**
 * A movie credit where the person appears as cast; [character] names the
 * played role. Every field is nullable because the object is populated from
 * JSON via the Gson @SerializedName mappings and any key may be absent.
 */
@Parcelize
data class Cast(
    val adult: Boolean? = null,
    @SerializedName("backdrop_path")
    val backdropPath: @RawValue Any? = null,
    val character: String? = null,
    @SerializedName("credit_id")
    val creditId: String? = null,
    @SerializedName("genre_ids")
    val genreIds: List<Int?>? = null,
    val id: Int? = null,
    @SerializedName("original_language")
    val originalLanguage: String? = null,
    @SerializedName("original_title")
    val originalTitle: String? = null,
    val overview: String? = null,
    val popularity: Double? = null,
    @SerializedName("poster_path")
    val posterPath: String? = null,
    @SerializedName("release_date")
    val releaseDate: String? = null,
    val title: String? = null,
    val video: Boolean? = null,
    @SerializedName("vote_average")
    val voteAverage: Double? = null,
    @SerializedName("vote_count")
    val voteCount: Int? = null
): Parcelable
/**
 * A movie credit where the person appears as crew; [department] and [job]
 * describe the function. Every field is nullable because the object is
 * populated from JSON via the Gson @SerializedName mappings.
 */
@Parcelize
data class Crew(
    val adult: Boolean? = null,
    @SerializedName("backdrop_path")
    val backdropPath: @RawValue Any? = null,
    @SerializedName("credit_id")
    val creditId: String? = null,
    val department: String? = null,
    @SerializedName("genre_ids")
    val genreIds: List<Int?>? = null,
    val id: Int? = null,
    val job: String? = null,
    @SerializedName("original_language")
    val originalLanguage: String? = null,
    @SerializedName("original_title")
    val originalTitle: String? = null,
    val overview: String? = null,
    val popularity: Double? = null,
    @SerializedName("poster_path")
    val posterPath: @RawValue Any? = null,
    @SerializedName("release_date")
    val releaseDate: String? = null,
    val title: String? = null,
    val video: Boolean? = null,
    @SerializedName("vote_average")
    val voteAverage: Double? = null,
    @SerializedName("vote_count")
    val voteCount: Int? = null
): Parcelable
} | 33.310811 | 49 | 0.612982 |
dd6d8b8225f453f48e1ac18fa9388f74aed8a058 | 160 | php | PHP | src/RPC/Tunnel/Context.php | Sirius-social/sirius-sdk-php | 611322a340033f78d5a4460b5f67a68e489cf677 | [
"Apache-2.0"
] | 2 | 2020-10-13T14:06:44.000Z | 2021-11-25T13:03:17.000Z | src/RPC/Tunnel/Context.php | Sirius-social/sirius-sdk-php | 611322a340033f78d5a4460b5f67a68e489cf677 | [
"Apache-2.0"
] | 10 | 2021-09-17T15:09:19.000Z | 2021-10-04T11:28:08.000Z | src/RPC/Tunnel/Context.php | Sirius-social/sirius-sdk-php | 611322a340033f78d5a4460b5f67a68e489cf677 | [
"Apache-2.0"
] | 2 | 2021-11-26T16:25:09.000Z | 2022-03-27T12:20:45.000Z | <?php
namespace Siruis\RPC\Tunnel;
class Context
{
public $encrypted;
public function __construct()
{
$this->encrypted = false;
}
} | 10.666667 | 33 | 0.60625 |
98d6db5bf4892ee9c331f32ec11193a7eccd6b74 | 2,898 | lua | Lua | scripts/screenhelper.lua | 58115310/isaac-mod-config-menu | 612071c448e29095917a3902f73031a4a5722feb | [
"MIT"
] | 2 | 2021-12-27T23:01:49.000Z | 2022-03-20T22:55:39.000Z | scripts/screenhelper.lua | 58115310/isaac-mod-config-menu | 612071c448e29095917a3902f73031a4a5722feb | [
"MIT"
] | 5 | 2021-12-14T15:43:26.000Z | 2022-01-29T04:49:05.000Z | scripts/screenhelper.lua | 58115310/isaac-mod-config-menu | 612071c448e29095917a3902f73031a4a5722feb | [
"MIT"
] | 4 | 2021-12-14T15:39:45.000Z | 2022-01-29T04:12:11.000Z | ------------------------------------------------------------------------------
-- IMPORTANT: DO NOT EDIT THIS FILE!!! --
------------------------------------------------------------------------------
-- This file relies on other versions of itself being the same. --
-- If you need something in this file changed, please let the creator know! --
------------------------------------------------------------------------------
-- CODE STARTS BELOW --
-------------
-- version --
-------------
-- Version gate: several mods may each bundle a copy of this script, so the
-- newest copy wins and older/equal copies bail out early with the shared
-- global table.
local fileVersion = 1

--prevent older/same version versions of this script from loading
if ScreenHelper and ScreenHelper.Version >= fileVersion then
  return ScreenHelper
end

if not ScreenHelper then
  -- First copy to load: create the shared global table.
  ScreenHelper = {}
  ScreenHelper.Version = fileVersion
elseif ScreenHelper.Version < fileVersion then
  -- A newer copy is upgrading an older one in place.
  local oldVersion = ScreenHelper.Version
  -- handle old versions
  ScreenHelper.Version = fileVersion
end
---------------------
--hud offset helper--
---------------------
-- Current HUD offset; kept on the shared table so it survives script reloads.
ScreenHelper.CurrentScreenOffset = ScreenHelper.CurrentScreenOffset or 0

-- Stores (and returns) the HUD offset, floored and clamped to [0, 10].
function ScreenHelper.SetOffset(num)
  local clamped = math.floor(num)
  if clamped < 0 then
    clamped = 0
  elseif clamped > 10 then
    clamped = 10
  end
  ScreenHelper.CurrentScreenOffset = clamped
  return clamped
end

-- Returns the HUD offset previously stored via SetOffset (0 by default).
function ScreenHelper.GetOffset()
  return ScreenHelper.CurrentScreenOffset
end
------------------------------------
--screen size and corner functions--
------------------------------------

local vecZero = Vector(0,0)

-- Returns the total screen size as a Vector.
-- NOTE(review): the 26/40 scale factors and the 60/140/13/7 constants are
-- magic values inherited from the original code -- confirm before changing.
function ScreenHelper.GetScreenSize() --based off of code from kilburn
  local game = Game()
  local room = game:GetRoom()
  -- Screen position of the world origin with scroll and shake removed.
  local pos = room:WorldToScreenPosition(vecZero) - room:GetRenderScrollOffset() - game.ScreenShakeOffset
  local rx = pos.X + 60 * 26 / 40
  local ry = pos.Y + 140 * (26 / 40)
  return Vector(rx*2 + 13*26, ry*2 + 7*26)
end

-- Returns the center point of the screen.
function ScreenHelper.GetScreenCenter()
  return ScreenHelper.GetScreenSize() / 2
end
function ScreenHelper.GetScreenBottomRight(offset)
offset = offset or ScreenHelper.GetOffset()
local pos = ScreenHelper.GetScreenSize()
local hudOffset = Vector(-offset * 2.2, -offset * 1.6)
pos = pos + hudOffset
return pos
end
function ScreenHelper.GetScreenBottomLeft(offset)
offset = offset or ScreenHelper.GetOffset()
local pos = Vector(0, ScreenHelper.GetScreenBottomRight(0).Y)
local hudOffset = Vector(offset * 2.2, -offset * 1.6)
pos = pos + hudOffset
return pos
end
function ScreenHelper.GetScreenTopRight(offset)
offset = offset or ScreenHelper.GetOffset()
local pos = Vector(ScreenHelper.GetScreenBottomRight(0).X, 0)
local hudOffset = Vector(-offset * 2.2, offset * 1.2)
pos = pos + hudOffset
return pos
end
-- Top-left corner, nudged towards the screen center by the hud offset.
-- NOTE(review): the x factor is 2 here while the other corners use 2.2 --
-- presumably inherited from the vanilla hud layout; confirm before "fixing".
function ScreenHelper.GetScreenTopLeft(offset)
    offset = offset or ScreenHelper.GetOffset()
    return vecZero + Vector(offset * 2, offset * 1.2)
end
return ScreenHelper
| 21.466667 | 104 | 0.632505 |
21167907bf4261e027631a865df7583b69d3fca3 | 6,622 | lua | Lua | src/wesenGemaMod/GemaScoreManager/ScoreContextProvider.lua | wesen1/wesenGemaMod | 15a4b44cce7f1e5c18ce6fea9242562f15ecce93 | [
"MIT"
] | 1 | 2020-11-05T05:45:36.000Z | 2020-11-05T05:45:36.000Z | src/wesenGemaMod/GemaScoreManager/ScoreContextProvider.lua | wesen1/wesenGemaMod | 15a4b44cce7f1e5c18ce6fea9242562f15ecce93 | [
"MIT"
] | 64 | 2018-03-24T16:31:56.000Z | 2021-12-31T19:18:40.000Z | src/wesenGemaMod/GemaScoreManager/ScoreContextProvider.lua | wesen1/wesenGemaMod | 15a4b44cce7f1e5c18ce6fea9242562f15ecce93 | [
"MIT"
] | 2 | 2021-01-05T00:05:46.000Z | 2021-06-25T04:39:08.000Z | ---
-- @author wesen
-- @copyright 2021 wesen <wesen-ac@web.de>
-- @release 0.1
-- @license MIT
--
local LuaServerApi = require "AC-LuaServer.Core.LuaServerApi"
local Object = require "classic"
---
-- Provides the score contexts for the map tops and server tops.
-- At the moment it provides a "main" context (fastest time per player) and
-- weapon contexts (fastest time per player per weapon).
--
-- @type ScoreContextProvider
--
local ScoreContextProvider = Object:extend()
-- The available score contexts
-- NOTE(review): these numeric IDs are presumably persisted/compared elsewhere,
-- so existing values must stay stable when new contexts are added -- confirm.
ScoreContextProvider.CONTEXT_MAIN = 1
ScoreContextProvider.CONTEXT_KNIFE = 2
ScoreContextProvider.CONTEXT_PISTOL = 3
ScoreContextProvider.CONTEXT_ASSAULT_RIFLE = 4
ScoreContextProvider.CONTEXT_SUBMACHINE_GUN = 5
ScoreContextProvider.CONTEXT_SNIPER_RIFLE = 6
ScoreContextProvider.CONTEXT_SHOTGUN = 7
ScoreContextProvider.CONTEXT_CARBINE = 8
---
-- The mapping of weapon ID's to score contexts
-- (keys are LuaServerApi GUN_* constants, values are CONTEXT_* constants)
--
-- @tfield int[] WEAPON_SCORE_CONTEXTS
--
ScoreContextProvider.WEAPON_SCORE_CONTEXTS = {
  [LuaServerApi.GUN_KNIFE] = ScoreContextProvider.CONTEXT_KNIFE,
  [LuaServerApi.GUN_PISTOL] = ScoreContextProvider.CONTEXT_PISTOL,
  [LuaServerApi.GUN_ASSAULT] = ScoreContextProvider.CONTEXT_ASSAULT_RIFLE,
  [LuaServerApi.GUN_SUBGUN] = ScoreContextProvider.CONTEXT_SUBMACHINE_GUN,
  [LuaServerApi.GUN_SNIPER] = ScoreContextProvider.CONTEXT_SNIPER_RIFLE,
  [LuaServerApi.GUN_SHOTGUN] = ScoreContextProvider.CONTEXT_SHOTGUN,
  [LuaServerApi.GUN_CARBINE] = ScoreContextProvider.CONTEXT_CARBINE
}
---
-- The list of score context aliases that can be used in commands or config files
-- to reference specific score contexts
-- This list is in the format { <alias> = <context>, ... }
--
-- @tfield int[] scoreContextAliases
--
ScoreContextProvider.scoreContextAliases = nil
---
-- The list of preferred score context aliases that should be shown to players
-- This list is in the format { <context> = <alias>, ... }
--
-- @tfield string[] preferredScoreContextAliases
--
ScoreContextProvider.preferredScoreContextAliases = nil
---
-- ScoreContextProvider constructor.
--
function ScoreContextProvider:new()
  -- Fresh tables are created per instance so objects never share the
  -- class-level nil placeholders declared above.
  self.scoreContextAliases = {}
  self.preferredScoreContextAliases = {}
end
-- Public Methods
---
-- Initializes this ScoreContextProvider.
-- Registers the default set of score context aliases.
--
function ScoreContextProvider:initialize()
  self:initializeScoreContextAliases()
end
---
-- Returns the preferred (player-facing) alias for a given score context.
--
-- @tparam int _context The context whose preferred alias to return
--
-- @treturn string|nil The preferred alias, or nil if none is registered
--
function ScoreContextProvider:getPreferredAliasForScoreContext(_context)
  local preferredAlias = self.preferredScoreContextAliases[_context]
  return preferredAlias
end
---
-- Registers an alias for a score context.
--
-- @tparam int _context The score context to register the alias for
-- @tparam string _alias The alias to register
-- @tparam bool _isPreferredAlias True to make this the context's preferred alias
--
function ScoreContextProvider:addScoreContextAlias(_context, _alias, _isPreferredAlias)
  self.scoreContextAliases[_alias] = _context

  -- The first alias registered for a context becomes its preferred alias
  -- unless a later registration explicitly claims the preferred slot.
  local hasPreferredAlias = (self.preferredScoreContextAliases[_context] ~= nil)
  if (_isPreferredAlias or not hasPreferredAlias) then
    self.preferredScoreContextAliases[_context] = _alias
  end
end
---
-- Looks up the score context registered for a given alias.
--
-- @tparam string _alias The alias whose corresponding score context to return
--
-- @treturn int|nil The score context, or nil for unknown aliases
--
function ScoreContextProvider:getScoreContextByAlias(_alias)
  local context = self.scoreContextAliases[_alias]
  return context
end
---
-- Resolves a list of aliases to their score contexts.
--
-- @tparam string[] _aliases The aliases whose corresponding score contexts to return
--
-- @treturn int[] The score contexts for the given aliases
--
function ScoreContextProvider:getScoreContextsByAliases(_aliases)
  local contexts = {}
  for _, currentAlias in ipairs(_aliases) do
    contexts[#contexts + 1] = self:getScoreContextByAlias(currentAlias)
  end

  return contexts
end
---
-- Checks whether a given score context belongs to a weapon.
--
-- @tparam int _context The context to check
--
-- @treturn bool True if the given context is a weapon context, false otherwise
--
function ScoreContextProvider:isWeaponScoreContext(_context)
  local weaponId = self:scoreContextToWeaponId(_context)
  return (weaponId ~= nil)
end
---
-- Converts a given score context to a weapon ID by reverse lookup in the
-- WEAPON_SCORE_CONTEXTS mapping.
--
-- @tparam int _context The score context whose corresponding weapon ID to return
--
-- @treturn int|nil The weapon ID, or nil if the context is not weapon related
--
function ScoreContextProvider:scoreContextToWeaponId(_context)
  for weaponId, weaponContext in pairs(ScoreContextProvider.WEAPON_SCORE_CONTEXTS) do
    if (weaponContext == _context) then
      return weaponId
    end
  end

  return nil
end
---
-- Converts a given weapon ID to its score context.
--
-- @tparam int _weaponId The weapon ID whose corresponding score context to return
--
-- @treturn int|nil The score context, or nil for weapons without one
--
function ScoreContextProvider:weaponIdToScoreContext(_weaponId)
  local scoreContext = ScoreContextProvider.WEAPON_SCORE_CONTEXTS[_weaponId]
  return scoreContext
end
-- Private Methods
---
-- Initializes the default score context aliases.
-- For each context the alias flagged with `true` is the one shown to players;
-- the remaining aliases are accepted as input shorthands only.
--
function ScoreContextProvider:initializeScoreContextAliases()
  self:addScoreContextAlias(ScoreContextProvider.CONTEXT_MAIN, "main", true)
  self:addScoreContextAlias(ScoreContextProvider.CONTEXT_KNIFE, "knife", true)
  self:addScoreContextAlias(ScoreContextProvider.CONTEXT_KNIFE, "knife-only")
  self:addScoreContextAlias(ScoreContextProvider.CONTEXT_PISTOL, "pistol", true)
  self:addScoreContextAlias(ScoreContextProvider.CONTEXT_PISTOL, "pistol-only")
  self:addScoreContextAlias(ScoreContextProvider.CONTEXT_ASSAULT_RIFLE, "assault-rifle", true)
  self:addScoreContextAlias(ScoreContextProvider.CONTEXT_ASSAULT_RIFLE, "assault")
  self:addScoreContextAlias(ScoreContextProvider.CONTEXT_ASSAULT_RIFLE, "ar")
  self:addScoreContextAlias(ScoreContextProvider.CONTEXT_SUBMACHINE_GUN, "submachine-gun", true)
  self:addScoreContextAlias(ScoreContextProvider.CONTEXT_SUBMACHINE_GUN, "submachine")
  self:addScoreContextAlias(ScoreContextProvider.CONTEXT_SUBMACHINE_GUN, "smg")
  self:addScoreContextAlias(ScoreContextProvider.CONTEXT_SNIPER_RIFLE, "sniper-rifle", true)
  self:addScoreContextAlias(ScoreContextProvider.CONTEXT_SNIPER_RIFLE, "sniper")
  self:addScoreContextAlias(ScoreContextProvider.CONTEXT_SHOTGUN, "shotgun", true)
  self:addScoreContextAlias(ScoreContextProvider.CONTEXT_SHOTGUN, "sg")
  self:addScoreContextAlias(ScoreContextProvider.CONTEXT_CARBINE, "carbine", true)
end
return ScoreContextProvider
| 31.990338 | 120 | 0.792812 |
57609f7a52e38824da2f35c302daf4d5af9f4d97 | 1,972 | h | C | BlocksEngine/include/BlocksEngine/Core/Dispatch/BaseDispatchQueue.h | jorgeparavicini/Blocks | dd1654d5643ca3707d39ae6ef21667e9b72130ef | [
"Apache-2.0"
] | null | null | null | BlocksEngine/include/BlocksEngine/Core/Dispatch/BaseDispatchQueue.h | jorgeparavicini/Blocks | dd1654d5643ca3707d39ae6ef21667e9b72130ef | [
"Apache-2.0"
] | 3 | 2022-01-02T10:10:49.000Z | 2022-01-02T10:25:53.000Z | BlocksEngine/include/BlocksEngine/Core/Dispatch/BaseDispatchQueue.h | jorgeparavicini/Blocks | dd1654d5643ca3707d39ae6ef21667e9b72130ef | [
"Apache-2.0"
] | null | null | null | //
// Copyright (c) 2021, Severin Goddon & Jorge Paravicini
// All rights reserved.
//
// This source code is licensed under the MIT-style license found in LICENSE file in the root directory of this source tree.
//
// Author: Jorge Paravicini
// File: BaseDispatchQueue.h
#pragma once
#include <mutex>
#include <queue>
#include "BlocksEngine/Core/Dispatch/DispatchWorkItem.h"
namespace BlocksEngine
{
class BaseDispatchQueue;
}
// Abstract base for dispatch queues: holds a FIFO of work items and the mutex
// used to serialize access to it. Concrete queues decide when/where items run.
class BlocksEngine::BaseDispatchQueue
{
public:
    //------------------------------------------------------------------------------
    // Constructors
    //------------------------------------------------------------------------------
    BaseDispatchQueue() = default;
    //------------------------------------------------------------------------------
    // Copy Constructor & Assignment
    //------------------------------------------------------------------------------
    // Non-copyable: the queue owns a mutex and pending work.
    BaseDispatchQueue(const BaseDispatchQueue&) = delete;
    BaseDispatchQueue& operator=(const BaseDispatchQueue&) = delete;
    //------------------------------------------------------------------------------
    // Move Constructor & Assignment
    //------------------------------------------------------------------------------
    // Non-movable for the same reason (std::mutex is not movable).
    BaseDispatchQueue(const BaseDispatchQueue&&) = delete;
    BaseDispatchQueue& operator=(const BaseDispatchQueue&&) = delete;
    //------------------------------------------------------------------------------
    // Destructors
    //------------------------------------------------------------------------------
    virtual ~BaseDispatchQueue() = default;
    //------------------------------------------------------------------------------
    // Methods
    //------------------------------------------------------------------------------
    // Enqueues a work item for later asynchronous execution.
    // NOTE(review): only the declaration is visible here -- presumably queue_
    // is locked via lock_ in the implementation; confirm in the .cpp file.
    virtual void Async(std::shared_ptr<DispatchObject> workItem);
protected:
    std::mutex lock_; // presumably guards queue_ -- confirm in implementation
    std::queue<std::shared_ptr<DispatchObject>> queue_{};
};
| 32.327869 | 124 | 0.411258 |
41316c011f5ad5a0460fdb2073507d9f0dd2c28d | 1,401 | h | C | stheno/src/org/cracs/stheno/common/UID.h | rolandomar/stheno | 6b41f56f25be1e7d56c8be4973203bf943e4f041 | [
"Apache-2.0"
] | 7 | 2015-08-17T16:24:22.000Z | 2022-03-16T15:54:19.000Z | stheno/src/org/cracs/stheno/common/UID.h | rolandomar/stheno | 6b41f56f25be1e7d56c8be4973203bf943e4f041 | [
"Apache-2.0"
] | null | null | null | stheno/src/org/cracs/stheno/common/UID.h | rolandomar/stheno | 6b41f56f25be1e7d56c8be4973203bf943e4f041 | [
"Apache-2.0"
] | null | null | null | /*
* Copyright 2012 Rolando Martins, CRACS & INESC-TEC, DCC/FCUP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/*
* File: UID.h
* Author: Rolando Martins (rolando.martins@gmail.com)
*
* Created on May 13, 2010, 4:42 PM
*/
#ifndef UID_H
#define UID_H
/**
* Universal identifier
*/
#include <euryale/common/uuid/UUID.h>
#include <euryale/serialization/Serializable.h>
class UID: public Serializable {
public:
    // Builds a UID from its three component identifiers.
    UID(UUIDPtr& uuid, UUIDPtr& fid, UUIDPtr& sid);
    UID(const UID& orig);
    virtual ~UID();
    // Accessors for the identifier components. NOTE(review): getPID()
    // presumably returns m_uuid -- only declarations are visible here,
    // confirm the mapping in UID.cpp.
    UUIDPtr& getPID() ;
    UUIDPtr& getFID() ;
    UUIDPtr& getSID();
    // Serializable interface. Dynamic exception specifications are deprecated
    // since C++11 (removed in C++17); kept as-is to match the base interface.
    virtual void serialize(OutputStream& outputStream) throw (SerializationException&);
    virtual void deserialize(InputStream& inputStream) throw (SerializationException&);
protected:
    UUIDPtr m_uuid;
    UUIDPtr m_fid;
    UUIDPtr m_sid;
};
#endif /* UID_H */
| 25.472727 | 87 | 0.695218 |
24c8638f678402a484cfdc1ee90ae2b67ff78427 | 3,832 | go | Go | internal/cli/atlas_clusters_update.go | p-mongo/mongocli | 7c0010167ae649910b0da2581aab031dc862b661 | [
"Apache-2.0"
] | null | null | null | internal/cli/atlas_clusters_update.go | p-mongo/mongocli | 7c0010167ae649910b0da2581aab031dc862b661 | [
"Apache-2.0"
] | null | null | null | internal/cli/atlas_clusters_update.go | p-mongo/mongocli | 7c0010167ae649910b0da2581aab031dc862b661 | [
"Apache-2.0"
] | null | null | null | // Copyright 2020 MongoDB Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cli
import (
atlas "github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
"github.com/mongodb/mongocli/internal/description"
"github.com/mongodb/mongocli/internal/file"
"github.com/mongodb/mongocli/internal/flags"
"github.com/mongodb/mongocli/internal/json"
"github.com/mongodb/mongocli/internal/store"
"github.com/mongodb/mongocli/internal/usage"
"github.com/spf13/afero"
"github.com/spf13/cobra"
)
// atlasClustersUpdateOpts holds the flag values and collaborators for the
// "atlas clusters update" command.
type atlasClustersUpdateOpts struct {
	globalOpts
	name string // positional cluster name (optional when a file is supplied)
	instanceSize string // --instanceSize override; empty means unchanged
	diskSizeGB float64 // --diskSizeGB override; zero means unchanged
	mdbVersion string // --mdbVersion override; empty means unchanged
	filename string // --file: path to a cluster definition to send as-is
	fs afero.Fs
	store store.ClusterStore
}
// initStore constructs the Atlas-backed cluster store used by Run.
func (opts *atlasClustersUpdateOpts) initStore() error {
	s, err := store.New()
	opts.store = s
	return err
}
// Run executes the update: resolve the cluster payload, patch it when it was
// fetched from Atlas, submit the update, and pretty-print the result.
func (opts *atlasClustersUpdateOpts) Run() error {
	cluster, err := opts.cluster()
	if err != nil {
		return err
	}
	if opts.filename == "" {
		// Fetched from Atlas: strip read-only fields and apply flag overrides.
		opts.patchOpts(cluster)
	} else {
		// Loaded from file: only the project ID needs to be stamped on.
		// NOTE(review): this uses opts.ProjectID() while cluster() reads the
		// raw opts.projectID field -- presumably ProjectID() applies a
		// profile/config fallback; confirm the asymmetry is intentional.
		cluster.GroupID = opts.ProjectID()
	}
	result, err := opts.store.UpdateCluster(cluster)
	if err != nil {
		return err
	}
	return json.PrettyPrint(result)
}
// cluster resolves the update payload: decoded from the --file argument when
// one was given, otherwise fetched from Atlas by project ID and name.
func (opts *atlasClustersUpdateOpts) cluster() (*atlas.Cluster, error) {
	if opts.filename == "" {
		return opts.store.Cluster(opts.projectID, opts.name)
	}
	result := new(atlas.Cluster)
	err := file.Load(opts.fs, opts.filename, result)
	if opts.name != "" {
		// A positional name takes precedence over the one in the file.
		result.Name = opts.name
	}
	return result, err
}
// patchOpts strips server-populated fields from a cluster fetched from Atlas
// and applies the command-line overrides, producing a payload the update API
// will accept.
func (opts *atlasClustersUpdateOpts) patchOpts(out *atlas.Cluster) {
	// ReplicationSpecs supersedes the singular ReplicationSpec; only one of
	// the two may be present in the request.
	if out.ReplicationSpecs != nil {
		out.ReplicationSpec = nil
	}
	// These fields are read-only on update and must be cleared before sending.
	for _, readOnly := range []*string{
		&out.MongoURI,
		&out.MongoURIWithOptions,
		&out.MongoURIUpdated,
		&out.StateName,
		&out.MongoDBVersion,
	} {
		*readOnly = ""
	}
	// Flag overrides; zero values mean "leave unchanged".
	if opts.mdbVersion != "" {
		out.MongoDBMajorVersion = opts.mdbVersion
	}
	if opts.diskSizeGB > 0 {
		out.DiskSizeGB = &opts.diskSizeGB
	}
	// NOTE(review): assumes ProviderSettings is non-nil on fetched clusters --
	// confirm; a nil value here would panic.
	if opts.instanceSize != "" {
		out.ProviderSettings.InstanceSizeName = opts.instanceSize
	}
}
// AtlasClustersUpdateBuilder builds the cobra command:
// mongocli atlas cluster(s) update name --projectId projectId [--instanceSize M#] [--diskSizeGB N] [--mdbVersion]
func AtlasClustersUpdateBuilder() *cobra.Command {
	opts := &atlasClustersUpdateOpts{
		fs: afero.NewOsFs(),
	}
	cmd := &cobra.Command{
		Use: "update [name]",
		Short: description.UpdateCluster,
		Example: `  mongocli atlas cluster update myCluster --projectId=1 --instanceSize M2 --mdbVersion 4.2 --diskSizeGB 2`,
		Args: cobra.MaximumNArgs(1),
		PreRunE: func(cmd *cobra.Command, args []string) error {
			// The cluster name is required unless the payload comes from --file.
			if opts.filename == "" && len(args) == 0 {
				return errMissingClusterName
			}
			if len(args) != 0 {
				opts.name = args[0]
			}
			return opts.PreRunE(opts.initStore)
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			return opts.Run()
		},
	}
	cmd.Flags().StringVar(&opts.instanceSize, flags.InstanceSize, "", usage.InstanceSize)
	cmd.Flags().Float64Var(&opts.diskSizeGB, flags.DiskSizeGB, 0, usage.DiskSizeGB)
	cmd.Flags().StringVar(&opts.mdbVersion, flags.MDBVersion, "", usage.MDBVersion)
	cmd.Flags().StringVarP(&opts.filename, flags.File, flags.FileShort, "", usage.Filename)
	cmd.Flags().StringVar(&opts.projectID, flags.ProjectID, "", usage.ProjectID)
	return cmd
}
| 27.970803 | 119 | 0.708246 |
c17859710fb4b83d390a60cce1c8cc1f320726af | 655 | kt | Kotlin | core/src/main/java/gr/blackswamp/core/data/Response.kt | JMavrelos/RatingsDiary | 9bf2635582fbb6c5ce82860241cf5f206f995729 | [
"Apache-2.0"
] | null | null | null | core/src/main/java/gr/blackswamp/core/data/Response.kt | JMavrelos/RatingsDiary | 9bf2635582fbb6c5ce82860241cf5f206f995729 | [
"Apache-2.0"
] | null | null | null | core/src/main/java/gr/blackswamp/core/data/Response.kt | JMavrelos/RatingsDiary | 9bf2635582fbb6c5ce82860241cf5f206f995729 | [
"Apache-2.0"
] | null | null | null | package gr.blackswamp.core.data
/**
 * Minimal result wrapper: holds either a success value of type [T] or an
 * internal [Failure] carrying the error.
 *
 * Callers must check [hasError] before reading [get] or [error]:
 * - [get] performs an unchecked cast; because of type erasure, calling it on
 *   a failed response hands back the internal [Failure] object instead of
 *   failing fast.
 * - [error] throws [ClassCastException] when the response is successful.
 */
class Response<T : Any?>(private val value: Any?) {
    companion object {
        /** Wraps a successful [value]. */
        fun <T> success(value: T): Response<T> = Response(value)
        /** Wraps a failure around an existing [exception]. */
        fun <T> failure(exception: Throwable): Response<T> = Response(Failure(exception))
        /** Wraps a failure built from a [message] and an optional [cause]. */
        fun <T> failure(message: String, cause: Throwable? = null): Response<T> = Response(Failure(Throwable(message, cause)))
    }
    /** True when this response carries an error rather than a value. */
    val hasError: Boolean get() = value is Failure
    // Unchecked by design -- only valid when hasError is false.
    @Suppress("UNCHECKED_CAST")
    val get: T
        get() = value as T
    // Only valid when hasError is true; throws ClassCastException otherwise.
    val error: Throwable
        get() = (value as Failure).exception
    // Internal holder distinguishing failures from legitimate success values.
    internal data class Failure(val exception: Throwable)
}
7f914907f07730305b1155ab6af25e9b2bd81143 | 46 | go | Go | model/public_endpoint.go | prachidamle/v2-api | 097a723c797f1bbb096576b74cbea04f2f90abd2 | [
"Apache-2.0"
] | 1 | 2016-03-06T15:55:50.000Z | 2016-03-06T15:55:50.000Z | model/public_endpoint.go | prachidamle/v2-api | 097a723c797f1bbb096576b74cbea04f2f90abd2 | [
"Apache-2.0"
] | 5 | 2016-02-24T19:16:07.000Z | 2020-07-31T16:03:29.000Z | model/public_endpoint.go | prachidamle/v2-api | 097a723c797f1bbb096576b74cbea04f2f90abd2 | [
"Apache-2.0"
] | 4 | 2016-02-23T17:49:05.000Z | 2021-01-24T11:55:58.000Z | package model
type PublicEndpoint struct {
}
| 9.2 | 28 | 0.782609 |
0a43ca243356a7ea274695b1faa5559ac7dcd5c1 | 263 | ts | TypeScript | src/dto/country-dto.ts | dannystyleart/country-data | f426f73ce2ff25f131a925f9642abd8de76e6f7b | [
"MIT"
] | null | null | null | src/dto/country-dto.ts | dannystyleart/country-data | f426f73ce2ff25f131a925f9642abd8de76e6f7b | [
"MIT"
] | 38 | 2022-01-02T23:53:40.000Z | 2022-03-28T14:23:00.000Z | src/dto/country-dto.ts | dannystyleart/country-data | f426f73ce2ff25f131a925f9642abd8de76e6f7b | [
"MIT"
] | null | null | null | import { ApiResponseProperty } from '@nestjs/swagger';
/**
 * Response DTO describing a country, annotated for Swagger/OpenAPI docs.
 */
export class CountryDto {
  /** Human-readable country name. */
  @ApiResponseProperty({ type: 'string' })
  name: string;
  /** Two-letter country code — presumably ISO 3166-1 alpha-2; confirm upstream. */
  @ApiResponseProperty({ type: 'string' })
  iso2: string;
  /** Three-letter country code — presumably ISO 3166-1 alpha-3; confirm upstream. */
  @ApiResponseProperty({ type: 'string' })
  iso3: string;
}
| 20.230769 | 54 | 0.684411 |
04fe67c3a90f4a52aa7d7059b769c0472cbd1196 | 145 | sql | SQL | sql-install/mssql/create_schemas.sql | OsmiumKZ/DormServer | 89fbec2d5ba7f8d7155cad001e3c91fe2acef3be | [
"MIT"
] | 1 | 2020-02-22T16:07:13.000Z | 2020-02-22T16:07:13.000Z | sql-install/mssql/create_schemas.sql | OsmiumKZ/DormServer | 89fbec2d5ba7f8d7155cad001e3c91fe2acef3be | [
"MIT"
] | null | null | null | sql-install/mssql/create_schemas.sql | OsmiumKZ/DormServer | 89fbec2d5ba7f8d7155cad001e3c91fe2acef3be | [
"MIT"
] | null | null | null | -- --------------------------------------------------------
--
-- Создание БД Dorm
--
CREATE DATABASE [dorm]
COLLATE Cyrillic_General_CI_AS
GO
| 16.111111 | 60 | 0.434483 |
f070bbe66c4decee4e36b1b70a73e837c4da647e | 8,179 | kt | Kotlin | app/src/main/java/admin/mealbuffet/com/mealnbuffetadmin/nav/buffet/EditBuffetFragment.kt | Laxmanbalu/MealNBuffetAdmin | 48d307aaf717b8dc92d033991be4bb7bd165fdec | [
"Apache-2.0"
] | null | null | null | app/src/main/java/admin/mealbuffet/com/mealnbuffetadmin/nav/buffet/EditBuffetFragment.kt | Laxmanbalu/MealNBuffetAdmin | 48d307aaf717b8dc92d033991be4bb7bd165fdec | [
"Apache-2.0"
] | null | null | null | app/src/main/java/admin/mealbuffet/com/mealnbuffetadmin/nav/buffet/EditBuffetFragment.kt | Laxmanbalu/MealNBuffetAdmin | 48d307aaf717b8dc92d033991be4bb7bd165fdec | [
"Apache-2.0"
] | null | null | null | package admin.mealbuffet.com.mealnbuffetadmin.nav.buffet
import admin.mealbuffet.com.mealnbuffetadmin.R
import admin.mealbuffet.com.mealnbuffetadmin.custom.InfoDialog
import admin.mealbuffet.com.mealnbuffetadmin.model.BuffetBasicData
import admin.mealbuffet.com.mealnbuffetadmin.model.BuffetItem
import admin.mealbuffet.com.mealnbuffetadmin.model.EditBuffetData
import admin.mealbuffet.com.mealnbuffetadmin.util.PreferencesHelper
import android.app.TimePickerDialog
import android.graphics.Color
import android.graphics.drawable.ColorDrawable
import android.os.Bundle
import android.text.InputType
import android.view.View
import android.widget.EditText
import com.mealbuffet.controller.BaseFragment
import kotlinx.android.synthetic.main.fragment_addbuffet.*
class EditBuffetFragment : BaseFragment() {
private var startTimeHour = 0
private var startTimeMin = 0
private var endTimeHour = 0
private var endTimeMin = 0
private lateinit var selectedBuffetItem: BuffetItem
override fun layoutResource(): Int = R.layout.fragment_addbuffet
override fun onViewCreated(view: View, savedInstanceState: Bundle?) {
super.onViewCreated(view, savedInstanceState)
fillBuffetData()
et_buffetstartTime.inputType = InputType.TYPE_NULL
et_buffetstartTime.setOnClickListener { showStartTimePickerDialog() }
et_buffetendTime.inputType = InputType.TYPE_NULL
et_buffetendTime.setOnClickListener { showEndTimePickerDialog() }
et_cutoff_time.inputType = InputType.TYPE_NULL
et_cutoff_time.setOnClickListener {
showCutOffTimePickerDialog()
}
buffet_movenext.setOnClickListener {
val data = makeBuffetBasicObject()
if (data != null) {
wrapActionListener().onAction(EDIT_BUFFET_MOVE_NEXT, makeBuffetBasicObject())
}
}
}
private fun fillBuffetData() {
et_buffetname.setText(selectedBuffetItem.buffetName)
et_buffetdisplayname.setText(selectedBuffetItem.displayName)
et_buffetstartTime.setText(selectedBuffetItem.startTime)
et_buffetendTime.setText(selectedBuffetItem.endTime)
et_buffetadult_price.setText(selectedBuffetItem.adultPrice.toString())
et_buffetkids_price.setText(selectedBuffetItem.kidsPrice.toString())
et_buffetdesc.setText(selectedBuffetItem.typeDesc)
et_cutoff_time.setText(selectedBuffetItem.orderCutOffTime)
spin_buffettype.setSelection(getFoodType())
}
private fun getFoodType(): Int {
return when (selectedBuffetItem.type.toUpperCase()) {
requireContext().getString(R.string.breakfast).toUpperCase() -> 1
requireContext().getString(R.string.lunch).toUpperCase() -> 2
requireContext().getString(R.string.dinner).toUpperCase() -> 3
requireContext().getString(R.string.allday).toUpperCase() -> 4
else -> 0
}
}
private fun makeBuffetBasicObject(): EditBuffetData? {
if (showDataError(et_buffetname) || showDataError((et_buffetdisplayname)) || showDataError(et_buffetdesc) || showDataError((et_buffetstartTime)) ||
showDataError((et_buffetstartTime)) || showDataError((et_buffetendTime)) || showDataError((et_buffetadult_price)) || showDataError((et_buffetkids_price))) {
return null
}
if (spin_buffettype.selectedItem.toString() == getString(R.string.default_selection_msg)) {
showFoodSelectionError("Select Buffet Type")
return null
}
val restaurantId = PreferencesHelper.getRestaurantId(requireContext())
val buffetBasicData = BuffetBasicData(buffetName = et_buffetname.text.toString(), desc = et_buffetdesc.text.toString(), type = spin_buffettype.selectedItem.toString(),
startTime = et_buffetstartTime.text.toString(), endTime = et_buffetendTime.text.toString(), adultPrice = et_buffetadult_price.text.toString().toDouble(),
kidsPrice = et_buffetkids_price.text.toString().toDouble(), displayName = et_buffetdisplayname.text.toString(), restaurantId = restaurantId,
buffetCutOffTime = et_cutoff_time.text.toString())
return EditBuffetData(buffetBasicData = buffetBasicData, buffetItem = selectedBuffetItem)
}
private fun showFoodSelectionError(message: String) {
val dialog = InfoDialog.newInstance(message)
dialog.show(activity?.supportFragmentManager, message)
}
private fun showDataError(etData: EditText): Boolean {
return if (etData.text.isEmpty()) {
etData.error = getString(R.string.empty_field)
true
} else {
etData.error = null
false
}
}
private fun showCutOffTimePickerDialog() {
val timePickerDialog = TimePickerDialog(requireContext(), android.R.style.Theme_Holo_Light_Dialog,
TimePickerDialog.OnTimeSetListener { _, hourOfDay, minute ->
if (hourOfDay in startTimeHour..endTimeHour) {
et_cutoff_time.setText(String.format("%02d:%02d", hourOfDay, minute))
} else {
showTimeErrorDialog("Order CutOff Time Should be Between Start and End Time")
}
}, 0, 0, false)
timePickerDialog.window.setBackgroundDrawable(ColorDrawable(Color.TRANSPARENT))
timePickerDialog.show()
}
private fun showStartTimePickerDialog() {
val timePickerDialog = TimePickerDialog(requireContext(), android.R.style.Theme_Holo_Light_Dialog,
TimePickerDialog.OnTimeSetListener { _, hourOfDay, minute ->
startTimeHour = hourOfDay
startTimeMin = minute
if (endTimeHour != 0 || endTimeMin != 0) {
if (startTimeHour > endTimeHour || (startTimeHour == endTimeHour && startTimeMin > endTimeMin)) {
showTimeErrorDialog("Start Time Should be Before End Time.")
return@OnTimeSetListener
} else if (startTimeHour == endTimeHour && startTimeMin == endTimeMin) {
showTimeErrorDialog("StartTime & EndTime are Same")
return@OnTimeSetListener
}
}
et_buffetstartTime.setText(String.format("%02d:%02d", hourOfDay, minute))
}, 0, 0, false)
timePickerDialog.window.setBackgroundDrawable(ColorDrawable(Color.TRANSPARENT))
timePickerDialog.show()
}
private fun showEndTimePickerDialog() {
val timePickerDialog = TimePickerDialog(requireContext(), android.R.style.Theme_Holo_Light_Dialog,
TimePickerDialog.OnTimeSetListener { _, hourOfDay, minute ->
endTimeHour = hourOfDay
endTimeMin = minute
if (hourOfDay < startTimeHour || (hourOfDay == startTimeHour && minute < startTimeMin)) {
showTimeErrorDialog("End Time Should be After Start Time.")
return@OnTimeSetListener
} else if (startTimeHour == endTimeHour && startTimeMin == endTimeMin) {
showTimeErrorDialog("StartTime & EndTime are Same")
return@OnTimeSetListener
}
et_buffetendTime.setText(String.format("%02d:%02d", hourOfDay, minute))
et_cutoff_time.setText(String.format("%02d:%02d", hourOfDay, minute))
}, 0, 0, false)
timePickerDialog.window.setBackgroundDrawable(ColorDrawable(Color.TRANSPARENT))
timePickerDialog.show()
}
private fun showTimeErrorDialog(msg: String) {
val dialog = InfoDialog.newInstance(msg)
dialog.show(activity?.supportFragmentManager, msg)
}
fun setSelectedBuffetData(argBuffetBasicData: BuffetItem) {
selectedBuffetItem = argBuffetBasicData
}
companion object {
const val EDIT_BUFFET_MOVE_NEXT: String = "EditBuffetMoveNext"
}
} | 47.277457 | 175 | 0.667563 |
0cb88d9738f070179ad3791e8725e49dddde3cbd | 45 | py | Python | Weltantschauung/__init__.py | area42/Weltanschauung- | 85694740f149aa741f69a67bf234b447ba11fb22 | [
"MIT"
] | null | null | null | Weltantschauung/__init__.py | area42/Weltanschauung- | 85694740f149aa741f69a67bf234b447ba11fb22 | [
"MIT"
] | null | null | null | Weltantschauung/__init__.py | area42/Weltanschauung- | 85694740f149aa741f69a67bf234b447ba11fb22 | [
"MIT"
] | null | null | null | from .Weltantschauung import Weltantschauung
| 22.5 | 44 | 0.888889 |
9f50a3c009089b3216e6367c3e92a38ce35c1802 | 1,295 | sql | SQL | mockup-interface/sophos.sql | naumazeredo/cp-platform-proto | 3612f6b6e41835742f2ee791a09bf624960379c9 | [
"MIT"
] | null | null | null | mockup-interface/sophos.sql | naumazeredo/cp-platform-proto | 3612f6b6e41835742f2ee791a09bf624960379c9 | [
"MIT"
] | null | null | null | mockup-interface/sophos.sql | naumazeredo/cp-platform-proto | 3612f6b6e41835742f2ee791a09bf624960379c9 | [
"MIT"
] | null | null | null | -- phpMyAdmin SQL Dump
-- version 4.1.14
-- http://www.phpmyadmin.net
--
-- Host: 127.0.0.1
-- Generation Time: 02-Maio-2016 às 17:58
-- Versão do servidor: 5.6.17
-- PHP Version: 5.5.12
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
SET time_zone = "+00:00";
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8 */;
--
-- Database: `sophos`
--
-- --------------------------------------------------------
--
-- Estrutura da tabela `artigo`
--
-- Articles table: each row stores an article with its category reference.
-- NOTE(review): no PRIMARY KEY or indexes are declared here -- phpMyAdmin
-- dumps usually add keys in a later ALTER TABLE section, which is absent from
-- this dump; confirm `id` is meant to be the key. `name_categoria` duplicates
-- data from `categoria` (denormalized by design, presumably).
CREATE TABLE IF NOT EXISTS `artigo` (
  `id` int(11) NOT NULL,
  `name` varchar(64) NOT NULL,
  `id_categoria` int(11) NOT NULL,
  `name_categoria` varchar(64) NOT NULL,
  `conteudo` varchar(8192) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-- --------------------------------------------------------
--
-- Estrutura da tabela `categoria`
--
-- Category lookup table -- presumably referenced by `artigo`.`id_categoria`
-- (no foreign key is declared in this dump).
-- NOTE(review): as with `artigo`, the key section of the dump is missing;
-- confirm `id` is keyed.
CREATE TABLE IF NOT EXISTS `categoria` (
  `id` int(11) NOT NULL,
  `name` varchar(64) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
| 25.392157 | 67 | 0.646332 |
2a475442ec3f71b5be581fbb8c5a3790342433be | 1,026 | java | Java | MerlionFlightReservationSystem-ejb/src/java/ejb/session/stateless/UserSessionBean.java | BikJeun/MerlionFlightReservationSystem | 95364c5fab027938d3f4f75f6d62d32167c39986 | [
"MIT"
] | null | null | null | MerlionFlightReservationSystem-ejb/src/java/ejb/session/stateless/UserSessionBean.java | BikJeun/MerlionFlightReservationSystem | 95364c5fab027938d3f4f75f6d62d32167c39986 | [
"MIT"
] | null | null | null | MerlionFlightReservationSystem-ejb/src/java/ejb/session/stateless/UserSessionBean.java | BikJeun/MerlionFlightReservationSystem | 95364c5fab027938d3f4f75f6d62d32167c39986 | [
"MIT"
] | 1 | 2020-11-21T17:22:31.000Z | 2020-11-21T17:22:31.000Z | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ejb.session.stateless;
import entity.UserEntity;
import exceptions.UserNotFoundException;
import javax.ejb.Stateless;
import javax.persistence.EntityManager;
import javax.persistence.PersistenceContext;
/**
*
* @author Ong Bik Jeun
*/
@Stateless
public class UserSessionBean implements UserSessionBeanRemote, UserSessionBeanLocal {

    @PersistenceContext(unitName = "MerlionFlightReservationSystem-ejbPU")
    private EntityManager em;

    public UserSessionBean() {
    }

    /**
     * Looks up a user by primary key.
     *
     * @param userID primary key of the user record
     * @return the managed {@link UserEntity}
     * @throws UserNotFoundException when no user matches the given id
     */
    @Override
    public UserEntity retrieveUserById(Long userID) throws UserNotFoundException {
        UserEntity user = em.find(UserEntity.class, userID);
        if (user == null) {
            throw new UserNotFoundException("User with " + userID + " does not exist!");
        }
        return user;
    }
}
| 24.428571 | 88 | 0.704678 |
4874ced9a998440880d95e1b20db5581515786c1 | 3,227 | swift | Swift | Sources/KeyboardReactable/KeyboardReactable.swift | mobilee/fastlee | 77f73a1123ced814cdb27ca3bfdd04878e75428e | [
"MIT"
] | null | null | null | Sources/KeyboardReactable/KeyboardReactable.swift | mobilee/fastlee | 77f73a1123ced814cdb27ca3bfdd04878e75428e | [
"MIT"
] | null | null | null | Sources/KeyboardReactable/KeyboardReactable.swift | mobilee/fastlee | 77f73a1123ced814cdb27ca3bfdd04878e75428e | [
"MIT"
] | null | null | null | //
// KeyboardReactable.swift
// Fastlee
//
// Created by Lukasz Szarkowicz on 11/02/2022.
// Copyright © 2022 Mobilee. All rights reserved.
//
import UIKit
public protocol KeyboardReactable {
    /// The scroll view whose insets/offset are adjusted when the keyboard
    /// appears or disappears.
    var scrollView: UIScrollView { get }
}
public extension KeyboardReactable where Self: UIView {
    /**
     Default implementation with animation when keyboard show
     Example of usage:
     ```
     NotificationCenter.default.publisher(for: UIApplication.keyboardWillShowNotification)
     .receive(on: RunLoop.main)
     .sink { notification in
     self.handleKeyboardWillShow(notification)
     }
     .store(in: &bag)
     ```
     - parameter notification: Pass received keyboard notification.
     - Author: Mobilee - Łukasz Szarkowicz
     */
    func handleKeyboardWillShow(_ notification: Notification) {
        // Extra breathing room (in points) kept between the focused field and
        // the top of the keyboard.
        let safeOffset = 32.0
        guard let info = notification.userInfo as? [String: AnyObject] else { return }
        guard let keyboardSize = (info[UIResponder.keyboardFrameEndUserInfoKey] as? CGRect)?.size, keyboardSize.height > 0 else { return }
        // Create content insets by subtracting keyboard height
        var contentInsets = scrollView.contentInset
        contentInsets.bottom = keyboardSize.height
        var contentOffset = scrollView.contentOffset
        // calculate scroll position so the focused text field stays visible
        if let firstResponsder = findFirstResponder() as? UITextField {
            let screenHeight = frame.size.height
            let keyboardTopPoint = screenHeight - keyboardSize.height
            // NOTE(review): the quarter-height anchor appears to place the
            // field in the upper quarter of the visible area -- presumably a
            // design choice; confirm before changing.
            let offset = keyboardTopPoint / 4.0 + safeOffset
            let posY = scrollView.convert(firstResponsder.frame.origin, from: firstResponsder).y
            // clamped(to:) is a project extension; the upper bound is just a
            // large cap, the meaningful constraint is the lower bound of 0.
            contentOffset.y = (posY - offset).clamped(to: 0...100000)
        }
        DispatchQueue.main.async {
            UIView.animate(withDuration: 0.3, delay: 0, options: .curveEaseInOut, animations: {
                self.scrollView.contentOffset = contentOffset
                self.scrollView.contentInset = contentInsets
                self.scrollView.scrollIndicatorInsets = contentInsets
                self.layoutIfNeeded()
            }, completion: nil)
        }
    }
    /**
     Default implementation with animation when keyboard hide
     Example of usage:
     ```
     NotificationCenter.default.publisher(for: UIApplication.keyboardWillHideNotification)
     .receive(on: RunLoop.main)
     .sink { notification in
     self.handleKeyboardWillHide(notification)
     }
     .store(in: &bag)
     ```
     - parameter notification: Pass received keyboard notification.
     - Author: Mobilee - Łukasz Szarkowicz
     */
    func handleKeyboardWillHide(_ notification: Notification) {
        // NOTE(review): 0.35s here vs 0.3s in the show path -- presumably
        // intentional, but confirm the asymmetry.
        DispatchQueue.main.async {
            UIView.animate(withDuration: 0.35, delay: 0, options: .curveEaseInOut, animations: {
                self.scrollView.contentInset = UIEdgeInsets.zero
                self.scrollView.scrollIndicatorInsets = UIEdgeInsets.zero
                self.layoutIfNeeded()
            }, completion: nil)
        }
    }
}
| 33.968421 | 138 | 0.630307 |
dd955d6af2c80951df0d217b92ca889a3f0da04a | 2,596 | go | Go | examples/autocert/autocert.go | cpu/acme-2 | bab39d3b6fce2e6bbc29f272d6ca97cfb708fdd3 | [
"MIT"
] | null | null | null | examples/autocert/autocert.go | cpu/acme-2 | bab39d3b6fce2e6bbc29f272d6ca97cfb708fdd3 | [
"MIT"
] | null | null | null | examples/autocert/autocert.go | cpu/acme-2 | bab39d3b6fce2e6bbc29f272d6ca97cfb708fdd3 | [
"MIT"
] | null | null | null | package main
// An example which uses autocert to issue certificates for connecting hosts
// Uses the Let's Encrypt staging environment and fake root certificate
//
// Can be tested like the following,
// - go run autocert.go
// - ngrok http 80
// - openssl s_client -connect localhost:443 -servername [NGROK FORWARDING HOSTNAME]
import (
"net/http"
"crypto/tls"
"log"
"github.com/eggsampler/acme"
"github.com/eggsampler/acme/autocert"
)
// main wires autocert into a plain net/http setup: port 80 serves the ACME
// HTTP-01 challenge handler (falling through to the default mux), while port
// 443 serves TLS with certificates issued on demand for connecting hostnames.
func main() {
	// Certificate manager pointed at the Let's Encrypt staging directory.
	// RootCert is the staging environment's fake intermediate so that
	// issued chains can be verified.
	certManager := autocert.AutoCert{
		DirectoryURL: acme.LetsEncryptStaging,
		RootCert: `-----BEGIN CERTIFICATE-----
MIIEqzCCApOgAwIBAgIRAIvhKg5ZRO08VGQx8JdhT+UwDQYJKoZIhvcNAQELBQAw
GjEYMBYGA1UEAwwPRmFrZSBMRSBSb290IFgxMB4XDTE2MDUyMzIyMDc1OVoXDTM2
MDUyMzIyMDc1OVowIjEgMB4GA1UEAwwXRmFrZSBMRSBJbnRlcm1lZGlhdGUgWDEw
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDtWKySDn7rWZc5ggjz3ZB0
8jO4xti3uzINfD5sQ7Lj7hzetUT+wQob+iXSZkhnvx+IvdbXF5/yt8aWPpUKnPym
oLxsYiI5gQBLxNDzIec0OIaflWqAr29m7J8+NNtApEN8nZFnf3bhehZW7AxmS1m0
ZnSsdHw0Fw+bgixPg2MQ9k9oefFeqa+7Kqdlz5bbrUYV2volxhDFtnI4Mh8BiWCN
xDH1Hizq+GKCcHsinDZWurCqder/afJBnQs+SBSL6MVApHt+d35zjBD92fO2Je56
dhMfzCgOKXeJ340WhW3TjD1zqLZXeaCyUNRnfOmWZV8nEhtHOFbUCU7r/KkjMZO9
AgMBAAGjgeMwgeAwDgYDVR0PAQH/BAQDAgGGMBIGA1UdEwEB/wQIMAYBAf8CAQAw
HQYDVR0OBBYEFMDMA0a5WCDMXHJw8+EuyyCm9Wg6MHoGCCsGAQUFBwEBBG4wbDA0
BggrBgEFBQcwAYYoaHR0cDovL29jc3Auc3RnLXJvb3QteDEubGV0c2VuY3J5cHQu
b3JnLzA0BggrBgEFBQcwAoYoaHR0cDovL2NlcnQuc3RnLXJvb3QteDEubGV0c2Vu
Y3J5cHQub3JnLzAfBgNVHSMEGDAWgBTBJnSkikSg5vogKNhcI5pFiBh54DANBgkq
hkiG9w0BAQsFAAOCAgEABYSu4Il+fI0MYU42OTmEj+1HqQ5DvyAeyCA6sGuZdwjF
UGeVOv3NnLyfofuUOjEbY5irFCDtnv+0ckukUZN9lz4Q2YjWGUpW4TTu3ieTsaC9
AFvCSgNHJyWSVtWvB5XDxsqawl1KzHzzwr132bF2rtGtazSqVqK9E07sGHMCf+zp
DQVDVVGtqZPHwX3KqUtefE621b8RI6VCl4oD30Olf8pjuzG4JKBFRFclzLRjo/h7
IkkfjZ8wDa7faOjVXx6n+eUQ29cIMCzr8/rNWHS9pYGGQKJiY2xmVC9h12H99Xyf
zWE9vb5zKP3MVG6neX1hSdo7PEAb9fqRhHkqVsqUvJlIRmvXvVKTwNCP3eCjRCCI
PTAvjV+4ni786iXwwFYNz8l3PmPLCyQXWGohnJ8iBm+5nk7O2ynaPVW0U2W+pt2w
SVuvdDM5zGv2f9ltNWUiYZHJ1mmO97jSY/6YfdOUH66iRtQtDkHBRdkNBsMbD+Em
2TgBldtHNSJBfB3pm9FblgOcJ0FSWcUDWJ7vO0+NTXlgrRofRT6pVywzxVo6dND0
WzYlTWeUVsO40xJqhgUQRER9YLOLxJ0O6C8i0xFxAMKOtSdodMB3RIwt7RFQ0uyt
n5Z5MqkYhlMI3J1tPRTp1nEt9fyGspBOO05gi148Qasp+3N+svqKomoQglNoAxU=
-----END CERTIFICATE-----`,
	}

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("Hello, World!"))
	})

	// Plain HTTP listener: answers ACME HTTP-01 challenges; all other
	// requests fall through to the default mux registered above.
	go func() {
		log.Fatal(http.ListenAndServe(":80", certManager.HTTPHandler(nil)))
	}()

	// TLS listener: certificates are resolved per connecting SNI hostname.
	tlsServer := &http.Server{
		Addr: ":443",
		TLSConfig: &tls.Config{
			GetCertificate: certManager.GetCertificate,
		},
	}
	log.Fatal(tlsServer.ListenAndServeTLS("", ""))
}
| 36.56338 | 84 | 0.861325 |
84ed9354a09873744ff5ac2aa384005a2651d37c | 814 | h | C | Term2/Lab2_01/Calculator.h | TheLeonelus/SGN_Labs | 8e979947c0e6ca714957203944cdd5fce69cad5f | [
"MIT"
] | 1 | 2022-02-10T09:20:42.000Z | 2022-02-10T09:20:42.000Z | Term2/Lab2_01/Calculator.h | TheLeonelus/SGN3_Labs | 8e979947c0e6ca714957203944cdd5fce69cad5f | [
"MIT"
] | null | null | null | Term2/Lab2_01/Calculator.h | TheLeonelus/SGN3_Labs | 8e979947c0e6ca714957203944cdd5fce69cad5f | [
"MIT"
] | null | null | null | #pragma once
#include <QtCore>
#include <QObject>
#include <QWidget>
#include <QPushButton>
#include <QGridLayout>
#include <QApplication>
#include <QLCDNumber>
#include <QtMath>
#include <QColorDialog>
#define PRECISION 0.000000001
class QLCDNumber;
class QPushButton;
// Qt calculator widget: a button grid driving LCD displays, with pending
// operations tracked on a string stack (behavior lives in the .cpp file).
class Calculator : public QWidget
{
    Q_OBJECT
private:
    QLCDNumber* m_plcd;      // main LCD display
    QLCDNumber* m_debug;     // secondary LCD (named "debug" — presumably diagnostic output; confirm in .cpp)
    QStack<QString> m_stk;   // stack of entered tokens used when evaluating operations
    QString m_strDisplay;    // text currently shown/being typed
    double sumInMemory;      // value held by the calculator's memory feature
public:
    Calculator(QWidget* pwgt = 0);
    // Creates a grid button with the given label and background color.
    QPushButton* createButton(const QString& str, const QColor col);
    // Creates a button wired to unaryOperatorButtonClicked() instead of the
    // regular handler (assumed from the naming — confirm in .cpp).
    QPushButton* createButtonOfUnaryOperator(const QString& str, const QColor col);
    // Evaluates the pending two-operand operation.
    void calculateBinaryOperation();
    // Resets display/state after an invalid operation.
    void abortOperation();
public slots:
    void slotButtonClicked();              // handles presses of the standard buttons
    void unaryOperatorButtonClicked();     // handles presses of unary-operator buttons
};
| 20.871795 | 83 | 0.732187 |
a7b04629c531a6306af7beb412671aeb6acc57b2 | 277 | kt | Kotlin | kotlin-mui/src/main/generated/muix/pickers/MobileDateTimePicker.kt | stefanthaler/kotlin-wrappers | 9cc5be239d16defa23ccbc89c86527418d625fe4 | [
"Apache-2.0"
] | 14 | 2021-11-22T17:50:52.000Z | 2022-03-16T14:15:54.000Z | mui-kotlin/src/main/kotlin/muix/pickers/MobileDateTimePicker.kt | garagum/mui-kotlin | 02e8cad0091c51567d3d9f9bcf261afd3cb25c30 | [
"Apache-2.0"
] | null | null | null | mui-kotlin/src/main/kotlin/muix/pickers/MobileDateTimePicker.kt | garagum/mui-kotlin | 02e8cad0091c51567d3d9f9bcf261afd3cb25c30 | [
"Apache-2.0"
] | 2 | 2021-11-22T17:50:55.000Z | 2022-02-28T18:51:07.000Z | // Automatically generated - do not modify!
@file:JsModule("@mui/x-date-pickers/MobileDateTimePicker")
@file:JsNonModule
package muix.pickers
/** Props for the `MobileDateTimePicker` component (no members declared here). */
external interface MobileDateTimePickerProps : react.Props

/** React function component exported by `@mui/x-date-pickers/MobileDateTimePicker`. */
external val MobileDateTimePicker: react.FC<MobileDateTimePickerProps>
| 23.083333 | 70 | 0.823105 |