text stringlengths 1 1.05M |
|---|
<reponame>toastier/srf
(function () {
  'use strict';

  angular
    .module('users')
    .controller('NoAccessController', NoAccessController);

  /**
   * Controller for the "no access" page: resets navigation state and
   * sets the view title shown to the user.
   * @param {Object} Navigation - project navigation service.
   */
  function NoAccessController(Navigation) {
    var vm = this;

    activate();

    function activate() {
      Navigation.clear();
      Navigation.viewTitle.set('You Do Not Have Access');
    }
  }
})();
|
/*
* Copyright 2016 NIIT Ltd, Wipro Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Contributors:
*
* 1. <NAME>
* 2. <NAME>
* 3. <NAME>
* 4. <NAME>
* 5. <NAME>
* 6. <NAME>
* 7. <NAME>
*/
var express = require('express'),
    router = express.Router(),
    utils = require('./utils'),
    path = require('path'),
    Widget = require('../config/db').widgetModel,
    User = require('../config/db').userModel;

// All widget routes require an authenticated session.
router.use(utils.isAuthenticated);

// GET / — return all widgets.
router.get('/', function (req, res, next) {
  Widget.getWidgets(function (data) {
    res.send(data);
  });
});

// GET /getNewWidgetId — allocate and return a fresh widget id.
router.get('/getNewWidgetId', function (req, res, next) {
  Widget.getNewWidgetId(function (id) {
    res.send(id);
  });
});

// POST /saveWidget — persist tab/widget layout for a user.
// Falls back to the authenticated user's id when none is supplied in the body.
router.post('/saveWidget', function (req, res, next) {
  var userid = (typeof req.body.userid === 'undefined') ? req.user._id : req.body.userid;
  Widget.saveWidget(userid, req.body.tabs, req.body.widgetList, User);
  res.send({resp:"Widgets updated successfully"});
});

// POST /renameTitle — rename a widget.
// BUG FIX: the original handler never sent a response, so every
// /renameTitle request hung until the client timed out.
router.post('/renameTitle', function (req, res, next) {
  Widget.renameTitle(req.body.widgetId, req.body.title);
  res.send({resp:"Widget title updated successfully"});
});

module.exports = router;
|
def is_anagram(word1, word2):
    """Return True if word1 and word2 contain exactly the same letters."""
    # Two strings are anagrams iff their sorted character sequences match.
    return sorted(word1) == sorted(word2)
# main program: print every anagram pair found in the word list
words = ['army', 'mary', 'cat', 'act', 'rat', 'tar']
for i, first in enumerate(words):
    for second in words[i + 1:]:
        if is_anagram(first, second):
            print('%s is an anagram of %s' % (first, second))
<gh_stars>0
package infinispan
import (
"fmt"
"strings"
v1 "github.com/infinispan/infinispan-operator/pkg/apis/infinispan/v1"
consts "github.com/infinispan/infinispan-operator/pkg/controller/constants"
config "github.com/infinispan/infinispan-operator/pkg/infinispan/configuration"
kube "github.com/infinispan/infinispan-operator/pkg/kubernetes"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
EncryptKeystoreName = "keystore.p12"
EncryptKeystorePath = ServerRoot + "/conf/keystore"
)
func ConfigureServerEncryption(i *v1.Infinispan, c *config.InfinispanConfiguration, client client.Client) (*reconcile.Result, error) {
if !i.IsEncryptionEnabled() {
return nil, nil
}
secretContains := func(secret *corev1.Secret, keys ...string) bool {
for _, k := range keys {
if _, ok := secret.Data[k]; !ok {
return false
}
}
return true
}
configureNewKeystore := func(c *config.InfinispanConfiguration) {
c.Keystore.CrtPath = consts.ServerEncryptKeystoreRoot
c.Keystore.Path = EncryptKeystorePath
c.Keystore.Password = "password"
c.Keystore.Alias = "server"
}
// Configure Keystore
keystoreSecret := &corev1.Secret{}
if result, err := kube.LookupResource(i.GetKeystoreSecretName(), i.Namespace, keystoreSecret, client, log, eventRec); result != nil {
return result, err
}
if i.IsEncryptionCertFromService() {
if strings.Contains(i.Spec.Security.EndpointEncryption.CertServiceName, "openshift.io") {
configureNewKeystore(c)
}
} else {
if secretContains(keystoreSecret, EncryptKeystoreName) {
// If user provide a keystore in secret then use it ...
c.Keystore.Path = fmt.Sprintf("%s/%s", consts.ServerEncryptKeystoreRoot, EncryptKeystoreName)
c.Keystore.Password = string(keystoreSecret.Data["password"])
c.Keystore.Alias = string(keystoreSecret.Data["alias"])
} else if secretContains(keystoreSecret, corev1.TLSPrivateKeyKey, corev1.TLSCertKey) {
configureNewKeystore(c)
}
}
// Configure Truststore
if i.IsClientCertEnabled() {
trustSecret := &corev1.Secret{}
if result, err := kube.LookupResource(i.GetTruststoreSecretName(), i.Namespace, trustSecret, client, log, eventRec); result != nil {
return result, err
}
c.Endpoints.ClientCert = string(i.Spec.Security.EndpointEncryption.ClientCert)
c.Truststore.Path = fmt.Sprintf("%s/%s", consts.ServerEncryptTruststoreRoot, consts.EncryptTruststoreKey)
if userPass, ok := trustSecret.Data[consts.EncryptTruststorePasswordKey]; ok {
c.Truststore.Password = string(user<PASSWORD>)
} else {
c.Truststore.Password = "password"
}
}
return nil, nil
}
|
<filename>source/ws.c
#include <winsock2.h>
#include <windows.h>
#include <psapi.h>
#include <stdio.h>
#include "ws.h"
#include "misc.h"
#include "plugins.h"
#include "list.h"
#define MAX_PACKET 4096
typedef int (WINAPI *tWS)(SOCKET, const char*, int, int); //For base functions
static DWORD WINAPI initialize(LPVOID param);
static void revert();
static int WINAPI repl_recv(SOCKET s, const char *buf, int len, int flags);
static int WINAPI repl_send(SOCKET s, const char *buf, int len, int flags);
//Trampolenes
static int (WINAPI *pRecv)(SOCKET s, const char* buf, int len, int flags) = NULL;
static int (WINAPI *pSend)(SOCKET s, const char* buf, int len, int flags) = NULL;
//Keep track to undo change before closing
static BYTE replaced_send[10];
static BYTE replaced_recv[10];
static DWORD orig_size_send = 0;
static DWORD orig_size_recv = 0;
static DWORD addr_send = 0;
static DWORD addr_recv = 0;
/* Registers a plugin callback on the send and/or recv handler list.
 * Returns an opaque handle (the list-node pointer) to pass to
 * unregister_handler, or 0 on allocation failure. */
LIBAPI DWORD register_handler(tWS_plugin func, WS_HANDLER_TYPE type, char *comment)
{
	if(comment == NULL)
		comment = (char*)"";

	struct WS_handler *t = (struct WS_handler*)malloc(sizeof(struct WS_handler));
	if(t == NULL)
		return 0;

	t->func = func;
	/* BUG FIX: the original allocated strlen(comment) bytes, leaving no room
	 * for the NUL terminator, so the strcpy below wrote one byte past the
	 * end of the buffer. Allocate strlen + 1. */
	t->comment = (char*)malloc(strlen(comment) + 1);
	if(t->comment == NULL)
	{
		free(t);
		return 0;
	}
	strcpy(t->comment, comment);

	if(type & WS_HANDLER_SEND)
		list_add_tail(&(t->ws_handlers_send),&(ws_handlers.ws_handlers_send));
	else
		list_add_tail(&(t->ws_handlers_recv),&(ws_handlers.ws_handlers_recv));

	return (DWORD)(t); //Returns pointer to node we just added
}
/* Removes a previously registered plugin callback from the matching list.
 * plugin_id is the opaque handle returned by register_handler; a zero
 * handle is ignored. Note: the node itself is not freed here. */
LIBAPI void unregister_handler(DWORD plugin_id, WS_HANDLER_TYPE type)
{
	struct WS_handler *handler;

	if(plugin_id == 0)
		return;

	handler = (struct WS_handler*)plugin_id;
	if(type & WS_HANDLER_SEND)
		list_del(&handler->ws_handlers_send);
	else
		list_del(&handler->ws_handlers_recv);
}
/* DLL entry point: on process attach (optionally gated on APPLICATION_NAME)
 * it spawns a worker thread that installs the send/recv hooks; on detach the
 * hooks are reverted and all loaded plugin modules are freed. */
BOOL APIENTRY DllMain(HINSTANCE instance, DWORD reason, LPVOID reserved)
{
	switch(reason)
	{
		case DLL_PROCESS_ATTACH:
		{
#ifdef APPLICATION_NAME
			/* Only stay loaded inside the configured target process. */
			char moduleName[MAX_PATH];
			GetModuleBaseName(GetCurrentProcess(), NULL, moduleName, MAX_PATH);
			if (strcmp(moduleName, APPLICATION_NAME))
				return FALSE;
#endif
			/* Patching happens off-thread: DllMain must stay lightweight
			 * (loader lock is held here). */
			CreateThread(NULL,0,initialize,NULL,0,NULL);
			break;
		}
		case DLL_PROCESS_DETACH:
			/* Put the original winsock bytes back before unloading plugins. */
			revert();
			list_for_each(t, &ws_plugins.plugins) //TODO: Change this to use unregister_handler instead, so it'll delete the lists properly :/
				FreeLibrary(list_entry(t, struct WS_plugins, plugins)->plugin);
			break;
		case DLL_THREAD_ATTACH:
			break;
		case DLL_THREAD_DETACH:
			break;
	}
	return TRUE;
}
/* Worker thread: installs JMP hooks over ws2_32 send() and recv().
 * For each function, apply_patch writes a 0xE9 (near JMP) to our replacement
 * and saves the overwritten prologue bytes; a trampoline is then built in
 * freshly allocated executable memory (saved prologue + JMP back past the
 * patch) so the original function remains callable via pSend/pRecv.
 * Finally the handler/plugin lists are initialized and plugins are loaded. */
static DWORD WINAPI initialize(LPVOID param)
{
	DWORD addr;
	BYTE replaced[10];
	DWORD orig_size;

	addr_send = (DWORD)GetProcAddress(GetModuleHandle(TEXT("WS2_32.dll")), "send");
	addr_recv = (DWORD)GetProcAddress(GetModuleHandle(TEXT("WS2_32.dll")), "recv");
	//TODO: Clean this area up and move these to some inline function
	addr = addr_send;
	if(apply_patch(0xE9,addr,(void*)(&repl_send),&orig_size_send, replaced_send)) //Note we only store this replaced because this is the original winsock function code, which we need to put back upon closing
	{
		/* orig_size_send << 2 over-allocates the trampoline; presumably to
		 * leave room for the jump-back patch — TODO confirm sizing. */
		pSend = (tWS)VirtualAlloc(NULL, orig_size_send << 2, MEM_RESERVE | MEM_COMMIT, PAGE_EXECUTE_READWRITE);
		memcpy((void*)pSend,replaced_send,orig_size_send);
		/* Append a JMP from the trampoline back into the original function
		 * just past the bytes we overwrote. */
		apply_patch(0xE9,(DWORD)pSend+orig_size_send,(void*)(addr+orig_size_send),&orig_size, replaced);
	}
	addr = addr_recv;
	if(apply_patch(0xE9,addr,(void*)(&repl_recv),&orig_size_recv, replaced_recv))
	{
		pRecv = (tWS)VirtualAlloc(NULL, orig_size_recv << 2, MEM_RESERVE | MEM_COMMIT, PAGE_EXECUTE_READWRITE);
		memcpy((void*)pRecv,replaced_recv,orig_size_recv);
		apply_patch(0xE9,(DWORD)pRecv+orig_size_recv,(void*)(addr+orig_size_recv),&orig_size, replaced);
	}
	//Initialize lists
	INIT_LIST_HEAD(&ws_handlers.ws_handlers_send);
	INIT_LIST_HEAD(&ws_handlers.ws_handlers_recv);
	INIT_LIST_HEAD(&ws_plugins.plugins);
	load_plugins("./plugins/", &ws_plugins);
	return 0;
}
/* Restores the original prologue bytes of the patched winsock send/recv
 * entry points. Safe to call when no patch was ever applied. */
static void revert()
{
	if(orig_size_send == 0 && orig_size_recv == 0)
		return;

	if(addr_send != 0)
		exec_copy(addr_send, replaced_send, orig_size_send);
	if(addr_recv != 0)
		exec_copy(addr_recv, replaced_recv, orig_size_recv);
}
/* Hook installed over ws2_32 send(): runs every registered send handler
 * (handlers may modify socket, length, or flags through the pointers)
 * before forwarding to the original send() via the pSend trampoline. */
static int WINAPI repl_send(SOCKET s, const char *buf, int len, int flags)
{
	list_for_each(t, &ws_handlers.ws_handlers_send)
		list_entry(t, struct WS_handler, ws_handlers_send)->func(&s,buf,&len,&flags);
	return pSend(s,buf,len,flags);
}
/* Hook installed over ws2_32 recv(): runs every registered recv handler
 * before forwarding to the original recv() via the pRecv trampoline. */
static int WINAPI repl_recv(SOCKET s, const char *buf, int len, int flags)
{
	list_for_each(t, &ws_handlers.ws_handlers_recv)
		list_entry(t, struct WS_handler, ws_handlers_recv)->func(&s,buf,&len,&flags);
	return pRecv(s,buf,len,flags);
}
|
<gh_stars>0
package ddbt.tpcc.itx
import java.util.Date
import ddbt.tpcc.tx._
/**
 * Shared in-memory transaction traits for the TPC-C benchmark
 * implementations (NewOrder, Payment, OrderStatus, Delivery, StockLevel).
 *
 * @author <NAME>
 */
/** Base trait for in-memory transactions: lets the benchmark runner inject
  * the shared table/database instance. Returns `self.type` for chaining. */
trait IInMemoryTx { self =>
  def setSharedData(db:AnyRef): self.type
}
/** Default implementation backed by a plain `TpccTable`. */
trait InMemoryTxImpl extends IInMemoryTx {
  // Shared table instance; set via setSharedData before use.
  var SharedData:TpccTable = null
  override def setSharedData(db:AnyRef) = {
    SharedData = db.asInstanceOf[TpccTable]
    this
  }
}
/** Variant that exposes the shared data through the `ITpccTable` interface. */
trait InMemoryTxImplViaITpccTable extends InMemoryTxImpl {
  var ISharedData:ITpccTable = null
  override def setSharedData(db:AnyRef) = {
    ISharedData = db.asInstanceOf[ITpccTable]
    this
  }
}
/** Variant backed by the MVCC table implementation, version 0. */
trait InMemoryTxImplViaMVCCTpccTableV0 extends InMemoryTxImpl {
  var ISharedData:MVCCTpccTableV0 = null
  override def setSharedData(db:AnyRef) = {
    ISharedData = db.asInstanceOf[MVCCTpccTableV0]
    this
  }
}
/** Variant backed by the MVCC table implementation, version 1. */
trait InMemoryTxImplViaMVCCTpccTableV1 extends InMemoryTxImpl {
  var ISharedData:MVCCTpccTableV1 = null
  override def setSharedData(db:AnyRef) = {
    ISharedData = db.asInstanceOf[MVCCTpccTableV1]
    this
  }
}
/** Variant backed by the MVCC table implementation, version 2. */
trait InMemoryTxImplViaMVCCTpccTableV2 extends InMemoryTxImpl {
  var ISharedData:MVCCTpccTableV2 = null
  override def setSharedData(db:AnyRef) = {
    ISharedData = db.asInstanceOf[MVCCTpccTableV2]
    this
  }
}
/** Variant backed by the MVCC table implementation, version 3. */
trait InMemoryTxImplViaMVCCTpccTableV3 extends InMemoryTxImpl {
  var ISharedData:MVCCTpccTableV3 = null
  override def setSharedData(db:AnyRef) = {
    ISharedData = db.asInstanceOf[MVCCTpccTableV3]
    this
  }
}
// trait InMemoryTxImplViaMVCCTpccTableV4 extends InMemoryTxImpl {
// var ISharedData:MVCCTpccTableV4 = null
// override def setSharedData(db:AnyRef) = {
// ISharedData = db.asInstanceOf[MVCCTpccTableV4]
// this
// }
// }
// Per-transaction marker traits combining each TPC-C transaction interface
// with the in-memory mixin.
trait INewOrderInMem extends INewOrder with IInMemoryTx
trait IPaymentInMem extends IPayment with IInMemoryTx
trait IOrderStatusInMem extends IOrderStatus with IInMemoryTx
trait IDeliveryInMem extends IDelivery with IInMemoryTx
trait IStockLevelInMem extends IStockLevel with IInMemoryTx
|
#!/usr/bin/env bash
# Uploads a built package to Gemfury and fails the build unless the
# server answers HTTP 200.
#
# Usage: upload.sh <path-to-package>
# Requires GEM_FURY_PUSH_TOKEN and GEM_FURY_USERNAME in the environment.
#
# Fixes: quote all expansions (paths with spaces, empty env vars) and
# abort early if curl itself fails instead of comparing an empty status.
set -euo pipefail

PACKAGE_DIST_PATH="$1"
RESPONSE_FILE=/tmp/upload.txt

STATUS_CODE=$(curl -F "package=@${PACKAGE_DIST_PATH}" -w '%{http_code}' \
  "https://${GEM_FURY_PUSH_TOKEN}@push.fury.io/${GEM_FURY_USERNAME}/" \
  -o "${RESPONSE_FILE}")

# Show the server's response body in the build log.
tail "${RESPONSE_FILE}"

if [[ ${STATUS_CODE} -ne 200 ]]; then
  echo "Unexpected HTTP response status code ${STATUS_CODE}"
  exit 1
fi
|
import { Component, Input, Output, EventEmitter } from '@angular/core';
@Component({
selector: 'employee-count',
templateUrl: 'app/employee/views/employeeCount.component.html',
styleUrls: ['css/employeeCount.component.css']
})
export class EmployeeCountComponent {
selectedRadioButtonValue : string = 'All';
@Output()
countRadioButtonSelectionChanged : EventEmitter<string> =new EventEmitter<string>();
/**Custom event*/
@Input()
all: number ;
@Input()
male: number ;
@Input()
female: number ;
/**Raise the custom event*/
onRadioButtonSelectionChange()
{
this.countRadioButtonSelectionChanged.emit(this.selectedRadioButtonValue)
}
} |
import requests
class HumbleBundleAPI:
    """Thin client for the Humble Bundle web API.

    NOTE(review): relies on module-level URL constants (LOGIN_URL,
    ORDER_LIST_URL, ORDER_URL, TROVE_SIGN_URL, TROVE_PAGE_URL) and
    TROVE_GAMEKEY defined elsewhere in this module — confirm they are
    in scope before use.
    """

    def __init__(self):
        # One session so cookies (including the auth cookie) persist
        # across calls.
        self.session = requests.Session()

    def login(self, username, password):
        """Authenticate with the given credentials; raises on HTTP error."""
        login_data = {
            'username': username,
            'password': password
        }
        response = self.session.post(LOGIN_URL, data=login_data)
        response.raise_for_status()  # Raise an exception for 4xx/5xx status codes
        # The authentication token is stored as a cookie named _simpleauth_sess

    def get_order_list(self):
        """Return the JSON list of orders for the logged-in user."""
        response = self.session.get(ORDER_LIST_URL)
        response.raise_for_status()
        return response.json()

    def get_order_details(self, order_id):
        """Return the JSON details for a single order."""
        order_url = ORDER_URL.format(order_id=order_id)
        response = self.session.get(order_url)
        response.raise_for_status()
        return response.json()

    def sign_trove_url(self, url):
        """Sign a Trove download URL; returns the signed-URL JSON payload."""
        data = {
            'url': url,
            'gamekey': TROVE_GAMEKEY
        }
        response = self.session.post(TROVE_SIGN_URL, data=data)
        response.raise_for_status()
        return response.json()

    def get_trove_page(self, chunk_index):
        """Return one page (chunk) of the Trove catalog as JSON."""
        trove_page_url = TROVE_PAGE_URL.format(chunk_index=chunk_index)
        response = self.session.get(trove_page_url)
        response.raise_for_status()
        return response.json()
<reponame>getmetamapper/metamapper<gh_stars>10-100
# -*- coding: utf-8 -*-
import unittest.mock as mock
import app.comments.models as models
import app.comments.serializers as serializers
import app.audit.models as audit
import testutils.cases as cases
import testutils.decorators as decorators
import testutils.factories as factories
import testutils.helpers as helpers
@mock.patch('app.omnisearch.tasks.update_single_es_object.delay')
class CreateCommentTests(cases.GraphQLTestCase):
    """Tests for creating a comment.
    """
    factory = factories.CommentFactory

    operation = 'createComment'
    statement = '''
    mutation CreateComment(
      $objectId: ID!,
      $html: String!,
      $parentId: ID,
    ) {
      createComment(input: {
        objectId: $objectId,
        html: $html,
        parentId: $parentId,
      }) {
        comment {
          html
          parent {
            pk
          }
        }
        errors {
          resource
          field
          code
        }
      }
    }
    '''

    def setUp(self):
        super().setUp()
        # A column (in the test workspace) to attach comments to.
        self.resource = factories.ColumnFactory(workspace_id=self.workspace.pk)
        self.objectid = helpers.to_global_id('ColumnType', self.resource.pk)

    def _get_attributes(self, **overrides):
        """Generate testing data.
        """
        attributes = {
            'objectId': self.objectid,
            'parentId': None,
            'html': ''.join(helpers.faker.sentences(nb=3)),
        }
        attributes.update(**overrides)
        return attributes

    def execute_success_test_case(self):
        """It should create the comment.
        """
        variables = self._get_attributes()
        response = self.execute(variables=variables)
        response = response['data'][self.operation]
        self.assertEqual(response, {
            'comment': {
                'html': variables['html'],
                'parent': None,
            },
            'errors': None
        })
        self.assertInstanceCreated(models.Comment, html=variables['html'])
        # Every successful create must leave an audit-trail entry.
        self.assertInstanceCreated(
            audit.Activity,
            verb='commented on',
            **serializers.get_audit_kwargs(models.Comment.objects.last()),
        )

    @decorators.as_someone(['MEMBER', 'OWNER'])
    def test_valid(self, mock_es_update):
        """It should create the comment successfully.
        """
        self.execute_success_test_case()
        self.assertTrue(mock_es_update.called)

    @decorators.as_someone(['OWNER'])
    def test_valid_with_object_permission_as_owner(self, mock_es_update):
        """It should create the comment successfully.
        """
        self.resource.datastore.object_permissions_enabled = True
        self.resource.datastore.save()
        self.execute_success_test_case()
        self.assertTrue(mock_es_update.called)

    @decorators.as_someone(['MEMBER'])
    def test_valid_with_object_permission_as_member(self, mock_es_update):
        """It should create the comment successfully.
        """
        self.resource.datastore.object_permissions_enabled = True
        self.resource.datastore.save()
        permissions = [
            'definitions.change_datastore_settings',
            'definitions.comment_on_datastore',
            'definitions.change_datastore_metadata',
        ]
        for permission in permissions:
            self.resource.datastore.assign_perm(self.user, permission)
        self.execute_success_test_case()
        self.assertTrue(mock_es_update.called)

    @decorators.as_someone(['MEMBER'])
    def test_invalid_without_object_permission(self, mock_es_update):
        """It should return a "Permission Denied" error.
        """
        self.resource.datastore.object_permissions_enabled = True
        self.resource.datastore.save()
        # Deliberately missing 'definitions.comment_on_datastore'.
        permissions = [
            'definitions.change_datastore_settings',
            'definitions.change_datastore_metadata',
        ]
        for permission in permissions:
            self.resource.datastore.assign_perm(self.user, permission)
        variables = self._get_attributes()
        response = self.execute(variables=variables)
        self.assertPermissionDenied(response)
        self.assertFalse(mock_es_update.called)

    @decorators.as_someone(['READONLY'])
    def test_invalid_with_object_permission_as_readonly(self, mock_es_update):
        """It should return a "Permission Denied" error.
        """
        self.resource.datastore.object_permissions_enabled = True
        self.resource.datastore.save()
        permissions = [
            'definitions.comment_on_datastore',
            'definitions.change_datastore_metadata',
        ]
        for permission in permissions:
            self.resource.datastore.assign_perm(self.user, permission)
        variables = self._get_attributes()
        response = self.execute(variables=variables)
        self.assertPermissionDenied(response)
        self.assertFalse(mock_es_update.called)

    @decorators.as_someone(['MEMBER', 'OWNER'])
    def test_when_invalid_html(self, mock_es_update):
        """It should NOT create the comment.
        """
        variables = self._get_attributes(html='')
        response = self.execute(variables=variables)
        response = response['data'][self.operation]
        self.assertEqual(response, {
            'comment': None,
            'errors': [
                {
                    'resource': 'Comment',
                    'field': 'html',
                    'code': 'blank',
                },
            ],
        })
        self.assertFalse(mock_es_update.called)

    @decorators.as_someone(['MEMBER', 'OWNER'])
    def test_when_valid_parent(self, mock_es_update):
        """It should create the comment with a parent.
        """
        parent = factories.CommentFactory(content_object=self.resource)
        # CONSISTENCY FIX: pass the primary key (as every other call site
        # does), not the model instance, when building the relay global id.
        parent_id = helpers.to_global_id('CommentType', parent.pk)
        variables = self._get_attributes(parentId=parent_id)
        response = self.execute(variables=variables)
        response = response['data'][self.operation]
        self.assertEqual(response, {
            'comment': {
                'html': variables['html'],
                'parent': {
                    'pk': parent.pk,
                },
            },
            'errors': None
        })
        self.assertInstanceCreated(models.Comment, html=variables['html'])
        self.assertInstanceCreated(
            audit.Activity,
            verb='commented on',
            **serializers.get_audit_kwargs(models.Comment.objects.last()),
        )
        self.assertTrue(mock_es_update.called)
@mock.patch('app.omnisearch.tasks.update_single_es_object.delay')
class UpdateCommentTests(cases.GraphQLTestCase):
    """Tests for updating a comment.
    """
    factory = factories.CommentFactory

    operation = 'updateComment'
    statement = '''
    mutation UpdateComment(
      $id: ID!,
      $html: String!,
    ) {
      updateComment(input: {
        id: $id,
        html: $html,
      }) {
        comment {
          pk
          html
        }
        errors {
          resource
          field
          code
        }
      }
    }
    '''

    def setUp(self):
        super().setUp()
        # A column (in the test workspace) to attach comments to.
        self.resource = factories.ColumnFactory(workspace_id=self.workspace.pk)
        self.objectid = helpers.to_global_id('ColumnType', self.resource.pk)

    @decorators.as_someone(['MEMBER', 'OWNER'])
    def test_when_valid_html(self, mock_es_update):
        """It should update the HTML of a comment.
        """
        resource = factories.CommentFactory(content_object=self.resource, author=self.user)
        globalid = helpers.to_global_id('CommentType', resource.pk)
        response = self.execute(variables={'id': globalid, 'html': '<p>This is valid html</p>'})
        response = response['data'][self.operation]
        self.assertInstanceUpdated(resource, html='<p>This is valid html</p>')
        self.assertEqual(response, {
            'comment': {
                'pk': resource.pk,
                'html': '<p>This is valid html</p>',
            },
            'errors': None,
        })
        # Every successful update must leave an audit-trail entry.
        self.assertInstanceCreated(
            audit.Activity,
            verb='updated comment on',
            **serializers.get_audit_kwargs(resource),
        )
        self.assertTrue(mock_es_update.called)

    @decorators.as_someone(['MEMBER', 'OWNER'])
    def test_when_not_exists(self, mock_es_update):
        """It should return a 404 error.
        """
        globalid = helpers.to_global_id('CommentType', '1234')
        response = self.execute(variables={'id': globalid, 'html': 'Test'})
        # NOTE(review): docstring says 404, but the assertion checks for
        # "Permission Denied" — presumably missing objects are masked as
        # permission errors; confirm intended behavior.
        self.assertPermissionDenied(response)
        self.assertFalse(mock_es_update.called)

    @decorators.as_someone(['OWNER', 'OUTSIDER', 'ANONYMOUS'])
    def test_update_other_user_comment(self, mock_es_update):
        """It should not update the comment of another user.
        """
        resource = factories.CommentFactory(content_object=self.resource, author=self.users['MEMBER'])
        globalid = helpers.to_global_id('CommentType', resource.pk)
        variables = {'id': globalid, 'html': 'Test'}
        response = self.execute(variables=variables)
        self.assertPermissionDenied(response)
        self.assertInstanceDoesNotExist(
            audit.Activity,
            verb='updated comment on',
            **serializers.get_audit_kwargs(resource),
        )
        self.assertFalse(mock_es_update.called)
@mock.patch('app.omnisearch.tasks.remove_single_es_object.delay')
class DeleteCommentTests(cases.GraphQLTestCase):
    """Tests for deleting a comment.
    """
    factory = factories.CommentFactory

    operation = 'deleteComment'
    statement = '''
    mutation DeleteComment(
      $id: ID!,
    ) {
      deleteComment(input: {
        id: $id,
      }) {
        ok
        errors {
          resource
          field
          code
        }
      }
    }
    '''

    def test_on_own_comment(self, mock_es_remove):
        """It should permanently delete the comment.
        """
        resource = self.factory(workspace_id=self.workspace.pk, author=self.user)
        globalid = helpers.to_global_id('CommentType', resource.pk)
        response = self.execute(variables={'id': globalid})
        response = response['data'][self.operation]
        self.assertOk(response)
        self.assertInstanceDeleted(models.Comment, pk=resource.pk)
        self.assertTrue(mock_es_remove.called)

    @decorators.as_someone(['MEMBER', 'ANONYMOUS'])
    def test_update_other_user_comment(self, mock_es_remove):
        """It should not delete the comment.
        """
        resource = self.factory(workspace_id=self.workspace.pk)
        globalid = helpers.to_global_id('CommentType', resource.pk)
        response = self.execute(variables={'id': globalid})
        self.assertPermissionDenied(response)
        self.assertFalse(mock_es_remove.called)

    @decorators.as_someone(['MEMBER', 'OWNER'])
    def test_when_not_exists(self, mock_es_remove):
        """It should return a 404 error.
        """
        globalid = helpers.to_global_id('CommentType', '1234')
        # CONSISTENCY FIX: DeleteComment declares only an $id variable; the
        # stray 'html' entry (copied from the update tests) was dropped.
        response = self.execute(variables={'id': globalid})
        self.assertPermissionDenied(response)
        self.assertFalse(mock_es_remove.called)
class TogglePinnedCommentTests(cases.GraphQLTestCase):
    """Tests for pinning a comment.
    """
    factory = factories.CommentFactory

    operation = 'togglePinnedComment'
    statement = '''
    mutation TogglePinnedComment(
      $id: ID!,
    ) {
      togglePinnedComment(input: {
        id: $id,
      }) {
        comment {
          pk
          isPinned
          pinnedBy {
            email
          }
        }
        errors {
          resource
          field
          code
        }
      }
    }
    '''

    def execute_success_test_case(self, resource):
        """It should pin the comment.
        """
        globalid = helpers.to_global_id('CommentType', resource.pk)
        response = self.execute(variables={'id': globalid})
        response = response['data'][self.operation]
        self.assertEqual(response, {
            'comment': {
                'pk': resource.pk,
                'isPinned': True,
                'pinnedBy': {
                    'email': self.user.email,
                }
            },
            'errors': None,
        })

    def test_when_not_pinned(self):
        """It should pin the comment.
        """
        resource = self.factory(workspace_id=self.workspace.pk, author=self.user)
        self.execute_success_test_case(resource)

    def test_when_pinned(self):
        """It should unpin the comment.
        """
        resource = self.factory(workspace_id=self.workspace.pk, author=self.user)
        # Pre-pin so the toggle flips it back off.
        resource.pin(self.user)
        globalid = helpers.to_global_id('CommentType', resource.pk)
        response = self.execute(variables={'id': globalid})
        response = response['data'][self.operation]
        self.assertEqual(response, {
            'comment': {
                'pk': resource.pk,
                'isPinned': False,
                'pinnedBy': None,
            },
            'errors': None,
        })

    @decorators.as_someone(['OUTSIDER', 'READONLY'])
    def test_query_when_not_authorized(self):
        """Outside users should not be able to access this resource.
        """
        resource = self.factory(workspace_id=self.workspace.pk, author=self.user)
        globalid = helpers.to_global_id('CommentType', resource.pk)
        response = self.execute(variables={'id': globalid})
        self.assertPermissionDenied(response)

    @decorators.as_someone(['MEMBER', 'OWNER'])
    def test_valid(self):
        """It should create the comment successfully.
        """
        # NOTE(review): docstring says "create" but this pins — presumably
        # copied from CreateCommentTests; same for the two tests below.
        resource = self.factory(workspace_id=self.workspace.pk, author=self.user)
        self.execute_success_test_case(resource)

    @decorators.as_someone(['OWNER'])
    def test_valid_with_object_permission_as_owner(self):
        """It should create the comment successfully.
        """
        resource = self.factory(workspace_id=self.workspace.pk, author=self.user)
        datastore = resource.content_object.datastore
        datastore.object_permissions_enabled = True
        datastore.save()
        self.execute_success_test_case(resource)

    @decorators.as_someone(['MEMBER'])
    def test_valid_with_object_permission_as_member(self):
        """It should create the comment successfully.
        """
        resource = self.factory(workspace_id=self.workspace.pk, author=self.user)
        datastore = resource.content_object.datastore
        datastore.object_permissions_enabled = True
        datastore.save()
        permissions = [
            'definitions.change_datastore_settings',
            'definitions.comment_on_datastore',
            'definitions.change_datastore_metadata',
        ]
        for permission in permissions:
            datastore.assign_perm(self.user, permission)
        self.execute_success_test_case(resource)

    @decorators.as_someone(['MEMBER'])
    def test_invalid_without_object_permission(self):
        """It should return a "Permission Denied" error.
        """
        resource = self.factory(workspace_id=self.workspace.pk, author=self.user)
        globalid = helpers.to_global_id('CommentType', resource.pk)
        datastore = resource.content_object.datastore
        datastore.object_permissions_enabled = True
        datastore.save()
        # Deliberately missing 'definitions.comment_on_datastore'.
        permissions = [
            'definitions.change_datastore_settings',
            'definitions.change_datastore_metadata',
        ]
        for permission in permissions:
            datastore.assign_perm(self.user, permission)
        response = self.execute(variables={'id': globalid})
        self.assertPermissionDenied(response)

    @decorators.as_someone(['READONLY'])
    def test_invalid_with_object_permission_as_readonly(self):
        """It should return a "Permission Denied" error.
        """
        resource = self.factory(workspace_id=self.workspace.pk, author=self.user)
        globalid = helpers.to_global_id('CommentType', resource.pk)
        datastore = resource.content_object.datastore
        datastore.object_permissions_enabled = True
        datastore.save()
        permissions = [
            'definitions.comment_on_datastore',
            'definitions.change_datastore_metadata',
        ]
        for permission in permissions:
            datastore.assign_perm(self.user, permission)
        response = self.execute(variables={'id': globalid})
        self.assertPermissionDenied(response)
class VoteForCommentTests(cases.GraphQLTestCase):
    """Tests for voting for a comment.
    """
    factory = factories.CommentFactory

    operation = 'voteForComment'
    statement = '''
    mutation VoteForComment(
      $id: ID!,
      $action: String!,
    ) {
      voteForComment(input: {
        id: $id,
        action: $action,
      }) {
        comment {
          pk
          numVoteUp
          numVoteDown
        }
        errors {
          resource
          field
          code
        }
      }
    }
    '''

    def setUp(self):
        super().setUp()
        # Votes are cast on comments attached to this table.
        self.commentable = factories.TableFactory(workspace=self.workspace)

    @decorators.as_someone(['MEMBER', 'OWNER', 'READONLY'])
    def test_valid_upvote(self):
        # Any workspace member (including read-only) may upvote.
        resource = self.factory(content_object=self.commentable)
        globalid = helpers.to_global_id('CommentType', resource.pk)
        # NOTE(review): $action is declared String! but an int is passed —
        # presumably the server coerces it; confirm.
        variables = {
            'id': globalid,
            'action': 1,
        }
        response = self.execute(variables=variables)
        response = response['data'][self.operation]
        self.assertEqual(response, {
            'comment': {
                'pk': resource.pk,
                'numVoteUp': 1,
                'numVoteDown': 0,
            },
            'errors': None,
        })

    @decorators.as_someone(['MEMBER', 'OWNER', 'READONLY'])
    def test_valid_downvote(self):
        # A downvote replaces the user's earlier upvote.
        resource = self.factory(content_object=self.commentable)
        resource.upvote(self.user)
        global_id = helpers.to_global_id('CommentType', resource.pk)
        variables = {
            'id': global_id,
            'action': -1,
        }
        response = self.execute(variables=variables)
        response = response['data'][self.operation]
        self.assertEqual(response, {
            'comment': {
                'pk': resource.pk,
                'numVoteUp': 0,
                'numVoteDown': 1,
            },
            'errors': None,
        })

    @decorators.as_someone(['OUTSIDER'])
    def test_query_when_not_authorized(self):
        """Outside users should not be able to access this resource.
        """
        resource = self.factory(content_object=self.commentable)
        globalid = helpers.to_global_id('CommentType', resource.pk)
        variables = {
            'id': globalid,
            'action': -1,
        }
        self.assertPermissionDenied(self.execute(variables=variables))
class UnvoteForCommentTests(cases.GraphQLTestCase):
    """Tests for removing a vote on a comment.
    """
    factory = factories.CommentFactory

    operation = 'unvoteForComment'
    statement = '''
    mutation UnvoteForComment(
      $id: ID!,
    ) {
      unvoteForComment(input: {
        id: $id,
      }) {
        ok
        errors {
          resource
          field
          code
        }
      }
    }
    '''

    def setUp(self):
        super().setUp()
        # Votes are cast on comments attached to this table.
        self.commentable = factories.TableFactory(workspace=self.workspace)

    @decorators.as_someone(['MEMBER', 'OWNER', 'READONLY'])
    def test_valid(self):
        resource = self.factory(content_object=self.commentable)
        resource.upvote(self.user)
        global_id = helpers.to_global_id('CommentType', resource.pk)
        # NOTE(review): 'action' is not declared by the UnvoteForComment
        # mutation — the entry appears to be leftover copy-paste.
        variables = {
            'id': global_id,
            'action': -1,
        }
        response = self.execute(variables=variables)
        response = response['data'][self.operation]
        self.assertOk(response)
        self.assertIsNone(resource.get_vote(self.user))

    @decorators.as_someone(['MEMBER', 'OWNER', 'READONLY'])
    def test_when_no_vote(self):
        # Unvoting when no vote exists is a no-op that still succeeds.
        resource = self.factory(content_object=self.commentable)
        global_id = helpers.to_global_id('CommentType', resource.pk)
        variables = {
            'id': global_id,
            'action': -1,
        }
        response = self.execute(variables=variables)
        response = response['data'][self.operation]
        self.assertOk(response)
        self.assertIsNone(resource.get_vote(self.user))

    @decorators.as_someone(['OUTSIDER', 'ANONYMOUS'])
    def test_query_when_not_authorized(self):
        """Outside users should not be able to access this resource.
        """
        resource = self.factory(content_object=self.commentable)
        globalid = helpers.to_global_id('CommentType', resource.pk)
        variables = {
            'id': globalid,
            'action': -1,
        }
        self.assertPermissionDenied(self.execute(variables=variables))
|
<reponame>AndreasKl/elefantenstark
package net.andreaskluth.elefantenstark.producer;
import static java.util.Objects.requireNonNull;
import static net.andreaskluth.elefantenstark.work.WorkItemDataMapSerializer.serialize;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import net.andreaskluth.elefantenstark.work.WorkItem;
/**
 * Writes {@link WorkItem}s into the queue table.
 */
public class Producer {

  public static final String INSERT_WORK_QUERY =
      "INSERT INTO queue (\"key\", hash, value, data_map, version) VALUES (?, ?, ?, ?, ?)";

  /**
   * Inserts the given work item into the queue.
   *
   * @param connection an open JDBC connection; not closed by this method
   * @param workItem the item to enqueue
   * @return true if exactly one row was inserted
   * @throws ProducerException wrapping any {@link SQLException}
   */
  public boolean produce(Connection connection, WorkItem workItem) {
    requireNonNull(connection);
    requireNonNull(workItem);

    try (PreparedStatement statement = connection.prepareStatement(INSERT_WORK_QUERY)) {
      statement.setString(1, workItem.key());
      statement.setInt(2, workItem.hash());
      statement.setString(3, workItem.value());
      statement.setBytes(4, serialize(workItem.workItemDataMap()));
      statement.setLong(5, workItem.version());
      // BUG FIX: execute() returns whether the result is a ResultSet, which
      // is always false for an INSERT — the old code could never report
      // success. executeUpdate() returns the affected-row count instead.
      return statement.executeUpdate() == 1;
    } catch (SQLException e) {
      throw new ProducerException(e);
    }
  }
}
|
<filename>api/traits/traits-router.js
const express = require('express');
const traits = require('./traits-model');

const router = express.Router();

// GET / — return every trait.
router.get('/', (req, res, next) => {
  traits
    .findTraits()
    .then((found) => res.status(200).json(found))
    .catch(next);
});

// POST / — create a trait; a timestamp serves as its unique id.
router.post('/', (req, res, next) => {
  const newTrait = req.body;
  newTrait.traitID = Date.now();
  traits
    .createTrait(newTrait)
    .then((created) => res.status(201).json(created))
    .catch(next);
});

module.exports = router;
#!/bin/bash
# This script runs on after start in background
# as a service and gets restarted on failure
# it runs ALMOST every seconds
# INFOFILE - state data from bootstrap
infoFile="/home/admin/raspiblitz.info"
# CONFIGFILE - configuration of RaspiBlitz
configFile="/mnt/hdd/raspiblitz.conf"
# LOGS see: sudo journalctl -f -u background
# Check if HDD contains configuration
configExists=$(ls ${configFile} | grep -c '.conf')
if [ ${configExists} -eq 1 ]; then
source ${configFile}
else
source ${infoFile}
fi
echo "_background.sh STARTED"
counter=0
while [ 1 ]
do
###############################
# Prepare this loop
###############################
# count up
counter=$(($counter+1))
# gather the uptime seconds
upSeconds=$(cat /proc/uptime | grep -o '^[0-9]\+')
####################################################
# RECHECK DHCP-SERVER
# https://github.com/rootzoll/raspiblitz/issues/160
####################################################
# every 5 minutes
recheckDHCP=$((($counter % 300)+1))
if [ ${recheckDHCP} -eq 1 ]; then
echo "*** RECHECK DHCP-SERVER ***"
# get the local network IP
localip=$(ip addr | grep 'state UP' -A2 | tail -n1 | awk '{print $2}' | cut -f1 -d'/')
echo "localip(${localip})"
# detect a missing DHCP config
if [ "${localip:0:4}" = "169." ]; then
echo "Missing DHCP detected ... trying emergency reboot"
sudo shutdown -r now
else
echo "DHCP OK"
fi
fi
####################################################
# CHECK FOR UNDERVOLTAGE REPORTS
# every 1 hour scan for undervoltage reports
####################################################
recheckUndervoltage=$(($counter % 3600))
if [ ${recheckUndervoltage} -eq 1 ]; then
echo "*** RECHECK UNDERVOLTAGE ***"
countReports=$(sudo cat /var/log/syslog | grep -c "Under-voltage detected!")
echo "${countReports} undervoltage reports found in syslog"
if [ ${#undervoltageReports} -eq 0 ]; then
# write new value to info file
undervoltageReports="${countReports}"
echo "undervoltageReports=${undervoltageReports}" >> ${infoFile}
else
# update value in info file
sed -i "s/^undervoltageReports=.*/undervoltageReports=${countReports}/g" ${infoFile}
fi
fi
####################################################
# RECHECK PUBLIC IP
# when public IP changes, restart LND with new IP
####################################################
# every 15min - not too often
# because its a ping to external service
recheckPublicIP=$((($counter % 900)+1))
# prevent when lndAddress is set
if [ ${#lndAddress} -gt 3 ]; then
recheckPublicIP=0
fi
updateDynDomain=0
if [ ${recheckPublicIP} -eq 1 ]; then
echo "*** RECHECK PUBLIC IP ***"
# execute only after setup when config exists
if [ ${configExists} -eq 1 ]; then
# get actual public IP
freshPublicIP=$(curl -s http://v4.ipv6-test.com/api/myip.php 2>/dev/null)
# sanity check on IP data
# see https://github.com/rootzoll/raspiblitz/issues/371#issuecomment-472416349
echo "-> sanity check of new IP data"
if [[ $freshPublicIP =~ ^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))$ ]]; then
echo "OK IPv6"
elif [[ $freshPublicIP =~ ^([0-9]{1,2}|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]{1,2}|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]{1,2}|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]{1,2}|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$ ]]; then
echo "OK IPv4"
else
echo "FAIL - not an IPv4 or IPv6 address"
freshPublicIP=""
fi
if [ ${#freshPublicIP} -eq 0 ]; then
  echo "freshPublicIP is ZERO - ignoring"
# check if changed
elif [ "${freshPublicIP}" != "${publicIP}" ]; then
  # 1) update config file
  echo "update config value"
  sed -i "s/^publicIP=.*/publicIP='${freshPublicIP}'/g" ${configFile}
  # Fix: double quotes so the new IP is expanded into the in-memory variable.
  # The previous single-quoted form stored the literal string '${freshPublicIP}',
  # so every later loop iteration saw a bogus publicIP and re-triggered updates.
  publicIP="${freshPublicIP}"
  # 2) only restart LND if dynDNS is activated
  # because this signals that user wants "public node"
  if [ ${#dynDomain} -gt 0 ]; then
    echo "restart LND with new environment config"
    # restart and let the auto-unlock (if activated) do the rest
    sudo systemctl restart lnd.service
  fi
  # 3) trigger update of the dynamic domain (if set)
  updateDynDomain=1
else
  echo "public IP has not changed"
fi
else
echo "skip - because setup is still running"
fi
fi
###############################
# SCB Monitoring
###############################
# check every 1min
recheckSCB=$(($counter % 60))
if [ ${recheckSCB} -eq 1 ]; then
#echo "SCB Monitoring ..."
source ${configFile}
# check if channel.backup exists
scbExists=$(sudo ls /mnt/hdd/lnd/data/chain/${network}/${chain}net/channel.backup 2>/dev/null | grep -c 'channel.backup')
if [ ${scbExists} -eq 1 ]; then
#echo "Found Channel Backup File .. check if changed .."
md5checksumORG=$(sudo md5sum /mnt/hdd/lnd/data/chain/${network}/${chain}net/channel.backup 2>/dev/null | head -n1 | cut -d " " -f1)
md5checksumCPY=$(sudo md5sum /home/admin/.lnd/data/chain/${network}/${chain}net/channel.backup 2>/dev/null | head -n1 | cut -d " " -f1)
if [ "${md5checksumORG}" != "${md5checksumCPY}" ]; then
echo "--> Channel Backup File changed"
# make copy to sd card (as local basic backup)
sudo mkdir -p /home/admin/.lnd/data/chain/${network}/${chain}net/ 2>/dev/null
sudo cp /mnt/hdd/lnd/data/chain/${network}/${chain}net/channel.backup /home/admin/.lnd/data/chain/${network}/${chain}net/channel.backup
echo "OK channel.backup copied to '/home/admin/.lnd/data/chain/${network}/${chain}net/channel.backup'"
# check if a SCP backup target is set
# paramter in raspiblitz.conf:
# scpBackupTarget='[USER]@[SERVER]:[DIRPATH-WITHOUT-ENDING-/]'
# On target server add the public key of your RaspiBlitz to the authorized_keys for the user
# https://www.linode.com/docs/security/authentication/use-public-key-authentication-with-ssh/
if [ ${#scpBackupTarget} -gt 0 ]; then
echo "--> Offsite-Backup SCP Server"
# its ok to ignore known host, because data is encrypted (worst case of MiM would be: no offsite channel backup)
# but its more likely that whithout ignoriing known host, script might not run thru and that way: no offsite channel backup
sudo scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null /home/admin/.lnd/data/chain/${network}/${chain}net/channel.backup ${scpBackupTarget}/channel.backup
result=$?
if [ ${result} -eq 0 ]; then
echo "OK - SCP Backup exited with 0"
else
echo "FAIL - SCP Backup exited with ${result}"
fi
fi
# check if a DropBox backup target is set
# paramter in raspiblitz.conf:
# dropboxBackupTarget='[DROPBOX-APP-OAUTH2-TOKEN]'
# see dropbox setup: https://gist.github.com/vindard/e0cd3d41bb403a823f3b5002488e3f90
if [ ${#dropboxBackupTarget} -gt 0 ]; then
echo "--> Offsite-Backup Dropbox"
source <(sudo /home/admin/config.scripts/dropbox.upload.sh upload ${dropboxBackupTarget} /home/admin/.lnd/data/chain/${network}/${chain}net/channel.backup)
if [ ${#err} -gt 0 ]; then
echo "FAIL - ${err}"
echo "${errMore}"
else
echo "OK - ${upload}"
fi
fi
#else
# echo "Channel Backup File not changed."
fi
#else
# echo "No Channel Backup File .."
fi
fi
###############################
# LND AUTO-UNLOCK
###############################
# check every 10secs
recheckAutoUnlock=$((($counter % 10)+1))
if [ ${recheckAutoUnlock} -eq 1 ]; then
# check if auto-unlock feature if activated
if [ "${autoUnlock}" = "on" ]; then
# check if lnd is locked
locked=$(sudo -u bitcoin /usr/local/bin/lncli --chain=${network} --network=${chain}net getinfo 2>&1 | grep -c unlock)
if [ ${locked} -gt 0 ]; then
echo "STARTING AUTO-UNLOCK ..."
# building REST command
passwordC=$(sudo cat /root/lnd.autounlock.pwd)
command="sudo python /home/admin/config.scripts/lnd.unlock.py '${passwordC}'"
bash -c "${command}"
fi
fi
fi
###############################
# UPDATE DYNAMIC DOMAIN
# like afraid.org
# ! experimental
###############################
# if not activated above, update every 6 hours
if [ ${updateDynDomain} -eq 0 ]; then
# dont +1 so that it gets executed on first loop
updateDynDomain=$(($counter % 21600))
fi
if [ ${updateDynDomain} -eq 1 ]; then
echo "*** UPDATE DYNAMIC DOMAIN ***"
# check if update URL for dyn Domain is set
if [ ${#dynUpdateUrl} -gt 6 ]; then
# calling the update url
echo "calling: ${dynUpdateUrl}"
echo "to update domain: ${dynDomain}"
curl --connect-timeout 6 ${dynUpdateUrl}
else
echo "'dynUpdateUrl' not set in ${configFile}"
fi
fi
####################################################
# CHECK FOR END OF IBD (self validation)
####################################################
# check every 60secs
recheckIBD=$((($counter % 60)+1))
if [ ${recheckIBD} -eq 1 ]; then
# check if flag exists (got created on 50syncHDD.sh)
flagExists=$(ls /home/admin/selfsync.flag 2>/dev/null | grep -c "selfsync.flag")
if [ ${flagExists} -eq 1 ]; then
finishedIBD=$(sudo -u bitcoin ${network}-cli getblockchaininfo | grep "initialblockdownload" | grep -c "false")
if [ ${finishedIBD} -eq 1 ]; then
echo "CHECK FOR END OF IBD --> reduce RAM, check TOR and restart ${network}d"
# remove flag
sudo rm /home/admin/selfsync.flag
# stop bitcoind
sudo systemctl stop ${network}d
# set dbcache back to normal (to give room for other apps)
kbSizeRAM=$(sudo cat /proc/meminfo | grep "MemTotal" | sed 's/[^0-9]*//g')
if [ ${kbSizeRAM} -gt 1500000 ]; then
echo "Detected RAM >1GB --> optimizing ${network}.conf"
sudo sed -i "s/^dbcache=.*/dbcache=512/g" /mnt/hdd/${network}/${network}.conf
else
echo "Detected RAM 1GB --> optimizing ${network}.conf"
sudo sed -i "s/^dbcache=.*/dbcache=128/g" /mnt/hdd/${network}/${network}.conf
fi
# if TOR was activated during setup make sure bitcoin runs behind TOR latest from now on
if [ "${runBehindTor}" = "on" ]; then
echo "TOR is ON -> make sure bitcoin is running behind TOR after IBD"
sudo /home/admin/config.scripts/internet.tor.sh btcconf-on
else
echo "TOR is OFF after IBD"
fi
# restart bitcoind
sudo systemctl start ${network}d
fi
fi
fi
###############################
# Prepare next loop
###############################
# sleep 1 sec
sleep 1
# limit counter to max seconds per week:
# 604800 = 60sec * 60min * 24hours * 7days
if [ ${counter} -gt 604800 ]; then
counter=0
echo "counter zero reset"
fi
done
|
<filename>tests/basics/closure_defargs.py
# test closure with default args
def f():
    """Exercise a closure whose inner function also has default arguments."""
    a = 1  # free variable captured by bar

    def bar(b=10, c=20):
        # 'a' comes from the enclosing scope; b and c may be overridden.
        print(a + b + c)

    bar()
    bar(2)
    bar(2, 3)
print(f())
|
#!/bin/bash
SCRIPT=$(readlink -f "$0") && cd $(dirname "$SCRIPT")
# --- Script Init ---
set -e
set -o pipefail
mkdir -p log
rm -R -f log/*
# --- Setup run dirs ---
find output/* ! -name '*summary-info*' -type f -exec rm -f {} +
rm -R -f work/*
mkdir work/kat/
rm -R -f /tmp/%FIFO_DIR%/
mkdir -p /tmp/%FIFO_DIR%/fifo/
mkdir work/il_S1_summaryleccalc
mkfifo /tmp/%FIFO_DIR%/fifo/il_P1
mkfifo /tmp/%FIFO_DIR%/fifo/il_P2
mkfifo /tmp/%FIFO_DIR%/fifo/il_P3
mkfifo /tmp/%FIFO_DIR%/fifo/il_P4
mkfifo /tmp/%FIFO_DIR%/fifo/il_P5
mkfifo /tmp/%FIFO_DIR%/fifo/il_P6
mkfifo /tmp/%FIFO_DIR%/fifo/il_P7
mkfifo /tmp/%FIFO_DIR%/fifo/il_P8
mkfifo /tmp/%FIFO_DIR%/fifo/il_P9
mkfifo /tmp/%FIFO_DIR%/fifo/il_P10
mkfifo /tmp/%FIFO_DIR%/fifo/il_P11
mkfifo /tmp/%FIFO_DIR%/fifo/il_P12
mkfifo /tmp/%FIFO_DIR%/fifo/il_P13
mkfifo /tmp/%FIFO_DIR%/fifo/il_P14
mkfifo /tmp/%FIFO_DIR%/fifo/il_P15
mkfifo /tmp/%FIFO_DIR%/fifo/il_P16
mkfifo /tmp/%FIFO_DIR%/fifo/il_P17
mkfifo /tmp/%FIFO_DIR%/fifo/il_P18
mkfifo /tmp/%FIFO_DIR%/fifo/il_P19
mkfifo /tmp/%FIFO_DIR%/fifo/il_P20
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P1
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P2
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P3
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P4
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P5
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P6
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P7
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P8
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P9
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P10
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P11
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P12
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P13
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P14
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P15
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P16
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P17
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P18
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P19
mkfifo /tmp/%FIFO_DIR%/fifo/il_S1_summary_P20
# --- Do insured loss computes ---
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P1 work/il_S1_summaryleccalc/P1.bin > /dev/null & pid1=$!
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P2 work/il_S1_summaryleccalc/P2.bin > /dev/null & pid2=$!
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P3 work/il_S1_summaryleccalc/P3.bin > /dev/null & pid3=$!
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P4 work/il_S1_summaryleccalc/P4.bin > /dev/null & pid4=$!
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P5 work/il_S1_summaryleccalc/P5.bin > /dev/null & pid5=$!
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P6 work/il_S1_summaryleccalc/P6.bin > /dev/null & pid6=$!
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P7 work/il_S1_summaryleccalc/P7.bin > /dev/null & pid7=$!
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P8 work/il_S1_summaryleccalc/P8.bin > /dev/null & pid8=$!
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P9 work/il_S1_summaryleccalc/P9.bin > /dev/null & pid9=$!
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P10 work/il_S1_summaryleccalc/P10.bin > /dev/null & pid10=$!
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P11 work/il_S1_summaryleccalc/P11.bin > /dev/null & pid11=$!
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P12 work/il_S1_summaryleccalc/P12.bin > /dev/null & pid12=$!
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P13 work/il_S1_summaryleccalc/P13.bin > /dev/null & pid13=$!
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P14 work/il_S1_summaryleccalc/P14.bin > /dev/null & pid14=$!
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P15 work/il_S1_summaryleccalc/P15.bin > /dev/null & pid15=$!
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P16 work/il_S1_summaryleccalc/P16.bin > /dev/null & pid16=$!
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P17 work/il_S1_summaryleccalc/P17.bin > /dev/null & pid17=$!
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P18 work/il_S1_summaryleccalc/P18.bin > /dev/null & pid18=$!
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P19 work/il_S1_summaryleccalc/P19.bin > /dev/null & pid19=$!
tee < /tmp/%FIFO_DIR%/fifo/il_S1_summary_P20 work/il_S1_summaryleccalc/P20.bin > /dev/null & pid20=$!
summarycalc -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P1 < /tmp/%FIFO_DIR%/fifo/il_P1 &
summarycalc -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P2 < /tmp/%FIFO_DIR%/fifo/il_P2 &
summarycalc -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P3 < /tmp/%FIFO_DIR%/fifo/il_P3 &
summarycalc -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P4 < /tmp/%FIFO_DIR%/fifo/il_P4 &
summarycalc -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P5 < /tmp/%FIFO_DIR%/fifo/il_P5 &
summarycalc -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P6 < /tmp/%FIFO_DIR%/fifo/il_P6 &
summarycalc -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P7 < /tmp/%FIFO_DIR%/fifo/il_P7 &
summarycalc -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P8 < /tmp/%FIFO_DIR%/fifo/il_P8 &
summarycalc -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P9 < /tmp/%FIFO_DIR%/fifo/il_P9 &
summarycalc -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P10 < /tmp/%FIFO_DIR%/fifo/il_P10 &
summarycalc -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P11 < /tmp/%FIFO_DIR%/fifo/il_P11 &
summarycalc -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P12 < /tmp/%FIFO_DIR%/fifo/il_P12 &
summarycalc -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P13 < /tmp/%FIFO_DIR%/fifo/il_P13 &
summarycalc -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P14 < /tmp/%FIFO_DIR%/fifo/il_P14 &
summarycalc -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P15 < /tmp/%FIFO_DIR%/fifo/il_P15 &
summarycalc -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P16 < /tmp/%FIFO_DIR%/fifo/il_P16 &
summarycalc -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P17 < /tmp/%FIFO_DIR%/fifo/il_P17 &
summarycalc -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P18 < /tmp/%FIFO_DIR%/fifo/il_P18 &
summarycalc -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P19 < /tmp/%FIFO_DIR%/fifo/il_P19 &
summarycalc -f -1 /tmp/%FIFO_DIR%/fifo/il_S1_summary_P20 < /tmp/%FIFO_DIR%/fifo/il_P20 &
eve 1 20 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P1 &
eve 2 20 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P2 &
eve 3 20 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P3 &
eve 4 20 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P4 &
eve 5 20 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P5 &
eve 6 20 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P6 &
eve 7 20 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P7 &
eve 8 20 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P8 &
eve 9 20 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P9 &
eve 10 20 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P10 &
eve 11 20 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P11 &
eve 12 20 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P12 &
eve 13 20 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P13 &
eve 14 20 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P14 &
eve 15 20 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P15 &
eve 16 20 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P16 &
eve 17 20 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P17 &
eve 18 20 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P18 &
eve 19 20 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P19 &
eve 20 20 | getmodel | gulcalc -S100 -L100 -r -a1 -i - | fmcalc -a2 > /tmp/%FIFO_DIR%/fifo/il_P20 &
wait $pid1 $pid2 $pid3 $pid4 $pid5 $pid6 $pid7 $pid8 $pid9 $pid10 $pid11 $pid12 $pid13 $pid14 $pid15 $pid16 $pid17 $pid18 $pid19 $pid20
# --- Do insured loss kats ---
leccalc -r -Kil_S1_summaryleccalc -m output/il_S1_leccalc_wheatsheaf_mean_oep.csv & lpid1=$!
wait $lpid1
rm -R -f work/*
rm -R -f /tmp/%FIFO_DIR%/
|
#!/bin/bash
# Serve the built web app locally on port 8080.
# Fix: abort if the build directory is missing instead of silently serving
# whatever directory the script happened to be started from.
cd build/web/ || exit 1
python3 -m http.server 8080
import React from "react";
import Draggable from "react-draggable";
import PropertiesPanel from "./PropertiesPanel";
import { LayersPanel } from "./LayersPanel";
export class PanelArea extends React.Component {
constructor(props) {
super(props);
this.onDrag = this.onDrag.bind(this);
this.onDragEnd = this.onDragEnd.bind(this);
this.state = {
panelHeight: 400,
panelMin: 0,
panelMax: 1,
};
}
componentDidMount() {
window.addEventListener("resize", () => {
const min = this.state.panelMin * document.body.clientHeight + 25;
const max = this.state.panelMax * document.body.clientHeight - 154;
let value = this.state.panelHeight;
if (value < min) value = min;
else if (value > max) value = max;
this.setState({ panelHeight: value });
});
}
onDrag(e, ui) {
document.body.style.cursor = "ns-resize";
const min = this.state.panelMin * document.body.clientHeight + 25;
const max = this.state.panelMax * document.body.clientHeight - 154;
let value = this.state.panelHeight;
value += ui.deltaY;
if (value < min) value = min;
else if (value > max) value = max;
this.setState({ panelHeight: value });
}
onDragEnd() {
document.body.style.cursor = "default";
}
render() {
return (
<div id="PanelArea" style={{ width: this.props.panelWidth }}>
<PropertiesPanel panelHeight={this.state.panelHeight + "px"} selected={this.props.selected} />
<Draggable axis="none" onDrag={this.onDrag} onStop={this.onDragEnd}>
<div className="slider">
<div className="break"></div>
</div>
</Draggable>
<LayersPanel
layers={this.props.layers}
onLayerChange={this.props.onLayerChange}
select={this.props.select}
/>
</div>
);
}
}
|
var URL = 'http://publisher.titaniumapp.com/api/release-publish';
var TFS = Titanium.Filesystem;
var build_types =
{
'osx':['10.5_i386','10.5_i386','10.4_ppc'],
'win32':['win32'],
'linux':['32bit_i386','64bit_i386','32bit_ppc']
};
var guids = {
'distribution':'7F7FA377-E695-4280-9F1F-96126F3D2C2A',
'runtime':'A2AC5CB5-8C52-456C-9525-601A5B0725DA',
'module':'1ACE5D3A-2B52-43FB-A136-007BD166CFD0'
};
$(function()
{
var files = [];
function updateFilesDisplay()
{
var display = "<ul>";
for (var i = 0; i < files.length; i++)
{
var f = files[i];
display += "<li>";
display += f.path+" ("+f.type+") ("+f.os+") ("+f.build_type+")";
display += "</li>";
}
display += "</ul>";
$("#files_display").html(display);
}
function addFile()
{
Titanium.UI.openFiles(
function(result)
{
for (var i = 0; i < result.length; i++)
{
var new_file = Object();
new_file.path = result[i];
new_file.type = $('#type').val();
new_file.os = $('#os').val();
new_file.build_type = $('#build_type').val();
files.push(new_file);
}
updateFilesDisplay();
},
{
directories:false,
files:true,
multiple:true
});
}
function clearFiles()
{
files = []
updateFilesDisplay();
}
$('#filepicker').click(addFile);
$('#clearfiles').click(clearFiles);
function setStatus(msg)
{
$('#status').html(msg);
}
function sendFile(file)
{
$('#status').removeClass('error').fadeIn();
var type = file.type;
var os = file.os;
var build_type = file.build_type;
var path = file.path;
try {
setStatus('Preparing distribution:' + path);
var guid = guids[type];
Titanium.API.debug("1");
var tmp = TFS.createTempDirectory();
var manifest = TFS.getFile(tmp,'timanifest');
var from = TFS.getFile(path);
var name = null;
var version = null;
var toks = from.name().split('-');
if (guid == guids.module)
{
name = toks[1];
version = toks[2];
}
else if (guid==guids.runtime)
{
name = 'runtime';
version = toks[1];
}
else
{
name = 'installer';
version = toks[2];
}
var idx = version.lastIndexOf('.');
if (idx > 0) version = version.substring(0,idx);
from.copy(tmp);
var contents = {'name':name,'guid':guid,'version':version,'build_type':build_type,'os':os,'filename':from.name()};
// alert(swiss.toJSON(contents))
manifest.write(swiss.toJSON(contents));
setStatus('Sending:' + path);
var xhr = Titanium.Network.createHTTPClient();
xhr.onreadystatechange = function()
{
if (this.readyState == 4)
{
setStatus('Finished sending:' + path);
if (this.status == 200)
{
sendNextFile();
}
else
{
$('#status').addClass('error').html('Error publishing');
}
}
};
// xhr.onsendstream = function(sent,total,remaining)
// {
// setStatus('Sending...'+sent+'K of '+total+'K, remaining '+remaining+'K');
// };
var ts = new Date().getTime();
var secret = hex_md5($('#secret').val()+"$"+ts);
var url = URL+"?activate=1&secret="+encodeURIComponent(secret)+"&ts="+ts;
xhr.open("POST",url);
xhr.sendDir(tmp);
} catch (e) {
alert(e);
}
}
function sendNextFile()
{
if (files.length > 0)
{
sendFile(files.pop());
updateFilesDisplay();
}
}
$('button').click(function()
{
sendNextFile();
});
$('#os').change(function()
{
  // Fix: e.srcElement is non-standard (old IE/WebKit only). Inside a jQuery
  // handler, `this` is the <select> element, so read its value directly and
  // rebuild the build_type options for the selected OS.
  var n = build_types[$(this).val()];
  var bt = $('#build_type').get(0);
  bt.options.length = 0;
  for (var c = 0; c < n.length; c++)
  {
    bt.options[c] = new Option(n[c], n[c]);
  }
});
})
|
source ../libarchive/plan.sh
pkg_name=libarchive-musl
pkg_origin=core
pkg_maintainer="The Habitat Maintainers <humans@habitat.sh>"
pkg_description="Multi-format archive and compression library"
pkg_upstream_url="https://www.libarchive.org"
pkg_license=('BSD')
pkg_deps=(
core/musl
core/openssl-musl
core/zlib-musl
core/bzip2-musl
core/xz-musl
)
# Build with the musl C compiler and link against musl's dynamic loader so the
# resulting library does not pick up a glibc dependency.
do_prepare() {
  export CC=musl-gcc
  build_line "Setting CC=$CC"
  # Point the ELF interpreter at the loader shipped by the core/musl package.
  dynamic_linker="$(pkg_path_for musl)/lib/ld-musl-x86_64.so.1"
  LDFLAGS="$LDFLAGS -Wl,--dynamic-linker=$dynamic_linker"
}
do_check() {
# TODO fn: Currently there is one high level test that fails and the detailed
# failures look to be related to locales, most likely different between the
# Glibc & musl libc implementations. Chances are that there is a way to make
# this suite pass 100% or set particular tests up to skip.
make check || true
}
# ----------------------------------------------------------------------------
# **NOTICE:** What follows are implementation details required for building a
# first-pass, "stage1" toolchain and environment. It is only used when running
# in a "stage1" Studio and can be safely ignored by almost everyone. Having
# said that, it performs a vital bootstrapping process and cannot be removed or
# significantly altered. Thank you!
# ----------------------------------------------------------------------------
if [[ "$STUDIO_TYPE" = "stage1" ]]; then
pkg_build_deps=(
core/gcc
core/coreutils
core/sed
core/grep
core/diffutils
core/make
)
fi
|
def sum_of_squares(start, end):
    """Return the sum of n**2 for every integer n in [start, end].

    Input:
        start: starting number (inclusive)
        end: ending number (inclusive)
    Output:
        total sum of squares; 0 when the range is empty (start > end)
    """
    return sum(n * n for n in range(start, end + 1))
total_sum = sum_of_squares(1, 10)
print(total_sum) |
<reponame>jakzaizzat/gallery
import { getWhitespacePositionsFromStagedItems, insertWhitespaceBlocks } from './collectionLayout';
// Build a minimal NFT fixture with fixed, predictable field values.
function generateTestNft() {
  const image = { url: 'https://example.com/test.jpg' };
  const metadata = { type: 'nft' };
  return {
    id: '123',
    name: 'test',
    description: 'test',
    image,
    metadata,
  };
}
// NOTE(review): both suites below are disabled via describe.skip — confirm
// whether they should be re-enabled against the current collectionLayout API.
describe.skip('getWhitespacePositionsFromStagedItems', () => {
  it('computes the correct whitespace list given a list of staged items', () => {
    // Items whose ids start with 'blank-' appear to stand in for whitespace
    // blocks placed between real NFTs — verify against collectionLayout.
    const stagedItems = [
      { id: 'blank-1' },
      { id: 'blank-2' },
      generateTestNft(),
      { id: 'blank-3' },
      generateTestNft(),
      generateTestNft(),
      generateTestNft(),
      { id: 'blank-4' },
      generateTestNft(),
      { id: 'blank-5' },
    ];
    const whitespaceList = getWhitespacePositionsFromStagedItems(stagedItems);
    expect(whitespaceList).toEqual([0, 0, 1, 4, 5]);
  });
});
describe.skip('insertWhitespaceBlocks', () => {
  it('inserts white spaces into a list of nfts accordingly', () => {
    const nfts = [
      generateTestNft(),
      generateTestNft(),
      generateTestNft(),
      generateTestNft(),
      generateTestNft(),
    ];
    // Positions produced by getWhitespacePositionsFromStagedItems above.
    const whitespaceList = [0, 0, 1, 4, 5];
    const whitespacesAndNfts = insertWhitespaceBlocks(nfts, whitespaceList);
    // 5 nfts + 5 whitespace blocks = 10 entries total.
    expect(whitespacesAndNfts.length).toEqual(10);
    expect(whitespacesAndNfts[2].id).toEqual(nfts[0].id);
  });
});
|
<reponame>kiraki-dev/express-oven
import lightJoin from 'light-join';

// Module-level base URL; mutable so a standalone runner can configure it later.
let baseUrl = '';

// Set the base URL (returns the value assigned, matching arrow-expression form).
export const setBaseUrl = (url: string) => (baseUrl = url);

// Read the currently configured base URL.
export const getBaseUrl = () => baseUrl;

// Join the configured base URL with an app-relative path.
export const getAppUrl = (path: string) => lightJoin(getBaseUrl(), path);
|
<filename>Modules/Search/max_sub_sequence.hxx
/*===========================================================================================================
*
* HUC - Hurna Core
*
* Copyright (c) <NAME>
*
* Licensed under the MIT License, you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://github.com/Hurna/Hurna-Core/blob/master/LICENSE
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and limitations under the License.
*
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
*
*=========================================================================================================*/
#ifndef MODULE_SEARCH_MAX_SUB_SEQUENCE_HXX
#define MODULE_SEARCH_MAX_SUB_SEQUENCE_HXX
// STD includes
#include <iterator>
#include <utility>
namespace huc
{
namespace search
{
/// Max Sub Sequence
/// Identify the subarray with the maximum/minimum sum.
///
/// @details The algorithm can be seen as an elicitation of the MaxDistance one.
/// One of the problem resolved by this algorithm is:
/// "Given an array of gains/losses over time, find the period that represents the best/worst
/// cummulative gain."
/// The algorithm uses the fact that the sum operator is a transitive function.
///
/// @tparam IT type using to go through the collection.
/// @tparam Distance functor type computing the distance between two elements.
/// @tparam Compare functor type.
///
/// @param begin,end iterators to the initial and final positions of
/// the sequence to be sorted. The range used is [first,last), which contains all the elements between
/// first and last, including the element pointed by first but not the element pointed by last.
///
/// @return indexes of the array with the maximum/minimum sum, <-1,-1> in case of error.
  template <typename IT,
            typename Distance = std::minus<typename std::iterator_traits<IT>::value_type>,
            typename Compare = std::greater<typename std::iterator_traits<IT>::value_type>>
  std::pair<int, int> MaxSubSequence(const IT& begin, const IT& end)
  {
    // A sub-sequence is only meaningful with at least two elements.
    if (std::distance(begin, end) < 2)
      return std::pair<int, int>(-1, -1);

    int minValIdx = 0;
    std::pair<int, int> indexes(minValIdx, minValIdx);
    auto minSum = static_cast<typename std::iterator_traits<IT>::value_type>(0);
    auto currSum = *begin;  // running prefix sum over [begin, it]
    auto maxSum = *begin;   // best sub-sequence value found so far

    int currentIdx = 1;
    for (auto it = begin + 1; it != end; ++it, ++currentIdx)
    {
      currSum += *it;

      // keep track of the minimum sum and its first value index
      if (Compare()(minSum, currSum))
      {
        minValIdx = currentIdx;
        minSum = currSum;
      }

      // Keeps track of the maximal sub array and its end value index
      // (best sub-sequence = current prefix sum minus the minimum prefix sum).
      auto curMax = Distance()(currSum, minSum);
      if (Compare()(curMax, maxSum))
      {
        // NOTE(review): when the element at the minimum-prefix index is
        // negative, it is excluded from the reported range (start shifts one
        // to the right) — presumably so the range begins at the first
        // contributing element; confirm against the test suite.
        indexes.first = minValIdx + ((*(begin + minValIdx) < 0) ? 1 : 0);
        indexes.second = currentIdx;
        maxSum = Distance()(currSum, minSum);
      }
    }

    return indexes;
  }
}
}
#endif // MODULE_SEARCH_MAX_SUB_SEQUENCE_HXX
|
<gh_stars>10-100
//============================================================================
// Copyright 2009-2020 ECMWF.
// This software is licensed under the terms of the Apache Licence version 2.0
// which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
// In applying this licence, ECMWF does not waive the privileges and immunities
// granted to it by virtue of its status as an intergovernmental organisation
// nor does it submit to any jurisdiction.
//============================================================================
#ifndef TEXTFORMAT_HPP
#define TEXTFORMAT_HPP

#include <QColor>
#include <QList>
#include <QString>

class QAction;

namespace Viewer
{
// Shortcut helpers: render a keyboard shortcut for display / tooltips.
QString formatShortCut(QString);
QString formatShortCut(QAction*);
void addShortCutToToolTip(QList<QAction*>);

// Inline text helpers: wrap the given text with the given colour
// (bold / italic / plain variants).
QString formatBoldText(QString,QColor);
QString formatItalicText(QString,QColor);
QString formatText(QString,QColor);

// Table helpers: build table fragments (header cells, rows, data cells)
// with optional foreground/background colours.
QString formatTableThText(QString txt,QColor col);
QString formatTableTrBg(QString txt,QColor col);
QString formatTableTrText(QString txt);
QString formatTableTdText(QString txt,QColor col);
QString formatTableTdText(QString txt);
QString formatTableTdBg(QString txt,QColor col);
QString formatTableRow(QString col1Text,QString col2Text,QColor bg,QColor fg, bool boldCol1);
QString formatTableRow(QString col1Text,QString col2Text, bool boldCol1);
} //namespace Viewer

#endif // TEXTFORMAT_HPP
|
<filename>src/layer.js<gh_stars>1-10
//TODO: Might need a 'chart' object to pass settings/functionality to layer events
//TODO: use es6 class syntax for Layer
var Layer = function() {
  // Map of eventName -> array of { namespace, callback } records.
  this._handlers = {};
};

// Register a handler. name can be 'eventName' or 'eventName.namespace'
// (namespacing lets users remove specific handlers later via off()).
// Returns this for chaining.
Layer.prototype.on = function(name, handler) {
  var splitName = name.split('.'),
      eventName = splitName[0],
      namespace = splitName[1];
  if (!this._handlers[eventName]) this._handlers[eventName] = [];
  this._handlers[eventName].push({
    namespace: namespace,
    callback: handler
  });
  return this;
};

// Remove and return all handlers registered under the same eventName and
// namespace ('eventName.namespace'); returns null when nothing matched.
Layer.prototype.off = function(name) {
  var splitName = name.split('.'),
      eventName = splitName[0],
      namespace = splitName[1],
      removed = [],
      kept = [],
      registered = this._handlers[eventName];
  if (!registered || !registered.length) return null;
  // Fix: the previous implementation collected the NON-matching handlers and
  // called splice() on this._handlers (the map object, not the per-event
  // array), so no handler was ever removed and the wrong set was returned.
  for (var i = 0; i < registered.length; i++) {
    if (registered[i].namespace === namespace) {
      removed.push(registered[i]);
    } else {
      kept.push(registered[i]);
    }
  }
  this._handlers[eventName] = kept;
  return removed.length ? removed : null;
};
// Bind data to the base selection and run the layer's lifecycle events in
// order: update, enter, merge, exit. For each event, plain handlers run on
// the selection and ':transition'-suffixed handlers run on
// selection.transition(). Relies on d3 selection semantics (enter/exit/
// empty/transition) — the data-join contract comes from this.dataBind.
Layer.prototype.draw = function(base, model, instance, data) {
  this._base = base;
  var bound, entering, events, event, selection, handlers;
  //TODO: think about how data is passed in (possibly multiple ways?)
  bound = this.dataBind.call(this._base, model, instance, data);
  entering = bound.enter();
  // Lifecycle phases; `method` (when present) derives the working selection
  // (insert for enter, exit() for exit) before handlers are invoked.
  events = [
    {
      name: 'update',
      selection: bound
    },
    {
      name: 'enter',
      selection: entering,
      method: this.insert //TODO: currently doesn't get model/instance, shouldn't need it, but might
    },
    {
      name: 'merge',
      selection: bound
    },
    {
      name: 'exit',
      selection: bound,
      method: bound.exit
    }
  ];
  for (var i in events) {
    event = events[i];
    selection = event.selection;
    if (typeof event.method === 'function')
      selection = event.method.call(selection);
    if (selection.empty()) continue; //nothing to work on
    //TODO: Decide if handlers need both 'model' and 'instance' objects passed in
    // Plain handlers run with the (derived) selection as `this`.
    handlers = this._handlers[event.name];
    if (handlers && handlers.length) {
      for (var j in handlers) {
        handlers[j].callback.call(selection, model, instance);
      }
    }
    // Transition handlers run on a transition derived from the selection.
    // (The duplicate `var j` is harmless: var is function-scoped.)
    handlers = this._handlers[event.name + ':transition'];
    if (handlers && handlers.length) {
      selection = selection.transition();
      for (var j in handlers) {
        handlers[j].callback.call(selection, model, instance);
      }
    }
  }
  return bound; // bound is returned to allow stacking layers
};
//TODO: Consider making a way to generate a layer without a base defined initially
// *The base should only be required when you draw
// Attach a Layer to a d3 selection. The selection gains on/off/draw
// methods that delegate to the underlying Layer instance.
//TODO: figure out if user should be able to override dataBind and insert after layer is defined
d3.selection.prototype.mlayer = function(options) {
  var layer = new Layer(this);
  layer.dataBind = options.dataBind;
  layer.insert = options.insert;
  // Pre-register any handlers supplied up front.
  if ('events' in options) {
    for (var eventName in options.events) {
      layer.on(eventName, options.events[eventName]);
    }
  }
  // Expose the layer API on the selection itself, forwarding every call.
  var self = this;
  ['on', 'off', 'draw'].forEach(function(method) {
    self[method] = function() {
      return layer[method].apply(layer, arguments);
    };
  });
  return this;
};
//TODO: Consider making a way to remove a layer (returning the layer)
// and make it possible to bind a premade layer
// (ie. after removing it from somewhere else)
//TODO: Consider making a way to remove a layer (returning the layer)
//      and make it possible to bind a premade layer
//      (ie. after removing it from somewhere else)
// Guard with typeof: a bare `if (mc)` throws a ReferenceError when the `mc`
// global was never declared (e.g. this file loaded standalone); the typeof
// form degrades to a no-op instead.
if (typeof mc !== 'undefined' && mc)
  // Factory for a detached Layer (no base selection); the base is supplied
  // later via Layer.draw.
  mc.layer = function(options) {
    var layer = new Layer(),
      eventName;
    //TODO: figure out if user should be able to override dataBind and insert after layer is defined
    layer.dataBind = options.dataBind;
    layer.insert = options.insert;
    if ('events' in options) {
      for (eventName in options.events) {
        layer.on(eventName, options.events[eventName]);
      }
    }
    return layer;
  };
|
// DOM handles and horizontal positions (margin-left in px) for both cars.
let car = document.querySelector("#car");   // player car element
let car2 = document.querySelector("#car2"); // opponent car element
let count = 0;   // player car offset
let count2 = 0;  // opponent car offset
// NOTE(review): keydown is bound to #space, so that element must be able to
// receive keyboard focus for logKey to fire — confirm against the markup.
let space = document.querySelector("#space");
space.addEventListener('keydown', logKey)
// Keyboard handler: moves the player car horizontally in 50px steps,
// clamped to the track range [0, 1220].
function logKey(e) {
  if (e.code == "ArrowLeft") {
    if (count >= 50) {
      count -= 50;
      car.style = "margin-left:" + count + "px";
    }
  } else if (e.code == "ArrowRight") {
    // Clamp at the right edge and always resync the style with the counter.
    // The original skipped the style update in its clamp branch, so the car
    // could stay drawn at 1270px while `count` was reset to 1220.
    count = Math.min(count + 50, 1220);
    car.style = "margin-left:" + count + "px";
  }
}
// Drives the opponent car (car2) from a random start toward the right edge.
// FIXME(review): wait() only schedules a setTimeout and returns immediately,
// so this while loop completes in one tick and every queued callback later
// sees the final count2 — the car jumps instead of animating. Confirm the
// intended pacing before restructuring.
function start() {
  count2 = Math.floor(Math.random() * 100);
  while (count2 < 1280) {
    wait(2);
    count2 += 10;
    console.log(count2);
  }
}
// Schedules a single repaint of car2's position after a fixed 2s delay.
// NOTE(review): the parameter `i` is unused — the delay is hard-coded to
// 2000ms; presumably `i * 1000` was intended. Confirm with the caller.
function wait(i) {
  setTimeout(function() {
    car2.style = "margin-left:" + count2 + "px";
    // Add tasks to do
  }, 2000 );
}
#!/bin/bash
#
# Copyright IBM Corp All Rights Reserved
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will orchestrate a sample end-to-end execution of the Hyperledger
# Fabric network.
#
# The end-to-end verification provisions a sample Fabric network consisting of
# two organizations, each maintaining two peers, and a “solo” ordering service.
#
# This verification makes use of two fundamental tools, which are necessary to
# create a functioning transactional network with digital signature validation
# and access control:
#
# * cryptogen - generates the x509 certificates used to identify and
# authenticate the various components in the network.
# * configtxgen - generates the requisite configuration artifacts for orderer
# bootstrap and channel creation.
#
# Each tool consumes a configuration yaml file, within which we specify the topology
# of our network (cryptogen) and the location of our certificates for various
# configuration operations (configtxgen). Once the tools have been successfully run,
# we are able to launch our network. More detail on the tools and the structure of
# the network will be provided later in this document. For now, let's get going...
# prepending $PWD/../bin to PATH to ensure we are picking up the correct binaries
# this may be commented out to resolve installed version of tools if desired
export PATH=${PWD}/../bin:${PWD}:$PATH
# configtxgen/cryptogen read crypto-config.yaml / configtx.yaml from here
export FABRIC_CFG_PATH=${PWD}
# default verbosity passed to the CLI scripts; overridden by the -v flag
export VERBOSE=false
# Print the usage message
function printHelp() {
  # Prints usage, flags and example invocations; used by -h and on any
  # unrecognized mode. Exits are handled by the callers.
  echo "Usage: "
  echo "  byfn.sh <mode> [-c <channel name>] [-t <timeout>] [-d <delay>] [-f <docker-compose-file>] [-s <dbtype>] [-l <language>] [-o <consensus-type>] [-i <imagetag>] [-a] [-n] [-v]"
  echo "    <mode> - one of 'up', 'down', 'restart', 'generate' or 'upgrade'"
  echo "      - 'up' - bring up the network with docker-compose up"
  echo "      - 'down' - clear the network with docker-compose down"
  echo "      - 'restart' - restart the network"
  echo "      - 'generate' - generate required certificates and genesis block"
  echo "      - 'upgrade'  - upgrade the network from version 1.3.x to 1.4.0"
  echo "    -c <channel name> - channel name to use (defaults to \"mychannel\")"
  echo "    -t <timeout> - CLI timeout duration in seconds (defaults to 10)"
  echo "    -d <delay> - delay duration in seconds (defaults to 3)"
  echo "    -f <docker-compose-file> - specify which docker-compose file use (defaults to docker-compose-cli.yaml)"
  echo "    -s <dbtype> - the database backend to use: goleveldb (default) or couchdb"
  echo "    -l <language> - the chaincode language: golang (default) or node"
  echo "    -o <consensus-type> - the consensus-type of the ordering service: solo (default), kafka, or etcdraft"
  echo "    -i <imagetag> - the tag to be used to launch the network (defaults to \"latest\")"
  echo "    -a - launch certificate authorities (no certificate authorities are launched by default)"
  echo "    -n - do not deploy chaincode (abstore chaincode is deployed by default)"
  echo "    -v - verbose mode"
  echo "  byfn.sh -h (print this message)"
  echo
  echo "Typically, one would first generate the required certificates and "
  echo "genesis block, then bring up the network. e.g.:"
  echo
  echo "	byfn.sh generate -c mychannel"
  echo "	byfn.sh up -c mychannel -s couchdb"
  echo "        byfn.sh up -c mychannel -s couchdb -i 1.4.0"
  echo "	byfn.sh up -l node"
  echo "	byfn.sh down -c mychannel"
  echo "	byfn.sh upgrade -c mychannel"
  echo
  echo "Taking all defaults:"
  echo "	byfn.sh generate"
  echo "	byfn.sh up"
  echo "	byfn.sh down"
}
# Ask user for confirmation to proceed
function askProceed() {
  # Prompt for confirmation. An empty answer (plain Enter) counts as "yes";
  # invalid input re-prompts by recursing.
  read -p "Continue? [Y/n] " ans
  case "$ans" in
  y | Y | "")
    echo "proceeding ..."
    ;;
  n | N)
    echo "exiting..."
    exit 1
    ;;
  *)
    echo "invalid response"
    askProceed
    ;;
  esac
}
# Obtain CONTAINER_IDS and remove them
# TODO Might want to make this optional - could clear other containers
function clearContainers() {
  # Match chaincode containers by image name (column 2 of `docker ps -a`).
  CONTAINER_IDS=$(docker ps -a | awk '($2 ~ /dev-peer.certkeeper.*/) {print $1}')
  if [ -z "$CONTAINER_IDS" -o "$CONTAINER_IDS" == " " ]; then
    echo "---- No containers available for deletion ----"
  else
    docker rm -f $CONTAINER_IDS
  fi
}
# Delete any images that were generated as a part of this setup
# specifically the following images are often left behind:
# TODO list generated image naming patterns
function removeUnwantedImages() {
  # Match chaincode images by repository name (column 1 of `docker images`).
  DOCKER_IMAGE_IDS=$(docker images | awk '($1 ~ /dev-peer.*.certkeep.*/) {print $3}')
  if [ -z "$DOCKER_IMAGE_IDS" -o "$DOCKER_IMAGE_IDS" == " " ]; then
    echo "---- No images available for deletion ----"
  else
    docker rmi -f $DOCKER_IMAGE_IDS
  fi
}
# Versions of fabric known not to work with this release of first-network
# (space-separated anchored regexes, matched with grep in checkPrereqs)
BLACKLISTED_VERSIONS="^1\.0\. ^1\.1\.0-preview ^1\.1\.0-alpha"
# Do some basic sanity checking to make sure that the appropriate versions of fabric
# binaries/images are available. In the future, additional checking for the presence
# of go or other items could be added.
function checkPrereqs() {
  # Compare the local fabric binary version against the docker image version
  # and refuse blacklisted versions.
  # Note, we check configtxlator externally because it does not require a config file, and peer in the
  # docker image because of FAB-8551 that makes configtxlator return 'development version' in docker
  LOCAL_VERSION=$(configtxlator version | sed -ne 's/ Version: //p')
  DOCKER_IMAGE_VERSION=$(docker run --rm hyperledger/fabric-tools:$IMAGETAG peer version | sed -ne 's/ Version: //p' | head -1)
  echo "LOCAL_VERSION=$LOCAL_VERSION"
  echo "DOCKER_IMAGE_VERSION=$DOCKER_IMAGE_VERSION"
  # Mismatched versions are only a warning; blacklisted versions are fatal.
  if [ "$LOCAL_VERSION" != "$DOCKER_IMAGE_VERSION" ]; then
    echo "=================== WARNING ==================="
    echo "  Local fabric binaries and docker images are  "
    echo "  out of  sync. This may cause problems.       "
    echo "==============================================="
  fi
  for UNSUPPORTED_VERSION in $BLACKLISTED_VERSIONS; do
    echo "$LOCAL_VERSION" | grep -q $UNSUPPORTED_VERSION
    if [ $? -eq 0 ]; then
      echo "ERROR! Local Fabric binary version of $LOCAL_VERSION does not match this newer version of BYFN and is unsupported. Either move to a later version of Fabric or checkout an earlier version of fabric-samples."
      exit 1
    fi
    echo "$DOCKER_IMAGE_VERSION" | grep -q $UNSUPPORTED_VERSION
    if [ $? -eq 0 ]; then
      echo "ERROR! Fabric Docker image version of $DOCKER_IMAGE_VERSION does not match this newer version of BYFN and is unsupported. Either move to a later version of Fabric or checkout an earlier version of fabric-samples."
      exit 1
    fi
  done
}
# Generate the needed certificates, the genesis block and start the network.
# Generate crypto material if needed, then bring the network up with
# docker-compose and run the end-to-end script inside the cli container.
function networkUp() {
  checkPrereqs
  # generate artifacts if they don't exist
  if [ ! -d "crypto-config" ]; then
    generateCerts
    replacePrivateKey
    generateChannelArtifacts
  fi
  # Assemble the compose file list according to the selected options.
  COMPOSE_FILES="-f ${COMPOSE_FILE}"
  if [ "${CERTIFICATE_AUTHORITIES}" == "true" ]; then
    COMPOSE_FILES="${COMPOSE_FILES} -f ${COMPOSE_FILE_CA}"
    export BYFN_CA1_PRIVATE_KEY=$(cd crypto-config/peerOrganizations/org1.example.com/ca && ls *_sk)
    export BYFN_CA2_PRIVATE_KEY=$(cd crypto-config/peerOrganizations/org2.example.com/ca && ls *_sk)
  fi
  if [ "${CONSENSUS_TYPE}" == "kafka" ]; then
    COMPOSE_FILES="${COMPOSE_FILES} -f ${COMPOSE_FILE_KAFKA}"
  elif [ "${CONSENSUS_TYPE}" == "etcdraft" ]; then
    COMPOSE_FILES="${COMPOSE_FILES} -f ${COMPOSE_FILE_RAFT2}"
  fi
  if [ "${IF_COUCHDB}" == "couchdb" ]; then
    COMPOSE_FILES="${COMPOSE_FILES} -f ${COMPOSE_FILE_COUCH}"
  fi
  IMAGE_TAG=$IMAGETAG docker-compose ${COMPOSE_FILES} up -d 2>&1
  # Check docker-compose's OWN exit status. The original ran `docker ps -a`
  # first, so $? reflected `docker ps` and a failed compose-up was silently
  # ignored.
  if [ $? -ne 0 ]; then
    echo "ERROR !!!! Unable to start network"
    exit 1
  fi
  docker ps -a
  # Give distributed ordering services time to finish booting.
  if [ "$CONSENSUS_TYPE" == "kafka" ]; then
    sleep 1
    echo "Sleeping 10s to allow $CONSENSUS_TYPE cluster to complete booting"
    sleep 9
  fi
  if [ "$CONSENSUS_TYPE" == "etcdraft" ]; then
    sleep 1
    echo "Sleeping 15s to allow $CONSENSUS_TYPE cluster to complete booting"
    sleep 14
  fi
  # now run the end to end script
  docker exec cli scripts/script.sh $CHANNEL_NAME $CLI_DELAY $LANGUAGE $CLI_TIMEOUT $VERBOSE $NO_CHAINCODE
  if [ $? -ne 0 ]; then
    echo "ERROR !!!! Test failed"
    exit 1
  fi
}
# Upgrade the network components which are at version 1.3.x to 1.4.x
# Stop the orderer and peers, backup the ledger for orderer and peers, cleanup chaincode containers and images
# and relaunch the orderer and peers with latest tag
function upgradeNetwork() {
  # Only a v1.4.x (or "latest") image tag is accepted as the upgrade target.
  if [[ "$IMAGETAG" == *"1.4"* ]] || [[ $IMAGETAG == "latest" ]]; then
    # Sanity check: the orderer must expose the production volume, i.e. the
    # network was created with fabric-samples >= v1.3.x.
    docker inspect -f '{{.Config.Volumes}}' orderer.example.com | grep -q '/var/hyperledger/production/orderer'
    if [ $? -ne 0 ]; then
      echo "ERROR !!!! This network does not appear to start with fabric-samples >= v1.3.x?"
      exit 1
    fi
    LEDGERS_BACKUP=./ledgers-backup
    # create ledger-backup directory
    mkdir -p $LEDGERS_BACKUP
    export IMAGE_TAG=$IMAGETAG
    # Assemble the same compose file list that networkUp uses.
    COMPOSE_FILES="-f ${COMPOSE_FILE}"
    if [ "${CERTIFICATE_AUTHORITIES}" == "true" ]; then
      COMPOSE_FILES="${COMPOSE_FILES} -f ${COMPOSE_FILE_CA}"
      export BYFN_CA1_PRIVATE_KEY=$(cd crypto-config/peerOrganizations/org1.example.com/ca && ls *_sk)
      export BYFN_CA2_PRIVATE_KEY=$(cd crypto-config/peerOrganizations/org2.example.com/ca && ls *_sk)
    fi
    if [ "${CONSENSUS_TYPE}" == "kafka" ]; then
      COMPOSE_FILES="${COMPOSE_FILES} -f ${COMPOSE_FILE_KAFKA}"
    elif [ "${CONSENSUS_TYPE}" == "etcdraft" ]; then
      COMPOSE_FILES="${COMPOSE_FILES} -f ${COMPOSE_FILE_RAFT2}"
    fi
    if [ "${IF_COUCHDB}" == "couchdb" ]; then
      COMPOSE_FILES="${COMPOSE_FILES} -f ${COMPOSE_FILE_COUCH}"
    fi
    # removing the cli container
    docker-compose $COMPOSE_FILES stop cli
    docker-compose $COMPOSE_FILES up -d --no-deps cli
    echo "Upgrading orderer"
    docker-compose $COMPOSE_FILES stop orderer.example.com
    docker cp -a orderer.example.com:/var/hyperledger/production/orderer $LEDGERS_BACKUP/orderer.example.com
    docker-compose $COMPOSE_FILES up -d --no-deps orderer.example.com
    for PEER in peer0.org1.example.com peer1.org1.example.com peer0.org2.example.com peer1.org2.example.com; do
      echo "Upgrading peer $PEER"
      # Stop the peer and backup its ledger
      docker-compose $COMPOSE_FILES stop $PEER
      docker cp -a $PEER:/var/hyperledger/production $LEDGERS_BACKUP/$PEER/
      # Remove any old containers and images for this peer
      CC_CONTAINERS=$(docker ps | grep dev-$PEER | awk '{print $1}')
      if [ -n "$CC_CONTAINERS" ]; then
        docker rm -f $CC_CONTAINERS
      fi
      CC_IMAGES=$(docker images | grep dev-$PEER | awk '{print $1}')
      if [ -n "$CC_IMAGES" ]; then
        docker rmi -f $CC_IMAGES
      fi
      # Start the peer again
      docker-compose $COMPOSE_FILES up -d --no-deps $PEER
    done
    # NOTE(review): $CH_NAME is never assigned anywhere in this script, so
    # SYS_CHANNEL is set to an empty string inside the container — likely
    # $SYS_CHANNEL was intended; confirm against scripts/upgrade_to_v14.sh.
    docker exec cli sh -c "SYS_CHANNEL=$CH_NAME && scripts/upgrade_to_v14.sh $CHANNEL_NAME $CLI_DELAY $LANGUAGE $CLI_TIMEOUT $VERBOSE"
    if [ $? -ne 0 ]; then
      echo "ERROR !!!! Test failed"
      exit 1
    fi
  else
    echo "ERROR !!!! Pass the v1.4.x image tag"
  fi
}
# Tear down running network
function networkDown() {
  # stop org3 containers also in addition to org1 and org2, in case we were running sample to add org3
  # stop kafka and zookeeper containers in case we're running with kafka consensus-type
  docker-compose -f $COMPOSE_FILE -f $COMPOSE_FILE_COUCH -f $COMPOSE_FILE_KAFKA -f $COMPOSE_FILE_RAFT2 -f $COMPOSE_FILE_CA -f $COMPOSE_FILE_ORG3 down --volumes --remove-orphans
  # Don't remove the generated artifacts -- note, the ledgers are always removed
  if [ "$MODE" != "restart" ]; then
    # Bring down the network, deleting the volumes
    # Delete any ledger backups (done inside a tools container so root-owned
    # files created by the peers can be removed)
    docker run -v $PWD:/tmp/first-network --rm hyperledger/fabric-tools:$IMAGETAG rm -Rf /tmp/first-network/ledgers-backup
    # Cleanup the chaincode containers
    clearContainers
    # Cleanup images
    removeUnwantedImages
    # remove orderer block and other channel configuration transactions and certs
    rm -rf channel-artifacts/*.block channel-artifacts/*.tx crypto-config ./org3-artifacts/crypto-config/ channel-artifacts/org3.json
    # remove the docker-compose yaml file that was customized to the example
    rm -f docker-compose-e2e.yaml
  fi
}
# Using docker-compose-e2e-template.yaml, replace constants with private key file names
# generated by the cryptogen tool and output a docker-compose.yaml specific to this
# configuration
function replacePrivateKey() {
  # Substitute the cryptogen-generated CA private-key file names into the
  # e2e compose file built from docker-compose-e2e-template.yaml.
  # sed on MacOSX does not support -i flag with a null extension. We will use
  # 't' for our back-up's extension and delete it at the end of the function
  ARCH=$(uname -s | grep Darwin)
  if [ "$ARCH" == "Darwin" ]; then
    OPTS="-it"
  else
    OPTS="-i"
  fi
  # Copy the template to the file that will be modified to add the private key
  cp docker-compose-e2e-template.yaml docker-compose-e2e.yaml
  # The next steps will replace the template's contents with the
  # actual values of the private key file names for the two CAs.
  CURRENT_DIR=$PWD
  cd crypto-config/peerOrganizations/org1.example.com/ca/
  PRIV_KEY=$(ls *_sk)
  cd "$CURRENT_DIR"
  sed $OPTS "s/CA1_PRIVATE_KEY/${PRIV_KEY}/g" docker-compose-e2e.yaml
  cd crypto-config/peerOrganizations/org2.example.com/ca/
  PRIV_KEY=$(ls *_sk)
  cd "$CURRENT_DIR"
  sed $OPTS "s/CA2_PRIVATE_KEY/${PRIV_KEY}/g" docker-compose-e2e.yaml
  # If MacOSX, remove the temporary backup of the docker-compose file
  if [ "$ARCH" == "Darwin" ]; then
    rm docker-compose-e2e.yamlt
  fi
}
# We will use the cryptogen tool to generate the cryptographic material (x509 certs)
# for our various network entities. The certificates are based on a standard PKI
# implementation where validation is achieved by reaching a common trust anchor.
#
# Cryptogen consumes a file - ``crypto-config.yaml`` - that contains the network
# topology and allows us to generate a library of certificates for both the
# Organizations and the components that belong to those Organizations. Each
# Organization is provisioned a unique root certificate (``ca-cert``), that binds
# specific components (peers and orderers) to that Org. Transactions and communications
# within Fabric are signed by an entity's private key (``keystore``), and then verified
# by means of a public key (``signcerts``). You will notice a "count" variable within
# this file. We use this to specify the number of peers per Organization; in our
# case it's two peers per Org. The rest of this template is extremely
# self-explanatory.
#
# After we run the tool, the certs will be parked in a folder titled ``crypto-config``.
# Generates Org certs using cryptogen tool
function generateCerts() {
  # Fail fast when the cryptogen binary is not on PATH.
  which cryptogen
  if [ "$?" -ne 0 ]; then
    echo "cryptogen tool not found. exiting"
    exit 1
  fi
  echo
  echo "##########################################################"
  echo "##### Generate certificates using cryptogen tool #########"
  echo "##########################################################"
  # Regenerate from scratch: remove any stale crypto material first.
  if [ -d "crypto-config" ]; then
    rm -Rf crypto-config
  fi
  set -x
  cryptogen generate --config=./crypto-config.yaml
  res=$?
  set +x
  if [ $res -ne 0 ]; then
    echo "Failed to generate certificates..."
    exit 1
  fi
  echo
  # Connection-profile files for the SDKs.
  echo "Generate CCP files for Org1 and Org2"
  ./ccp-generate.sh
}
# The `configtxgen tool is used to create four artifacts: orderer **bootstrap
# block**, fabric **channel configuration transaction**, and two **anchor
# peer transactions** - one for each Peer Org.
#
# The orderer block is the genesis block for the ordering service, and the
# channel transaction file is broadcast to the orderer at channel creation
# time. The anchor peer transactions, as the name might suggest, specify each
# Org's anchor peer on this channel.
#
# Configtxgen consumes a file - ``configtx.yaml`` - that contains the definitions
# for the sample network. There are three members - one Orderer Org (``OrdererOrg``)
# and two Peer Orgs (``Org1`` & ``Org2``) each managing and maintaining two peer nodes.
# This file also specifies a consortium - ``SampleConsortium`` - consisting of our
# two Peer Orgs. Pay specific attention to the "Profiles" section at the top of
# this file. You will notice that we have two unique headers. One for the orderer genesis
# block - ``TwoOrgsOrdererGenesis`` - and one for our channel - ``TwoOrgsChannel``.
# These headers are important, as we will pass them in as arguments when we create
# our artifacts. This file also contains two additional specifications that are worth
# noting. Firstly, we specify the anchor peers for each Peer Org
# (``peer0.org1.example.com`` & ``peer0.org2.example.com``). Secondly, we point to
# the location of the MSP directory for each member, in turn allowing us to store the
# root certificates for each Org in the orderer genesis block. This is a critical
# concept. Now any network entity communicating with the ordering service can have
# its digital signature verified.
#
# This function will generate the crypto material and our four configuration
# artifacts, and subsequently output these files into the ``channel-artifacts``
# folder.
#
# If you receive the following warning, it can be safely ignored:
#
# [bccsp] GetDefault -> WARN 001 Before using BCCSP, please call InitFactories(). Falling back to bootBCCSP.
#
# You can ignore the logs regarding intermediate certs, we are not using them in
# this crypto implementation.
# Generate orderer genesis block, channel configuration transaction and
# anchor peer update transactions
# Generate the orderer genesis block, the channel creation transaction and
# the two anchor-peer update transactions into ./channel-artifacts.
function generateChannelArtifacts() {
  # Fail fast when the configtxgen binary is not on PATH.
  which configtxgen
  if [ "$?" -ne 0 ]; then
    echo "configtxgen tool not found. exiting"
    exit 1
  fi
  echo "##########################################################"
  echo "#########  Generating Orderer Genesis block ##############"
  echo "##########################################################"
  # Note: For some unknown reason (at least for now) the block file can't be
  # named orderer.genesis.block or the orderer will fail to launch!
  echo "CONSENSUS_TYPE="$CONSENSUS_TYPE
  set -x
  if [ "$CONSENSUS_TYPE" == "solo" ]; then
    configtxgen -profile TwoOrgsOrdererGenesis -channelID $SYS_CHANNEL -outputBlock ./channel-artifacts/genesis.block
  elif [ "$CONSENSUS_TYPE" == "kafka" ]; then
    configtxgen -profile SampleDevModeKafka -channelID $SYS_CHANNEL -outputBlock ./channel-artifacts/genesis.block
  elif [ "$CONSENSUS_TYPE" == "etcdraft" ]; then
    configtxgen -profile SampleMultiNodeEtcdRaft -channelID $SYS_CHANNEL -outputBlock ./channel-artifacts/genesis.block
  else
    set +x
    # message fixed: the original printed the misspelled "CONSESUS_TYPE"
    echo "unrecognized CONSENSUS_TYPE='$CONSENSUS_TYPE'. exiting"
    exit 1
  fi
  res=$?
  set +x
  if [ $res -ne 0 ]; then
    echo "Failed to generate orderer genesis block..."
    exit 1
  fi
  echo
  echo "#################################################################"
  echo "### Generating channel configuration transaction 'channel.tx' ###"
  echo "#################################################################"
  set -x
  configtxgen -profile TwoOrgsChannel -outputCreateChannelTx ./channel-artifacts/channel.tx -channelID $CHANNEL_NAME
  res=$?
  set +x
  if [ $res -ne 0 ]; then
    echo "Failed to generate channel configuration transaction..."
    exit 1
  fi
  echo
  echo "#################################################################"
  echo "#######    Generating anchor peer update for Org1MSP   ##########"
  echo "#################################################################"
  set -x
  configtxgen -profile TwoOrgsChannel -outputAnchorPeersUpdate ./channel-artifacts/Org1MSPanchors.tx -channelID $CHANNEL_NAME -asOrg Org1MSP
  res=$?
  set +x
  if [ $res -ne 0 ]; then
    echo "Failed to generate anchor peer update for Org1MSP..."
    exit 1
  fi
  echo
  echo "#################################################################"
  echo "#######    Generating anchor peer update for Org2MSP   ##########"
  echo "#################################################################"
  set -x
  configtxgen -profile TwoOrgsChannel -outputAnchorPeersUpdate \
    ./channel-artifacts/Org2MSPanchors.tx -channelID $CHANNEL_NAME -asOrg Org2MSP
  res=$?
  set +x
  if [ $res -ne 0 ]; then
    echo "Failed to generate anchor peer update for Org2MSP..."
    exit 1
  fi
  echo
}
# Obtain the OS and Architecture string that will be used to select the correct
# native binaries for your platform, e.g., darwin-amd64 or linux-amd64
OS_ARCH=$(echo "$(uname -s | tr '[:upper:]' '[:lower:]' | sed 's/mingw64_nt.*/windows/')-$(uname -m | sed 's/x86_64/amd64/g')" | awk '{print tolower($0)}')
# timeout duration - the duration the CLI should wait for a response from
# another container before giving up
CLI_TIMEOUT=10
# default for delay between commands
CLI_DELAY=3
# system channel name defaults to "byfn-sys-channel"
SYS_CHANNEL="byfn-sys-channel"
# channel name defaults to "mychannel"
CHANNEL_NAME="mychannel"
# use this as the default docker-compose yaml definition
COMPOSE_FILE=docker-compose-cli.yaml
# couchdb state-database compose file
COMPOSE_FILE_COUCH=docker-compose-couch.yaml
# org3 docker compose file
COMPOSE_FILE_ORG3=docker-compose-org3.yaml
# kafka and zookeeper compose file
COMPOSE_FILE_KAFKA=docker-compose-kafka.yaml
# two additional etcd/raft orderers
COMPOSE_FILE_RAFT2=docker-compose-etcdraft2.yaml
# certificate authorities compose file
COMPOSE_FILE_CA=docker-compose-ca.yaml
# use golang as the default language for chaincode
LANGUAGE=golang
# default image tag
IMAGETAG="latest"
# default consensus type
CONSENSUS_TYPE="solo"
# Parse commandline args
if [ "$1" = "-m" ]; then # supports old usage, muscle memory is powerful!
  shift
fi
MODE=$1
shift
# Determine whether starting, stopping, restarting, generating or upgrading
if [ "$MODE" == "up" ]; then
  EXPMODE="Starting"
elif [ "$MODE" == "down" ]; then
  EXPMODE="Stopping"
elif [ "$MODE" == "restart" ]; then
  EXPMODE="Restarting"
elif [ "$MODE" == "generate" ]; then
  EXPMODE="Generating certs and genesis block"
elif [ "$MODE" == "upgrade" ]; then
  EXPMODE="Upgrading the network"
else
  printHelp
  exit 1
fi
while getopts "h?c:t:d:f:s:l:i:o:anv" opt; do
  case "$opt" in
  h | \?)
    printHelp
    exit 0
    ;;
  c)
    CHANNEL_NAME=$OPTARG
    ;;
  t)
    CLI_TIMEOUT=$OPTARG
    ;;
  d)
    CLI_DELAY=$OPTARG
    ;;
  f)
    COMPOSE_FILE=$OPTARG
    ;;
  s)
    IF_COUCHDB=$OPTARG
    ;;
  l)
    LANGUAGE=$OPTARG
    ;;
  i)
    # NOTE(review): prefixes the tag with the host Go architecture, so the
    # go toolchain must be on PATH when -i is used — confirm this is intended.
    IMAGETAG=$(go env GOARCH)"-"$OPTARG
    ;;
  o)
    CONSENSUS_TYPE=$OPTARG
    ;;
  a)
    CERTIFICATE_AUTHORITIES=true
    ;;
  n)
    NO_CHAINCODE=true
    ;;
  v)
    VERBOSE=true
    ;;
  esac
done
# Announce what was requested
if [ "${IF_COUCHDB}" == "couchdb" ]; then
  echo
  echo "${EXPMODE} for channel '${CHANNEL_NAME}' with CLI timeout of '${CLI_TIMEOUT}' seconds and CLI delay of '${CLI_DELAY}' seconds and using database '${IF_COUCHDB}'"
else
  echo "${EXPMODE} for channel '${CHANNEL_NAME}' with CLI timeout of '${CLI_TIMEOUT}' seconds and CLI delay of '${CLI_DELAY}' seconds"
fi
# ask for confirmation to proceed
askProceed
# Create the network using docker compose
if [ "${MODE}" == "up" ]; then
  networkUp
elif [ "${MODE}" == "down" ]; then ## Clear the network
  networkDown
elif [ "${MODE}" == "generate" ]; then ## Generate Artifacts
  generateCerts
  replacePrivateKey
  generateChannelArtifacts
elif [ "${MODE}" == "restart" ]; then ## Restart the network
  networkDown
  networkUp
elif [ "${MODE}" == "upgrade" ]; then ## Upgrade the network from version 1.2.x to 1.3.x
  upgradeNetwork
else
  printHelp
  exit 1
fi
|
import * as Hapi from 'hapi';
import * as Joi from 'joi';
import { AuthLoginController, AuthLogoutController, AuthSignUpController } from './controllers/auth';
import { UsersController, UsersMeController } from './controllers/users';
export default class Routes {
public static async init(server: Hapi.Server): Promise<void> {
server.route({ method: [ 'POST' ], path: '/login', handler: AuthLoginController.handler, config: {
validate: {
payload: Joi.object({
username: Joi.string().required(),
password: Joi.string().required(),
})
}
}});
server.route({ method: [ 'DELETE' ], path: '/logout', handler: AuthLogoutController.handler });
server.route({ method: [ 'POST' ], path: '/signup', handler: AuthSignUpController.handler, config: {
validate: {
payload: Joi.object({
username: Joi.string().required(),
password: <PASSWORD>(),
fullname: Joi.string().required(),
})
}
}});
server.route({ method: [ 'GET' ], path: '/users/me', handler: UsersMeController.handler, config: { auth: { mode: 'required' } } });
server.route({ method: [ 'PUT' ], path: '/users/me', handler: UsersMeController.handler, config: {
auth: { mode: 'required' },
validate: {
payload: Joi.object({
fullname: Joi.string(),
active: Joi.boolean(),
password: Joi.string(),
})
}
}});
server.route({ method: [ 'GET' ], path: '/users/{id}', handler: UsersController.handler, config: {
validate: {
params: {
id: Joi.number().required()
}
}
}});
}
}
|
<filename>pkg/generate/enum_def_test.go
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package generate_test
import (
"sort"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/aws-controllers-k8s/code-generator/pkg/model"
"github.com/aws-controllers-k8s/code-generator/pkg/testutil"
)
// sortedOriginalValues collects the Original string of every enum value and
// returns them in ascending order.
func sortedOriginalValues(vals []model.EnumValue) []string {
	originals := []string{}
	for i := range vals {
		originals = append(originals, vals[i].Original)
	}
	sort.Strings(originals)
	return originals
}
// sortedCleanValues collects the Clean (Go-safe) string of every enum value
// and returns them in ascending order.
func sortedCleanValues(vals []model.EnumValue) []string {
	cleaned := []string{}
	for i := range vals {
		cleaned = append(cleaned, vals[i].Clean)
	}
	sort.Strings(cleaned)
	return cleaned
}
// getEnumDefByName returns the enum definition whose original name equals
// name, or nil when no such definition exists.
func getEnumDefByName(name string, enumDefs []*model.EnumDef) *model.EnumDef {
	for i := range enumDefs {
		if enumDefs[i].Names.Original == name {
			return enumDefs[i]
		}
	}
	return nil
}
// TestEnumDefs verifies that GetEnumDefs exposes, per enum: the camelLower
// name and both the original and "clean" (Go-identifier-safe) value strings,
// using table-driven cases for the ecr and ec2 test fixtures.
func TestEnumDefs(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)
	tests := []struct {
		name              string
		service           string
		expNameCamel      string
		expNameCamelLower string
		expValuesOrig     []string
		expValuesClean    []string
	}{
		{
			// values already valid Go identifiers: Clean == Original
			"original same as clean value",
			"ecr",
			"ScanStatus",
			"scanStatus",
			[]string{
				"COMPLETE",
				"FAILED",
				"IN_PROGRESS",
			},
			[]string{
				"COMPLETE",
				"FAILED",
				"IN_PROGRESS",
			},
		},
		{
			// hyphens are rewritten to underscores in the clean values
			"value strings need cleaning for Go output",
			"ec2",
			"InstanceLifecycle",
			"instanceLifecycle",
			[]string{
				"on-demand",
				"spot",
			},
			[]string{
				"on_demand",
				"spot",
			},
		},
	}
	for _, test := range tests {
		g := testutil.NewGeneratorForService(t, test.service)
		edefs, err := g.GetEnumDefs()
		require.Nil(err)
		edef := getEnumDefByName(test.expNameCamel, edefs)
		require.NotNil(edef)
		assert.Equal(test.expNameCamelLower, edef.Names.CamelLower)
		assert.Equal(len(test.expValuesOrig), len(edef.Values))
		assert.Equal(test.expValuesOrig, sortedOriginalValues(edef.Values))
		assert.Equal(test.expValuesClean, sortedCleanValues(edef.Values))
	}
}
|
#!/bin/bash
# Preprocess en->de corpus data and train an LSTM (Luong) model with fairseq,
# then extract words-per-second figures from the training log.
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
TOOLS=$( realpath "$ROOT/../tools" )
DATA=$( realpath "$ROOT/../data" )
GPUS="0"
MODEL=model.fairseq
mkdir -p $MODEL
# Binarize the corpus once; skip when the output already exists.
# Note: the log redirection must be part of this command — in the original it
# sat on its own line (missing trailing backslash), which only truncated
# fairseq.data.log and let the preprocess output spill onto the console.
test -f $MODEL/train-data/train.en-de.de.bin || python3.6 $TOOLS/fairseq/preprocess.py \
    --source-lang en --target-lang de \
    --trainpref $DATA/corpus.bpe --validpref $DATA/valid.bpe \
    --destdir $MODEL/train-data \
    > fairseq.data.log 2>&1
CUDA_VISIBLE_DEVICES=0 python3.6 $TOOLS/fairseq/train.py \
    $MODEL/train-data \
    --save-dir $MODEL \
    --arch lstm_luong_wmt_en_de \
    --batch-size 256 --optimizer adam \
    --log-interval 100 --max-update 1100 --seed 1111 \
    --device-id $GPUS \
    > fairseq.log 2>&1
bash $TOOLS/extract-wps-fairseq.sh < fairseq.log > fairseq.wps
|
package main
import (
"github.com/yunfeiyang1916/micro-go-course/go-rpc/service"
"log"
"net"
"net/http"
"net/rpc"
)
// main registers StringService with net/rpc and serves RPC-over-HTTP on
// 127.0.0.1:1234. Serve blocks until the listener fails.
func main() {
	stringService := &service.StringService{}
	// Register fails when the service's methods don't satisfy the net/rpc
	// signature rules; the original ignored that error silently.
	if err := rpc.Register(stringService); err != nil {
		log.Fatal("register error:", err)
	}
	rpc.HandleHTTP()
	l, err := net.Listen("tcp", "127.0.0.1:1234")
	if err != nil {
		log.Fatal("listen error:", err)
	}
	// http.Serve always returns a non-nil error on exit; report it instead
	// of dropping it.
	if err := http.Serve(l, nil); err != nil {
		log.Fatal("serve error:", err)
	}
}
|
#!/bin/sh
#
# Launches a named soaprmi test/utility class with a suitable JDK and the
# classpath produced by classpath.sh.
#
# You can set JAVA_HOME to point to JDK 1.3
# or the shell will try to determine the java location using which.
#
#JAVA_HOME=/l/jdk1.3
#
# No need to modify anything after this line.
# --------------------------------------------------------------------

# Locate java: prefer $JAVA_HOME, otherwise fall back to `which java`.
if [ -z "$JAVA_HOME" ] ; then
    JAVA=`/usr/bin/which java`
    if [ -z "$JAVA" ] ; then
        echo "Cannot find JAVA. Please set your PATH."
        exit 1
    fi
    JAVA_BIN=`dirname "$JAVA"`
    JAVA_HOME=$JAVA_BIN/..
else
    JAVA=$JAVA_HOME/bin/java
fi
#echo "JAVA= $JAVA"
# Accept JDK 1.3 through 1.9 (matched against the `java -version` banner).
if [ ! "`$JAVA -version 2>&1 | egrep "\ 1\.[3456789].*"`" ]; then
    # message fixed: "verion" -> "version"
    echo "Required 1.3 version of JDK: can not use $JAVA"
    echo "Current version is:"
    $JAVA -version
    exit 1
fi
#POLICY=-Djava.security.policy=${TOP}/src/tests/java.policy
#JAVA_OPTS="-Djavax.net.debug=ssl"
#JAVA_OPTS=-Debug=true
#JAVA_OPTS="$JAVA_OPTS -Djava.compiler=NONE"
# Quiet commons-logging except for errors.
JAVA_OPTS="-Dorg.apache.commons.logging.Log=org.apache.commons.logging.impl.SimpleLog -Dorg.apache.commons.logging.simplelog.defaultlog=error"
# Build the runtime classpath via the helper script.
LOCALCLASSPATH=`/bin/sh $PWD/classpath.sh run`
MY_JAVA="$JAVA $JAVA_OPTS $JAVA_DEBUG_OPTS -cp $LOCALCLASSPATH"
if [ -z "$1" ] ; then
    echo "Please specify test name."
    exit 1
fi
NAME=$1
shift
# Map short test names to their main classes; anything else is treated as a
# fully qualified class name.
if [ "$NAME" = "registry" ] ; then
    CMD="$MY_JAVA soaprmi.registry.RegistryImpl $*"
elif [ "$NAME" = "secure_registry" ] ; then
    CMD="$MY_JAVA soaprmi.registry.SecureRegistryImpl $*"
elif [ "$NAME" = "hello_server" ] ; then
    CMD="$MY_JAVA hello.HelloServer $*"
elif [ "$NAME" = "hello_client" ] ; then
    CMD="$MY_JAVA hello.HelloClient $*"
elif [ "$NAME" = "junit" ] ; then
    CMD="$MY_JAVA AllTests $*"
else
    CMD="$MY_JAVA $NAME $*"
fi
echo $CMD
$CMD
|
def median(input_array):
    """Return the median of a non-empty sequence of numbers.

    Unlike the original implementation, the input is NOT modified:
    a sorted copy is used instead of an in-place sort.

    Args:
        input_array: non-empty sequence of numbers.

    Returns:
        The middle element for odd lengths, or the mean of the two middle
        elements (a float) for even lengths.

    Raises:
        ValueError: if input_array is empty.
    """
    if not input_array:
        raise ValueError("median() requires at least one data point")
    # sort a copy in ascending order so the caller's list is untouched
    ordered = sorted(input_array)
    n = len(ordered)
    mid = n // 2
    # return the median depending on whether the length is odd or even
    if n % 2 != 0:
        return ordered[mid]
    return (ordered[mid] + ordered[mid - 1]) / 2
#!/bin/bash
# Starts a Jenkins container (UI on 8080, agent port 50000) with the host's
# docker binary and socket mounted so Jenkins jobs can run docker commands.
# The commented lines are earlier variants using the wbrune/jenkins image.
#docker run -d -p 8080:8080 -p 50000:50000 -v /opt/docker/jenkins:/var/jenkins_home -v /usr/bin/docker:/usr/bin/docker -v /var/run/docker.sock:/run/docker.sock wbrune/jenkins
docker run -d -p 8080:8080 -p 50000:50000 -v /opt/docker/test-jenkins:/var/jenkins_home -v /usr/bin/docker:/usr/bin/docker -v /var/run/docker.sock:/run/docker.sock registry.int.nsc.ag:5000/jenkins
#docker run -d -p 8080:8080 -p 50000:50000 -v /opt/docker/jenkins:/var/jenkins_home -v /usr/bin/docker:/usr/bin/docker -v /var/run/docker.sock:/run/docker.sock wbrune/jenkins
|
def compute_factorial(num):
    """Return num! (the factorial of a non-negative integer).

    Args:
        num: non-negative integer.

    Returns:
        num! as an int; 0! and 1! are both 1.

    Raises:
        ValueError: if num is negative (factorial is undefined there;
            the original silently returned 1).
    """
    if num < 0:
        raise ValueError("factorial is undefined for negative numbers")
    factorial = 1
    # start at 2: multiplying by 1 is a no-op
    for i in range(2, num + 1):
        factorial *= i
    return factorial


result = compute_factorial(5)
print(result)
// Worker entry point executed on the background thread.
public void MyMethod() {
    // Do something
}

// NOTE(review): fragment — MyMethod matches the ThreadStart delegate shape
// (void, no parameters), so it can be passed directly to the Thread constructor.
Thread thread = new Thread(MyMethod);
thread.Start();
<reponame>zhouzhigang076/everydayfresh<filename>day_fresh/df_goods/admin.py
# -*- coding: utf-8 -*-
"""Django admin registrations for the df_goods app."""
from __future__ import unicode_literals

from django.contrib import admin

# BUG FIX: `import models` is a Python 2 implicit relative import and raises
# ImportError on Python 3; the explicit relative form works on both.
from . import models


# Register your models here.
class GoodsInfoLine(admin.TabularInline):
    """Inline rows so goods can be edited on their category's admin page."""
    model = models.GoodsInfo


class TypeInfoAdmin(admin.ModelAdmin):
    """Category admin that embeds its goods as inline rows."""
    inlines = [GoodsInfoLine]


admin.site.register(models.TypeInfo, TypeInfoAdmin)
admin.site.register(models.GoodsInfo)
|
<reponame>mylesnoton/wake-on-lan<gh_stars>0
package main
import (
"errors"
"net"
"regexp"
"strconv"
"strings"
"github.com/gookit/color"
)
// New wake on lan request
// New builds a Wake-on-LAN magic packet for macAddr and broadcasts it to
// bCast (UDP port 9). Errors from the helper steps are passed to handleErr,
// which only logs them — execution then continues, so a failed parse may
// still attempt to build/send with zero-value inputs (see handleErr).
func New(macAddr string, bCast string) {
	macAddrArray, err := cleanAndConvertMacAddr(macAddr)
	handleErr(err)
	magicPacket, err := buildMagicPacket(macAddrArray)
	handleErr(err)
	send(bCast, magicPacket)
}
func cleanAndConvertMacAddr(macAddr string) ([]string, error) {
matched, err := regexp.MatchString(`([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})`, macAddr)
handleErr(err)
if matched != true {
return nil, errors.New("MAC Address is not valid")
}
macAddr = strings.ToLower(macAddr)
macAddr = strings.ReplaceAll(macAddr, ":", "-")
macAddrSplit := strings.Split(macAddr, "-")
return macAddrSplit, nil
}
// send broadcasts magicPacket over UDP to bCast on port 9 (a port commonly
// used for Wake-on-LAN) and prints the number of bytes written in green.
// Resolution/dial/write errors are logged via handleErr; note that a failed
// dial leaves conn nil, in which case the deferred Close and Write would
// panic — handleErr does not abort (see its NOTE).
func send(bCast string, magicPacket []byte) {
	bCastAddr, err := net.ResolveUDPAddr("udp4", bCast+":9")
	handleErr(err)
	conn, err := net.DialUDP("udp4", nil, bCastAddr)
	handleErr(err)
	defer conn.Close()
	_, err = conn.Write(magicPacket)
	handleErr(err)
	color.Green.Println("Broadcast " + strconv.Itoa(len(magicPacket)) + " bytes")
}
// handleErr prints err in red when it is non-nil.
// NOTE(review): it only returns from itself — it does NOT abort the caller,
// so control flow continues after an error with possibly-invalid data.
// Consider os.Exit(1) here or propagating errors instead.
func handleErr(err error) {
	if err != nil {
		color.Red.Println(err.Error())
		return
	}
}
|
<filename>src/templates/book-page.js<gh_stars>0
import React from 'react'
import PropTypes from 'prop-types'
import styled from 'styled-components'
import { graphql } from 'gatsby'
import ReactMarkdown from 'react-markdown'
import Layout from '../layouts/default'
import { HTMLContent } from '../components/Content'
import BookPageContent from '../components/Books/BookPage'
import BodyContainer from '../components/BodyContainer'
const Container = styled.div``
const BookContainer = styled.div``
export const BookPageTemplate = ({ page }) => {
return (
<Container>
<BookContainer>
<BookPageContent post={page} />
</BookContainer>
<BodyContainer>
{page.bodyIsMarkdown ? (
<ReactMarkdown source={page.html} />
) : (
<HTMLContent content={page.html} />
)}
</BodyContainer>
</Container>
)
}
const BookPage = ({ data }) => {
const { markdownRemark: page } = data
return (
<Layout>
<BookPageTemplate page={{ ...page, bodyIsMarkdown: false }} />
</Layout>
)
}
// Runtime prop validation: this page component requires the GraphQL `data` prop.
BookPage.propTypes = {
  data: PropTypes.object.isRequired,
}

export default BookPage

// Page query executed by Gatsby at build time: fetches the book's rendered
// HTML plus frontmatter (cover image, reviews, purchase links) for the
// markdown node matching `$id`.
export const bookPageQuery = graphql`
  query BookPageByID($id: String!) {
    markdownRemark(id: { eq: $id }) {
      id
      html
      frontmatter {
        title
        coverImage {
          alt
          image {
            childImageSharp {
              fluid(maxWidth: 526, quality: 92) {
                ...GatsbyImageSharpFluid
              }
            }
          }
        }
        description
        reviews {
          reviewerName
          message
        }
        linksToBuy {
          label
          linkImage {
            alt
            image {
              childImageSharp {
                fluid(maxWidth: 526, quality: 92) {
                  ...GatsbyImageSharpFluid
                }
              }
            }
          }
          linkURL
        }
      }
    }
  }
`
|
/**
 * Immutable value/version pair used as a fingerprint attribute.
 * Parameter properties declare and assign both fields in one place.
 */
export class Attribute {
  constructor(
    readonly value: string,
    readonly version: number,
  ) {}
}
// Contract for types that fold their attributes into a fingerprint map and
// return the (possibly updated) map.
// NOTE(review): "Intarface" is a typo for "Interface", but the name is
// exported — renaming would break external consumers, so it is kept as-is.
export interface FingerprintIntarface {
  fingerprint(fingerprint: Map<string, Attribute>): Map<string, Attribute>
}
|
def anagram(str1, str2):
    """Return 1 if str1 and str2 are anagrams of each other, else 0.

    Integer results are kept for backward compatibility with callers that
    compare against 0/1; the length pre-check is retained as a cheap
    short-circuit before sorting.
    """
    # strings of different lengths can never be anagrams
    if len(str1) != len(str2):
        return 0
    # anagrams have identical multisets of characters, so their sorted
    # forms are equal; this replaces the manual char-by-char loop
    return 1 if sorted(str1) == sorted(str2) else 0
# Demo: these two strings contain the same characters, so the anagram
# check succeeds and the first branch prints.
str1 = "geeksforgeeks"
str2 = "forgeeksgeeks"
if anagram(str1, str2):
    print('The strings are anagrams.')
else:
    print('The strings are not anagrams.')
#!/bin/sh
#
# Copyright 2017 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
#
#
# OpenSSL external testing using the Python Cryptography module
#
set -e
# Point PATH / LD_LIBRARY_PATH at the freshly built OpenSSL tree so the
# cryptography package links and runs against it rather than the system copy.
O_EXE=`pwd`/$BLDTOP/apps
O_BINC=`pwd`/$BLDTOP/include
O_SINC=`pwd`/$SRCTOP/include
O_LIB=`pwd`/$BLDTOP
export PATH=$O_EXE:$PATH
export LD_LIBRARY_PATH=$O_LIB:$LD_LIBRARY_PATH
# Check/Set openssl version
OPENSSL_VERSION=`openssl version | cut -f 2 -d ' '`
echo "------------------------------------------------------------------"
echo "Testing OpenSSL using Python Cryptography:"
echo "  CWD:                $PWD"
echo "  SRCTOP:             $SRCTOP"
echo "  BLDTOP:             $BLDTOP"
echo "  OpenSSL version:    $OPENSSL_VERSION"
echo "------------------------------------------------------------------"
cd $SRCTOP
# Create a python virtual env and activate
rm -rf venv-pycrypto
virtualenv venv-pycrypto
. ./venv-pycrypto/bin/activate
cd pyca-cryptography
pip install .[test]
echo "------------------------------------------------------------------"
echo "Building cryptography"
echo "------------------------------------------------------------------"
python ./setup.py clean
CFLAGS="-I$O_BINC -I$O_SINC -L$O_LIB" python ./setup.py build
echo "------------------------------------------------------------------"
echo "Running tests"
echo "------------------------------------------------------------------"
CFLAGS="-I$O_BINC -I$O_SINC -L$O_LIB" python ./setup.py test
cd ../
# Tear down the temporary virtualenv regardless of success (set -e above
# aborts earlier on any failure, so reaching here means the tests passed).
deactivate
rm -rf venv-pycrypto
exit 0
|
/**
 * Run the migrations.
 *
 * Creates `rs_model_settings`, a per-model key/value settings table:
 * one row per (model_id, setting_name) pair.
 *
 * @return void
 */
public function up()
{
    Schema::create('rs_model_settings', function($table) {
        $table->increments('id');
        // Owning model row in the `models` table.
        $table->integer('model_id')->unsigned();
        $table->string('setting_name');
        // Text column so values of arbitrary length fit.
        $table->text('setting_value');
        // Define foreign key constraint
        $table->foreign('model_id')->references('id')->on('models');
    });
}
def alert(data):
    """Print an alert naming every data point whose z-score exceeds +/-2.

    Prints nothing when there are no outliers, when the data is empty, or
    when all points are identical (standard deviation of zero — the original
    raised ZeroDivisionError in both of those cases).
    """
    if not data:
        return
    # Population mean / variance / standard deviation of the sample.
    mean = sum(data) / len(data)
    variance = sum((x - mean) ** 2 for x in data) / len(data)
    std_dev = variance ** 0.5
    # Constant data has no spread, hence no outliers (and no valid z-score).
    if std_dev == 0:
        return
    z_scores = [(x - mean) / std_dev for x in data]
    # Collect all outliers, then print one message joined with separators
    # (the original concatenated them with no separator at all).
    outliers = [
        f"Data point {i} with z-score {z}"
        for i, z in enumerate(z_scores)
        if z > 2 or z < -2
    ]
    if outliers:
        print("Unusual data point detected: " + "; ".join(outliers))


data = [1, 2, 3, 4, 5, 100]
alert(data)
#!/bin/bash
# Reproduces data for figure 4 (interference experiments).
PMDK=/home/aim/hjn/pmdk
clang++ -g0 -O3 -DNDEBUG=1 -march=native -std=c++17 interference.cpp -I${PMDK}/src/include/ ${PMDK}/src/nondebug/libpmem.a ${PMDK}/src/nondebug/libpmemlog.a -lpthread -lndctl -ldaxctl || exit -1

# Run one benchmark invocation and append its output to the current result file.
run_bench() {
    ./a.out "$@" /mnt/pmem0/renen | tee -a "$RESULT_FILE"
}

# Run one interference section: workload position $1 (1..6) is pinned to 10
# threads while every other position is swept over 1/5/10 in turn; results are
# appended to file $2. The invocation order matches the original flat script:
# baseline first, then positions in ascending order, each with 1, 5, 10.
run_section() {
    local fixed=$1
    RESULT_FILE=$2
    echo "" > "$RESULT_FILE"
    local args pos v
    args=(0 0 0 0 0 0)
    args[fixed - 1]=10
    run_bench "${args[@]}"
    for pos in 1 2 3 4 5 6; do
        [ "$pos" -eq "$fixed" ] && continue
        for v in 1 5 10; do
            args=(0 0 0 0 0 0)
            args[fixed - 1]=10
            args[pos - 1]=$v
            run_bench "${args[@]}"
        done
    done
}

run_section 1 results/interference_seq_ram.txt   # sequential ram
run_section 2 results/interference_seq_nvm.txt   # sequential nvm
run_section 3 results/interference_rnd_ram.txt   # random ram
run_section 4 results/interference_rnd_nvm.txt   # random nvm
## Log nvm
#./a.out 0 0 0 0 10 0 /mnt/pmem0/renen
#./a.out 1 0 0 0 10 0 /mnt/pmem0/renen
#./a.out 5 0 0 0 10 0 /mnt/pmem0/renen
#./a.out 10 0 0 0 10 0 /mnt/pmem0/renen
#./a.out 0 1 0 0 10 0 /mnt/pmem0/renen
#./a.out 0 5 0 0 10 0 /mnt/pmem0/renen
#./a.out 0 10 0 0 10 0 /mnt/pmem0/renen
#./a.out 0 0 1 0 10 0 /mnt/pmem0/renen
#./a.out 0 0 5 0 10 0 /mnt/pmem0/renen
#./a.out 0 0 10 0 10 0 /mnt/pmem0/renen
#./a.out 0 0 0 1 10 0 /mnt/pmem0/renen
#./a.out 0 0 0 5 10 0 /mnt/pmem0/renen
#./a.out 0 0 0 10 10 0 /mnt/pmem0/renen
#./a.out 0 0 0 0 10 1 /mnt/pmem0/renen
#./a.out 0 0 0 0 10 5 /mnt/pmem0/renen
#./a.out 0 0 0 0 10 10 /mnt/pmem0/renen
#
## Page nvm
#./a.out 0 0 0 0 0 10 /mnt/pmem0/renen
#./a.out 1 0 0 0 0 10 /mnt/pmem0/renen
#./a.out 5 0 0 0 0 10 /mnt/pmem0/renen
#./a.out 10 0 0 0 0 10 /mnt/pmem0/renen
#./a.out 0 1 0 0 0 10 /mnt/pmem0/renen
#./a.out 0 5 0 0 0 10 /mnt/pmem0/renen
#./a.out 0 10 0 0 0 10 /mnt/pmem0/renen
#./a.out 0 0 1 0 0 10 /mnt/pmem0/renen
#./a.out 0 0 5 0 0 10 /mnt/pmem0/renen
#./a.out 0 0 10 0 0 10 /mnt/pmem0/renen
#./a.out 0 0 0 1 0 10 /mnt/pmem0/renen
#./a.out 0 0 0 5 0 10 /mnt/pmem0/renen
#./a.out 0 0 0 10 0 10 /mnt/pmem0/renen
#./a.out 0 0 0 0 1 10 /mnt/pmem0/renen
#./a.out 0 0 0 0 5 10 /mnt/pmem0/renen
#./a.out 0 0 0 0 10 10 /mnt/pmem0/renen |
<reponame>CPCoders/CPMath<filename>Examples/leaky_relu.js<gh_stars>0
var mathlib = require('../lib/cpmath.js');
console.log(mathlib.leaky_relu(56));
|
class SkyboxManager:
def __init__(self, blackside_pack_name):
self.blackside_pack_name = blackside_pack_name
def load_skybox_black_side(self):
# Complete the method to load the black side of the skybox
return loader.loadModel(self.blackside_pack_name + "cubemap.bam") |
# Training schedule, data locations and sampler options.
TRAIN_FLAGS="
 --iterations 300000 --anneal_lr True
 --batch_size 16 --microbatch 16 --lr 1e-4
 --save_interval 10000 --weight_decay 0.05
 --data_dir /workspace/mnt/storage/yangdecheng/imagenet_1k/ImageNet-1k/train
 --val_data_dir /workspace/mnt/storage/yangdecheng/imagenet_1k/ImageNet-1k/val
 --log_root /workspace/guided-diffusion/log
 --tot_class 200
 --dataset_type imagenet-200 --imagenet200_class_list_file_path /workspace/guided-diffusion/datasets/list_tiny_imagenet.txt
 --gpus 0,1
 --only_projection False
 --schedule_sampler range_uniform --t_range_start 0 --t_range_end 750
"
# Noisy-image classifier architecture (200-way output for tiny-imagenet).
CLASSIFIER_FLAGS="
 --classifier_pool attention --classifier_out_channels 200
 --classifier_image_size 256
 --classifier_num_channels 64 --classifier_num_res_blocks 2 --classifier_num_head_channels 64
 --classifier_attention_resolutions 32,16,8
 --classifier_use_fp16 True --classifier_resblock_updown True
 --classifier_use_scale_shift_norm True
"
# Guided-sampling options used when sampling during/after training.
SAMPLE_FLAGS="
 --sample_test_between_train False
 --eva_timestep_respacing ddim25
 --num_samples 512
 --use_ddim True
 --clip_denoised True
 --classifier_scale 10.0
"
# Diffusion U-Net model configuration.
MODEL_FLAGS="
 --learn_sigma True --class_cond False
 --image_size 256
 --num_channels 256 --num_res_blocks 2 --num_head_channels 64
 --attention_resolutions 32,16,8
 --use_fp16 True --resblock_updown True
 --use_scale_shift_norm True
 --num_classes 200
"
# Reference batch for FID-style evaluation.
EVALUATE_FLAGS="
 --ref_batch /workspace/guided-diffusion/pretrain_model/VIRTUAL_tinyimagenet256_labeled.npz
"
# Launch 2-process MPI training.
# NOTE(review): $DIFFUSION_FLAGS is referenced but never defined in this
# script — it expands to nothing; confirm whether it should be set.
mpiexec -n 2 --allow-run-as-root python classifier_train.py \
 $CLASSIFIER_FLAGS $TRAIN_FLAGS $SAMPLE_FLAGS $DIFFUSION_FLAGS $MODEL_FLAGS $EVALUATE_FLAGS \
 --save_name imagenet200_classifier256x256_channel64_rangeUniform
# --model_path /workspace/guided-diffusion/pretrain_model/256x256_diffusion_uncond.pt \
# --classifier_path /workspace/guided-diffusion/pretrain_model/256x256_classifier.pt \
|
#!/bin/bash
# Installs the bundled ovftool binary unless one is already present at
# /usr/bin/ovftool. Sanity-checks the bundle (size >= 10 MB, executable file
# type) before running its installer with the EULA pre-accepted.
# Expects: $ROOT_DIR/ovftool to contain exactly the installer bundle.
function install_ovftool {
    # Install provided ovftool
    if [ ! -e "/usr/bin/ovftool" ]; then
        pushd "$ROOT_DIR/ovftool"
        ovftool_bundle=$(ls *)
        # Quote all expansions so names with spaces cannot break the checks.
        chmod +x "$ovftool_bundle"
        size_of_tool=$(ls -al "$ovftool_bundle" | awk '{print $5}')
        if [ "$size_of_tool" -lt 10000000 ]; then
            echo "ovftool downloaded is lesser than 10 MB!!"
            echo "Check the file name/paths. Exiting from ova copy and deploy!!"
            exit 1
        fi
        is_binary=$(file "$ovftool_bundle" | grep "executable" || true)
        if [ "$is_binary" == "" ]; then
            echo "ovftool downloaded was not a valid binary image!!"
            echo "Check the file name/paths. Exiting from ova copy and deploy!!"
            exit 1
        fi
        ./${ovftool_bundle} --eulas-agreed
        popd
        echo "Done installing ovftool"
    else
        echo "ovftool already installed!!"
    fi
    echo ""
}
# Verifies that the downloaded NSX manager OVA ($ova_file_name_int under
# $ROOT_DIR/nsx-mgr-ova) is a tar archive (the OVA container format);
# exits the script otherwise.
function check_ovas {
    # ova_file_name_int
    ova_file="$ROOT_DIR/nsx-mgr-ova/$ova_file_name_int"
    # Quote the path so spaces in ROOT_DIR/file name cannot split the command.
    is_tar=$(file "$ova_file" | grep "tar archive" || true)
    if [ "$is_tar" == "" ]; then
        echo "File $ova_file downloaded was not a valid OVA image!!"
        echo "Check the file name/paths. Exiting from ova copy and deploy!!"
        exit 1
    fi
}
# Validates the OVA (via check_ovas) and moves it into $OVA_ISO_PATH,
# creating the target directory if needed.
function copy_ovas_to_OVA_ISO_PATH {
    mkdir -p "$OVA_ISO_PATH"
    check_ovas
    # Quote paths so spaces cannot split the mv arguments.
    mv "$ROOT_DIR/nsx-mgr-ova/$ova_file_name_int" "$OVA_ISO_PATH"
    echo "Done moving ova images into $OVA_ISO_PATH"
    echo ""
}
# Writes customize_ova_vars.yml for the downstream Ansible play: ovftool
# location, OVA directory, and (only when reservations are disabled) the
# nsx_t_keep_reservation flag. The `<<-EOF` heredoc strips leading TABs —
# NOTE(review): confirm the body lines are tab-indented, or the terminator
# will not be recognized.
function create_customize_ova_params {
    cat > customize_ova_vars.yml <<-EOF
	ovftool_path: '/usr/bin'
	ova_file_path: "$OVA_ISO_PATH"
	EOF
    if [ "$NSX_T_KEEP_RESERVATION" == "false" ]; then
        echo "nsx_t_keep_reservation: $NSX_T_KEEP_RESERVATION" >> customize_ova_vars.yml
    fi
    #echo "$NSX_T_SIZING_SPEC" >> customize_ova_vars.yml
}
|
import React, { useEffect, useState } from 'react';
import {
BrowserRouter as Router,
Route,
Link
} from 'react-router-dom';
import { HttpClient } from '../../shared/http-client';
// Shape of the subscription payload returned by the payments API.
// NOTE(review): `expirationDate` is declared as Date, but JSON responses carry
// dates as strings — confirm HttpClient revives it before calling Date methods.
interface SubscriptionDetails {
  subscriptionId: string,
  period: string,
  expirationDate: Date,
  status: string
}
/**
 * Shows the authenticated payer's subscription details, or a "buy" prompt
 * while none are loaded. Details are fetched once on mount.
 */
export function SubscriptionDetails() {
  const [subscriptionDetails, setSubscriptionDetails] = useState<SubscriptionDetails | null>(null);

  useEffect(() => {
    loadSubscription();
  }, []);

  function loadSubscription() {
    HttpClient.get<SubscriptionDetails>('api/payments/payers/authenticated/subscription')
      .then(response => setSubscriptionDetails(response))
      // BUG FIX: the promise previously had no rejection handler; a failed
      // request surfaced as an unhandled rejection. On failure we keep the
      // "no subscription" view but log the error for diagnosis.
      .catch(error => {
        console.error('Failed to load subscription details', error);
      });
  }

  return (
    <div className="container">
      Subscription details
      {subscriptionDetails !== null &&
        <div>
          <div>ID: {subscriptionDetails.subscriptionId} </div>
          <div>Period: {subscriptionDetails.period} </div>
          <div>Status: {subscriptionDetails.status} </div>
          <div>Expire: {subscriptionDetails.expirationDate} </div>
        </div>
      }
      {subscriptionDetails === null &&
        <div>
          <div>You don't have a subscription</div>
          <div>
            <Link to="/subscription/buy">Buy subscription</Link>
          </div>
        </div>
      }
    </div>
  )
}
#!/usr/bin/env bash
# Container entrypoint: echo commands and fail fast.
set -xe
# SSH is required for the cluster nodes to reach each other.
service ssh start
./sbin/start-all.sh
echo "Sleeping..."
# Keep the container's PID 1 alive after the daemons background themselves.
sleep infinity
|
#!/bin/bash
# Downloads, builds and installs Open vSwitch from source, then removes the
# tarball and build tree.
# BUG FIX: added `set -e` — previously a failed download/extract/configure
# did not stop the script, and `make install` could run against a stale or
# half-built tree.
set -e
ver=2.14.0
wget "https://www.openvswitch.org/releases/openvswitch-$ver.tar.gz"
tar -xzf "openvswitch-$ver.tar.gz"
# Build prerequisites must be present before ./configure.
sudo apt install -y build-essential libtool autoconf
cd "openvswitch-$ver"
sudo ./configure
sudo make
sudo make install
cd ..
# Clean up sources; the installed artifacts remain.
sudo rm -rf "openvswitch-$ver.tar.gz" "openvswitch-$ver"
|
package de.frvabe.bpm.camunda.tbt.variableScopeDemo;
import org.camunda.bpm.engine.delegate.DelegateExecution;
import org.camunda.bpm.engine.delegate.JavaDelegate;
/**
 * This task will set variables into different variable scopes (by using
 * {@link DelegateExecution#setVariable(String, Object)} and
 * {@link DelegateExecution#setVariableLocal(String, Object)}.
 *
 */
public class SetVariablesTask implements JavaDelegate {

    /**
     * Writes "myGlobal" via setVariable (propagates up the scope hierarchy)
     * and "myLocal" via setVariableLocal (stays on this execution's scope),
     * logging each step to stdout for the demo.
     */
    @Override
    public void execute(DelegateExecution execution) throws Exception {
        System.out.println();
        System.out.println("Entering '" + execution.getCurrentActivityName()
                + "' ... (variableScopeKey=" + execution.getVariableScopeKey() + ")");
        System.out.println(" executing 'execution.setVariable(\"myGlobal\", \"4711\") ...' ");
        execution.setVariable("myGlobal", "4711");
        System.out.println(" executing 'execution.setVariableLocal(\"myLocal\", \"4712\") ...'");
        execution.setVariableLocal("myLocal", "4712");
        System.out.println();
    }
}
|
# Deploys the application to production: refreshes the Docker image and
# containers locally, then starts the app on the remote host over SSH.
#
# Args:
#   $1 application directory (contains docker-compose.yml)
#   $2 Docker image to pull
#   $3 remote SSH user
#   $4 production host
#   $5 remote command that runs the application
# Returns non-zero (with a message) on the first failing step.
deploy_to_production() {
    local APP_DIR=$1
    local APP_DOCKER_IMAGE=$2
    local DIGITAL_OCEAN_USER=$3
    local PRODUCTION_HOST=$4
    local RUN_APP=$5

    # Change directory to the application directory
    cd "$APP_DIR" || { echo "Error: Unable to change directory to $APP_DIR"; return 1; }

    # Pull the latest Docker image
    docker pull "$APP_DOCKER_IMAGE" || { echo "Error: Failed to pull the Docker image $APP_DOCKER_IMAGE"; return 1; }

    # Stop existing containers using Docker Compose
    docker-compose -f "$APP_DIR/docker-compose.yml" stop || { echo "Error: Failed to stop existing containers"; return 1; }

    # Start updated containers using Docker Compose
    docker-compose -f "$APP_DIR/docker-compose.yml" up -d || { echo "Error: Failed to start updated containers"; return 1; }

    # Use SSH to connect to the production server and run the application.
    # `if ! cmd` replaces the fragile `$?` check and quoting keeps the
    # user@host and command arguments intact.
    if ! ssh -i ~/.ssh/deploy_rsa -oStrictHostKeyChecking=no "$DIGITAL_OCEAN_USER@$PRODUCTION_HOST" "$RUN_APP"; then
        echo "Error: Failed to run the application on the production server"
        return 1
    fi
}
import { assert, Inject, Injectable } from "../../mod.ts";
import { ASYNC_KEY } from "./async.constant.ts";
/**
 * Demonstrates async provider injection: receives the boolean registered
 * under ASYNC_KEY and asserts it resolved to `true` at construction time.
 */
@Injectable()
export class AsyncService {
  constructor(@Inject(ASYNC_KEY) private readonly connected: boolean) {
    // Fail fast if the async provider did not resolve to true.
    assert(this.connected === true, "injected CONNECTION_ASYNC maybe true");
  }

  /** Human-readable status string including the injected flag. */
  info() {
    return "info from AsyncService and the connected is: " + this.connected;
  }
}
|
# Builds every CATSdesigner front-end module from a clean dependency state.
export modules_path='g:/.temp/CATSdesigner/modules'
export admin_path=$modules_path'/admin'
export tests_path=$modules_path'/tests'
export subjects_path=$modules_path'/subjects'
export cp_path=$modules_path'/course-projects'
export dp_path=$modules_path'/diplom-projects'
export confirmation_path=$modules_path'/confirmation'
export editor_path=$modules_path'/editor'
export complex_path=$modules_path'/complex'
export schedule_path=$modules_path'/schedule'
export statistics_path=$modules_path'/statistics'

# Remove cached dependencies, reinstall, and produce a QA build for one module.
# BUG FIX: the original used `cd /d $admin_path` — `/d` is a cmd.exe flag,
# not valid in a POSIX shell; `cd` would fail and the build ran in the wrong
# directory. We also abort if the directory cannot be entered.
build_module() {
    cd "$1" || exit 1
    npx rimraf ./node_modules
    npx rimraf ./package-lock.json
    npm i
    npm run build:qa
}

# Build order matches the original script.
for module_path in "$admin_path" "$tests_path" "$subjects_path" "$cp_path" \
                   "$dp_path" "$confirmation_path" "$complex_path" \
                   "$editor_path" "$schedule_path" "$statistics_path"; do
    build_module "$module_path"
done
#!/bin/bash -x
set -eo pipefail
# $PUBLIC_IP $PRIVATE_IP $PUBLIC_HOSTNAME $BOULDER_URL are dynamically set at execution
# with curl, instance metadata available from EC2 metadata service:
#public_host=$(curl -s http://169.254.169.254/2014-11-05/meta-data/public-hostname)
#public_ip=$(curl -s http://169.254.169.254/2014-11-05/meta-data/public-ipv4)
#private_ip=$(curl -s http://169.254.169.254/2014-11-05/meta-data/local-ipv4)
cd letsencrypt
# Install the bundled letsencrypt-auto as root and put it on PATH.
LE_AUTO_DIR="/usr/local/bin"
LE_AUTO_PATH="$LE_AUTO_DIR/letsencrypt-auto"
sudo cp letsencrypt-auto-source/letsencrypt-auto "$LE_AUTO_PATH"
sudo chown root "$LE_AUTO_PATH"
sudo chmod 0755 "$LE_AUTO_PATH"
export PATH="$LE_AUTO_DIR:$PATH"
letsencrypt-auto --os-packages-only --debug --version
# Create a venv-like layout at the old virtual environment path to test that a
# symlink is properly created when letsencrypt-auto runs.
HOME=${HOME:-~root}
XDG_DATA_HOME=${XDG_DATA_HOME:-~/.local/share}
OLD_VENV_BIN="$XDG_DATA_HOME/letsencrypt/bin"
mkdir -p "$OLD_VENV_BIN"
touch "$OLD_VENV_BIN/letsencrypt"
# Issue a certificate against the test Boulder CA in standalone mode.
letsencrypt-auto certonly --no-self-upgrade -v --standalone --debug \
                 --text --agree-dev-preview --agree-tos \
                 --renew-by-default --redirect \
                 --register-unsafely-without-email \
                 --domain $PUBLIC_HOSTNAME --server $BOULDER_URL
# The old venv path must now be a symlink to the canonical install location.
if [ "$(tools/readlink.py ${XDG_DATA_HOME:-~/.local/share}/letsencrypt)" != "/opt/eff.org/certbot/venv" ]; then
    echo symlink from old venv path not properly created!
    exit 1
fi
# Sanity-check the help output and that --quiet really silences everything.
if ! letsencrypt-auto --help --no-self-upgrade | grep -F "letsencrypt-auto [SUBCOMMAND]"; then
    echo "letsencrypt-auto not included in help output!"
    exit 1
fi
OUTPUT=$(letsencrypt-auto --install-only --no-self-upgrade --quiet 2>&1)
if [ -n "$OUTPUT" ]; then
    echo letsencrypt-auto produced unexpected output!
    exit 1
fi
|
class CustomTableViewCell: UITableViewCell {
    // Other properties and methods

    /// Mirrors the cell's selected state with a checkmark accessory.
    /// NOTE(review): `setSelected(selected:animated:)` is the pre-Swift-3
    /// signature; modern UIKit overrides `setSelected(_:animated:)` —
    /// confirm against the project's Swift version.
    override func setSelected(selected: Bool, animated: Bool) {
        super.setSelected(selected, animated: animated)
        if selected {
            // Update UI for selected state
            accessoryType = .checkmark
            // Additional UI updates if needed
        } else {
            // Update UI for deselected state
            accessoryType = .none
            // Additional UI updates if needed
        }
    }
}
class ApikeysProjectsLocationsKeysPatchRequest:
    """Request wrapper for an API-key PATCH (update) call.

    Attributes:
        name: Resource name of the key being updated.
        updateMask: Comma-separated field mask naming which fields the
            request should modify.
    """

    def __init__(self, name, updateMask):
        # Both values are stored verbatim; no validation happens here.
        self.name = name
        self.updateMask = updateMask


# Example usage
request = ApikeysProjectsLocationsKeysPatchRequest(
    name="projects/123456867718/locations/global/keys/b7ff1f9f-8275-410a-94dd-3855ee9b5dd2",
    updateMask="display_name,restrictions"
)
# This script runs a full end-to-end functional test of the dispatcher and the Optimizer transport with the Rotate Strategy, using two netcat instances as the application server and application client.
# An alternative way to run this test is to run each command in its own terminal. Each netcat instance can be used to type content which should appear in the other.
FILENAME=testStunUDPOptimizerRotateOutput.txt
# Update and build code
go get -u github.com/OperatorFoundation/shapeshifter-dispatcher
# remove text from the output file
rm $FILENAME
# Run a demo application server with netcat and write to the output file
nc -l -u 3333 >$FILENAME &
# Run the transport server (one dispatcher per transport: shadow, obfs2, Replicant)
./shapeshifter-dispatcher -udp -server -state state -orport 127.0.0.1:3333 -transports shadow -bindaddr shadow-127.0.0.1:2222 -optionsFile shadowServer.json -logLevel DEBUG -enableLogging &
./shapeshifter-dispatcher -udp -server -state state -orport 127.0.0.1:3333 -transports obfs2 -bindaddr obfs2-127.0.0.1:2223 -logLevel DEBUG -enableLogging &
./shapeshifter-dispatcher -udp -server -state state -orport 127.0.0.1:3333 -transports Replicant -bindaddr Replicant-127.0.0.1:2224 -optionsFile ReplicantServerConfigV2.json -logLevel DEBUG -enableLogging &
sleep 5
# Run the transport client (Optimizer rotates across the servers above)
./shapeshifter-dispatcher -udp -client -state state -target 127.0.0.1:2222 -transports Optimizer -proxylistenaddr 127.0.0.1:1443 -optionsFile OptimizerRotate.json -logLevel DEBUG -enableLogging &
sleep 1
# Run a demo application client with netcat
go test -run StunUDP
sleep 1
# stat flags differ between BSD (macOS) and GNU coreutils
OS=$(uname)
if [ "$OS" = "Darwin" ]
then
    FILESIZE=$(stat -f%z "$FILENAME")
else
    FILESIZE=$(stat -c%s "$FILENAME")
fi
# An empty output file means no traffic made it through the transport chain.
if [ "$FILESIZE" = "0" ]
then
    echo "Test Failed"
    killall shapeshifter-dispatcher
    killall nc
    exit 1
fi
echo "Testing complete. Killing processes."
killall shapeshifter-dispatcher
killall nc
echo "Done."
|
<reponame>nickolyamba/android-chem-app
package app.android.chemicals;
import android.content.Context;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;
import android.os.AsyncTask;
import android.os.Bundle;
import android.support.v4.app.Fragment;
import android.util.Base64;
import android.util.Log;
import android.util.Patterns;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.Button;
import android.widget.EditText;
import android.widget.TextView;
import android.widget.Toast;
import org.json.JSONException;
import org.json.JSONObject;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.UnsupportedEncodingException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;
public class EditSupplierFragment extends Fragment implements View.OnClickListener {
    // Log tag used throughout this fragment.
    private static final String LOM = "LOM";
    // Submit button and the two editable supplier fields.
    private Button add_button;
    private EditText name;
    private EditText website;
    // Values parsed out of the supplier payload passed in via arguments.
    private String name_string;
    private String website_string;
    private String key_string;          // datastore key of the supplier being edited
    private String supplier_data;       // raw "name\nwebsite\nkey" payload
    private String USER_ID = "";
    private String TOKEN="";
    // Label that displays the server's response after submission.
    TextView textToSet;
    // Required empty public constructor for fragment re-instantiation.
    public EditSupplierFragment() {
    }
    /**
     * Unpacks the supplier payload, auth token and user id that the hosting
     * activity passed in via the "DATA_ARRAY" arguments bundle.
     */
    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        // get passed Token and Chem Data from Activity
        Bundle bundle = this.getArguments();
        String strings[]= bundle.getStringArray("DATA_ARRAY");
        supplier_data = strings[0];
        TOKEN = strings[1];
        USER_ID = strings[2];
        // NOTE(review): logged at ERROR level although it is debug output —
        // the token/user id also end up in the log; confirm this is intended.
        Log.e(LOM, "chem_data from SuppFragment in EditSuppFrag: " + supplier_data + "\n" + TOKEN + "\n" + USER_ID);
    }
    /**
     * Checks whether the device currently has (or is acquiring) network
     * connectivity.
     * NOTE(review): getActiveNetworkInfo() is deprecated on modern Android —
     * confirm the project's target SDK before migrating to NetworkCapabilities.
     *
     * @return true when a network is connected or connecting
     */
    public boolean isConnected(){
        boolean isConnected = false;
        Context context = getActivity();
        ConnectivityManager cm =
                (ConnectivityManager)context.getSystemService(Context.CONNECTIVITY_SERVICE);
        NetworkInfo activeNetwork = cm.getActiveNetworkInfo();
        isConnected = activeNetwork != null &&
                activeNetwork.isConnectedOrConnecting();
        return isConnected;
    }
public static boolean hasText(EditText editText) {
final String REQUIRED_MSG = "Text Required";
String text = editText.getText().toString().trim();
editText.setError(null);
// length 0 means there is no text
if (text.length() == 0) {
editText.setError(REQUIRED_MSG);
return false;
}
return true;
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
View view = inflater.inflate(R.layout.edit_supplier, container, false);
// create button
add_button = (Button) view.findViewById(R.id.submit_supp_btn);
add_button.setOnClickListener(this);
//http://stackoverflow.com/questions/22350683/
String[] tokens = supplier_data.split("\n");
Log.d(LOM, "\n\nSupplier_data :" + supplier_data);
name_string = tokens[0];
website_string = tokens[1];
key_string = tokens[2];
// set EditText values
name = (EditText) view.findViewById(R.id.supp_name);
name.setText(name_string, TextView.BufferType.EDITABLE);
website = (EditText) view.findViewById(R.id.supp_website);
website.setText(website_string, TextView.BufferType.EDITABLE);
// assign text to return to id, then set it in OnPostExecute()
textToSet = (TextView) view.findViewById(R.id.return_text);
return view;
}
public String createParams(String name, String website)
{
String params = "name=" + name + "&" + "website=" + website;
return params;
}
@Override
public void onClick(View v) {
if(isConnected()){
if (hasText(name)&& hasText(website)){
String NAME = name.getText().toString();
String WEBSITE = website.getText().toString();
String URL = "http://cs496-final-proj.appspot.com/supplier/"+key_string;
Log.d(LOM, "Submit clicked :" + NAME + " " + WEBSITE + key_string);
String params ="";
if(Patterns.WEB_URL.matcher(WEBSITE).matches()){
params = createParams(NAME, WEBSITE);
ConnectoinTask supplierTask = new ConnectoinTask();
supplierTask.execute(URL, params,"PUT");
/** Option to automatically return on previous screen
FragmentManager manager = getActivity().getSupportFragmentManager();
FragmentTransaction trans = manager.beginTransaction();
trans.remove(this);
trans.commit();
manager.popBackStack();
*/
}//if
else {
Toast toast = Toast.makeText(this.getActivity(),
"URL is NOT valid! Try again!", Toast.LENGTH_LONG);
View view = toast.getView();
view.setBackgroundResource(R.drawable.red_toast);
//TextView text = (TextView) view.findViewById(android.R.id.message);
/*here you can do anything with text*/
toast.show();
//Toast.makeText(this.getActivity(),
// "URL is NOT valid! Try again!", Toast.LENGTH_LONG).show();
}
}//if
}
else
Toast.makeText(this.getActivity(),
"No internet connection!\n" +
"Turn on connection and continue", Toast.LENGTH_LONG).show();
}//onClick()
/**
* ConnectoinTask extends AsyncTask()
*
* Creates network connection to internet using parallel thread
* separate from main UI thread. doInBackground() function creates thread
* where does all the job.
*
* onPostExecute() runs in main UI thread, receives data from doInBackground()
* through parameter
*
* http://developer.android.com/reference/android/os/AsyncTask.html
*/
public class ConnectoinTask extends AsyncTask<Object, Void, String> {
// get the name of the name of class
private final String LOG_TAG = ConnectoinTask.class.getSimpleName();
/**
* getSupplierDataFromJson()
* Parses JSON to get name, website and key
* parameters: JSON string
* return: String of suppliers
*/
private String getSupplierDataFromJson(String suppliersJsonStr)
throws JSONException {
// These are the names of the JSON objects that need to be extracted.
final String SUPPLIERS = "suppliers";
final String NAME = "name";
final String WEBSITE = "website";
final String KEY = "key";
// Raw JSON String to JSON object
JSONObject suppliersJson = new JSONObject(suppliersJsonStr);
// String array to save each supplier
String resultStr = "";
// Strings to save data
String name;
String website;
String key;
// Get the JSON object representing the supplier
name = suppliersJson.getString(NAME);
website = suppliersJson.getString(WEBSITE);
key = suppliersJson.getString(KEY);
resultStr = name;// + "\n" + website + "\n" + key;
Log.v(LOG_TAG, "Supplier entry: " + resultStr);
return resultStr;
}
@Override
protected String doInBackground(Object... params) {
// If there's not enough params - return.
if (params.length < 3) {
return null;
}
String url_string = (String) params[0];
String name_value = (String) params[1];
String method = (String) params[2];
// create HttpURLConnection object for connection.
HttpURLConnection urlConnection = null;
// create Buffer object to save the data stream.
BufferedReader reader = null;
// Will contain the raw JSON response as a string.
String suppliersJsonStr = null;
try
{
URL url = new URL(url_string);
Log.v(LOG_TAG, "Built URI " + url_string);
String credentials = USER_ID + ":" + TOKEN;
Log.v(LOG_TAG, "!!!Built credentials!!!" + credentials);
String base64EncodedCredentials = Base64.encodeToString(credentials.getBytes(), Base64.NO_WRAP); //or .DEFAULT
// Create the request to cloud and open the connection
urlConnection = (HttpURLConnection) url.openConnection();
urlConnection.setRequestMethod(method);
urlConnection.setDoInput(true);
urlConnection.setDoOutput(true);
urlConnection.setRequestProperty("Authorization", "basic " + base64EncodedCredentials);
urlConnection.connect();
// http://stackoverflow.com/questions/9767952/how-to-add-parameters-to-httpurlconnection-using-post
// http://developer.android.com/reference/java/net/HttpURLConnection.html
OutputStream outStream = urlConnection.getOutputStream();
BufferedWriter writer = new BufferedWriter(
new OutputStreamWriter(outStream, "UTF-8"));
writer.write(name_value);
writer.flush();
writer.close();
outStream.close();
// Read the input stream into a String
InputStream inputStream = urlConnection.getInputStream();
StringBuffer buffer = new StringBuffer();
if (inputStream == null) {
// Nothing to do.
return null;
}
reader = new BufferedReader(new InputStreamReader(inputStream));
// get new line
String line;
while ((line = reader.readLine()) != null) {
buffer.append(line + "\n");
}
// Stream was empty. No point in parsing.
if (buffer.length() == 0) {
return null;
}
// save as a String
suppliersJsonStr = buffer.toString();
Log.v(LOG_TAG, "Supplier string: " + suppliersJsonStr);
}//try
catch (IOException e) {
Log.e(LOG_TAG, "Error ", e);
return null;
}//catch
finally {
if (urlConnection != null)
urlConnection.disconnect();
if (reader != null) {
try {
reader.close();
} catch (final IOException e) {
Log.e(LOG_TAG, "Error closing stream", e);
}//catch
}//if
}//finally
// parse JSON and return string
try {
return getSupplierDataFromJson(suppliersJsonStr);
} catch (JSONException e) {
Log.e(LOG_TAG, e.getMessage(), e);
e.printStackTrace();
}//catch
return null;
}//doInBackground
@Override
protected void onPostExecute(String result) {
if (result != null) {
result = "Edited Supplier: "+result;
name.setText("");
website.setText("");
//textToSet.setText(result);
Toast toast = Toast.makeText(getActivity(), result, Toast.LENGTH_LONG);
View view = toast.getView();
view.setBackgroundResource(R.drawable.red_toast);
toast.show();
}
}
}
}
|
<filename>Documentation/_transpose_test_impl_8hpp.js
// Doxygen-generated navigation index for TransposeTestImpl.hpp: each
// entry pairs a documented symbol name with its anchor in the generated
// .xhtml page. Machine-written file — do not edit by hand.
var _transpose_test_impl_8hpp =
[
[ "SimpleTransposeTest", "_transpose_test_impl_8hpp.xhtml#a6eaaa77532584d5c04fbaec94e630ded", null ],
[ "SimpleTransposeTestImpl", "_transpose_test_impl_8hpp.xhtml#a21dbaaf0ccf8eea33ab53f32dbb210c5", null ],
[ "TransposeValueSet1Test", "_transpose_test_impl_8hpp.xhtml#a8f4e38a297e8b835fc0ff29463eaf654", null ],
[ "TransposeValueSet2Test", "_transpose_test_impl_8hpp.xhtml#a8545635c84fc6de487df01e0dbb8a1d6", null ],
[ "TransposeValueSet3Test", "_transpose_test_impl_8hpp.xhtml#a0545d125bac84b2e1a6b952bc4f3e20d", null ]
];
# Print an ASCII-art banner. The delimiter is quoted ("EOF") so the art
# is emitted verbatim — no parameter/command substitution happens inside.
# NOTE(review): the heredoc body is whitespace-sensitive; do not reflow.
cat << "EOF"
.
. !\ _
l\/ ( /(_
_ \`--" _/ .
\~") (_,/)
_)/. ,\,/
_____,-"~ \ / "~"-._____
,-~" "~-. . " . ,-~" "~-.
,^ ^. `. .' ,^ ^.
/ \ ^ / \
Y___________________Y Y___________________Y
| |^~"|^ _ ^|"~^| | | |"~"|^ _ ^|"~"| |
| ! l (_) ! ! l | ! l (_) ! ! |
l \ `\.___,/' / ! l \ `\.___,/' / !
\ ^. ,^ /! !\ ^. ,^ /
^. ~-------~ ,^\`v-v'/^. ~-------~ ,^
_)~-._______,-~ }---{ ~-._______,-~(_
.--"~ ,-^7' / \ `Y^-, ~"--.
/ (_,/ ,/' `\. \._) ___ \
\_____.,--"~~~"--..,__ ___,..--<"~ ~"-.,___/
/ ( __,--~ _.._""~~~~"" ,-" "-.`\ /~.-"
`._"--~_,.--"~ \ / \ `---' /
"~"" \ / "-.__,/
`L ]'
l !
j___L
(_____)
| |
| |
EOF
echo -e "\e[1m\e[32m _ ______ \e[33m_______\e[32m _
| (_____ \ \e[33m(_______)\e[32m _ _ | |
| |_____) ) _______ _| |_ _| |_ _____ ____| | _
| | ____/ | ___ (_ _|_ _|____ |/ ___) |_/ )
| | | | | | | | |_ | |_/ ___ ( (___| _ (
|_|_| |_| |_| \__) \__)_____|\____)_| \_)\e[0m\e[1m
| | \e[31m____________________| |____________________\e[0m\e[31m
|
package com.littlejenny.gulimall.coupon.service;
import com.baomidou.mybatisplus.extension.service.IService;
import com.littlejenny.common.utils.PageUtils;
import com.littlejenny.gulimall.coupon.entity.SkuFullReductionEntity;
import java.util.Map;
/**
 * Service layer for product "full reduction" (spend-threshold discount)
 * information.
 *
 * @author littlejenny
 * @email <EMAIL>
 * @date 2021-07-16 17:26:22
 */
public interface SkuFullReductionService extends IService<SkuFullReductionEntity> {
/**
 * Returns one page of full-reduction records.
 *
 * @param params paging/filter parameters — presumably page, limit and
 *               search keys per the PageUtils convention; TODO confirm
 *               the expected keys against the implementation
 * @return a PageUtils wrapper holding the requested page
 */
PageUtils queryPage(Map<String, Object> params);
}
|
<filename>Source/Bellz/Enemy.h
// All rights reserved, <NAME> 2016 http://www.mamoniem.com/
#pragma once
#include "GameFramework/Character.h"
#include "Enemy.generated.h"
/**
 * Melee enemy character: sphere triggers on the body and both hands,
 * pawn sensing (sight/hearing), a behavior tree asset, and simple
 * health/attack state exposed to Blueprints.
 */
UCLASS()
class BELLZ_API AEnemy : public ACharacter
{
GENERATED_BODY()
//Trigger wrapping the enemy's body
UPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = Triggers, meta = (AllowPrivateAccess = "true"))
class USphereComponent* bodySphereTrigger;
//Trigger on the left hand, used to detect melee overlaps
UPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = Triggers, meta = (AllowPrivateAccess = "true"))
class USphereComponent* leftHandTrigger;
//Trigger on the right hand, used to detect melee overlaps
UPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = Triggers, meta = (AllowPrivateAccess = "true"))
class USphereComponent* rightHandTrigger;
public:
//The constructor
AEnemy();
//Override the PostInitializeComponents()
virtual void PostInitializeComponents() override;
// Called when the game starts or when spawned
virtual void BeginPlay() override;
// Called every frame
virtual void Tick( float DeltaSeconds ) override;
// Called to bind functionality to input
virtual void SetupPlayerInputComponent(class UInputComponent* InputComponent) override;
//The health of the enemy
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Enemy Behavior")
float TotalHealth;
//The range for the enemy attack
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Enemy Behavior")
float AttackRange;
//The power of the enemy attacks
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Enemy Behavior")
float AttackDamage;
//Check if the enemy is dead or alive
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Enemy Behavior")
bool IsDead;
//Whether the enemy is currently performing an attack
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Enemy Behavior")
bool IsAttacking;
//The sensing component used to see or hear the player
UPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = "Enemy AI")
class UPawnSensingComponent* PawnSensor;
//The used BT with that enemy
UPROPERTY(EditAnywhere, Category = "Enemy AI")
class UBehaviorTree* EnemyBehaviorTree;
//Perform attack
UFUNCTION(BlueprintCallable, Category = "Enemy AI")
void OnPerformAttack();
//Called just before an attack begins (wind-up)
UFUNCTION(BlueprintCallable, Category = "Enemy AI")
void OnPreAttack();
//Perform attack done
UFUNCTION(BlueprintCallable, Category = "Enemy AI")
void OnPostAttack();
//Hear the player's noise using the sensing component
UFUNCTION()
void OnHearNoise(APawn *OtherActor, const FVector &Location, float Volume);
//See the player's by sight using the sensing component
UFUNCTION()
void OnSeePawn(APawn *OtherPawn);
//Overlap handler shared by the hand triggers
UFUNCTION()
void OnHandTriggerOverlap(class AActor* OtherActor, class UPrimitiveComponent* OtherComp, int32 OtherBodyIndex, bool bFromSweep, const FHitResult & SweepResult);
//float AccumulatedFiretime;
//bool IsFirstPerson(); //virtual bool IsFirstPerson() const override;
virtual void FaceRotation(FRotator NewRotation, float DeltaTime = 0.f) override;
//Internal attack flag — NOTE(review): overlaps with IsAttacking above;
//confirm whether both are needed.
bool Attacking;
/** Returns sphere trigger subobject **/
FORCEINLINE class USphereComponent* GetBodySphereTrigger() const { return bodySphereTrigger; }
};
|
<reponame>minuk8932/Algorithm_BaekJoon<filename>src/implementation/Boj14724.java
package implementation;
import java.io.InputStreamReader;
import java.util.StringTokenizer;
import java.io.BufferedReader;
/**
*
* @author exponential-e
* 백준 14724번: 관리자는 누구?
*
* @see https://www.acmicpc.net/problem/14724/
*
*/
/**
 * BOJ 14724 ("Who is the admin?"): reads N, then 9 rows of N member
 * counts, and prints the club that owns the single largest count.
 */
public class Boj14724 {
    // Club names, in the order their rows appear on standard input.
    private static final String[] GROUP = {"PROBRAIN", "GROW", "ARGOS", "ADMIN", "ANT", "MOTION", "SPG", "COMON", "ALMIGHTY"};

    public static void main(String[] args) throws Exception {
        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
        final int n = Integer.parseInt(br.readLine());

        int best = 0;       // largest single count seen so far
        int bestGroup = 0;  // row index owning that count
        for (int row = 0; row < 9; row++) {
            StringTokenizer tokens = new StringTokenizer(br.readLine());
            for (int col = 0; col < n; col++) {
                int count = Integer.parseInt(tokens.nextToken());
                // Strictly greater: on ties the earlier row wins,
                // matching the original behavior.
                if (count > best) {
                    best = count;
                    bestGroup = row;
                }
            }
        }
        System.out.println(GROUP[bestGroup]);
    }
}
|
<reponame>glensand/shared_whiteboard
/* Copyright (C) 2020 - 2021 <NAME> - All Rights Reserved
* You may use, distribute and modify this code under the
* terms of the MIT license.
*
* You should have received a copy of the MIT license with
* this file. If not, please write to: <EMAIL>, or visit : https://github.com/glensand/visual-studio-compatibility
*/
#pragma once
#include <boost/asio.hpp>
#include <memory>
// TODO:: move class declaration (aka ISession) to the specified interface
// inherit implementation from interface that interface
namespace Net
{
/**
* \brief async operations callback alias
*/
using OnActionCallback = std::function<void(size_t)>;
/**
* \brief Thin synchronous/asynchronous TCP session wrapper around a
* boost::asio socket, with an "initialized" flag set once the socket
* is connected. NOTE(review): this header uses std::function and
* std::vector but only includes <boost/asio.hpp> and <memory>; they
* compile transitively today — consider including <functional> and
* <vector> explicitly.
*/
class BoostTcpSession final
{
/**
* \brief Recommended asio socket alias
*/
using Socket = boost::asio::ip::tcp::socket;
/**
* \brief Recommended asio async operations handler alias
*/
using Service = boost::asio::io_service;
public:
BoostTcpSession(Service& service);
~BoostTcpSession() = default;
/**
* \brief delegate to the asio::write_some
* \param data - data to be send
* \param count - number of bytes to be send
* \return number of bytes actually written
*/
size_t WriteSome(const void* data, size_t count);
/**
* \brief delegate to the asio::write (writes all `count` bytes)
* \param data buffer to send
* \param count number of bytes to send
* \return number of bytes written
*/
size_t Write(const void* data, size_t count);
/**
* \brief delegate to the asio::async_write
* \param data buffer to send (must outlive the async operation)
* \param count number of bytes to send
* \param errorCallback invoked with the transferred byte count
*/
void WriteAsync(const void* data, size_t count, const OnActionCallback& errorCallback);
/**
* \brief delegate to the socket's read_some; appends what was read
* to the given stream
* \param stream destination for the received bytes
* \return number of bytes read
*/
size_t ReadSome(std::ostream& stream);
/**
* \brief delegate to the asio::read
* \param stream destination for the received bytes
* \param count exact number of bytes to read
* \return number of bytes read
*/
size_t Read(std::ostream& stream, size_t count);
/** delegate to the asio::async_read_some, calls callback func if success
* \brief waits for incoming data without blocking the caller
* \param callback invoked with the number of bytes received
*/
void AwaitData(const OnActionCallback& callback);
/** writes recently received data in the passed stream
* \brief copies up to `count` buffered bytes into the stream
* \param count number of bytes to copy out of the internal buffer
*/
void Receive(std::ostream&, size_t count) const;
/**
* \brief sets initialization flag, it means the entire socket is connected
* successfully
* \param init new flag value
*/
void SetInitialized(bool init);
/** gets initialization flag state
* \brief
* \return true once the socket has been marked connected
*/
bool IsInitialized() const;
/** gets row asio socket
* \brief
* \return mutable reference to the underlying asio socket
*/
Socket& GetSocket();
/** gets row asio socket
* \brief
* \return const reference to the underlying asio socket
*/
const Socket& GetSocket() const;
private:
// Underlying asio TCP socket.
Socket m_socket;
// Set via SetInitialized() once the socket is connected.
bool m_isInitialized;
// Scratch buffer holding the most recently received bytes.
std::vector<char> m_buffer;
};
/**
* \brief recommended boost session alias
*/
using Session = std::unique_ptr<BoostTcpSession>;
}
|
package oidc.management.controller;
import java.io.IOException;
import java.util.List;
import java.util.Optional;
import javax.servlet.http.HttpServletRequest;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.querydsl.core.types.Predicate;
import oidc.management.service.ServiceAccountService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.Pageable;
import org.springframework.data.querydsl.binding.QuerydslPredicate;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.security.access.annotation.Secured;
import org.springframework.security.access.prepost.PreAuthorize;
import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.validation.BeanPropertyBindingResult;
import org.springframework.validation.BindException;
import org.springframework.validation.BindingResult;
import org.springframework.validation.beanvalidation.SpringValidatorAdapter;
import org.springframework.web.bind.annotation.*;
import oidc.management.model.ServiceAccount;
/**
 * Service Account Controller
 *
 * @author <NAME>
 * @since 03-02-2022
 * @see ServiceAccount
 * @see ServiceAccountService
 * @see PasswordEncoder
 * @see ObjectMapper
 */
@RestController
@RequestMapping("/service-accounts")
public class ServiceAccountController {
    @Autowired
    private ServiceAccountService serviceAccountService;
    @Autowired
    private PasswordEncoder passwordEncoder;
    @Autowired
    private ObjectMapper mapper;
    @Autowired
    private SpringValidatorAdapter validator;

    /**
     * Get all service accounts.
     *
     * @return List of service accounts.
     */
    @Secured({"ROLE_OIDC_ADMIN"})
    @PreAuthorize("hasAuthority('SCOPE_read_service_account')")
    @GetMapping
    public List<ServiceAccount> index() {
        // Return list of service accounts
        return serviceAccountService.findAll();
    }

    /**
     * Get all service accounts, paged.
     *
     * @param pageable paging/sorting information
     * @param search   optional free-text filter
     * @return Page of service accounts.
     */
    @Secured({"ROLE_OIDC_ADMIN"})
    @PreAuthorize("hasAuthority('SCOPE_read_service_account')")
    @GetMapping("page")
    public Page<ServiceAccount> page(Pageable pageable, @RequestParam(value = "search", required = false) String search) {
        return this.serviceAccountService.findAll(pageable, search);
    }

    /**
     * Get a service account by id.
     *
     * @param id Service account id.
     * @return Service account, or 204 when it does not exist.
     */
    @Secured({"ROLE_OIDC_ADMIN"})
    @PreAuthorize("hasAuthority('SCOPE_read_service_account')")
    @GetMapping("{id}")
    public ResponseEntity<ServiceAccount> get(@PathVariable("id") String id) {
        // Get the optional holder
        Optional<ServiceAccount> optObject = this.serviceAccountService.findById(id);
        // Verify if the holder contains a value
        if (optObject.isPresent()) {
            // Return the service account
            return new ResponseEntity<>(optObject.get(), HttpStatus.OK);
        }
        // Return no content
        return new ResponseEntity<>(HttpStatus.NO_CONTENT);
    }

    /**
     * Creates a new service account.
     *
     * @param object The service account to create.
     * @return The created service account.
     * @throws BindException If the service account is not valid.
     */
    @Secured({"ROLE_OIDC_ADMIN"})
    @PreAuthorize("hasAuthority('SCOPE_create_service_account')")
    @PostMapping
    public ResponseEntity<ServiceAccount> save(@RequestBody ServiceAccount object) throws BindException {
        // Remove the id so the store assigns a fresh one
        object.setId(null);
        // If a secret was supplied, store only its encoded form
        if (object.getClientSecret() != null) {
            object.setClientSecret(this.passwordEncoder.encode(object.getClientSecret()));
        }
        // Validate the account and fail with a 400-style BindException
        BindingResult result = new BeanPropertyBindingResult(object, "serviceAccount");
        this.validator.validate(object, result);
        if (result.hasErrors()) {
            throw new BindException(result);
        }
        // Save and echo back the created account
        this.serviceAccountService.save(object);
        return new ResponseEntity<>(object, HttpStatus.CREATED);
    }

    /**
     * Updates a service account (partial update via Jackson merge).
     *
     * @param id The service account id.
     * @param request The request.
     * @return The updated service account, or 400 when the id is unknown.
     * @throws IOException If the request body cannot be parsed.
     * @throws BindException If the service account is not valid.
     */
    @Secured({"ROLE_OIDC_ADMIN"})
    @PreAuthorize("hasAuthority('SCOPE_update_service_account')")
    @PatchMapping("{id}")
    public ResponseEntity<ServiceAccount> update(@PathVariable("id") String id, HttpServletRequest request) throws IOException, BindException {
        // Get the optional holder
        Optional<ServiceAccount> optional = this.serviceAccountService.findById(id);
        // Verify if the holder contains a value
        if (optional.isPresent()) {
            // Get the service account
            ServiceAccount object = optional.get();
            // Remember the previous (already encoded) secret so we can
            // tell whether the request supplied a new one
            String password = object.getClientSecret();
            // Merge the PATCH body onto the existing entity
            object = mapper.readerForUpdating(object).readValue(request.getReader());
            // Encode only when a new, different secret was supplied.
            // BUG FIX: the old code called password.equals(...) first and
            // threw NullPointerException for accounts whose stored secret
            // was null; check the incoming secret for null instead.
            if (object.getClientSecret() != null && !object.getClientSecret().equals(password)) {
                object.setClientSecret(this.passwordEncoder.encode(object.getClientSecret()));
            }
            // Validate the merged account
            BindingResult result = new BeanPropertyBindingResult(object, "serviceAccount");
            this.validator.validate(object, result);
            if (result.hasErrors()) {
                throw new BindException(result);
            }
            // Save the service account
            this.serviceAccountService.save(object);
            // Return the service account
            return new ResponseEntity<>(object, HttpStatus.OK);
        }
        // Return bad request
        return new ResponseEntity<>(HttpStatus.BAD_REQUEST);
    }

    /**
     * Deletes a service account.
     *
     * @param id The service account id.
     * @return 200 on completion (idempotent — unknown ids also return 200).
     */
    @Secured({"ROLE_OIDC_ADMIN"})
    @PreAuthorize("hasAuthority('SCOPE_delete_service_account')")
    @DeleteMapping("{id}")
    public ResponseEntity<ServiceAccount> delete(@PathVariable("id") String id) {
        // Delete the service account by it's id
        this.serviceAccountService.deleteById(id);
        // Return ok
        return new ResponseEntity<>(HttpStatus.OK);
    }
}
|
#!/bin/bash
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Applies every base YAML bundle needed by the sample jobs, in the order
# the samples expect. All resources are created in the default namespace.
pushd samples/defaultresources
for resource_dir in \
    batchpriority \
    batchcostmodel \
    batchbudget \
    batchjobconstraint \
    batchqueue \
    batchusercontext; do
  kubectl apply -R -f "${resource_dir}/"
done
popd
|
import React, { Component } from 'react';
import { Paper, TextField, Grid, Button, Typography } from '@material-ui/core';
import './signup.css'
class RegForm extends Component {
state = { data:null,mail:false,names:false,surname:false,pass:false,confirm:false }
styles={
paper:{
paddingTop:40,
paddingBottom:40,
borderRadius: 10,
backgroundColor:"rgba(255,255,255,0.6)"
},
outterGrid:{
paddingTop: 40,
// backgroundImage: `url("${Bg}")`
}
}
sgnclick=async(e)=>{
e.preventDefault();
var fname=document.getElementById('outlined-fname').value
var lname=document.getElementById('outlined-lname').value
var mail=document.getElementById('outlined-email').value
var pass=document.getElementById('outlined-pass').value
var cpass=document.getElementById('outlined-cpass').value
console.log(fname)
console.log(lname)
console.log(mail)
console.log(cpass)
console.log(pass)
//firstname
console.log(cpass!=pass)
if(fname=='')
this.setState({names:false})
else
this.setState({names:true})
//lname
if(lname=='')
this.setState({surname:false})
else
this.setState({surname:true})
//mail
if(mail==''||!mail.includes('@')||!mail.includes('.'))
this.setState({mail:false})
else
this.setState({mail:true})
//pass
if(pass=='')
this.setState({pass:false})
else
this.setState({pass:true})
//repass
if(cpass!=pass)
this.setState({confirm:false})
else
this.setState({confirm:true})
if(this.state.mail==true&&this.state.names==true&&this.state.surname==true&&this.state.pass==true&&this.state.confirm==true)
{
console.log("inside")
const res=await fetch('http://localhost:3001/signup',{ ///dont change to axios
headers : {
'Content-Type': 'application/json',
'Accept': 'application/json'
},
method:"POST",
body:JSON.stringify({"fname":fname,"lname":lname,"mail":mail,"pass":pass})
})
const data=await res.json()
if(data!=false){
this.setState({data})
console.log(this.state.data)
this.props.history.push('/')
}
else
alert('Already exists')
// this.props.history.push('/Error')
}
else
{
// if(this.state.names==false)
// document.querySelector("#wrongsub").textContent="Invalid FirstName"
// else if(this.state.surname==false)
// document.querySelector("#wrongsub").textContent="Invalid LastName"
if(this.state.mail==false)
document.querySelector("#wrongsub").textContent="Invalid E-Mail"
else if(this.state.pass==false)
document.querySelector("#wrongsub").textContent="Invalid Password"
else if(this.state.confirm==false)
document.querySelector("#wrongsub").textContent="Passwords do not match"
else
document.querySelector("#wrongsub").textContent="Invalid Attributes"
}
}
mailchange=(e)=>{
if(e.target.value.includes('@')&&e.target.value.includes('.'))
{
this.setState({
name:true
})
}
else
{
this.setState({
name:false
})
}
}
render() {
return (
<React.Fragment >
<div id="regbg">
<Grid container direction="row" justify="center" style={this.styles.outterGrid} >
<Grid item xs={6} style={{marginBottom:"2vw"}}>
<Paper style={this.styles.paper} >
<form action="">
<Grid container direction="row" justify="center">
<Typography variant="overline" style={{fontSize:"2.5vw", color: "#607d8b"}} >
Register
</Typography>
<Grid item xs={12}>
</Grid>
<Grid item xs={6}>
<TextField
fullWidth
autoFocus
required
style={{marginTop:"3vw"}}
id="outlined-fname"
label="First Name"
// value={this.state.name}
// onChange={this.namechange}
margin="normal"
variant="outlined"
/>
</Grid>
<Grid item xs={12}>
</Grid>
<Grid item xs={6}>
<TextField
fullWidth
required
id="outlined-lname"
label="Last Name"
// onChange={this.surnamechange}
margin="normal"
variant="outlined"
/>
</Grid>
<Grid item xs={12}>
</Grid>
<Grid item xs={6}>
<TextField
fullWidth
required
id="outlined-email"
label="Email"
onChange={this.mailchange}
margin="normal"
variant="outlined"
/>
</Grid>
<Grid item xs={12}>
</Grid>
<Grid item xs={6}>
<TextField
fullWidth
required
id="outlined-pass"
label="Password"
type="password"
// onChange={this.passwordchange}
margin="normal"
variant="outlined"
/>
</Grid>
<Grid item xs={12}>
</Grid>
<Grid item xs={6}>
<TextField
fullWidth
required
id="outlined-cpass"
type="password"
label="Confirm Password"
// onChange={this.repasswordchange}
margin="normal"
variant="outlined"
/>
</Grid>
<Grid item xs={12}>
</Grid>
<Grid container justify="center">
<Grid item xs={12}>
<Typography id="wrongsub" variant="subtitle2" style={{fontSize:"1.5vw",marginLeft:"300px",marginTop:"20px",marginBottom:"-20px"}}></Typography>
</Grid>
</Grid>
<Grid item>
<Button onClick={this.sgnclick} variant="contained" color="primary" style={{marginTop:"3vw"}} >
Register
</Button>
</Grid>
</Grid>
</form>
</Paper>
</Grid>
</Grid>
</div>
</React.Fragment>
);
}
}
//
export default RegForm; |
def find_median(list_num):
    """Return the median of a list of numbers.

    A sorted copy is used internally, so the caller's list is left
    unmodified. For an even-length list the median is the mean of the
    two middle values. An empty list raises IndexError (as before).
    """
    ordered = sorted(list_num)
    n = len(ordered)
    mid = n // 2
    if n % 2 == 0:
        # BUG FIX: the middle pair of a 0-indexed list lives at indices
        # mid-1 and mid; the original averaged mid and mid+1, which is
        # off by one (e.g. [1, 2, 3, 4] yielded 3.5 instead of 2.5).
        return (ordered[mid - 1] + ordered[mid]) / 2
    # Odd length: the single middle element.
    return ordered[mid]
<gh_stars>1-10
# Training/eval configuration for a type-aware outfit-compatibility
# recommender on the Polyvore Outfits (disjoint) split.
# NOTE(review): plain module-level assignments in the mmcv-style config
# convention — presumably consumed by a runner via exec/import; confirm.
import os
# model settings
arch = 'resnet'
img_size = (224, 224)
model = dict(
    type='TypeAwareRecommender',
    backbone=dict(type='ResNet', setting='resnet18'),
    global_pool=dict(
        type='GlobalPooling',
        inplanes=(7, 7),
        pool_plane=(2, 2),
        inter_channels=[512],
        outchannels=256),
    type_specific_net=dict(
        type='TypeSpecificNet',
        learned=False,
        n_conditions=66,
        rand_typespaces=False,
        use_fc=True,
        l2_embed=False,
        dim_embed=256,
        prein=False),
    triplet_net=dict(
        type='TripletNet',
        text_feature_dim=6000,
        embed_feature_dim=256,
        loss_vse=dict(type='L1NormLoss', loss_weight=5e-3, average=False),
        loss_triplet=dict(
            type='MarginRankingLoss', margin=0.3, loss_weight=1),
        loss_selective_margin=dict(
            type='SelectiveMarginLoss', margin=0.3, loss_weight=5e-5),
        learned_metric=True),
    loss_embed=dict(type='L2NormLoss', loss_weight=5e-4),
    loss_mask=dict(type='L1NormLoss', loss_weight=5e-4),
    pretrained='checkpoint/resnet18.pth')
# dataset setting
dataset_type = 'PolyvoreOutfitDataset'
data_root = 'data/Polyvore'
# ImageNet channel statistics used for input normalization.
img_norm = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=1,
    drop_last=False,
    train=dict(
        type=dataset_type,
        img_path=os.path.join(data_root, 'images'),
        annotation_path=os.path.join(data_root, 'disjoint/train.json'),
        meta_file_path=os.path.join(data_root, 'polyvore_item_metadata.json'),
        img_size=img_size,
        text_feat_path=os.path.join(data_root,
                                    'disjoint/train_hglmm_pca6000.txt'),
        text_feat_dim=6000,
        compatibility_test_fn=os.path.join(data_root,
                                           'disjoint/compatibility_train.txt'),
        fitb_test_fn=os.path.join(data_root,
                                  'disjoint/fill_in_blank_train.json'),
        typespaces_fn=os.path.join(data_root, 'disjoint/typespaces.p'),
        train=True),
    test=dict(
        type=dataset_type,
        img_path=os.path.join(data_root, 'images'),
        annotation_path=os.path.join(data_root, 'disjoint/test.json'),
        meta_file_path=os.path.join(data_root, 'polyvore_item_metadata.json'),
        img_size=img_size,
        text_feat_path=None,
        text_feat_dim=6000,
        compatibility_test_fn=os.path.join(data_root,
                                           'disjoint/compatibility_test.txt'),
        fitb_test_fn=os.path.join(data_root,
                                  'disjoint/fill_in_blank_test.json'),
        typespaces_fn=os.path.join(data_root, 'disjoint/typespaces.p'),
        train=False),
    val=dict(
        type=dataset_type,
        img_path=os.path.join(data_root, 'images'),
        annotation_path=os.path.join(data_root, 'disjoint/valid.json'),
        meta_file_path=os.path.join(data_root, 'polyvore_item_metadata.json'),
        img_size=img_size,
        text_feat_path=None,
        text_feat_dim=6000,
        compatibility_test_fn=os.path.join(data_root,
                                           'disjoint/compatibility_valid.txt'),
        fitb_test_fn=os.path.join(data_root,
                                  'disjoint/fill_in_blank_valid.json'),
        typespaces_fn=os.path.join(data_root, 'disjoint/typespaces.p'),
        train=False))
# optimizer
optimizer = dict(type='Adam', lr=5e-5)
optimizer_config = dict()
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.1,
    step=[8, 10])
checkpoint_config = dict(interval=1)
log_config = dict(
    interval=10, hooks=[
        dict(type='TextLoggerHook'),
    ])
# runtime / checkpointing settings
start_epoch = 0
total_epochs = 10
gpus = dict(train=[0], test=[0])
work_dir = 'checkpoint/FashionRecommend/TypeAware'
print_interval = 20  # interval to print information
save_interval = 5
init_weights_from = 'checkpoint/resnet18.pth'
resume_from = None
load_from = None
workflow = [('train', 10)]
dist_params = dict(backend='nccl')
log_level = 'INFO'
|
<filename>services/publish.webmaker.org/lib/remix.js
"use strict";
const Hoek = require(`hoek`);
const Url = require(`url`);
// Absolute URL of the remix client script, configured via environment.
const REMIX_SCRIPT = process.env.REMIX_SCRIPT;
// Fail fast at load time if the deployment forgot to configure it.
Hoek.assert(REMIX_SCRIPT, `Must define location of the remix script`);
// Pre-parse the URL so resourceHost can be derived once at startup.
const remixUrl = Url.parse(REMIX_SCRIPT);
const slashes = remixUrl.slashes ? `//` : ``;
// Prepend one <meta name="data-remix-KEY" content="VALUE"> tag per entry
// of `metadata` immediately after the document's opening <head> tag.
// If the HTML has no <head>, the input is returned unchanged.
function injectMetadata(html, metadata) {
  const metaTags = Object.keys(metadata)
    .map((key) => `<meta name="data-remix-${key}" content="${metadata[key]}">\n`)
    .join(``);
  return html.replace(/<head>/, `$&` + metaTags);
}
// Insert a <script> tag loading the remix client (module-level
// REMIX_SCRIPT) immediately before the closing </head> tag.
function injectRemixScript(html) {
  const scriptTag = `<script src="${REMIX_SCRIPT}" type="text/javascript"></script>\n`;
  return html.replace(/<\/head/, scriptTag + `$&`);
}
// Inject both the remix <script> tag and the data-remix-* <meta> tags
// (from the `metadata` key/value object) into the given HTML source.
function inject(srcHtml, metadata) {
  const withScript = injectRemixScript(srcHtml);
  return injectMetadata(withScript, metadata);
}
// Public API: `inject` performs the combined injection; `resourceHost`
// exposes the origin (protocol + host) serving the remix script.
module.exports = {
  inject: inject,
  resourceHost: `${remixUrl.protocol}${slashes}${remixUrl.host}`
};
|
<filename>test.js
const defaults = require('./index');
module.exports = {
extends: defaults.extends,
rules: Object.assign({}, defaults.rules, {
'arrow-body-style': 'off',
'newline-per-chained-call': 'off',
'max-nested-callbacks': ['error', 5],
'no-undefined': 'off',
'no-magic-numbers': 'off',
'no-unused-expressions': 'off',
'max-lines': ['error', 600],
})
}
|
package com.yin.springboot.mybatis.domain;

import java.io.Serializable;

import lombok.Data;

/**
 * Product brand entity. Getters, setters, equals/hashCode and toString are
 * generated by Lombok's {@code @Data}.
 */
@Data
public class PmsBrand implements Serializable {
    private Long id;

    private String name;

    /**
     * First letter of the brand name.
     */
    private String firstLetter;

    private Integer sort;

    /**
     * Whether the brand is a manufacturer: 0 -> no; 1 -> yes.
     */
    private Integer factoryStatus;

    private Integer showStatus;

    /**
     * Number of products.
     */
    private Integer productCount;

    /**
     * Number of product comments.
     */
    private Integer productCommentCount;

    /**
     * Brand logo.
     */
    private String logo;

    /**
     * Large banner image for the brand zone.
     */
    private String bigPic;

    /**
     * Brand story.
     */
    private String brandStory;

    private static final long serialVersionUID = 1L;
}
"""
Algorithm to optimize a given dataset
"""


def optimize_dataset(dataset):
    """Return the records of *dataset* in ascending order.

    The previous implementation was a selection sort: it repeatedly scanned
    the whole dataset for the smallest unprocessed record, which is O(n^2).
    Python's built-in sort produces the identical ordering (it is stable,
    and the selection sort extracted equal values in index order) in
    O(n log n).

    Args:
        dataset: A sequence of mutually comparable records.

    Returns:
        A new list with the records sorted in ascending order; the input is
        left unmodified. An empty input yields an empty list.
    """
    return sorted(dataset)
#!/bin/sh
# CI helper: print toolchain versions, then run the step selected by the
# $BUILD environment variable (hlint | stack | cabal | cabal2).
set -ex

cabal --version
echo "$(ghc --version) [$(ghc --print-project-git-commit-id 2> /dev/null || echo '?')]"
stack --version

case $BUILD in
  hlint)
    # Fetch the hlint runner script for a later CI step.
    echo "Downloading hlint"
    curl -sSL https://raw.github.com/ndmitchell/hlint/master/misc/run.sh > hlint.sh
    chmod +x hlint.sh
    ;;
  stack)
    stack --no-terminal --skip-ghc-check setup
    stack --no-terminal --skip-ghc-check test --only-snapshot
    ;;
  cabal)
    # Reuse a cached, uncompressed package index when available.
    if [ -f $HOME/.cabal/packages/hackage.haskell.org/00-index.tar.gz ]
    then
      zcat $HOME/.cabal/packages/hackage.haskell.org/00-index.tar.gz > $HOME/.cabal/packages/hackage.haskell.org/00-index.tar
    fi
    # Retry the index update once on transient failure.
    cabal update -v || cabal update -v
    sed -i 's/^jobs:/-- jobs:/' ${HOME}/.cabal/config
    cabal install --only-dependencies --enable-tests --enable-benchmarks --dry -v > installplan.txt
    sed -i -e '1,/^Resolving /d' installplan.txt; cat installplan.txt
    # check whether current requested install-plan matches cached package-db snapshot
    if diff -u installplan.txt $HOME/.cabsnap/installplan.txt;
    then
      echo "cabal build-cache HIT";
      rm -rfv .ghc;
      cp -a $HOME/.cabsnap/ghc $HOME/.ghc;
      cp -a $HOME/.cabsnap/lib $HOME/.cabsnap/share $HOME/.cabsnap/bin $HOME/.cabal/;
    else
      echo "cabal build-cache MISS";
      rm -rf $HOME/.cabsnap;
      mkdir -p $HOME/.ghc $HOME/.cabal/lib $HOME/.cabal/share $HOME/.cabal/bin;
      cabal install --only-dependencies --enable-tests --enable-benchmarks;
    fi
    # snapshot package-db on cache miss
    if [ ! -d $HOME/.cabsnap ];
    then
      echo "snapshotting package-db to build-cache";
      mkdir $HOME/.cabsnap;
      cp -a $HOME/.ghc $HOME/.cabsnap/ghc;
      cp -a $HOME/.cabal/lib $HOME/.cabal/share $HOME/.cabal/bin installplan.txt $HOME/.cabsnap/;
    fi
    ;;
  cabal2)
    cabal v2-update -v
    sed -i 's/^jobs:/-- jobs:/' ${HOME}/.cabal/config
    cabal v2-build --only-dependencies --enable-tests --enable-benchmarks --dry
    ;;
esac
|
"use strict";

// Auto-generated icon definition: SVG tree for the SoundCloud social icon.
// The structure mirrors the SVG DOM (name / attribs / children) so it can
// be rendered by an icon component. Do not hand-edit the path data.
Object.defineProperty(exports, "__esModule", {
  value: true
});
exports.socialSoundcloud = void 0;
var socialSoundcloud = {
  "viewBox": "0 0 512 512",
  "children": [{
    "name": "path",
    "attribs": {
      "d": "M256,0C114.609,0,0,114.609,0,256s114.609,256,256,256s256-114.609,256-256S397.391,0,256,0z M256,472\r\n\tc-119.297,0-216-96.703-216-216S136.703,40,256,40s216,96.703,216,216S375.297,472,256,472z"
    },
    "children": []
  }, {
    "name": "g",
    "attribs": {},
    "children": [{
      "name": "g",
      "attribs": {},
      "children": [{
        "name": "g",
        "attribs": {},
        "children": [{
          "name": "path",
          "attribs": {
            "fill-rule": "evenodd",
            "clip-rule": "evenodd",
            "d": "M128.591,281.609c-2.875,13.5,5.344,32,12.781,38.391v-64\r\n\t\t\tC133.935,262.391,130.778,271.5,128.591,281.609z M347.341,250.188c-4.469,0-8.672,0.875-12.609,2.266\r\n\t\t\tc-1.969-33.672-31-60.453-66.812-60.453c-22.891,0-50.609,8.938-62.703,25.594V320h142.125c20.25,0,36.656-15.625,36.656-34.891\r\n\t\t\tC383.997,265.812,367.591,250.188,347.341,250.188z M179.685,320h12.766v-89.594h-12.766V320z M154.138,320h12.766v-76.812\r\n\t\t\th-12.766V320z"
          },
          "children": [{
            "name": "path",
            "attribs": {
              "fill-rule": "evenodd",
              "clip-rule": "evenodd",
              "d": "M128.591,281.609c-2.875,13.5,5.344,32,12.781,38.391v-64\r\n\t\t\tC133.935,262.391,130.778,271.5,128.591,281.609z M347.341,250.188c-4.469,0-8.672,0.875-12.609,2.266\r\n\t\t\tc-1.969-33.672-31-60.453-66.812-60.453c-22.891,0-50.609,8.938-62.703,25.594V320h142.125c20.25,0,36.656-15.625,36.656-34.891\r\n\t\t\tC383.997,265.812,367.591,250.188,347.341,250.188z M179.685,320h12.766v-89.594h-12.766V320z M154.138,320h12.766v-76.812\r\n\t\t\th-12.766V320z"
            },
            "children": []
          }]
        }]
      }]
    }]
  }]
};
exports.socialSoundcloud = socialSoundcloud;
/**
 * A circle defined by its radius.
 */
class Circle {
    /** Radius of this circle; fixed at construction time. */
    private final double radius;

    /**
     * Creates a circle with the given radius.
     *
     * @param radius the circle's radius
     */
    public Circle(double radius) {
        this.radius = radius;
    }

    /**
     * Computes the enclosed area.
     *
     * @return the area, {@code PI * radius * radius}
     */
    public double getArea() {
        return Math.PI * radius * radius;
    }
}
#!/bin/sh
# Build script for the d10f bootloader (IBL). Builds via make and can
# optionally boot the result over fastboot, flash it, or package it as a
# signed TWRP-flashable zip (and push/install that zip via adb).

# Terminal colors for status output.
red=$(tput setaf 1)
green=$(tput setaf 2)
yellow=$(tput setaf 3)
end=$(tput sgr0)

VERSION="$(git describe --tags)"
build_dir="../../../out/target/product/d10f/obj/BOOTLOADER_EMMC_OBJ"

print_usage() {
  echo "Usage: $0 [-h|-?|--help] [-b|--boot] [-f|--flash] [-c|--clean] [-z|--zip]"
  echo "--help: show this text"
  echo "--boot: create bootable image and boot it to device using fastboot boot without flashing"
  echo "--flash: flash built binary to device using fastboot flash aboot and reboot device to fastboot"
  echo "--clean: run make clean before building"
  echo "--zip: build flashable Recovery zip after building"
  echo "--install: build flashable Recovery zip and install it via TWRP"
}

# Transform long options to short ones
for arg in "$@"; do
  shift
  case "$arg" in
    "--help") set -- "$@" "-h" ;;
    "--boot") set -- "$@" "-b" ;;
    "--clean") set -- "$@" "-c" ;;
    "--flash") set -- "$@" "-f" ;;
    "--zip") set -- "$@" "-z" ;;
    "--install") set -- "$@" "-i" ;;
    *) set -- "$@" "$arg"
  esac
done

OPTIND=1
while getopts "hfcbzi" opt
do
  case "$opt" in
    "b") boot=true ;;
    "c") clean=true ;;
    "f") flash=true ;;
    "z") zip=true ;;
    "i") zip=true; install=true ;;
    "h") print_usage; exit 0 ;;
    *) print_usage >&2; exit 1 ;;
  esac
done
shift $(expr $OPTIND - 1) # remove options from positional parameters

if [ "$clean" = "true" ]; then
  rm -rf "$build_dir"
fi
mkdir -p "$build_dir"

touch dev/fbcon/fbcon.c # Force rebuilding it to make sure that version string is updated
make DEBUG=2 PROJECT=msm8226 BOOTLOADER_OUT="$build_dir" EMMC_BOOT=1 VERSION="$VERSION" -j3
if [ $? -gt 0 ]; then
  echo ""
  echo "${red}Build FAILED!${end}"
else
  echo ""
  echo "${green}Successfully built${end}"
  if [ "$zip" = "true" ]; then
    # Package the bootloader into a signed recovery-flashable zip.
    path=$(readlink -f $build_dir/../../)
    zipname="$path/IBL_$VERSION.zip"
    rm -rf /tmp/zip_template/ "$zipname"
    cp -r zip_template /tmp/
    cp "$build_dir/../../emmc_appsboot.mbn" "/tmp/zip_template/firmware-update/IBL_$VERSION.mbn"
    # Generate the recovery updater-script for this version.
    cat > /tmp/zip_template/META-INF/com/google/android/updater-script <<EOF
# ---- radio update tasks ----
ui_print("Installing IBL $VERSION...");
ifelse(msm.boot_update("main"), (
package_extract_file("firmware-update/tz.mbn", "/dev/block/platform/msm_sdcc.1/by-name/tz");
package_extract_file("firmware-update/sbl1.mbn", "/dev/block/platform/msm_sdcc.1/by-name/sbl1");
package_extract_file("firmware-update/rpm.mbn", "/dev/block/platform/msm_sdcc.1/by-name/rpm");
package_extract_file("firmware-update/IBL_$VERSION.mbn", "/dev/block/platform/msm_sdcc.1/by-name/aboot");
), "");
msm.boot_update("backup");
msm.boot_update("finalize");
ui_print("Done...");
EOF
    cd "/tmp/zip_template/"
    zip "$zipname-unsigned" * -r
    cd - > /dev/null
    # Sign the zip with the test keys so recovery accepts it.
    java -Xmx2048m -jar zip_sign/signapk.jar -w zip_sign/testkey.x509.pem zip_sign/testkey.pk8 "$zipname-unsigned" "$zipname"
    echo "${yellow}$zipname ${green}built${end}"
    rm -rf "/tmp/zip_template/" "$zipname-unsigned"
  fi
  if [ "$install" = "true" ]; then
    # Wait for TWRP recovery, push the zip to the external SD and install it.
    if ! adb devices|grep recovery; then
      adb reboot recovery
    fi
    until adb devices|grep recovery; do sleep 1; done
    until adb shell mount|grep /external_sd; do sleep 1; done
    adb push "$zipname" "/external_sd/"
    adb shell twrp install "/external_sd/$(basename "$zipname")"
    adb reboot bootloader
    exit 0
  fi
  if [ "$boot" = "true" ]; then
    rm -f "$build_dir/../../IBL.img" # Throw error on boot attempt if mkbootimg fails
    mkbootimg --kernel "$build_dir/../../emmc_appsboot.raw" --dt "$build_dir/../../dt.img" --ramdisk /dev/null -o "$build_dir/../../IBL.img"
    fastboot boot "$build_dir/../../IBL.img"
    exit 0
  fi
  if [ "$flash" = "true" ]; then
    fastboot flash aboot "$build_dir"/../../emmc_appsboot.mbn
    fastboot reboot-bootloader
    exit 0
  fi
fi
#!/usr/bin/env bash
# Deploy helper: pull the latest master and install production dependencies.
# Fix: abort immediately when a step fails, so a broken `git pull` never
# reaches `composer install`.
set -e

echo "Deploying $1..."
git pull origin master
composer install --no-dev
|
package io.opensphere.filterbuilder2.manager;

import java.util.Arrays;
import java.util.Collection;

import javax.swing.JOptionPane;

import org.apache.log4j.Logger;

import io.opensphere.core.util.swing.ButtonPanel;
import io.opensphere.core.util.swing.OptionDialog;
import io.opensphere.filterbuilder.controller.FilterBuilderToolbox;
import io.opensphere.filterbuilder.controller.FilterSet;
import io.opensphere.mantle.data.DataGroupInfo;
import io.opensphere.mantle.data.DataTypeInfo;
import io.opensphere.mantle.data.filter.DataLayerFilter;

/**
 * The filter manager dialog.
 */
public final class FilterManagerDialog extends OptionDialog
{
    /** The serialVersionUID. */
    private static final long serialVersionUID = 1L;

    /** The logger. */
    private static final Logger LOGGER = Logger.getLogger(FilterManagerDialog.class);

    /**
     * Gets the dialog title for the given data type.
     *
     * @param dataType the data type (may be null for the generic title)
     * @return the dialog title
     */
    private static String dialogTitle(DataTypeInfo dataType)
    {
        if (dataType != null)
        {
            return dataType.getDisplayName() + " Features Filters";
        }
        return "Manage Filters";
    }

    /**
     * Get the label to be used in a menu that launches a dialog for the specified type of filter. In case of null argument, it
     * returns the generic "Manage Filters" label.
     *
     * @param dataType the datatype for which the label will be generated (optional, will default to a blank value if supplied
     *            with null).
     * @return the menu label text
     */
    public static String menuLabel(DataTypeInfo dataType)
    {
        if (dataType != null)
        {
            return dataType.getDisplayName() + " Features";
        }
        return "Manage Filters";
    }

    /**
     * Shows the filter manager for the given data group.
     *
     * @param toolbox the toolbox
     * @param dataGroupInfo the data group info
     */
    public static void showDataGroup(FilterBuilderToolbox toolbox, DataGroupInfo dataGroupInfo)
    {
        // Resolve the group to its single filterable data type, then delegate.
        showDataType(toolbox, getDataType(dataGroupInfo));
    }

    /**
     * Shows the filter manager for the given data type.
     *
     * @param tools the toolbox with which the dialog will be configured.
     * @param dataType the data type
     */
    public static void showDataType(FilterBuilderToolbox tools, DataTypeInfo dataType)
    {
        FilterManagerDialog dialog = new FilterManagerDialog(tools, dataType);
        dialog.build();
        dialog.showDialog();
    }

    // For now, assume that this is in no-save mode.
    // Also note that the dialog is modal in this case.
    /**
     * Shows a dialog from which the user can select filters.
     *
     * @param tools the toolbox with which the dialog is configured.
     * @param fs the filter set with which to populate the dialog.
     * @return true if the user clicked okay, false otherwise.
     */
    public static boolean showFilterSet(FilterBuilderToolbox tools, FilterSet fs)
    {
        FilterManagerDialog dialog = new FilterManagerDialog(tools, fs);
        dialog.build();
        dialog.showDialog();
        return dialog.getSelection() == JOptionPane.OK_OPTION;
    }

    /**
     * Gets the data type to filter from the data group.
     *
     * @param dataGroupInfo the data group
     * @return the data type to filter, or null when there is not exactly one
     *         filterable type (an error is logged in that case)
     */
    private static DataTypeInfo getDataType(DataGroupInfo dataGroupInfo)
    {
        DataTypeInfo dataType = null;
        if (dataGroupInfo != null)
        {
            Collection<DataTypeInfo> filterableDataTypes = DataLayerFilter.getFilterableDataTypes(dataGroupInfo);
            if (filterableDataTypes.size() == 1)
            {
                dataType = filterableDataTypes.iterator().next();
            }
            else
            {
                LOGGER.error(filterableDataTypes.size() + " data types found for " + dataGroupInfo.getLongDisplayName());
            }
        }
        return dataType;
    }

    /**
     * Construct with a FilterSet. For now, we assume that this constructor is only used when the edits apply only to the given
     * filters and are not persisted.
     *
     * @param tools the tools from which the UI registry is extracted.
     * @param fs the filter set with which to populate the dialog.
     */
    private FilterManagerDialog(FilterBuilderToolbox tools, FilterSet fs)
    {
        super(tools.getMainToolBox().getUIRegistry().getMainFrameProvider().get());
        setTitle("Select Filter (Ops Clock)");
        // Modal: showFilterSet needs the user's answer before returning.
        setModal(true);
        setButtonLabels(Arrays.asList(ButtonPanel.OK));
        FilterManagerPanel fmp = new FilterManagerPanel(tools, fs);
        setComponent(fmp);
    }

    /**
     * Constructor.
     *
     * @param tools the toolbox
     * @param dataType the data type
     */
    private FilterManagerDialog(FilterBuilderToolbox tools, DataTypeInfo dataType)
    {
        super(tools.getMainToolBox().getUIRegistry().getMainFrameProvider().get());
        setTitle(dialogTitle(dataType));
        // Non-modal: the manager can stay open while the user works.
        setModal(false);
        setButtonLabels(Arrays.asList(ButtonPanel.OK));
        FilterManagerPanel filterManagerPanel = new FilterManagerPanel(tools, dataType);
        setComponent(filterManagerPanel);
        getContentButtonPanel().add(filterManagerPanel.getExportButton());
        getContentButtonPanel().add(filterManagerPanel.getImportButton());
    }
}
|
require 'active_support/concern'
require 'active_support/core_ext/module/delegation'

module SpecHelpers
  # Spec support for controlling the Circuit logger around examples.
  #
  # Examples may opt into a "clean" logger (no logger at all) or keep the
  # default logger; otherwise each example gets a throw-away StringIO-backed
  # logger whose output can be inspected via #logger_output. The original
  # logger is always restored after the example runs.
  module LoggerHelpers
    extend ActiveSupport::Concern

    included do
      attr_reader :default_logger, :use_logger

      # Swap the Circuit logger before each example and restore it after.
      around :each do |example|
        @default_logger = Circuit.logger
        if clean_logger?
          Circuit.logger = nil
        elsif !default_logger?
          @logger_sio = StringIO.new
          Circuit.logger = Logger.new(@logger_sio)
        end
        example.run
        if clean_logger?
          clean_logger!(false)
        elsif !default_logger?
          @logger_sio.close
          @logger_sio = nil
        end
        Circuit.logger = @default_logger
      end
    end

    # Select the logger mode; +key+ becomes a symbol (or nil to reset).
    def use_logger!(key)
      @use_logger = (key ? key.to_sym : nil)
    end

    # True when the current mode matches +key+.
    def use_logger?(key)
      @use_logger == key.to_sym
    end

    # Run with no logger at all.
    def clean_logger!(val=true)
      use_logger!(val ? :clean : false)
    end

    def clean_logger?() use_logger?(:clean); end

    # Keep whatever logger was already configured.
    def default_logger!(val=true)
      use_logger!(val ? :default : false)
    end

    def default_logger?() use_logger?(:default); end

    # Captured output of the StringIO-backed logger; raises when the example
    # opted out of the capture logger.
    def logger_output
      raise "Clean logger used" if clean_logger?
      raise "Default logger used" if default_logger?
      @logger_sio.string
    end
  end
end
|
#!/usr/bin/env bash
# ==============================================================================
# Home Assistant Community Add-ons: Bashio
# Bashio is an bash function library for use with Home Assistant add-ons.
#
# It contains a set of commonly used operations and can be used
# to be included in add-on scripts to reduce code duplication across add-ons.
# ==============================================================================
# ------------------------------------------------------------------------------
# Checks if a given value is true.
#
# Arguments:
#   $1 value
# ------------------------------------------------------------------------------
function bashio::var.true() {
    local value=${1:-null}

    bashio::log.trace "${FUNCNAME[0]}:" "$@"

    # Only the literal string "true" counts as true.
    if [[ "${value}" = "true" ]]; then
        return "${__BASHIO_EXIT_OK}"
    fi

    return "${__BASHIO_EXIT_NOK}"
}
# ------------------------------------------------------------------------------
# Checks if a given value is false.
#
# Arguments:
#   $1 value
# ------------------------------------------------------------------------------
function bashio::var.false() {
    local value=${1:-null}

    bashio::log.trace "${FUNCNAME[0]}:" "$@"

    # Only the literal string "false" counts as false.
    if [[ "${value}" = "false" ]]; then
        return "${__BASHIO_EXIT_OK}"
    fi

    return "${__BASHIO_EXIT_NOK}"
}
# ------------------------------------------------------------------------------
# Checks if a global variable is defined.
#
# Arguments:
#   $1 Name of the variable
# ------------------------------------------------------------------------------
bashio::var.defined() {
    local variable=${1}

    bashio::log.trace "${FUNCNAME[0]}:" "$@"

    # Indirect expansion with two *different* fallbacks: the expansions only
    # match when the named variable is actually set (the fallback is unused).
    [[ "${!variable-X}" = "${!variable-Y}" ]]
}
# ------------------------------------------------------------------------------
# Checks if a value has an actual (non-empty) value.
#
# Arguments:
#   $1 Value
# ------------------------------------------------------------------------------
function bashio::var.has_value() {
    local value=${1}

    bashio::log.trace "${FUNCNAME[0]}:" "$@"

    if [[ -n "${value}" ]]; then
        return "${__BASHIO_EXIT_OK}"
    fi

    return "${__BASHIO_EXIT_NOK}"
}
# ------------------------------------------------------------------------------
# Checks if a value is empty.
#
# Arguments:
#   $1 Value
# ------------------------------------------------------------------------------
function bashio::var.is_empty() {
    local value=${1}

    bashio::log.trace "${FUNCNAME[0]}:" "$@"

    if [[ -z "${value}" ]]; then
        return "${__BASHIO_EXIT_OK}"
    fi

    return "${__BASHIO_EXIT_NOK}"
}
# ------------------------------------------------------------------------------
# Checks if a value equals another value (string comparison).
#
# Arguments:
#   $1 Value
#   $2 Equals value
# ------------------------------------------------------------------------------
function bashio::var.equals() {
    local value=${1}
    local equals=${2}

    bashio::log.trace "${FUNCNAME[0]}:" "$@"

    if [[ "${value}" = "${equals}" ]]; then
        return "${__BASHIO_EXIT_OK}"
    fi

    return "${__BASHIO_EXIT_NOK}"
}
# ------------------------------------------------------------------------------
# Creates JSON based on function arguments.
#
# Arguments:
#   $@ Bash array of key/value pairs, prefix integer or boolean values with ^
# ------------------------------------------------------------------------------
function bashio::var.json() {
    local data=("$@");
    local number_of_items=${#data[@]}
    local json=''
    local separator
    local counter
    local item

    # At least one key/value pair is required.
    if [[ ${number_of_items} -eq 0 ]]; then
        bashio::log.error "Length of input array needs to be at least 2"
        return "${__BASHIO_EXIT_NOK}"
    fi

    # Keys and values must come in pairs.
    if [[ $((number_of_items%2)) -eq 1 ]]; then
        bashio::log.error "Length of input array needs to be even (key/value pairs)"
        return "${__BASHIO_EXIT_NOK}"
    fi

    counter=0;
    for i in "${data[@]}"; do
        # Odd positions are keys (preceded by ","), even positions are
        # values (preceded by ":").
        separator=","
        if [ $((++counter%2)) -eq 0 ]; then
            separator=":";
        fi

        # A leading "^" marks a raw (unquoted) value, e.g. numbers/booleans.
        item="\"$i\""
        if [[ "${i:0:1}" == "^" ]]; then
            item="${i:1}"
        fi

        json="$json$separator$item";
    done

    # Strip the leading "," before wrapping in braces.
    echo "{${json:1}}";
    return "${__BASHIO_EXIT_OK}"
}
|
-- Average of each person's minimum recorded age: the window function tags
-- every row with the smallest age seen for that name, and the outer query
-- averages only the rows carrying that minimum.
SELECT AVG(age) AS avg_age
FROM (
    SELECT id, name, age, MIN(age) OVER (PARTITION BY name) AS min_age
    FROM People) AS t
WHERE age = min_age;
var gulp = require('gulp');
var exec = require('child_process').exec;

// `start` task: launch the live-reload server and the styleguide watcher as
// parallel child processes.
//
// Fix: the original discarded the exec callbacks entirely, so failures of
// either child process were silently swallowed. Surface them on stderr.
gulp.task('start', function () {
  // Build an (err, stdout, stderr) callback that logs failures for `label`.
  var logResult = function (label) {
    return function (err, stdout, stderr) {
      if (err) {
        console.error(label + ' failed:', err);
      }
      if (stderr) {
        console.error(stderr);
      }
    };
  };
  exec('live-server --open=styleguide', logResult('live-server'));
  exec('styleguide start', logResult('styleguide'));
});
|
#!/bin/bash
# SLURM batch job for the "sin" activation-function experiment:
# single core, 2 GB RAM, ~24 h wall time, mail on failure.
#SBATCH -J Act_sin_1
#SBATCH --mail-user=eger@ukp.informatik.tu-darmstadt.de
#SBATCH --mail-type=FAIL
#SBATCH -e /work/scratch/se55gyhe/log/output.err.%j
#SBATCH -o /work/scratch/se55gyhe/log/output.out.%j
#SBATCH -n 1     # Number of cores
#SBATCH --mem-per-cpu=2000
#SBATCH -t 23:59:00     # Hours, minutes and seconds, or '#SBATCH -t 10' -only mins

#module load intel python/3.5

# Positional args: activation=sin, variant=1, optimizer=sgd, then numeric
# hyper-parameters and the embedding name — presumably lr/size/dropout;
# confirm against meta.py's argument parsing.
python3 /home/se55gyhe/Act_func/progs/meta.py sin 1 sgd 4 0.41186226239657975 478 0.013919639129579407 glorot_uniform PE-infersent
|
import numpy as np
def get_mm_line_fit(hits, sig_keys):
    """Weighted least-squares straight-line fit x = A*z + B for one event.

    The per-hit uncertainty is estimated as twice the spread between the
    middle and right-end x projections, floored at 0.45, and hits are kept
    only when their ``is_muon`` column is greater than -1.

    Args:
        hits: 2-D array of per-hit quantities; columns indexed by sig_keys.
        sig_keys: list of column names containing at least 'z',
            'projX_at_middle_x', 'projX_at_rightend_x' and 'is_muon'.

    Returns:
        list ``[A, B, Chi2]`` — slope, intercept and chi-square of the fit.
    """
    # Extract the needed columns once.
    z_all = hits[:, sig_keys.index('z')]
    x_all = hits[:, sig_keys.index('projX_at_middle_x')]
    x_right = hits[:, sig_keys.index('projX_at_rightend_x')]

    # Uncertainty estimate, floored at 0.45.
    sigma = 2 * np.abs(x_all - x_right)
    sigma = np.where(sigma < 0.45, 0.45, sigma)

    # Keep only rows flagged as valid hits.
    valid = hits[:, sig_keys.index('is_muon')] > -1
    z = z_all[valid]
    x = x_all[valid]
    sigma = sigma[valid]

    # Normal-equation sums for the inverse-variance-weighted linear fit.
    beta = (z**2 / sigma**2).sum()
    gamma = (z / sigma**2).sum()
    omega = (z * x / sigma**2).sum()
    lam = (1. / sigma**2).sum()
    rho = (x / sigma**2).sum()

    # Solve for the slope and intercept.
    slope = (rho * gamma - omega * lam) / (gamma**2 - beta * lam)
    intercept = (omega - slope * beta) / gamma

    # Goodness of fit.
    chi2 = (((slope * z + intercept - x)**2) / sigma**2).sum()
    return [slope, intercept, chi2]
def get_fits(events, sig_keys, overwrite=False):
    """Run the per-event line fit over all events.

    Args:
        events: array of per-event hit arrays (first axis = events).
        sig_keys: column-name list forwarded to :func:`get_mm_line_fit`.
        overwrite: unused; kept for interface compatibility with callers.

    Returns:
        (n_events, 3) array whose rows are ``[A, B, Chi2]``.
    """
    fits = np.zeros((events.shape[0], 3))
    for event_index, event_hits in enumerate(events):
        fits[event_index, :] = get_mm_line_fit(event_hits, sig_keys)
    return fits
|
#!/usr/bin/env bash
# Write a .env file with the caller's UID/GID so docker-compose containers
# can run with matching host permissions.
set -e

uid=$(id -u)
gid=$(id -g)

# Fix: pass the values as printf *arguments* instead of embedding them in
# the format string, so unexpected characters (e.g. '%') cannot corrupt the
# output. Output intentionally has no trailing newline, as before.
printf 'UID=%s\nGID=%s\nCOMPOSE_PROJECT_NAME=profile' "$uid" "$gid" > .env
|
// Import necessary packages
const express = require("express");
const http = require("http");
const socketIO = require("socket.io");

// Create the express app
const app = express();

// Create the HTTP server wrapping the express app
const server = http.createServer(app);

// Attach socket.io to the server
const io = socketIO(server);

// Listener for connection event: wires up per-client message handlers
io.on("connection", (socket) => {
  console.log("New client connected");

  // Greet only the newly connected client
  socket.emit("newMessage", {
    username: "admin",
    message: "Welcome to Chat App!"
  });

  // Listener for createMessage: re-broadcast incoming chat messages
  socket.on("createMessage", (message) => {
    console.log("New message: ", message);
    // Broadcasts message to all connected sockets (including the sender)
    io.emit("newMessage", {
      username: message.username,
      message: message.message
    });
  });

  // Listener for disconnect event
  socket.on("disconnect", () => {
    console.log("Client disconnected");
  });
});

// Set the port (default 5001 unless PORT is provided)
const PORT = process.env.PORT || 5001;
server.listen(PORT, () => console.log(`Server started on port ${PORT}`));
#!/bin/bash
#
# Copyright (C) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Runs the Kubernetes conformance suite against an OpenShift cluster
#
# Test prerequisites:
#
# * all nodes that users can run workloads under marked as schedulable
#
source "$(dirname "${BASH_SOURCE}")/../../hack/lib/init.sh"

# Check inputs
if [[ -z "${KUBECONFIG-}" ]]; then
  os::log::fatal "KUBECONFIG must be set to a root account"
fi

test_report_dir="${ARTIFACT_DIR}"
mkdir -p "${test_report_dir}"

# Write a README alongside the results describing how to reproduce them.
cat <<END > "${test_report_dir}/README.md"
This conformance report is generated by the OpenShift CI infrastructure. The canonical source location for this test script is located at https://github.com/openshift/origin/blob/master/test/extended/conformance-k8s.sh

This file was generated by:

  Commit $( git rev-parse HEAD || "<commit>" )
  Tag    $( git describe || "<tag>" )

To recreate these results

1. Install an [OpenShift cluster](https://docs.openshift.com/container-platform/latest/install_config/install/advanced_install.html)
2. Retrieve a \`.kubeconfig\` file with administrator credentials on that cluster and set the environment variable KUBECONFIG

    export KUBECONFIG=PATH_TO_KUBECONFIG

3. Clone the OpenShift source repository and change to that directory:

    git clone https://github.com/openshift/origin.git
    cd origin

4. Place the \`oc\` binary for that cluster in your PATH
5. Run the conformance test:

    test/extended/conformance-k8s.sh

Nightly conformance tests are run against release branches and reported https://openshift-gce-devel.appspot.com/builds/origin-ci-test/logs/test_branch_origin_extended_conformance_k8s/
END

version="${KUBERNETES_VERSION:-release-1.8}"
kubernetes="${KUBERNETES_ROOT:-${OS_ROOT}/../../../k8s.io/kubernetes}"

# Locate (or clone) a Kubernetes source tree at the requested version.
if [[ -d "${kubernetes}" ]]; then
  git fetch origin --tags
else
  if [[ -n "${KUBERNETES_ROOT-}" ]]; then
    os::log::fatal "Cannot find Kubernetes source directory, set KUBERNETES_ROOT"
  fi
  kubernetes="${OS_ROOT}/_output/components/kubernetes"
  if [[ ! -d "${kubernetes}" ]]; then
    mkdir -p "$( dirname "${kubernetes}" )"
    os::log::info "Cloning Kubernetes source"
    git clone "https://github.com/kubernetes/kubernetes.git" -b "${version}" "${kubernetes}" # --depth=1 unfortunately we need history info as well
  fi
fi

os::log::info "Running Kubernetes conformance suite for ${version}"

# Execute OpenShift prerequisites
# Disable container security
oc adm policy add-scc-to-group privileged system:authenticated system:serviceaccounts
oc adm policy remove-scc-from-group restricted system:authenticated
oc adm policy remove-scc-from-group anyuid system:cluster-admins

# Mark the masters and infra nodes as unschedulable so tests ignore them
oc get nodes -o name -l 'role in (infra,master)' | xargs -L1 oc adm cordon
unschedulable="$( oc get nodes -o name -l 'role in (infra,master)' | wc -l )"
# TODO: undo these operations

# Execute Kubernetes prerequisites: build kubectl and the e2e binary.
pushd "${kubernetes}" > /dev/null
git checkout "${version}"
make WHAT=cmd/kubectl
make WHAT=test/e2e/e2e.test
export PATH="${kubernetes}/_output/local/bin/$( os::build::host_platform ):${PATH}"
kubectl version > "${test_report_dir}/version.txt"
echo "-----" >> "${test_report_dir}/version.txt"
oc version >> "${test_report_dir}/version.txt"

# Run the test
e2e.test '-ginkgo.focus=\[Conformance\]' \
  -report-dir "${test_report_dir}" -ginkgo.noColor \
  -allowed-not-ready-nodes ${unschedulable} \
  2>&1 | tee "${test_report_dir}/e2e.log"

echo
echo "Run complete, results in ${test_report_dir}"
def count_strings_by_sum_of_ascii(strings):
    """Count how many strings have a code-point sum divisible by 3.

    Args:
        strings: iterable of strings.

    Returns:
        Number of strings whose summed ``ord`` values are a multiple of 3
        (an empty string has sum 0 and therefore counts).
    """
    return sum(1 for text in strings if sum(ord(ch) for ch in text) % 3 == 0)
#!/usr/bin/env bash
# Download the pinned webextension-polyfill build into the current directory
# (-O keeps the remote filename).
curl -sSLO https://unpkg.com/webextension-polyfill@0.8.0/dist/browser-polyfill.js
|
#!/bin/bash
# Configure a PAM service named "mariadb" for MariaDB PAM-auth testing.
# NOTE(review): the "auth" line appears twice — presumably intentional so the
# PAM conversation prompts twice; confirm before deduplicating.
tee /etc/pam.d/mariadb << EOF
auth required pam_unix.so audit
auth required pam_unix.so audit
account required pam_unix.so audit
EOF

# Create a throw-away OS user the tests authenticate as.
useradd testPam
chpasswd << EOF
testPam:myPwd
EOF

# mysql must be in the shadow group so pam_unix can verify passwords.
usermod -a -G shadow mysql
echo "pam configuration done"
<reponame>kemitchell/httpcallback.js<filename>test.js
// End-to-end test for HTTPCallback: an "event source" server accepts
// callback registrations, then POSTs data to every registered listener.
// One good and one unreachable callback are registered to exercise the
// registration, failure and deregistration events.
var HTTPCallback = require('./')
var concat = require('concat-stream')
var http = require('http')
var series = require('async-series')
var tape = require('tape')
var url = require('url')

tape(function (test) {
  test.plan(9)

  // The data to send from an event source server to an event listener
  // server.
  var CALLBACK_DATA = 'callback body'

  // Port 1 is never listening, so this callback is guaranteed to fail.
  var BAD_CALLBACK_PORT = 1
  var BAD_CALLBACK = 'http://localhost:' + BAD_CALLBACK_PORT + '/x'
  var RETRY_INTERVAL = 500
  var listenerURL

  // Create an event source server with an example HTTPCallback.
  var example = new HTTPCallback(
    // Configure automatic retry so the event source server will retry
    // and deregister bad callbacks quickly.
    {
      retry: {
        maxTimeout: RETRY_INTERVAL,
        minTimeout: RETRY_INTERVAL,
        random: false,
        retries: 0
      }
    }
  )

  example
    .once('registration', function (parsedURL) {
      test.equal(
        parsedURL.pathname, '/receive',
        'emits registration event for good callback'
      )
      this
        .once('registration', function (parsedURL) {
          test.equal(
            parsedURL.pathname, '/x',
            'emits registration event for bad callback')
        })
    })
    .on('failure', function (error) {
      test.equal(
        error.errno, 'ECONNREFUSED',
        'emits failure event for bad callback'
      )
      test.equal(
        error.port, BAD_CALLBACK_PORT,
        'failure event is for the bad port'
      )
    })
    .once('deregistration', function (href) {
      test.equal(
        href, BAD_CALLBACK,
        'deregistration event for bad callback'
      )
    })

  var source = http.createServer(
    function (request, response) {
      // The event source server proxies POST /register to the
      // HTTPCallback request handler.
      var callbackRequest = (
        request.method === 'POST' &&
        url.parse(request.url).pathname === '/register'
      )
      if (callbackRequest) {
        example.handler(request, response)
      // Otherwise it fails with an error.
      } else {
        throw new Error()
      }
    }
  )
    // Start the event source server on a random high port.
    .listen(0, function () {
      // Record the random port for future reference.
      var sourcePort = this.address().port
      // Create an event listener server.
      var listener = http.createServer(
        // The event listener server responds to POST /receive.
        function (request, response) {
          var callbackRequest = (
            request.method === 'POST' &&
            url.parse(request.url).pathname === '/receive'
          )
          if (callbackRequest) {
            // Read the body of the POST request.
            request.pipe(concat(function (buffer) {
              var asString = buffer.toString()
              // Should equal the value sent by the event source server.
              test.equal(
                asString, CALLBACK_DATA,
                'listener receives data via POST /receive'
              )
              response.end()
              // After the retry interval, the bad callback should have
              // been deregistered, leaving only the good listener.
              setTimeout(
                function () {
                  test.deepEqual(
                    example.callbackListeners(),
                    [listenerURL],
                    'only the listener remains listening'
                  )
                  // Close our test servers.
                  source.close()
                  listener.close()
                },
                RETRY_INTERVAL
              )
            }))
          // Otherwise it fails with an error.
          } else {
            throw new Error()
          }
        }
      )
        // Also start the event listener server on a random high port.
        .listen(0, function () {
          // Record that random port, so we can tell the event source
          // server what port to call back on.
          var listenerPort = this.address().port
          listenerURL = 'http://localhost:' + listenerPort + '/receive'
          var post = {
            port: sourcePort,
            path: '/register',
            method: 'POST'
          }
          series(
            [
              function (done) {
                http.request(post)
                  .once('response', function (response) {
                    test.equal(
                      response.statusCode, 202,
                      'POST /register to source' +
                      ' responds 202 for good callback'
                    )
                    done()
                  })
                  .once('error', done)
                  // The body of the callback registration request to the
                  // event source server is the plain-text URL of the
                  // source listener server where the event source server
                  // should POST data.
                  .end(listenerURL)
              },
              function (done) {
                http.request(post)
                  .once('response', function (response) {
                    test.equal(
                      response.statusCode, 202,
                      'POST /register to source' +
                      ' responds 202 for bad callback'
                    )
                    done()
                  })
                  .once('error', done)
                  // Register a bogus callback as well, to test error and
                  // deregistration events.
                  .end(BAD_CALLBACK)
              },
              function () {
                // Dispatch the callback data to all listeners
                // registered with the event source server.
                example.send(function (stream) {
                  stream.end(CALLBACK_DATA)
                })
              }
            ],
            test.ifError.bind(test)
          )
        })
    })
})
|
import {EMPTY, Observable, Subscription} from 'rxjs'
import {AbstractControl, FormGroup, ValidatorFn} from '@angular/forms'
import {debounceTime, map, startWith} from 'rxjs/operators'
import {FormDataStringType, FormInputData} from './form.input'
import {mandatoryOptionsValidator, optionalOptionsValidator} from '../../utils/form.validator'
import {foldUndefined, isEmpty, subscribe, voidF} from '../../utils/functions'

/**
 * Form-input adapter for an autocomplete control backed by a stream of
 * options. It caches the latest option list, exposes a filtered stream
 * driven by the control's (debounced) value changes, and manages its own
 * subscriptions.
 */
export class FormInputOption<Option> implements FormInputData<string> {
  readonly type: FormDataStringType
  readonly validator: ValidatorFn | ValidatorFn[]
  readonly value: string
  // Subscriptions owned by this input; disposed in onDestroy().
  private readonly subs: Subscription[]
  // Latest snapshot of the option stream, used for client-side filtering.
  private options: Option[] = []

  // Options matching the user's current input (consumed by the template).
  filteredOptions: Observable<Option[]>
  control: Readonly<AbstractControl>

  /**
   * @param controlName name of the form control this input binds to
   * @param errorKey key under which validation errors are reported
   * @param required whether a selection is mandatory
   * @param display renders an option as user-visible text
   * @param options$ stream supplying the selectable options
   * @param debounce debounce (ms) applied to value changes
   * @param selected optional chooser for an initially selected option
   */
  constructor(
    readonly controlName: string,
    private readonly errorKey: string,
    private readonly required: boolean,
    private readonly display: (value: Option) => string,
    private readonly options$: Observable<Option[]>,
    private readonly debounce: number = 200,
    private readonly selected?: (options: Option[]) => Option | undefined
  ) {
    this.value = ''
    this.type = 'options'
    this.validator = required ? mandatoryOptionsValidator() : optionalOptionsValidator()
    this.subs = []
  }

  // Bind the control from the group and, once options arrive, apply the
  // optional preselection callback.
  onInit = (group: FormGroup) => {
    this.control = group.controls[this.controlName]
    this.bindOptions0(this.options$, os => {
      foldUndefined(this.selected, f => foldUndefined(f(os), v => this.control.setValue(v), voidF), voidF)
    })
  }

  bindControl = (control: AbstractControl) => this.control = control

  // (Re)subscribe to the option stream only if no options are cached yet.
  bindOptionsIfNeeded = () => {
    if (isEmpty(this.options)) {
      this.bindOptions(this.options$)
    }
  }

  bindOptions = (options$: Observable<Option[]>) => {
    this.bindOptions0(options$, voidF)
  }

  // Clear cached options (via the EMPTY stream) and drop all subscriptions.
  reset = () => {
    this.bindOptions(EMPTY)
    this.onDestroy()
  }

  // Subscribe to the option stream and wire filteredOptions to the
  // control's debounced value changes.
  private bindOptions0 = (options$: Observable<Option[]>, completion: (os: Option[]) => void) => {
    this.subs.push(subscribe(options$, os => {
      this.options = os
      completion(os)
    }))
    this.filteredOptions = this.control.valueChanges
      .pipe(
        debounceTime(this.debounce),
        startWith(''),
        // The control value may be free text or an already-selected option.
        map(value => typeof value === 'string' ? value : this.display(value)),
        map(value => value ? this.filter(value) : this.options.slice())
      )
  }

  onDestroy = () => {
    this.subs.forEach(s => s.unsubscribe())
  }

  // Case-insensitive substring match against the displayed option text.
  private filter = (input: string): Option[] => {
    const filterValue = input.toLowerCase()
    return this.options.filter(t => this.display(t).toLowerCase().indexOf(filterValue) >= 0)
  }

  // Used by the autocomplete to render the selected option.
  displayFn = (object?: Option): string | undefined => {
    if (!object) {
      return undefined
    }
    return this.display(object)
  }

  hasError = (): boolean => {
    return !this.control.untouched && this.control.hasError(this.errorKey)
  }

  getErrorMessage = (): string => {
    return this.control.getError(this.errorKey)
  }
}
|
#!/bin/bash
#
# Run listIngest.sh on the configured ingest host: directly when
# INGEST_HOST is this machine, otherwise remotely via pssh.

if [[ $(uname) == "Darwin" ]]; then
    # macOS has no `readlink -f`; resolve the script path with Python.
    # print(...) works under both Python 2 and Python 3 (the original
    # `print x` statement form is a syntax error under Python 3).
    THIS_SCRIPT=$(python -c 'import os,sys;print(os.path.realpath(sys.argv[1]))' "$0")
    MKTEMP="mktemp -t $(basename "$0")"
else
    THIS_SCRIPT=$(readlink -f "$0")
    MKTEMP="mktemp -t $(basename "$0").XXXXXXXX"
fi
THIS_DIR="${THIS_SCRIPT%/*}"
cd "$THIS_DIR" || exit 1
FORCE=true
. ../ingest/ingest-env.sh
export INGEST_BIN="$THIS_DIR/.."

# now apply the appropriate system configuration
if [[ "$INGEST_HOST" == "localhost" || "$INGEST_HOST" == "$(hostname)" || "$INGEST_HOST" == "$(hostname -s)" ]]; then
    "$INGEST_BIN/ingest/listIngest.sh"
else
    # Write the target host to a temp file for pssh; the trap guarantees the
    # file is removed even if we are interrupted mid-run.
    # $MKTEMP is intentionally unquoted: it holds a full command line.
    ingestHost=$($MKTEMP)
    trap 'rm -f "$ingestHost"; exit $?' INT TERM EXIT
    echo "$INGEST_HOST" > "$ingestHost"
    pssh -p 1 -i -h "${ingestHost}" "$INGEST_BIN/ingest/listIngest.sh" < /dev/null | grep -v 'SUCCESS'
    rm "$ingestHost"
    trap - INT TERM EXIT
fi
|
#!/bin/bash
#
# Generate a dataset for an experiment: pull the experiment's workdir from
# S3, run the `data-generated` make target (MultiWOZ ontology copied in
# first), and upload the result back to S3.
#
# Positional arguments (parsed by lib.sh's parse_args):
#   owner dataset_owner experiment dataset
# Remaining arguments are forwarded to make.

srcdir=$(dirname "$0")
. "${srcdir}/lib.sh"
parse_args "$0" "owner dataset_owner experiment dataset" "$@"
shift $n  # parse_args sets $n to the number of consumed arguments

set -e
set -x

srcdir=$(realpath "$srcdir")
mkdir -p workdir  # -p so a rerun in the same environment does not abort under `set -e`
cd workdir
pwd

aws s3 sync "s3://almond-research/${owner}/workdir-${experiment}/" .
cp /opt/genie-toolkit/languages/multiwoz/ontology.json data/clean-ontology.json

export GENIE_TOKENIZER_ADDRESS=tokenizer.default.svc.cluster.local:8888
export TZ=America/Los_Angeles
make "experiment=${experiment}" "owner=${dataset_owner}" "tradedir=${srcdir}/.." "geniedir=/opt/genie-toolkit" "$@" data-generated

aws s3 sync data-generated/ "s3://almond-research/${dataset_owner}/dataset/${experiment}/${dataset}/"
|
<reponame>yqian4/optuna
import abc
import copy
from optuna import study
from optuna.trial import TrialState
from optuna import type_checking
if type_checking.TYPE_CHECKING:
from typing import Any # NOQA
from typing import Dict # NOQA
from typing import List # NOQA
from typing import Optional # NOQA
from optuna import distributions # NOQA
from optuna.trial import FrozenTrial # NOQA
DEFAULT_STUDY_NAME_PREFIX = "no-name-"
class BaseStorage(object, metaclass=abc.ABCMeta):
    """Base class for storages.

    This class is not supposed to be directly accessed by library users.

    Storage classes abstract a backend database and provide library internal interfaces to
    read/write history of studies and trials.

    **Thread safety**

    Storage classes might be shared from multiple threads, and thus storage classes
    must be thread-safe.
    As one of the requirements of the thread-safety, storage classes must guarantee
    that the returned values, such as `FrozenTrial`s will not be directly modified
    by storage class.
    However, storage class can assume that return values are never modified by users.
    When users modify return values of storage classes, it might break the internal states
    of storage classes, which will result in undefined behaviors.

    **Ownership of RUNNING trials**

    Trials in finished states are not allowed to be modified.
    Trials in the WAITING state are not allowed to be modified except for the `state` field.
    Storage classes can assume that each RUNNING trial is modified from only one process.
    When users modify a RUNNING trial from multiple processes, it might lead to
    an inconsistent internal state, which will result in undefined behaviors.
    To use optuna with MPI or in other multi-process programs, users must make sure
    that the optuna interface is accessed from only one of the processes.
    Storage classes are not designed to provide inter-process communication functionalities.

    **Consistency models**

    Storage classes must support monotonic-reads consistency model, that is, if a
    process reads a data `X`, any successive reads on data `X` does not return
    older values.
    They must support read-your-writes, that is, if a process writes to data `X`,
    any successive reads on data `X` from the same process must read the written
    value or one of more recent values.

    **Stronger consistency requirements for special data**

    TODO(ytsmiling) Add load method to storage class implementations.

    Under multi-worker settings, storage classes are guaranteed to return the latest
    values of any attributes of `Study`, but not guaranteed the same thing for
    attributes of `Trial`.
    However, if `load(study_id)` method is called, any successive reads on the `state`
    attribute of `Trial` in the study are guaranteed to return the same or more recent
    values than the value at the time the `load` method called.
    Let `T` be a `Trial`.
    Let `P` be a process that last updated the `state` attribute of `T`.
    Then, any reads on any attributes of `T` are guaranteed to return the same or
    more recent values than any writes by `P` on the attribute before `P` updated
    the `state` attribute of `T`.
    The same applies for `user_attrs`, `system_attrs`, `intermediate_values` attributes,
    but future development may allow storage class users to explicitly skip the above
    properties for these attributes.

    **Data persistence**

    Storage classes do not guarantee that write operations are logged into a persistent
    storage even when write methods succeed.
    Thus, when process failure occurs, some writes might be lost.
    As exceptions, when a persistent storage is available, any writes on any attributes
    of `Study` and writes on `state` of `Trial` are guaranteed to be persistent.
    Additionally, any preceding writes on any attributes of `Trial` are guaranteed to
    be written into a persistent storage before writes on `state` of `Trial` succeed.
    The same applies for `user_attrs`, `system_attrs`, `intermediate_values` attributes,
    but future development may allow storage class users to explicitly skip the above
    properties for these attributes.
    """

    # Basic study manipulation

    @abc.abstractmethod
    def create_new_study(self, study_name=None):
        # type: (Optional[str]) -> int
        """Create a new study (optionally named) and return its study ID."""
        raise NotImplementedError

    @abc.abstractmethod
    def delete_study(self, study_id):
        # type: (int) -> None
        """Delete the study identified by ``study_id``."""
        raise NotImplementedError

    @abc.abstractmethod
    def set_study_user_attr(self, study_id, key, value):
        # type: (int, str, Any) -> None
        """Set a user-defined attribute on the study."""
        raise NotImplementedError

    @abc.abstractmethod
    def set_study_direction(self, study_id, direction):
        # type: (int, study.StudyDirection) -> None
        """Set the optimization direction of the study."""
        raise NotImplementedError

    @abc.abstractmethod
    def set_study_system_attr(self, study_id, key, value):
        # type: (int, str, Any) -> None
        """Set an optuna-internal (system) attribute on the study."""
        raise NotImplementedError

    # Basic study access

    @abc.abstractmethod
    def get_study_id_from_name(self, study_name):
        # type: (str) -> int
        """Look up a study ID by study name."""
        raise NotImplementedError

    @abc.abstractmethod
    def get_study_id_from_trial_id(self, trial_id):
        # type: (int) -> int
        """Return the ID of the study the given trial belongs to."""
        raise NotImplementedError

    @abc.abstractmethod
    def get_study_name_from_id(self, study_id):
        # type: (int) -> str
        """Look up a study name by study ID."""
        raise NotImplementedError

    @abc.abstractmethod
    def get_study_direction(self, study_id):
        # type: (int) -> study.StudyDirection
        """Return the optimization direction of the study."""
        raise NotImplementedError

    @abc.abstractmethod
    def get_study_user_attrs(self, study_id):
        # type: (int) -> Dict[str, Any]
        """Return all user-defined attributes of the study."""
        raise NotImplementedError

    @abc.abstractmethod
    def get_study_system_attrs(self, study_id):
        # type: (int) -> Dict[str, Any]
        """Return all system attributes of the study."""
        raise NotImplementedError

    @abc.abstractmethod
    def get_all_study_summaries(self):
        # type: () -> List[study.StudySummary]
        """Return summaries of every study in the storage."""
        raise NotImplementedError

    # Basic trial manipulation

    @abc.abstractmethod
    def create_new_trial(self, study_id, template_trial=None):
        # type: (int, Optional[FrozenTrial]) -> int
        """Create a new trial (optionally copied from a template) and return its ID."""
        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_state(self, trial_id, state):
        # type: (int, TrialState) -> bool
        """Update the trial's state; the bool reports whether the update took effect."""
        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_param(self, trial_id, param_name, param_value_internal, distribution):
        # type: (int, str, float, distributions.BaseDistribution) -> bool
        """Record a sampled parameter (internal representation) for the trial."""
        raise NotImplementedError

    @abc.abstractmethod
    def get_trial_number_from_id(self, trial_id):
        # type: (int) -> int
        """Return the trial's number (its index within the study)."""
        raise NotImplementedError

    @abc.abstractmethod
    def get_trial_param(self, trial_id, param_name):
        # type: (int, str) -> float
        """Return the internal representation of one parameter of the trial."""
        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_value(self, trial_id, value):
        # type: (int, float) -> None
        """Set the trial's objective value."""
        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_intermediate_value(self, trial_id, step, intermediate_value):
        # type: (int, int, float) -> bool
        """Record an intermediate objective value at the given step."""
        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_user_attr(self, trial_id, key, value):
        # type: (int, str, Any) -> None
        """Set a user-defined attribute on the trial."""
        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_system_attr(self, trial_id, key, value):
        # type: (int, str, Any) -> None
        """Set an optuna-internal (system) attribute on the trial."""
        raise NotImplementedError

    # Basic trial access

    @abc.abstractmethod
    def get_trial(self, trial_id):
        # type: (int) -> FrozenTrial
        """Return a frozen snapshot of the trial."""
        raise NotImplementedError

    @abc.abstractmethod
    def get_all_trials(self, study_id, deepcopy=True):
        # type: (int, bool) -> List[FrozenTrial]
        """Return all trials of the study; ``deepcopy=False`` may share internal objects."""
        raise NotImplementedError

    @abc.abstractmethod
    def get_n_trials(self, study_id, state=None):
        # type: (int, Optional[TrialState]) -> int
        """Count trials in the study, optionally restricted to one state."""
        raise NotImplementedError

    def get_best_trial(self, study_id):
        # type: (int) -> FrozenTrial
        """Return a deep copy of the best COMPLETE trial of the study.

        "Best" is the max or min of ``value`` depending on the study's
        direction. Raises ``ValueError`` when no trial has completed yet.
        """
        # deepcopy=False is safe here: we only read and deep-copy the winner.
        all_trials = self.get_all_trials(study_id, deepcopy=False)
        all_trials = [t for t in all_trials if t.state is TrialState.COMPLETE]
        if len(all_trials) == 0:
            raise ValueError("No trials are completed yet.")
        if self.get_study_direction(study_id) == study.StudyDirection.MAXIMIZE:
            best_trial = max(all_trials, key=lambda t: t.value)
        else:
            best_trial = min(all_trials, key=lambda t: t.value)
        return copy.deepcopy(best_trial)

    def get_trial_params(self, trial_id):
        # type: (int) -> Dict[str, Any]
        """Return the trial's parameters (convenience wrapper over get_trial)."""
        return self.get_trial(trial_id).params

    def get_trial_user_attrs(self, trial_id):
        # type: (int) -> Dict[str, Any]
        """Return the trial's user-defined attributes."""
        return self.get_trial(trial_id).user_attrs

    def get_trial_system_attrs(self, trial_id):
        # type: (int) -> Dict[str, Any]
        """Return the trial's system attributes."""
        return self.get_trial(trial_id).system_attrs

    def remove_session(self):
        # type: () -> None
        """Release backend resources; a no-op here, backends may override."""
        pass

    def check_trial_is_updatable(self, trial_id, trial_state):
        # type: (int, TrialState) -> None
        """Raise ``RuntimeError`` if ``trial_state`` is a finished state.

        Finished trials must never be modified (see the class docstring).
        """
        if trial_state.is_finished():
            trial = self.get_trial(trial_id)
            raise RuntimeError(
                "Trial#{} has already finished and can not be updated.".format(trial.number)
            )
|
import { ScalafmtError } from '../src/ScalafmtError';
describe('ScalafmtError.parseErrors', () => {
  test('parses empty string', () => {
    // No diff output means no formatting errors.
    const result = ScalafmtError.parseErrors('', 'workdir');
    expect(result).toHaveLength(0);
  });

  test('parses errors', () => {
    // Two unified-diff hunks emitted by scalafmt for the same file: a
    // single-line hunk (line 45) and a multi-line hunk (line 85).
    const scalafmtDiff = `
--- workdir/Example.scala
+++ workdir/Example.scala-formatted
@@ -45,3 +45,3 @@
- process( value )
+ process(value)
--- workdir/Example.scala
+++ workdir/Example.scala-formatted
@@ -85,5 +85,5 @@
- .withHeader("foo", "5")
- .withHeader("bar", "true")
- .withHeader("baz", "other")
+ .withHeader("foo", "5")
+ .withHeader("bar", "true")
+ .withHeader("baz", "other")
`;
    const parsed = ScalafmtError.parseErrors(scalafmtDiff, 'workdir');
    const rendered = parsed.map((err) => err.toString());
    // One GitHub-Actions annotation per hunk, workdir prefix stripped,
    // singular/plural message chosen by hunk size.
    expect(rendered).toEqual([
      '::error file=Example.scala,line=45,col=3::Incorrectly formatted line\n',
      '::error file=Example.scala,line=85,col=5::Incorrectly formatted lines\n',
    ]);
  });
});
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.