text stringlengths 1 1.05M |
|---|
import React from "react";
import { faUserPlus } from "@fortawesome/free-solid-svg-icons";
import { Row , Container, Form , Button} from "react-bootstrap";
import FormCard from "./FormCard";
import Ipsum from "../Ipsum";
// const error = useSelector((state) => state.authReducer.error);
// Gate for the "next" button on the terms page. Intended contract: only allow
// navigation to the next step once the consent checkboxes are ticked.
// NOTE(review): currently an unimplemented stub — it performs no check.
const validCheck = () => {
// Only proceed to the next step once consent has been checked.
};
class TermContent extends React.Component {
render() {
return (
<>
<style>
{
`
.scroll{
overflow : scroll;
}
`
}
</style>
<Container className="text-left">
<Form action="/" method="post">
<Row><h5>사용약관</h5></Row>
<Row>
<div className="scroll">
<Ipsum title={"사용약관"} />
</div>
<div className="text-end"><Form.Check label={'동의'} id={'isAgree'}/></div>
</Row>
<Row><h5>개인정보처리방침</h5></Row>
<Row>
<div className="scroll">
<Ipsum title={"개인정보 처리 방침"} />
</div>
<div className="text-end"><Form.Check label={'동의'} id={'isAgree'}/></div>
</Row>
</Form>
</Container>
<div className="text-center">
<Button type="submit" onClick="validCheck" href="/join">다음</Button>
</div>
</>
);
}
}
const content = <TermContent />
const JoinTerm = () => {
return (
<FormCard icon={faUserPlus} content={content} title={"회원가입"} />
);
}
export default JoinTerm; |
using System;
using System.Configuration;
using System.Data.SqlClient;
/// <summary>
/// Opens a SQL Server connection using the connection string configured in
/// the application settings, reporting success or failure to the console.
/// </summary>
public class DatabaseConnection
{
    /// <summary>
    /// Attempts to open (and immediately dispose) a database connection.
    /// Any failure is caught and reported rather than propagated.
    /// </summary>
    public void EstablishConnection()
    {
        var connString = GetConnectionStringFromConfig();

        try
        {
            using (var conn = new SqlConnection(connString))
            {
                conn.Open();
                Console.WriteLine("Connection to the database is established successfully.");
            }
        }
        catch (Exception ex)
        {
            Console.WriteLine("An error occurred while establishing the database connection: " + ex.Message);
        }
    }

    /// <summary>
    /// Reads the "DBConnectionString" app setting; throws when it is absent
    /// or empty so misconfiguration is surfaced early.
    /// </summary>
    private string GetConnectionStringFromConfig()
    {
        var value = ConfigurationManager.AppSettings["DBConnectionString"];

        if (string.IsNullOrEmpty(value))
        {
            throw new Exception("DBConnectionString is missing or empty in the configuration file.");
        }

        return value;
    }
}
<filename>alicloud/resource_alicloud_eip_association_test.go
package alicloud
import (
"fmt"
"testing"
"time"
"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
"github.com/aliyun/alibaba-cloud-sdk-go/services/vpc"
"github.com/denverdino/aliyungo/slb"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// TestAccAlicloudEIPAssociation is a terraform acceptance test verifying that
// an EIP can be associated with an ECS instance: it applies the fixture
// config, then checks instance, EIP, and association all exist.
func TestAccAlicloudEIPAssociation(t *testing.T) {
	var asso vpc.EipAddress
	var inst ecs.Instance

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_eip_association.foo",
		Providers:     testAccProviders,
		CheckDestroy:  testAccCheckEIPAssociationDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccEIPAssociationConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckInstanceExists(
						"alicloud_instance.instance", &inst),
					testAccCheckEIPExists(
						"alicloud_eip.eip", &asso),
					testAccCheckEIPAssociationExists(
						"alicloud_eip_association.foo", &inst, &asso),
				),
			},
		},
	})
}
// TestAccAlicloudEIPAssociation_slb is the SLB variant of the association
// acceptance test: the EIP is attached to a load balancer instead of an
// ECS instance.
func TestAccAlicloudEIPAssociation_slb(t *testing.T) {
	var asso vpc.EipAddress
	var slb slb.LoadBalancerType

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			testAccPreCheck(t)
		},

		// module name
		IDRefreshName: "alicloud_eip_association.foo",
		Providers:     testAccProviders,
		CheckDestroy:  testAccCheckEIPAssociationDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccEIPAssociationSlb,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckSlbExists(
						"alicloud_slb.vpc", &slb),
					testAccCheckEIPExists(
						"alicloud_eip.eip", &asso),
					testAccCheckEIPAssociationSlbExists(
						"alicloud_eip_association.foo", &slb, &asso),
				),
			},
		},
	})
}
// testAccCheckEIPAssociationExists returns a check asserting that the EIP
// recorded in terraform state is associated with the given ECS instance.
// It retries for up to three minutes because association completes
// asynchronously on the Alicloud side.
func testAccCheckEIPAssociationExists(n string, instance *ecs.Instance, eip *vpc.EipAddress) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No EIP Association ID is set")
		}

		client := testAccProvider.Meta().(*AliyunClient)
		return resource.Retry(3*time.Minute, func() *resource.RetryError {
			d, err := client.DescribeEipAddress(rs.Primary.Attributes["allocation_id"])
			if err != nil {
				return resource.NonRetryableError(err)
			}

			// Still associating: keep retrying until the address is InUse.
			if d.Status != string(InUse) {
				return resource.RetryableError(fmt.Errorf("Eip is in associating - trying again while it associates"))
			} else if d.InstanceId == instance.InstanceId {
				// Associated with the expected instance: record and succeed.
				*eip = d
				return nil
			}

			// InUse but bound to a different instance: hard failure.
			return resource.NonRetryableError(fmt.Errorf("EIP Association not found"))
		})
	}
}
// testAccCheckEIPAssociationSlbExists mirrors testAccCheckEIPAssociationExists
// for the SLB case: the EIP's InstanceId must match the load balancer's ID.
func testAccCheckEIPAssociationSlbExists(n string, slb *slb.LoadBalancerType, eip *vpc.EipAddress) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No EIP Association ID is set")
		}

		client := testAccProvider.Meta().(*AliyunClient)
		return resource.Retry(3*time.Minute, func() *resource.RetryError {
			d, err := client.DescribeEipAddress(rs.Primary.Attributes["allocation_id"])
			if err != nil {
				return resource.NonRetryableError(err)
			}

			// Still associating: keep retrying until the address is InUse.
			if d.Status != string(InUse) {
				return resource.RetryableError(fmt.Errorf("Eip is in associating - trying again while it associates"))
			} else if d.InstanceId == slb.LoadBalancerId {
				*eip = d
				return nil
			}

			return resource.NonRetryableError(fmt.Errorf("EIP Association not found"))
		})
	}
}
// testAccCheckEIPAssociationDestroy verifies teardown: after destroy, every
// EIP referenced by an association in state must either be gone (NotFound)
// or back to the Available status.
func testAccCheckEIPAssociationDestroy(s *terraform.State) error {
	client := testAccProvider.Meta().(*AliyunClient)

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "alicloud_eip_association" {
			continue
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No EIP Association ID is set")
		}

		// Try to find the EIP
		eip, err := client.DescribeEipAddress(rs.Primary.Attributes["allocation_id"])

		// Verify the error is what we want
		if err != nil {
			if NotFoundError(err) {
				// EIP already deleted: that also counts as destroyed.
				continue
			}
			return err
		}

		if eip.Status != string(Available) {
			return fmt.Errorf("Error EIP Association still exist")
		}
	}

	return nil
}
// testAccEIPAssociationConfig is the HCL fixture for the ECS-instance test:
// a VPC/vswitch, a security group, one instance, one EIP, and the
// association under test.
const testAccEIPAssociationConfig = `
data "alicloud_zones" "default" {
	available_disk_category = "cloud_ssd"
}
data "alicloud_instance_types" "default" {
	availability_zone = "${data.alicloud_zones.default.zones.0.id}"
	cpu_core_count = 1
	memory_size = 2
}
data "alicloud_images" "default" {
	name_regex = "^ubuntu_14.*_64"
	most_recent = true
	owners = "system"
}
variable "name" {
	default = "testAccEIPAssociationConfig"
}
resource "alicloud_vpc" "main" {
	name = "${var.name}"
	cidr_block = "10.1.0.0/21"
}
resource "alicloud_vswitch" "main" {
	vpc_id = "${alicloud_vpc.main.id}"
	cidr_block = "10.1.1.0/24"
	availability_zone = "${data.alicloud_zones.default.zones.0.id}"
	depends_on = [
	"alicloud_vpc.main"]
}
resource "alicloud_instance" "instance" {
	# cn-beijing
	vswitch_id = "${alicloud_vswitch.main.id}"
	image_id = "${data.alicloud_images.default.images.0.id}"
	availability_zone = "${data.alicloud_zones.default.zones.0.id}"
	system_disk_category = "cloud_ssd"
	instance_type = "${data.alicloud_instance_types.default.instance_types.0.id}"
	security_groups = ["${alicloud_security_group.group.id}"]
	instance_name = "${var.name}"
	tags {
	Name = "TerraformTest-instance"
	}
}
resource "alicloud_eip" "eip" {
}
resource "alicloud_eip_association" "foo" {
	allocation_id = "${alicloud_eip.eip.id}"
	instance_id = "${alicloud_instance.instance.id}"
}
resource "alicloud_security_group" "group" {
	name = "${var.name}"
	description = "New security group"
	vpc_id = "${alicloud_vpc.main.id}"
}
`

// testAccEIPAssociationSlb is the HCL fixture for the SLB variant: the EIP
// is associated with a load balancer instead of an instance.
const testAccEIPAssociationSlb = `
variable "name" {
	default = "testAccEIPAssociationSlb"
}
data "alicloud_zones" "default" {
	"available_resource_creation"= "VSwitch"
}
resource "alicloud_vpc" "main" {
	name = "${var.name}"
	cidr_block = "10.1.0.0/21"
}
resource "alicloud_vswitch" "main" {
	vpc_id = "${alicloud_vpc.main.id}"
	cidr_block = "10.1.1.0/24"
	availability_zone = "${data.alicloud_zones.default.zones.0.id}"
	name = "${var.name}"
}
resource "alicloud_eip" "eip" {
}
resource "alicloud_eip_association" "foo" {
	allocation_id = "${alicloud_eip.eip.id}"
	instance_id = "${alicloud_slb.vpc.id}"
}
resource "alicloud_slb" "vpc" {
	name = "${var.name}"
	specification = "slb.s2.small"
	vswitch_id = "${alicloud_vswitch.main.id}"
}
`
|
<filename>allegation/views/officer_allegation_sunburst_view.py
from allegation.views.officer_allegation_api_view import (
OfficerAllegationAPIView)
from allegation.serializers import SunburstSerializer
from document.response import JsonResponse
class OfficerAllegationSunburstView(OfficerAllegationAPIView):
    """API endpoint returning officer-allegation data serialized for the
    sunburst chart."""

    def get(self, request):
        # Outcome/finding filters are excluded from the query so the chart
        # receives the full outcome breakdown — presumably the sunburst
        # itself drills into those facets (TODO confirm against frontend).
        officer_allegations = self.get_officer_allegations(
            ignore_filters=['final_outcome', 'final_finding', 'final_finding_text', 'outcome_text'])
        return JsonResponse({
            'sunburst': SunburstSerializer(officer_allegations).data
        })
|
package io.opensphere.stkterrain.model;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.List;
import org.codehaus.jackson.JsonParseException;
import org.codehaus.jackson.map.JsonMappingException;
import org.codehaus.jackson.map.ObjectMapper;
import org.junit.Test;
import io.opensphere.server.util.JsonUtils;
/**
 * Unit test for {@link TileSetMetadata}.
 */
public class TileSetMetadataTest
{
    /**
     * Builds the tilejson fixture with the supplied bounds array, so both
     * tests share a single definition of the payload.
     *
     * @param bounds The JSON text of the bounds array, e.g. "[-180,-90,180,90]".
     * @return The complete tilejson document.
     */
    private static String buildTestJson(String bounds)
    {
        return "{\"tilejson\":\"2.1.0\",\"name\":\"world\",\"description\":null,\"version\":\"1.16389.0\",\"format\":\"quantized-mesh-1.0\","
                + "\"attribution\":\"© Analytical Graphics Inc., © CGIAR-CSI, Produced using Copernicus"
                + " data and information funded by the European Union - EU-DEM layers\","
                + "\"scheme\":\"tms\",\"extensions\":[\"watermask\",\"vertexnormals\",\"octvertexnormals\"],\"tiles\":[\"{z}/{x}/{y}.terrain?v={version}\"],"
                + "\"minzoom\":0,\"maxzoom\":16,\"bounds\":" + bounds + ",\"projection\":\"EPSG:4326\","
                + "\"available\":[[{\"startX\":0,\"startY\":0,\"endX\":1,\"endY\":0}],"
                + "[{\"startX\":0,\"startY\":0,\"endX\":2,\"endY\":0}, {\"startX\":2,\"startY\":1,\"endX\":3,\"endY\":1}]]}";
    }

    /**
     * Asserts every field of the fixture metadata. Shared by the
     * deserialization and the java-serialization round-trip tests.
     *
     * @param tileSet The metadata to verify.
     * @param expectedBounds The four expected bounds (west, south, east, north).
     */
    private static void assertFixture(TileSetMetadata tileSet, float[] expectedBounds)
    {
        assertEquals("2.1.0", tileSet.getTilejson());
        assertEquals("world", tileSet.getName());
        assertNull(tileSet.getDescription());
        assertEquals("1.16389.0", tileSet.getVersion());
        assertEquals("quantized-mesh-1.0", tileSet.getFormat());
        assertEquals("© Analytical Graphics Inc., © CGIAR-CSI, Produced using Copernicus"
                + " data and information funded by the European Union - EU-DEM layers", tileSet.getAttribution());
        assertEquals("tms", tileSet.getScheme());
        assertEquals(3, tileSet.getExtensions().size());
        assertEquals("watermask", tileSet.getExtensions().get(0));
        assertEquals("vertexnormals", tileSet.getExtensions().get(1));
        assertEquals("octvertexnormals", tileSet.getExtensions().get(2));
        assertEquals(1, tileSet.getTiles().size());
        assertEquals("{z}/{x}/{y}.terrain?v={version}", tileSet.getTiles().get(0));
        assertEquals(0, tileSet.getMinzoom());
        assertEquals(16, tileSet.getMaxzoom());
        assertEquals(4, tileSet.getBounds().length);
        for (int i = 0; i < expectedBounds.length; i++)
        {
            assertEquals(expectedBounds[i], tileSet.getBounds()[i], 0f);
        }
        assertEquals("EPSG:4326", tileSet.getProjection());
        assertEquals(2, tileSet.getAvailable().size());
        List<TileRange> ranges = tileSet.getAvailable().get(0);
        assertEquals(1, ranges.size());
        assertRange(ranges.get(0), 0, 0, 1, 0);
        ranges = tileSet.getAvailable().get(1);
        assertEquals(2, ranges.size());
        assertRange(ranges.get(0), 0, 0, 2, 0);
        assertRange(ranges.get(1), 2, 1, 3, 1);
    }

    /**
     * Asserts one availability tile range.
     *
     * @param range The range to verify.
     * @param startX Expected start column.
     * @param startY Expected start row.
     * @param endX Expected end column.
     * @param endY Expected end row.
     */
    private static void assertRange(TileRange range, int startX, int startY, int endX, int endY)
    {
        assertEquals(startX, range.getStartX());
        assertEquals(startY, range.getStartY());
        assertEquals(endX, range.getEndX());
        assertEquals(endY, range.getEndY());
    }

    /**
     * Tests populating the class from json.
     *
     * @throws IOException Bad IO.
     * @throws JsonMappingException Bad mapping.
     * @throws JsonParseException Bad json.
     */
    @Test
    public void testDeserialize() throws JsonParseException, JsonMappingException, IOException
    {
        ObjectMapper mapper = JsonUtils.createMapper();
        TileSetMetadata tileSet = mapper.readValue(buildTestJson("[-179.9,-89.9,179.9,89.9]"), TileSetMetadata.class);
        assertFixture(tileSet, new float[] { -179.9f, -89.9f, 179.9f, 89.9f });
    }

    /**
     * Tests java serializing the class.
     *
     * @throws IOException Bad IO.
     * @throws ClassNotFoundException Bad class.
     */
    @Test
    public void testSerialize() throws IOException, ClassNotFoundException
    {
        ObjectMapper mapper = JsonUtils.createMapper();
        TileSetMetadata tileSet = mapper.readValue(buildTestJson("[-180,-90,180,90]"), TileSetMetadata.class);

        // Round-trip through java serialization and verify nothing is lost.
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        ObjectOutputStream objectOut = new ObjectOutputStream(out);
        objectOut.writeObject(tileSet);
        ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
        ObjectInputStream objectIn = new ObjectInputStream(in);
        tileSet = (TileSetMetadata)objectIn.readObject();

        assertFixture(tileSet, new float[] { -180f, -90f, 180f, 90f });
    }
}
|
/*
* Copyright 2016-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.yms.ydt;
/**
 * Abstraction of an entity which provides interfaces for YDT walk.
 *
 * When YANG management system gets data from application to be returned
 * to protocol for any protocol operation or as a part of notification, YANG
 * management system encodes this data in a YANG data tree and sends the same
 * to protocol.
 * Protocols can use the YANG data tree walker utility to have their
 * callbacks invoked as the YANG data tree is traversed.
 * In this way protocols can encode the data from the abstract YANG data
 * tree into a protocol-specific representation.
 *
 * YDT walker provides entry and exit callbacks for each node in YANG data
 * tree.
 */
public interface YdtWalker {

    /**
     * Walks the YANG data tree rooted at the given node. Protocols implement
     * the YDT listener service and pass the implementation here; the walker
     * invokes its entry/exit callbacks for every node visited.
     *
     * @param ydtListener YDT listener implemented by the protocol
     * @param rootNode    root node of YDT
     */
    void walk(YdtListener ydtListener, YdtContext rootNode);
}
|
#!/usr/bin/env bash

# Regenerates every REST API client from its Swagger/OpenAPI definition into
# $FOLDEROUT (relative to the repository root, i.e. the parent of this
# script's directory), wiping the previous output first.

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
FOLDEROUT=src/api

# Remove previous output where the generators actually write it
# ($DIR/../$FOLDEROUT), not relative to the caller's working directory.
rm -rf "$DIR/../$FOLDEROUT"

# The first invocation builds the codegen tool; the rest reuse it.
"$DIR/generate.sh" -def "$DIR/../definitions/alfresco-auth.yaml" -o "$DIR/../$FOLDEROUT/auth-rest-api/"
"$DIR/generate.sh" -def "$DIR/../definitions/activiti-api.json" -o "$DIR/../$FOLDEROUT/activiti-rest-api/" -skip-build-codegen
"$DIR/generate.sh" -def "$DIR/../definitions/alfresco-core.yaml" -o "$DIR/../$FOLDEROUT/content-rest-api/" -skip-build-codegen
"$DIR/generate.sh" -def "$DIR/../definitions/alfresco-discovery.yaml" -o "$DIR/../$FOLDEROUT/discovery-rest-api/" -skip-build-codegen
"$DIR/generate.sh" -def "$DIR/../definitions/alfresco-search.yaml" -o "$DIR/../$FOLDEROUT/search-rest-api/" -skip-build-codegen
"$DIR/generate.sh" -def "$DIR/../definitions/gs-classification-api.yaml" -o "$DIR/../$FOLDEROUT/gs-classification-rest-api/" -skip-build-codegen
"$DIR/generate.sh" -def "$DIR/../definitions/gs-core-api.yaml" -o "$DIR/../$FOLDEROUT/gs-core-rest-api/" -skip-build-codegen
|
package pantry
import (
"bytes"
"encoding/json"
"fmt"
"strings"
"github.com/BurntSushi/toml"
"gopkg.in/yaml.v2"
)
// UnsupportedConfigError denotes encountering an unsupported
// configuration filetype.
type UnsupportedConfigError string

// Error reports the offending format name, quoted for readability.
func (format UnsupportedConfigError) Error() string {
	return fmt.Sprintf("Unsupported Config Type %q", string(format))
}
// ConfigParseError denotes failing to parse configuration file.
type ConfigParseError struct {
err error
}
// Returns the formatted configuration error.
func (e ConfigParseError) Error() string {
return fmt.Sprintf("Parsing config failed: %s", e.err.Error())
}
// ConfigEncodeError denotes failing to encode configuration
type ConfigEncodeError struct {
err error
}
// Returns the formatted configuration error.
func (e ConfigEncodeError) Error() string {
return fmt.Sprintf("Encoding config failed: %s", e.err.Error())
}
// MarshalFunc is any marshaler.
type MarshalFunc func(v interface{}) ([]byte, error)

// UnmarshalFunc is any unmarshaler.
type UnmarshalFunc func(data []byte, v interface{}) error

// ConfigFormat pairs the marshaler and unmarshaler for one file format.
type ConfigFormat struct {
	Marshal   MarshalFunc
	Unmarshal UnmarshalFunc
}

// ConfigFormats maps a lowercase format name (e.g. "json") to its codec.
type ConfigFormats map[string]*ConfigFormat

// Register format marshaler and unmarshaler under the given name
// (stored case-insensitively).
func (c ConfigFormats) Register(format string, m MarshalFunc, um UnmarshalFunc) {
	c[strings.ToLower(format)] = &ConfigFormat{m, um}
}
// Search looks up the codec registered for format (case-insensitive). If the
// name itself is not registered, it retries with the value returned by ext
// (defined elsewhere in this package — presumably the file extension; TODO
// confirm). Returns UnsupportedConfigError when neither lookup matches.
func (c ConfigFormats) Search(format string) (*ConfigFormat, error) {
	format = strings.ToLower(format)
	f, ok := c[format]
	if !ok {
		// Allow passing a filename/path: retry with its extension.
		if f, ok = c[ext(format)]; !ok {
			return f, UnsupportedConfigError(format)
		}
	}
	return f, nil
}
// Formats contains marshalers and unmarshalers for different file formats
var Formats = ConfigFormats{}

// init registers the built-in codecs: indented JSON, YAML (under both the
// "yaml" and "yml" names), and TOML (whose encoder only writes to a stream,
// hence the buffer adapter).
func init() {
	Formats.Register("json", func(v interface{}) ([]byte, error) {
		// MarshalIndent so written config files stay human-readable.
		return json.MarshalIndent(v, "", " ")
	}, json.Unmarshal)
	Formats.Register("yaml", yaml.Marshal, yaml.Unmarshal)
	Formats.Register("yml", yaml.Marshal, yaml.Unmarshal)
	Formats.Register("toml", func(v interface{}) ([]byte, error) {
		b := bytes.Buffer{}
		err := toml.NewEncoder(&b).Encode(v)
		return b.Bytes(), err
	}, toml.Unmarshal)
}
|
/**
* Orthanc - A Lightweight, RESTful DICOM Store
* Copyright (C) 2012-2016 <NAME>, Medical Physics
* Department, University Hospital of Liege, Belgium
* Copyright (C) 2017-2020 <NAME>., Belgium
*
* This program is free software: you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In addition, as a special exception, the copyright holders of this
* program give permission to link the code of its release with the
* OpenSSL project's "OpenSSL" library (or with modified versions of it
* that use the same license as the "OpenSSL" library), and distribute
* the linked executables. You must obey the GNU General Public License
* in all respects for all of the code used other than "OpenSSL". If you
* modify file(s) with this exception, you may extend this exception to
* your version of the file(s), but you are not obligated to do so. If
* you do not wish to do so, delete this exception statement from your
* version. If you delete this exception statement from all source files
* in the program, then also delete it here.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
**/
#include "../../PrecompiledHeadersServer.h"
#include "DatabaseLookup.h"
#include "../../../../OrthancFramework/Sources/OrthancException.h"
#include "../../Search/DicomTagConstraint.h"
#include "../../ServerToolbox.h"
#include "SetOfResources.h"
namespace Orthanc
{
namespace Compatibility
{
namespace
{
  // Anonymous namespace to avoid clashes between compiler modules

  /**
   * Owning collection of DicomTagConstraint objects; every element added
   * through Add() is heap-allocated and freed by the destructor.
   **/
  class MainTagsConstraints : boost::noncopyable
  {
  private:
    std::vector<DicomTagConstraint*> constraints_;

  public:
    // Frees all owned constraints.
    ~MainTagsConstraints()
    {
      for (size_t i = 0; i < constraints_.size(); i++)
      {
        assert(constraints_[i] != NULL);
        delete constraints_[i];
      }
    }

    // Pre-allocates capacity for n constraints (avoids reallocation).
    void Reserve(size_t n)
    {
      constraints_.reserve(n);
    }

    size_t GetSize() const
    {
      return constraints_.size();
    }

    // Bounds-checked access; throws ParameterOutOfRange on a bad index.
    DicomTagConstraint& GetConstraint(size_t i) const
    {
      if (i >= constraints_.size())
      {
        throw OrthancException(ErrorCode_ParameterOutOfRange);
      }
      else
      {
        assert(constraints_[i] != NULL);
        return *constraints_[i];
      }
    }

    // Takes ownership of a new DicomTagConstraint built from "constraint".
    void Add(const DatabaseConstraint& constraint)
    {
      constraints_.push_back(new DicomTagConstraint(constraint));
    }
  };
}
// Narrows "candidates" to the resources whose identifier tag satisfies the
// given single constraint, by querying the identifier index and intersecting
// the matches. A List constraint is expanded into one wildcard lookup per
// value, with the results unioned (spliced) before the intersection.
static void ApplyIdentifierConstraint(SetOfResources& candidates,
                                      ILookupResources& compatibility,
                                      const DatabaseConstraint& constraint,
                                      ResourceType level)
{
  std::list<int64_t> matches;

  switch (constraint.GetConstraintType())
  {
    case ConstraintType_Equal:
      compatibility.LookupIdentifier(matches, level, constraint.GetTag(),
                                     IdentifierConstraintType_Equal, constraint.GetSingleValue());
      break;

    case ConstraintType_SmallerOrEqual:
      compatibility.LookupIdentifier(matches, level, constraint.GetTag(),
                                     IdentifierConstraintType_SmallerOrEqual, constraint.GetSingleValue());
      break;

    case ConstraintType_GreaterOrEqual:
      compatibility.LookupIdentifier(matches, level, constraint.GetTag(),
                                     IdentifierConstraintType_GreaterOrEqual, constraint.GetSingleValue());
      break;

    case ConstraintType_Wildcard:
      compatibility.LookupIdentifier(matches, level, constraint.GetTag(),
                                     IdentifierConstraintType_Wildcard, constraint.GetSingleValue());
      break;

    case ConstraintType_List:
      for (size_t i = 0; i < constraint.GetValuesCount(); i++)
      {
        std::list<int64_t> tmp;
        compatibility.LookupIdentifier(tmp, level, constraint.GetTag(),
                                       IdentifierConstraintType_Wildcard, constraint.GetValue(i));
        matches.splice(matches.end(), tmp);
      }
      break;

    default:
      throw OrthancException(ErrorCode_InternalError);
  }

  candidates.Intersect(matches);
}
// Narrows "candidates" using a [greater, smaller] range over one identifier
// tag in a single indexed lookup — more efficient than applying the two
// bounds separately. Preconditions (asserted): both constraints target the
// same identifier tag and have the matching comparison types.
static void ApplyIdentifierRange(SetOfResources& candidates,
                                 ILookupResources& compatibility,
                                 const DatabaseConstraint& smaller,
                                 const DatabaseConstraint& greater,
                                 ResourceType level)
{
  assert(smaller.GetConstraintType() == ConstraintType_SmallerOrEqual &&
         greater.GetConstraintType() == ConstraintType_GreaterOrEqual &&
         smaller.GetTag() == greater.GetTag() &&
         ServerToolbox::IsIdentifier(smaller.GetTag(), level));

  std::list<int64_t> matches;

  // Range lookup takes (lower bound, upper bound).
  compatibility.LookupIdentifierRange(matches, level, smaller.GetTag(),
                                      greater.GetSingleValue(), smaller.GetSingleValue());

  candidates.Intersect(matches);
}
// Applies every constraint of "lookup" that targets the given resource
// level to "candidates": identifier-tag constraints go through the indexed
// lookups (with range pairs coalesced), then main-DICOM-tag constraints are
// checked one candidate at a time (unindexed, hence slower).
static void ApplyLevel(SetOfResources& candidates,
                       IDatabaseWrapper& database,
                       ILookupResources& compatibility,
                       const std::vector<DatabaseConstraint>& lookup,
                       ResourceType level)
{
  typedef std::set<const DatabaseConstraint*> SetOfConstraints;
  typedef std::map<DicomTag, SetOfConstraints> Identifiers;

  // (1) Select which constraints apply to this level, and split
  // them between "identifier tags" constraints and "main DICOM
  // tags" constraints

  Identifiers identifiers;
  SetOfConstraints mainTags;

  for (size_t i = 0; i < lookup.size(); i++)
  {
    if (lookup[i].GetLevel() == level)
    {
      if (lookup[i].IsIdentifier())
      {
        identifiers[lookup[i].GetTag()].insert(&lookup[i]);
      }
      else
      {
        mainTags.insert(&lookup[i]);
      }
    }
  }

  // (2) Apply the constraints over the identifiers

  for (Identifiers::const_iterator it = identifiers.begin();
       it != identifiers.end(); ++it)
  {
    // Check whether some range constraint over identifiers is
    // present at this level
    const DatabaseConstraint* smaller = NULL;
    const DatabaseConstraint* greater = NULL;

    for (SetOfConstraints::const_iterator it2 = it->second.begin();
         it2 != it->second.end(); ++it2)
    {
      assert(*it2 != NULL);
      if ((*it2)->GetConstraintType() == ConstraintType_SmallerOrEqual)
      {
        smaller = *it2;
      }

      if ((*it2)->GetConstraintType() == ConstraintType_GreaterOrEqual)
      {
        greater = *it2;
      }
    }

    if (smaller != NULL &&
        greater != NULL)
    {
      // There is a range constraint: Apply it, as it is more efficient
      ApplyIdentifierRange(candidates, compatibility, *smaller, *greater, level);
    }
    else
    {
      // No complete pair of bounds: reset both so that the loop below
      // applies every constraint of this tag individually.
      smaller = NULL;
      greater = NULL;
    }

    for (SetOfConstraints::const_iterator it2 = it->second.begin();
         it2 != it->second.end(); ++it2)
    {
      // Check to avoid applying twice the range constraint
      if (*it2 != smaller &&
          *it2 != greater)
      {
        ApplyIdentifierConstraint(candidates, compatibility, **it2, level);
      }
    }
  }

  // (3) Apply the constraints over the main DICOM tags (no index
  // here, so this is less efficient than filtering over the
  // identifiers)

  if (!mainTags.empty())
  {
    MainTagsConstraints c;
    c.Reserve(mainTags.size());

    for (SetOfConstraints::const_iterator it = mainTags.begin();
         it != mainTags.end(); ++it)
    {
      assert(*it != NULL);
      c.Add(**it);
    }

    // Materialize the current candidates, then re-build the set keeping
    // only those whose main DICOM tags match every constraint.
    std::list<int64_t> source;
    candidates.Flatten(compatibility, source);
    candidates.Clear();

    std::list<int64_t> filtered;

    for (std::list<int64_t>::const_iterator candidate = source.begin();
         candidate != source.end(); ++candidate)
    {
      DicomMap tags;
      database.GetMainDicomTags(tags, *candidate);

      bool match = true;

      for (size_t i = 0; i < c.GetSize(); i++)
      {
        if (!c.GetConstraint(i).IsMatch(tags))
        {
          match = false;
          break;
        }
      }

      if (match)
      {
        filtered.push_back(*candidate);
      }
    }

    candidates.Intersect(filtered);
  }
}
// Descends from "resource" (expected to sit at "level") down to one
// arbitrary instance by repeatedly taking the first child at each level,
// and returns that instance's public ID. Throws ErrorCode_Database if a
// resource on the path has no children.
static std::string GetOneInstance(IDatabaseWrapper& compatibility,
                                  int64_t resource,
                                  ResourceType level)
{
  for (int i = level; i < ResourceType_Instance; i++)
  {
    assert(compatibility.GetResourceType(resource) == static_cast<ResourceType>(i));

    std::list<int64_t> children;
    compatibility.GetChildrenInternalId(children, resource);

    if (children.empty())
    {
      throw OrthancException(ErrorCode_Database);
    }

    resource = children.front();
  }

  return compatibility.GetPublicId(resource);
}
// Resolves a multi-level DICOM query: applies the constraints level by
// level (descending from the shallowest constrained level to the deepest),
// climbs the results back up to "queryLevel", truncates to "limit", and
// returns the public IDs (plus, optionally, one instance ID per result).
void DatabaseLookup::ApplyLookupResources(std::list<std::string>& resourcesId,
                                          std::list<std::string>* instancesId,
                                          const std::vector<DatabaseConstraint>& lookup,
                                          ResourceType queryLevel,
                                          size_t limit)
{
  // This is a re-implementation of
  // "../../../Resources/Graveyard/DatabaseOptimizations/LookupResource.cpp"

  // The level arithmetic below relies on this enum ordering.
  assert(ResourceType_Patient < ResourceType_Study &&
         ResourceType_Study < ResourceType_Series &&
         ResourceType_Series < ResourceType_Instance);

  // Determine the shallowest and deepest levels touched by the constraints.
  ResourceType upperLevel = queryLevel;
  ResourceType lowerLevel = queryLevel;

  for (size_t i = 0; i < lookup.size(); i++)
  {
    ResourceType level = lookup[i].GetLevel();

    if (level < upperLevel)
    {
      upperLevel = level;
    }

    if (level > lowerLevel)
    {
      lowerLevel = level;
    }
  }

  assert(upperLevel <= queryLevel &&
         queryLevel <= lowerLevel);

  // Filter top-down, descending one level after each pass.
  SetOfResources candidates(database_, upperLevel);

  for (int level = upperLevel; level <= lowerLevel; level++)
  {
    ApplyLevel(candidates, database_, compatibility_, lookup, static_cast<ResourceType>(level));

    if (level != lowerLevel)
    {
      candidates.GoDown();
    }
  }

  std::list<int64_t> resources;
  candidates.Flatten(compatibility_, resources);

  // Climb up, up to queryLevel
  for (int level = lowerLevel; level > queryLevel; level--)
  {
    std::list<int64_t> parents;
    for (std::list<int64_t>::const_iterator
           it = resources.begin(); it != resources.end(); ++it)
    {
      int64_t parent;
      if (database_.LookupParent(parent, *it))
      {
        parents.push_back(parent);
      }
    }

    resources.swap(parents);
  }

  // Apply the limit, if given
  if (limit != 0 &&
      resources.size() > limit)
  {
    resources.resize(limit);
  }

  // Get the public ID of all the selected resources
  size_t pos = 0;
  for (std::list<int64_t>::const_iterator
         it = resources.begin(); it != resources.end(); ++it, pos++)
  {
    assert(database_.GetResourceType(*it) == queryLevel);

    const std::string resource = database_.GetPublicId(*it);
    resourcesId.push_back(resource);

    if (instancesId != NULL)
    {
      if (queryLevel == ResourceType_Instance)
      {
        // The resource is itself the instance
        instancesId->push_back(resource);
      }
      else
      {
        // Collect one child instance for each of the selected resources
        instancesId->push_back(GetOneInstance(database_, *it, queryLevel));
      }
    }
  }
}
}
}
|
<filename>index.js
#!/usr/bin/env node
import readline from 'readline'
import { checkModuleIsExist } from './utils/checkModuleIsExist.js'
import { detectModules } from './utils/detectModules.js'
import { link } from './utils/link.js'
// Interactive prompt: asks for a local modules directory and links every
// module found there, unless one of them already exists.
const cli = readline.createInterface({
  input: process.stdin,
  output: process.stdout,
})

cli.question('What is your local modules path ? ', (modulesPath) => {
  if (!modulesPath) {
    console.log('modulesPath is required.')
    return cli.close()
  }

  const modules = detectModules(modulesPath)
  if (!modules || modules.length === 0) {
    // Typo fix: "There ara no" → "There are no".
    console.log(`There are no modules in "${modulesPath}"`)
    return cli.close()
  }

  // for...of instead of forEach so an existing module truly aborts the run:
  // the previous `return cli.close()` only left the forEach callback, and
  // the remaining modules were still linked after the interface was closed.
  for (const module of modules) {
    if (checkModuleIsExist(module)) {
      console.log(`${module} already exists.`)
      return cli.close()
    }
    link(modulesPath, module)
  }

  cli.close()
})

cli.on('close', () => {
  process.exit(0)
})
|
#==============================================================================
# ■ HSP ヘルプファイルを作る
#------------------------------------------------------------------------------
# 注意…
# ・[対応済]"@param[in] x, y, z" は正常に検出できないので "x,y,z" のようにスペースを消す。
# ・上記のような "x,y,z" にデフォルト引数の指定はできない
# ・関数宣言は複数行にしない
#==============================================================================
require './Config.rb'

# Maps a class-name prefix (derived from the function name, e.g. "LAudio"
# from "LAudio_Play") to the help group the entry is filed under.
# The group labels themselves are runtime strings written into the output
# and are therefore kept in the original Japanese.
$class_group = {
  "LConfig" => "初期設定",
  "LAudio" => "音声機能",
  "LSound" => "音声機能",
  "L3DSoundListener" => "音声機能"
}

# Path to the help-file header template.
$hs_header_filepath = "HSPHelpTemplate/header.txt"
# Path to the per-function help template.
$hs_func_filepath = "HSPHelpTemplate/function.txt"

# [search, replace] pairs applied to the final output as a whole.
$global_replace_strings = [
  ["のポインタ", ""]
]
#==============================================================================
# ▲ 設定ここまで
#==============================================================================
require './Analyzer.rb'

# Load the header template; it becomes the start of the generated help file.
file = open($hs_header_filepath)
$output = file.read
# Load the per-function template.
file = open($hs_func_filepath)
$func_template = file.read

$analyzer = Analyzer.new

# Analyze every target file (paths come from Config.rb).
for filename in $target_files
  p filename
  $analyzer.analyze($lnote_root + filename, "hsp")
end

# Render each collected function document through the template.
for doc in $analyzer.func_doc_list
  # Pre-processing: per-language options may disable or rename the entry.
  continue = false
  func_name = doc.name
  for opt in doc.option_args
    case opt.name
    when "disable"
      continue = opt.value
    when "name"
      func_name = opt.value
    end
  end
  if continue
    # NOTE(review): `step` is never read in this loop; this assignment looks
    # like dead leftover from the older state-machine version of the script
    # kept in the =begin/=end block below.
    step = 0
    next
  end

  func_tmpl = $func_template.dup

  # Basic placeholders.
  func_tmpl.sub!("_NAME_", func_name)
  func_tmpl.sub!("_BRIEF_", doc.summary)
  func_tmpl.sub!("_INST_", doc.detail)
  func_tmpl.sub!("_HREF_", "")

  # Group: the prefix up to the last '_' (e.g. "LAudio_" -> "LAudio")
  # selects the group label from $class_group.
  func_name =~ /^.+_/
  class_name = $&.delete("_")
  group = $class_group[class_name]
  if group != nil
    func_tmpl.sub!("_GROUP_", group)
  else
    func_tmpl.sub!("_GROUP_", "")
  end

  # Comma-separated argument list.
  arg_list = ""
  for i in 0...doc.param_pars.size
    arg_list += ", " if i != 0
    arg_list += doc.param_pars[i].caption
  end
  func_tmpl.sub!("_PRM_LIST_", arg_list)

  # Argument details, column-aligned.
  arg_detail = ""
  # Column width needed for the [in]/[out] markers.
  io_space_count = 0
  for param in doc.param_pars
    io_space_count = param.io_type.length if param.io_type.length > io_space_count
  end
  io_space_count += 1 # trailing space

  # Column width needed for the name plus its default value.
  name_space_count = 0
  for param in doc.param_pars
    t = param.caption.length + 1 # +1 for the separating space
    default_val = doc.find_default_arg(param.caption) # default argument
    if default_val != nil
      t += default_val.length + 2 # +2 for the parentheses
    end
    name_space_count = t if t > name_space_count
  end

  # Build one aligned line per parameter: io marker, name(default), detail.
  for param in doc.param_pars
    # io type, padded to the io column width
    t = param.io_type
    snum = io_space_count - t.length
    t += " " * snum if snum > 0
    # name, with "(default)" appended when a default argument exists
    default_val = doc.find_default_arg(param.caption) # default argument
    c = param.caption
    c += "(" + default_val + ")" if default_val != nil
    t += c
    snum = name_space_count - c.length
    t += " " * snum if snum > 0
    # detail text
    t += param.detail + "\n"
    # @arg entries, indented to line up under the detail column
    for a in param.arg_pars
      t += (" " * (io_space_count+name_space_count)) + " "
      t += a + "\n"
    end
    arg_detail += t
  end

  # Return value ("stat : ..."); continuation lines are indented to match.
  if doc.return_detail != nil
    arg_detail += "\nstat : "
    lines = doc.return_detail.split("\n")
    for i in 0...lines.size
      if i >= 1
        arg_detail += "\n " # padding for the "stat : " prefix
      end
      arg_detail += lines[i]
    end
  end
  func_tmpl.sub!("_PRM_DETAIL_", arg_detail)

  $output += "\n" + func_tmpl + "\n"
  # NOTE(review): dead assignment — see the note at the top of this loop.
  step = 0
end

# Apply the global string replacements to the whole output.
for pair in $global_replace_strings
  $output.gsub!(pair[0], pair[1])
end

# Write the result under the directory given as ARGV[0].
open(ARGV[0] + "lnote.hs", "w") {|f| f.write $output}
p "[finished.]"
=begin
#--------------------------------------------------------------------------
# 関数コメントの先頭行かをチェックする " ///**"
#--------------------------------------------------------------------------
def is_doc_begin_line(one_line_str)
# \s+ :スペースofタブを1回以上
# \/{3} :/ を3回
# \*{2} :* を2回
if one_line_str =~ /\s+\/{3}\*{2}/
return true
end
return false
end
#--------------------------------------------------------------------------
# 関数コメントの終端行かをチェックする " //*/"
#--------------------------------------------------------------------------
def is_doc_end_line(one_line_str)
if one_line_str =~ /\s+\/{2}\*\//
return true
end
return false
end
#--------------------------------------------------------------------------
# 関数行かをチェックする (is_doc_end_line で終端検出済みであること)
#--------------------------------------------------------------------------
def is_decl_func_line(one_line_str)
if one_line_str =~ /\s+.*\(.*\);/
return true
end
return false
end
#--------------------------------------------------------------------------
# step 2 言語別オプション解析
# ["disable"] false の場合、disable
#--------------------------------------------------------------------------
def step_2_analyze_lang_option(one_line_str)
r = []
if one_line_str =~ /\[.+\]/
lang_texts = $&.delete("[]").split(",")
for text in lang_texts
args = text.delete("{}").split(" ")
if args[0] == "hsp"
for a in args
pair = a.split("=")
case pair[0]
when "disable"
r.push(["disable", true])
when "name"
r.push(["name", pair[1]])
end
end
end
end
end
if r.size == 0
r.push(["disable", false])
end
return r
end
#--------------------------------------------------------------------------
#
#--------------------------------------------------------------------------
class ParamPar
attr_accessor :io_type # 入出力の種類 "[in]" "[out]" "[in,out]"
attr_accessor :caption # 引数名
attr_accessor :detail # 説明 (':' を含む)
attr_accessor :arg_pars # @arg 文字列配列 "NAME : DETAIL"
def initialize
@io_type = ""
@caption = ""
@detail = ""
@arg_pars = []
end
end
#--------------------------------------------------------------------------
#
#--------------------------------------------------------------------------
class FuncDecl
attr_accessor :name # 関数名
attr_accessor :args # 引数リスト
def initialize
@name = ""
@args = []
end
end
class ArgDecl
attr_accessor :name # 引数名
attr_accessor :default # デフォルト引数
def initialize
@name = ""
@default = ""
end
end
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
$doc_brief = ""
$param_pars = []
$last_param_par = nil
$return_detail = nil
$func_decl = nil
$output = ""
$func_template = ""
$default_arg_hash = {}
$option_args = []
file = open($hs_header_filepath)
$output = file.read
file = open($hs_func_filepath)
$func_template = file.read
for filename in $target_files
p filename
# 0 : doc 開始検索中
# 1 : doc 終端検索中
# 2 : 関数宣言検索中
# 3 : 出力
step = 0
# 1 行ずつ読んでいく
file = open($lnote_root + filename)
while text = file.gets do
case step
################# doc 開始検索中
when 0
if is_doc_begin_line(text)
$param_pars.clear
$default_arg_hash = {}
$option_args = []
$return_detail = nil
step = 1
end
################# doc 終端検索中 (doc 解析中)
when 1
# doc 終端
if is_doc_end_line(text)
step = 2
end
# @brief
if text =~ /\@brief\s+/
$doc_brief = $' # 最後にマッチした箇所の後
end
# @param
if text =~ /\@param/
$last_param_par = ParamPar.new
$' =~ /\[.+\]/
$last_param_par.io_type = $& # $& = マッチした箇所
# "x, y, z" の暫定対策。':' の前までを引数名とみなす
if $'.include?(":")
$' =~ /:.+/
$last_param_par.caption = $`.strip
$last_param_par.detail = $&
else
$' =~ /\s+\S+\s+/
$last_param_par.caption = $&
$last_param_par.caption.strip! # 前後のスペースを削除
$last_param_par.detail = $'
end
$param_pars.push($last_param_par)
end
# @arg
if text =~ /\@arg\s+/
$last_param_par.arg_pars.push($')
end
# @return
if text =~ /\@return\s+/
$return_detail = $'
end
################# 言語別オプション解析
when 2
$option_args = step_2_analyze_lang_option(text)
step = 3
################# 関数宣言検索中
when 3
if is_decl_func_line(text)
$func_decl = FuncDecl.new
# () とその前とに分ける
text =~ /\(.*\)/
arg_str = $&
# 関数名
$` =~ /\S+$/
$func_decl.name = $&
# 引数 - 括弧を削除し、分割
arg_str.delete!("()")
args = arg_str.split(",")
for a in args
arg_decl = ArgDecl.new
a.strip!
pair = a.split("=")
arg_decl.name = pair[0]
if pair.size != 1 # デフォルト引数がある
arg_decl.default = pair[1]
arg_decl.default.strip!
end
# 型部分を取り除く
arg_decl.name.strip!
arg_decl.name =~ /\S+$/
arg_decl.name = $&
# デフォルト引数登録
if !arg_decl.default.empty?
$default_arg_hash[arg_decl.name] = arg_decl.default
end
$func_decl.args.push(arg_decl)
end
step = 4
end
################# 出力
when 4
# 前処理
continue = false
for pair in $option_args
case pair[0]
when "disable"
continue = pair[1]
when "name"
$func_decl.name = pair[1]
end
end
if continue
step = 0
next
end
func_tmpl = $func_template.dup
# 基本
func_tmpl.sub!("_NAME_", $func_decl.name)
func_tmpl.sub!("_BRIEF_", $doc_brief)
func_tmpl.sub!("_INST_", "")
func_tmpl.sub!("_HREF_", "")
# グループ
$func_decl.name =~ /^.+_/
class_name = $&.delete("_")
group = $class_group[class_name]
if group != nil
func_tmpl.sub!("_GROUP_", group)
else
func_tmpl.sub!("_GROUP_", "")
end
# 引数リスト
arg_list = ""
for i in 0...$param_pars.size
arg_list += ", " if i != 0
arg_list += $param_pars[i].caption
end
func_tmpl.sub!("_PRM_LIST_", arg_list)
# 引数詳細
arg_detail = ""
# [in] 等に必要なスペース領域チェック
io_space_count = 0
for param in $param_pars
io_space_count = param.io_type.length if param.io_type.length > io_space_count
end
io_space_count += 1 # スペース
# 名前とデフォルト値のスペース領域チェック
name_space_count = 0
for param in $param_pars
t = param.caption.length + 1 # +1 はスペースの分
default_val = $default_arg_hash[param.caption] # デフォルト引数
if default_val != nil
t += default_val.length + 2 # +2 は ()
end
name_space_count = t if t > name_space_count
end
# 引数詳細を作る
for param in $param_pars
# io type
t = param.io_type
snum = io_space_count - t.length
t += " " * snum if snum > 0
# name
default_val = $default_arg_hash[param.caption] # デフォルト引数
c = param.caption
c += "(" + default_val + ")" if default_val != nil
t += c
snum = name_space_count - c.length
t += " " * snum if snum > 0
# detail
t += param.detail
for a in param.arg_pars
t += (" " * (io_space_count+name_space_count)) + " "
t += a
end
arg_detail += t + "\n"
end
# return
if $return_detail != nil
arg_detail += "\nstat : " + $return_detail
end
func_tmpl.sub!("_PRM_DETAIL_", arg_detail)
$output += "\n" + func_tmpl + "\n"
step = 0
end
end
file.close
end
for pair in $global_replace_strings
$output.gsub!(pair[0], pair[1])
end
open("lnote.hs", "w") {|f| f.write $output}
=end
|
require "net/telnet"
require "readline"
require "tunnel/tunnel"
# Interactive client for a Cloud Foundry app's remote rails console.
# Opens a telnet session to the app's first instance (via the CFTunnel
# superclass) and drives a Readline-based REPL against it.
#
# NOTE(review): get_credentials calls YAML.load, but this file does not
# `require "yaml"` itself — presumably loaded transitively; confirm.
class CFConsole < CFTunnel
  def initialize(client, app, port = 10000)
    @client = client
    @app = app
    @port = port
  end

  # Returns { "hostname" => ip, "port" => port } for the console of the
  # app's first instance. Raises when the app has no running instances or
  # the instance exposes no console.
  def get_connection_info(auth)
    instances = @app.instances
    if instances.empty?
      raise "App has no running instances; try starting it."
    end

    unless console = instances[0].console
      raise "App does not have console access; try restarting it."
    end

    { "hostname" => console[:ip],
      "port" => console[:port]
    }
  end

  # Reads console credentials from the well-known file inside the app.
  def get_credentials
    YAML.load(@app.file("app", "cf-rails-console", ".consoleaccess"))
  end

  # Logs in, sets up tab completion, then enters the REPL loop.
  def start_console
    prompt = login

    init_readline

    run_console prompt
  end

  # Attempts to log into the remote console, retrying up to 5 times.
  # Returns the console prompt string on success; raises on failure.
  def login(auth = get_credentials)
    if !auth["username"] || !auth["password"]
      raise "Unable to verify console credentials."
    end

    @telnet = telnet_client

    prompt = nil
    err_msg = "Login attempt timed out."

    5.times do
      begin
        results = @telnet.login(
          "Name" => auth["username"],
          "Password" => auth["password"])

        # Strip the echoed login banner, then inspect the last line to
        # decide whether we got a shell prompt or a failure message.
        lines = results.sub("Login: Password: ", "").split("\n")
        last_line = lines.pop

        if last_line =~ /[$%#>] \z/n
          prompt = last_line
        elsif last_line =~ /Login failed/
          err_msg = last_line
        end

        break
      rescue TimeoutError
        sleep 1
      rescue EOFError
        # This may happen if we login right after app starts
        close_console
        sleep 5
        @telnet = telnet_client
      end
    end

    unless prompt
      close_console
      raise err_msg
    end

    prompt
  end

  private

  # Configures Readline for remote tab completion.
  def init_readline
    if Readline.respond_to?("basic_word_break_characters=")
      Readline.basic_word_break_characters= " \t\n`><=;|&{("
    end

    Readline.completion_append_character = nil

    # Assumes that sending a String ending with tab will return a non-empty
    # String of comma-separated completion options, terminated by a new line
    # For example, "app.\t" might result in "to_s,nil?,etc\n"
    Readline.completion_proc = proc do |s|
      console_tab_completion_data(s)
    end
  end

  # Main REPL loop: read a line, send it, print results, repeat until EOF
  # or quit/exit. Hooks INT/TERM so the remote console is exited cleanly.
  def run_console(prompt)
    prev = trap("INT") { |x| exit_console; prev.call(x); exit }
    prev = trap("TERM") { |x| exit_console; prev.call(x); exit }

    loop do
      cmd = readline_with_history(prompt)
      if cmd == nil
        exit_console
        break
      end
      prompt = send_console_command_display_results(cmd, prompt)
    end
  end

  # Reads one line, appending non-blank, non-duplicate input to history.
  # Returns nil on EOF or on 'quit'/'exit' (treated as end of session).
  def readline_with_history(prompt)
    line = Readline::readline(prompt)
    return if line == nil || line == 'quit' || line == 'exit'

    if line !~ /^\s*$/ && Readline::HISTORY.to_a.last != line
      Readline::HISTORY.push(line)
    end

    line
  end

  # Sends cmd to the remote console, prints its output (minus the echoed
  # command), and returns the new prompt.
  def send_console_command_display_results(cmd, prompt)
    begin
      lines = send_console_command cmd

      # Assumes the last line is a prompt
      prompt = lines.pop

      lines.each do |line|
        puts line if line != cmd
      end
    rescue TimeoutError
      puts "Timed out sending command to server."
    rescue EOFError
      raise "The console connection has been terminated. Perhaps the app was stopped or deleted?"
    end

    prompt
  end

  # Sends a raw command and returns its output split into lines.
  def send_console_command(cmd)
    results = @telnet.cmd(cmd)
    results.split("\n")
  end

  # Sends 'exit' to the remote console, then closes the telnet session.
  def exit_console
    @telnet.cmd("String" => "exit", "Timeout" => 1)
  rescue TimeoutError
    # TimeoutError expected, as exit doesn't return anything
  ensure
    close_console
  end

  def close_console
    @telnet.close
  end

  # Asks the remote console for tab-completion options for cmd.
  def console_tab_completion_data(cmd)
    begin
      results = @telnet.
        cmd("String" => cmd + "\t", "Match" => /\S*\n$/, "Timeout" => 10)
      results.chomp.split(",")
    rescue TimeoutError
      [] #Just return empty results if timeout occurred on tab completion
    end
  end

  # Builds a fresh telnet client pointed at the tunnelled console port.
  def telnet_client
    Net::Telnet.new(
      "Port" => @port,
      "Prompt" => /[$%#>] \z|Login failed/n,
      "Timeout" => 30,
      "FailEOF" => true)
  end
end
|
#!/bin/bash -eu
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
set -e

# Guard: relative paths below assume the project root as CWD.
if [ ! -f scripts/build_from_source.sh ];then
    echo "This script must execute in project root"
    exit 1
fi

echo "Build Weex SDK From Source...."
npm install
npm run build:jsfm
npm run build:polyfill
npm run build:rax
echo "Weex JS Framework build completed."
sleep 2

# -p: do not abort when the directory is left over from a previous run
# (a plain `mkdir` fails under `set -e` on reruns and kills the build).
mkdir -p pre-build

# Stage the built JS framework for both the pre-build dir and the
# Android SDK assets, under both historical file names.
cp dist/weex-js-framework.min.js pre-build/native-bundle-main.js
cp dist/weex-js-framework.min.js android_sdk/assets/native-bundle-main.js
cp dist/weex-js-framework.min.js pre-build/weex-main-jsfm.js
cp dist/weex-js-framework.min.js android_sdk/assets/weex-main-jsfm.js
cp dist/weex-polyfill.min.js pre-build/weex-polyfill.js
cp dist/weex-rax.min.js pre-build/weex-rax-api.js
cp dist/weex-rax.min.js android_sdk/assets/weex-rax-api.js

# Build the Android SDK (ASF release profile), then the iOS SDK.
gradle wrapper --gradle-version 4.4
echo 'include ":android_sdk"'>settings.gradle
./gradlew :android_sdk:assemble -PasfRelease
xcodebuild -project ios_sdk/WeexSDK.xcodeproj -target WeexSDK_MTL
echo "Weex SDK Build completed."
|
#pragma once

// NOTE(review): backslashes in include paths only work on Windows
// toolchains; '/' would be portable. Left unchanged here.
#include "PhysicsObjects\SphereClass.h"
#include "PhysicsObjects\Plane.h"
#include "PhysicsObjects\BoxClass.h"
#include "PhysicsObjects\SpringJoint.h"
#include <vector>

// A simple do-it-yourself physics scene: owns a list of actors, steps the
// simulation, and dispatches pairwise collision checks by actor shape.
class DIYPhysicScene
{
public:
    DIYPhysicScene();
    ~DIYPhysicScene();

    // Actor bookkeeping; the scene stores raw pointers — ownership
    // semantics are not visible from this header (confirm in the .cpp).
    void AddActor(PhysicsObject* actorToAdd);
    void RemoveActor(PhysicsObject* actorToRemove);

    // Advances the simulation by deltaTime seconds.
    void Update(float deltaTime);
    void DebugScene();
    void AddGizmos();

    // Analytic position of a projectile at `time` under `gravity`.
    vec3 ProjectileMotionPrediction(vec3 initialPos, vec3 initialVelocity, float time);

    // Runs the shape-vs-shape checks below over all actor pairs.
    void CheckForCollision();

    // CollsionChecks — one entry per ordered shape pair; all share the
    // (PhysicsObject*, PhysicsObject*) signature so they can be dispatched
    // from a table indexed by shape type.
    static bool Plane2Plane(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Plane2Sphere(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Plane2Box(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Plane2Capsule(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Plane2Joint(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Sphere2Plane(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Sphere2Sphere(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Sphere2Box(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Sphere2Capsule(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Sphere2Joint(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Box2Plane(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Box2Sphere(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Box2Box(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Box2Capsule(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Box2Joint(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Capsule2Plane(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Capsule2Sphere(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Capsule2Box(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Capsule2Capsule(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Capsule2Joint(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Joint2Plane(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Joint2Sphere(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Joint2Box(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Joint2Capsule(PhysicsObject* obj1, PhysicsObject* obj2);
    static bool Joint2Joint(PhysicsObject* obj1, PhysicsObject* obj2);

    // Collision response: resolve overlap along `normal` for two bodies.
    static void Response(RigidBody* obj1, RigidBody* obj2, float overlap, vec3 normal);
    static void Seperate(RigidBody* obj1, RigidBody* obj2, float overlap, vec3 normal);

    vec3 gravity;
    float timeStep = 0;
    std::vector<PhysicsObject*> actors;
};
|
#!/usr/bin/env bash
#
# Usage: ./provision-rackspace-cluster.sh <key pair name> [flavor] [environment]
#
set -e

THIS_DIR=$(cd "$(dirname "$0")"; pwd) # absolute path
CONTRIB_DIR=$(dirname "$THIS_DIR")

source "$CONTRIB_DIR/utils.sh"

# Key pair name is required.
if [ -z "$1" ]; then
  echo_red 'Usage: provision-rackspace-cluster.sh <key pair name> [flavor] [environment]'
  exit 1
fi

# Optional flavor (2nd arg) and supernova environment (3rd arg).
FLAVOR=${2:-performance1-2}
ENV=${3:-production}

if ! which supernova > /dev/null; then
  echo_red 'Please install the dependencies listed in the README and ensure they are in your $PATH.'
  exit 1
fi

# Create the private network for the cluster if it doesn't exist yet.
if ! supernova "$ENV" network-list | grep -q deis &>/dev/null; then
  echo_yellow "Creating deis private network..."
  supernova "$ENV" network-create deis 10.21.12.0/24
fi

# $(...) instead of backticks; quoted to survive odd output.
NETWORK_ID=$(supernova "$ENV" network-list | grep deis | awk -F"|" '{print $2}' | sed 's/^ *//g')

# Cluster size, overridable via environment.
DEIS_NUM_INSTANCES=${DEIS_NUM_INSTANCES:-3}

# check that the CoreOS user-data file is valid
"$CONTRIB_DIR/util/check-user-data.sh"

i=1 ; while [[ $i -le $DEIS_NUM_INSTANCES ]] ; do \
  echo_yellow "Provisioning deis-$i..."
  # TODO: update to CoreOS 494.4.0 when it's in the stable channel at Rackspace
  # This image is CoreOS 494.0.0 in their alpha channel
  supernova "$ENV" boot --image 1c423602-ea76-4263-b56b-0a2fa3e8c663 --flavor "$FLAVOR" --key-name "$1" --user-data "$CONTRIB_DIR/coreos/user-data" --no-service-net --nic net-id="$NETWORK_ID" --config-drive true deis-$i ; \
  ((i = i + 1)) ; \
done

echo_green "Your Deis cluster has successfully deployed to Rackspace."
echo_green "Please continue to follow the instructions in the README."
|
#!/bin/bash
# Efficient Neural Architecture Search via Parameter Sharing, ICML 2018
# bash ./scripts-search/scripts/algos/ENAS.sh cifar10 -1
echo "script name: $0"
echo "$# arguments"

# Require exactly two arguments: dataset name and random seed.
if [ "$#" -ne 2 ] ;then
  echo "Input illegal number of parameters " $#
  echo "Need 2 parameters for dataset and seed"
  exit 1
fi

# TORCH_HOME points at the datasets / benchmark files.
# (Fixed typo in the user-facing message: "envoriment" -> "environment".)
if [ "$TORCH_HOME" = "" ]; then
  echo "Must set TORCH_HOME environment variable for data dir saving"
  exit 1
else
  echo "TORCH_HOME : $TORCH_HOME"
fi

dataset=$1
seed=$2
channel=16
num_cells=5
max_nodes=4
space=nas-bench-102

# CIFAR datasets share one data dir; ImageNet16 lives in a subdirectory.
if [ "$dataset" == "cifar10" ] || [ "$dataset" == "cifar100" ]; then
  data_path="$TORCH_HOME/cifar.python"
else
  data_path="$TORCH_HOME/cifar.python/ImageNet16"
fi

save_dir=./output/search-cell-${space}/ENAS-${dataset}

OMP_NUM_THREADS=4 python ./exps/algos/ENAS.py \
  --save_dir ${save_dir} --max_nodes ${max_nodes} --channel ${channel} --num_cells ${num_cells} \
  --dataset ${dataset} --data_path ${data_path} \
  --search_space_name ${space} \
  --arch_nas_dataset ${TORCH_HOME}/NAS-Bench-102-v1_0-e61699.pth \
  --config_path ./configs/nas-benchmark/algos/ENAS.config \
  --controller_entropy_weight 0.0001 \
  --controller_bl_dec 0.99 \
  --controller_train_steps 50 \
  --controller_num_aggregate 20 \
  --controller_num_samples 100 \
  --workers 4 --print_freq 200 --rand_seed ${seed}
|
<gh_stars>0
package users
/*
DTO ==================>>>>> IS DATA TRANSFER OBJECT
*/
import (
"strings"
"github.com/egnimos/book_store_users_api/utils/errors"
)
const (
	// StatusActive marks a user account as active.
	StatusActive = "active"
)

// User is the data-transfer object for user API payloads; JSON tags define
// the wire format.
type User struct {
	ID          int64  `json:"id"`
	FirstName   string `json:"first_name"`
	LastName    string `json:"last_name"`
	Email       string `json:"email"`
	DateCreated string `json:"date_created"`
	Status      string `json:"status"`
	// NOTE(review): Password is serialized in JSON responses as written;
	// confirm that callers strip it before returning users to clients.
	Password string `json:"password"`
}

type Users []User

// Validate normalizes the user's fields in place (trims whitespace,
// lowercases the email) and rejects empty email or password.
// It returns a bad-request error on failure, nil otherwise.
func (user *User) Validate() *errors.RestErr {
	// Trim the first name (no further validation is performed on it).
	user.FirstName = strings.TrimSpace(user.FirstName)
	// Trim the last name (no further validation is performed on it).
	user.LastName = strings.TrimSpace(user.LastName)
	// Normalize the email; only emptiness is checked — the address format
	// itself is not validated here.
	user.Email = strings.TrimSpace(strings.ToLower(user.Email))
	if user.Email == "" {
		return errors.NewBadRequestError("invalid email address")
	}
	// The password must be non-empty after trimming.
	user.Password = strings.TrimSpace(user.Password)
	if user.Password == "" {
		return errors.NewBadRequestError("invalid password")
	}
	return nil
}
|
<reponame>Marmelatze/docker-controller
package de.schub.docker_controller.Metadata;
import com.spotify.docker.client.DefaultDockerClient;
import com.spotify.docker.client.DockerClient;
import java.net.URI;
/**
 * Creates a {@link DockerClient}.
 * This class exists mainly for decoupling and testing.
 */
public class DockerClientFactory
{
    /**
     * Builds a default Docker client bound to the given endpoint.
     *
     * @param endpoint URI of the Docker daemon (e.g. a tcp or unix endpoint)
     * @return a ready-to-use {@link DockerClient}
     */
    public DockerClient get(URI endpoint)
    {
        return DefaultDockerClient
            .builder()
            .uri(endpoint)
            .build()
            ;
    }
}
|
<filename>webapp/src/app/scripts/codemirror.js<gh_stars>0
/* Any changes to this file require /server/apps/utils/codemirror/widget.py to be
* updated with the Gulp generated codemirror-XXXXXXXXXXX.js file name
*/
// Registers a CodeMirror "simple mode" named "sgf" for this project's DSL
// and attaches CodeMirror editors to json-editor / sgf-editor textareas.
(function(){
  CodeMirror.defineSimpleMode("sgf", {
    // The start state contains the rules that are initially used
    start: [
      // The regex matches the token, the token property contains the type
      {regex: /"(?:[^\\]|\\.)*?(?:"|$)/, token: "string"},
      // You can match multiple tokens at once. Note that the captured
      // groups must span the whole string in this case
      {regex: /(function)(\s+)([a-z$][\w$]*)/,
       token: ["keyword", null, "variable-2"]},
      // Rules are matched in the order in which they appear, so there is
      // no ambiguity between this one and the one above
      {regex: /(?:on|when|every|config|manual|realtime|copy|set)\b/,
       token: "keyword"},
      {regex: /connect|connected|streamer|unbuffered|input|output|uint32_t/, token: "atom"},
      {regex: /0x[a-f\d]+|[-+]?(?:\.\d+|\d+\.?\d*)(?:e[-+]?\d+)?/i,
       token: "number"},
      {regex: /#.*/, token: "comment"},
      {regex: /[-+\/*=<>!]+/, token: "operator"},
      // indent and dedent properties guide autoindentation
      {regex: /\{\(/, indent: true},
      {regex: /\}\)/, dedent: true},
      // You can embed other modes with the mode property. This rule
      // causes all code between << and >> to be highlighted with the XML
      // mode.
      {regex: /<</, token: "meta", mode: {spec: "xml", end: />>/}}
    ],
    // The meta property contains global information about the mode. It
    // can contain properties like lineComment, which are supported by
    // all modes, and also directives like dontIndentStates, which are
    // specific to simple modes.
    meta: {
      dontIndentStates: ["comment"],
      lineComment: "//"
    }
  });

  var $ = jQuery;
  // Wire up editors once the DOM is ready.
  $(document).ready(function(){
    // JSON editors use the stock javascript mode.
    $('textarea.json-editor').each(function(idx, el){
      CodeMirror.fromTextArea(el, {
        lineNumbers: true,
        lineWrapping: true,
        mode: 'javascript'
      });
    });
    // SGF editors use the custom mode defined above.
    $('textarea.sgf-editor').each(function(idx, el){
      CodeMirror.fromTextArea(el, {
        lineNumbers: true,
        lineWrapping: true,
        mode: 'sgf'
      });
    });
  });
})();
|
<filename>util/yamlutil/yaml.go
package yamlutil
import (
"bytes"
"fmt"
"os"
"reflect"
"strconv"
"strings"
"github.com/abiosoft/colima/config"
"github.com/abiosoft/colima/embedded"
"gopkg.in/yaml.v3"
)
// WriteYAML encodes struct to file as YAML.
func WriteYAML(value interface{}, file string) error {
b, err := yaml.Marshal(value)
if err != nil {
return fmt.Errorf("error encoding YAML: %w", err)
}
return os.WriteFile(file, b, 0644)
}
// Save saves the config.
func Save(c config.Config, file string) error {
b, err := encodeYAML(c)
if err != nil {
return err
}
if err := os.WriteFile(file, b, 0644); err != nil {
return fmt.Errorf("error writing yaml file: %w", err)
}
return nil
}
// encodeYAML renders conf as YAML by splicing its values into the embedded
// default config template, so comments and key order from the template are
// preserved in the generated file.
func encodeYAML(conf config.Config) ([]byte, error) {
	var doc yaml.Node
	f, err := embedded.Read("defaults/colima.yaml")
	if err != nil {
		return nil, fmt.Errorf("error reading config file: %w", err)
	}
	if err := yaml.Unmarshal(f, &doc); err != nil {
		return nil, fmt.Errorf("embedded default config is invalid yaml: %w", err)
	}
	if l := len(doc.Content); l != 1 {
		return nil, fmt.Errorf("unexpected error during yaml decode: doc has multiple children of len %d", l)
	}
	root := doc.Content[0]

	// Index every node of the template by its dotted key path.
	nodeVals := map[string]*yaml.Node{}
	if err := traverseNode("", root, nodeVals); err != nil {
		return nil, fmt.Errorf("error traversing yaml node: %w", err)
	}

	// Collect the corresponding values from the config struct.
	structVals := map[string]any{}
	traverseConfig("", conf, structVals)

	// Apply struct values onto the template nodes.
	for key, node := range nodeVals {
		val := structVals[key]

		// top level, ignore. except known maps.
		if node.Kind == yaml.MappingNode {
			switch val.(type) {
			case map[string]any:
			case map[string]string:
			default:
				continue
			}
		}

		// lazy way, delegate node construction to the yaml library via a roundtrip.
		// no performance concern as only one file is being read
		b, err := yaml.Marshal(val)
		if err != nil {
			return nil, fmt.Errorf("unexpected error nested value encoding: %w", err)
		}
		var newNode yaml.Node
		if err := yaml.Unmarshal(b, &newNode); err != nil {
			return nil, fmt.Errorf("unexpected error during yaml node traversal: %w", err)
		}
		if l := len(newNode.Content); l != 1 {
			return nil, fmt.Errorf("unexpected error during yaml node traversal: doc has multiple children of len %d", l)
		}
		*node = *newNode.Content[0]
	}

	// Removed dead debug leftovers (`tmp := string(b); _ = tmp`).
	out, err := encode(root)
	if err != nil {
		return nil, fmt.Errorf("error encoding yaml file: %w", err)
	}
	return out, nil
}
// traverseConfig walks struct s recursively and records the value of every
// yaml-tagged field in vals, keyed by the dot-joined yaml tag path
// (e.g. "vm.cpu"). Fields without a yaml tag, or tagged "-", are skipped.
func traverseConfig(parentKey string, s any, vals map[string]any) {
	v := reflect.ValueOf(s)
	t := reflect.TypeOf(s)

	// Non-struct values are leaves: record them under the accumulated key.
	if t.Kind() != reflect.Struct {
		vals[parentKey] = v.Interface()
		return
	}

	// Recurse into each exported, yaml-tagged field.
	for i := 0; i < t.NumField(); i++ {
		tag := strings.TrimSuffix(t.Field(i).Tag.Get("yaml"), ",omitempty")
		if tag == "" || tag == "-" { // no yaml tag is present
			continue
		}

		fullKey := tag
		if parentKey != "" {
			fullKey = parentKey + "." + tag
		}

		traverseConfig(fullKey, v.Field(i).Interface(), vals)
	}
}
// traverseNode walks a yaml document tree and records every value node in
// vals, keyed by its dotted path: "a.b" for mapping entries and "a.0",
// "a.1", ... for sequence elements.
func traverseNode(parentKey string, node *yaml.Node, vals map[string]*yaml.Node) error {
	switch node.Kind {
	case yaml.MappingNode:
		// Mapping children alternate: key, value, key, value, ...
		if l := len(node.Content); l%2 != 0 {
			return fmt.Errorf("uneven children of %d found for mapping node", l)
		}
		for i := 0; i < len(node.Content); i += 2 {
			if i > 1 {
				// fix jumbled comments
				if cn := node.Content[i]; cn.HeadComment != "" {
					if strings.Index(cn.HeadComment, "#") == 0 {
						cn.HeadComment = "\n" + cn.HeadComment
					}
				}
			}

			key := node.Content[i].Value
			val := node.Content[i+1]
			if parentKey != "" {
				key = parentKey + "." + key
			}
			vals[key] = val
			if err := traverseNode(key, val, vals); err != nil {
				return err
			}
		}
	case yaml.SequenceNode:
		// BUG FIX: sequence children are the elements themselves — there is
		// no key/value interleaving. The previous loop stepped by 2 and read
		// Content[i+1], which skipped every other element and indexed out of
		// range (panic) for odd-length sequences.
		for i := 0; i < len(node.Content); i++ {
			key := strconv.Itoa(i)
			val := node.Content[i]
			if parentKey != "" {
				key = parentKey + "." + key
			}
			vals[key] = val
			if err := traverseNode(key, val, vals); err != nil {
				return err
			}
		}
	}

	// yaml.ScalarNode has nothing to do
	return nil
}
// encode marshals v to YAML using a 2-space indent (matching the style of
// the embedded default config).
func encode(v any) ([]byte, error) {
	var out bytes.Buffer
	encoder := yaml.NewEncoder(&out)
	encoder.SetIndent(2)
	if err := encoder.Encode(v); err != nil {
		return out.Bytes(), err
	}
	return out.Bytes(), nil
}
|
<gh_stars>0
import React from 'react';
import PropTypes from 'prop-types';

// Stateless SVG Twitter icon. Size and fill colour are configurable via
// props; the path data is the fixed Twitter bird glyph.
const TwitterIcon = ({ bgColor, width, height }) => (
  <svg
    className="svgIcon-use"
    width={width}
    height={height}
    viewBox="0 0 25 25"
    fill={bgColor}
  >
    <path d="M21.725 5.338c-.744.47-1.605.804-2.513 1.006a3.978 3.978 0 0 0-2.942-1.293c-2.22 0-4.02 1.81-4.02 4.02 0 .32.034.63.07.94-3.31-.18-6.27-1.78-8.255-4.23a4.544 4.544 0 0 0-.574 2.01c.04 1.43.74 2.66 1.8 3.38-.63-.01-1.25-.19-1.79-.5v.08c0 1.93 1.38 3.56 3.23 3.95-.34.07-.7.12-1.07.14-.25-.02-.5-.04-.72-.07.49 1.58 1.97 2.74 3.74 2.8a8.49 8.49 0 0 1-5.02 1.72c-.3-.03-.62-.04-.93-.07A11.447 11.447 0 0 0 8.88 21c7.386 0 11.43-6.13 11.414-11.414.015-.21.01-.38 0-.578a7.604 7.604 0 0 0 2.01-2.08 7.27 7.27 0 0 1-2.297.645 3.856 3.856 0 0 0 1.72-2.23" />
  </svg>
);

// Defaults: empty fill (inherits from CSS), 25x25 viewport-sized icon.
TwitterIcon.defaultProps = {
  bgColor: '',
  width: '25',
  height: '25',
};

TwitterIcon.propTypes = {
  bgColor: PropTypes.string,
  width: PropTypes.string,
  height: PropTypes.string,
};

export default TwitterIcon;
|
arr = [1, 2, 3, 4]

# Sum the elements pairwise: (arr[0]+arr[1], arr[2]+arr[3], ...).
# A trailing unpaired element is carried through unchanged, since summing
# a one-element slice just yields that element.
sums = [sum(arr[i:i + 2]) for i in range(0, len(arr), 2)]

print(sums)
def sort_list(lst):
    """Sort ``lst`` in ascending order in place and return it.

    Bubble sort, kept for its in-place contract (callers may rely on the
    argument itself being reordered). Improvement over the original: a
    ``swapped`` flag terminates early once a full pass makes no swaps, so
    already-sorted input costs O(n) instead of always O(n^2).

    :param lst: list of mutually comparable items.
    :return: the same list object, sorted.
    """
    for i in range(len(lst) - 1):
        swapped = False
        # After pass i, the last i elements are already in final position.
        for j in range(0, len(lst) - i - 1):
            if lst[j] > lst[j + 1]:
                lst[j], lst[j + 1] = lst[j + 1], lst[j]
                swapped = True
        if not swapped:
            break  # no swaps in this pass -> the list is sorted
    return lst
const shellExec = require('shell-exec');
const args = require('args');

// CLI flags: --recipe (dameblanche recipe name, default "web") and --dir.
args
  .option('recipe', 'dameblanche recipe name', 'web')
  .option('dir', 'directory');

const flags = args.parse(process.argv);
// Target directory: --dir flag, else first positional arg, else CWD.
const dir = flags.dir || args.sub[0] || '.';

// Downloads the recipe's npm tarball and unpacks it into `dir`.
// NOTE(review): `flags.recipe`, `dir` and `packageUrl` are interpolated
// into shell commands without escaping — values containing shell
// metacharacters could inject arbitrary commands. Confirm inputs are
// trusted or quote/escape them.
const exec = async() => {
  try {
    // find npm package url
    const packageUrl = (await shellExec(`npm view @dameblanche/recipe-${flags.recipe} dist.tarball`)).stdout.trim();
    // make dir
    if (dir !== '.') {
      await shellExec(`mkdir -p ${dir}`);
    }
    // download package
    await shellExec(`curl '${packageUrl}' -o ${dir}/package.tgz`);
    // untar package
    await shellExec(`tar -C ${dir} -zxvf ${dir}/package.tgz `);
    // put package files in the correct place
    await shellExec(`rm -f ${dir}/package.tgz && mv ${dir}/package/{*,.[^.]*} ${dir} && rm -rf ${dir}/package`);
  } catch (e) {
    console.error(e);
  }
};

exec();
|
"""
The LibRoadRunner SBML Simulation Engine, (c) 2009-2017 <NAME>, <NAME>, <NAME> and <NAME>
LibRoadRunner is an SBML JIT compiler and simulation engine with a variety of analysis
functions. LibRoadRunner is a self contained library which is designed to be integrated
into existing simulation platforms or may be used a stand alone simulation and analysis
package.
"""
from .roadrunner import *
__version__ = roadrunner.getVersionStr(VERSIONSTR_BASIC)
def runTests(testDir=None):
try:
import testing
return testing.tester.runTester(testDir)
except (ImportError):
import roadrunner.testing
return roadrunner.testing.tester.runTester(testDir)
|
# Trains a simple intent-classification chatbot: tokenizes/stems patterns
# from intents.json into bag-of-words vectors, then fits a small tflearn
# feed-forward network and pickles the vocabulary for inference.
import nltk, numpy
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()

# importing our intents file
import json
with open('intents.json') as json_data:
    intents = json.load(json_data)

# tokenizing patterns
words = []
classes = []
documents = []
ignore_words = ['?']
# loop through each sentence in our intents patterns
for intent in intents['intents']:
    for pattern in intent['patterns']:
        # tokenize each word in the sentence
        w = nltk.word_tokenize(pattern)
        # add to our words list
        words.extend(w)
        # add to documents in our corpus
        documents.append((w, intent['tag']))
        # add to our classes list
        if intent['tag'] not in classes:
            classes.append(intent['tag'])

# stem and lower each word and remove duplicates
words = [stemmer.stem(w.lower()) for w in words if w not in ignore_words]
words = sorted(list(set(words)))

# remove duplicates
classes = sorted(list(set(classes)))

# create our training data
training = []
output = []
# create an empty array for our output (one slot per class)
output_empty = [0] * len(classes)

# training set, bag of words for each sentence
for doc in documents:
    # initialize our bag of words
    bag = []
    # list of tokenized words for the pattern
    pattern_words = doc[0]
    # stem each word
    pattern_words = [stemmer.stem(word.lower()) for word in pattern_words]
    # create our bag of words array (1 if vocab word present, else 0)
    for w in words:
        bag.append(1) if w in pattern_words else bag.append(0)

    training.append(bag)
    # output is a '0' for each tag and '1' for current tag (one-hot label)
    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1
    output.append(output_row)

# sample training/output
i = 0
w = documents[i][0]
print ([stemmer.stem(word.lower()) for word in w])
print (training[i])
print (output[i])

# import tensorflow
# NOTE(review): `numpy` is imported twice (also at the top) and
# `tensorflow as tf` appears unused directly — tflearn uses it internally.
import numpy
import tflearn
import tensorflow as tf
import random

# seed for reproducibility
random.seed(1)

# transform as a numpy array
training = numpy.array(training)
output = numpy.array(output)

# build deep neural network: input -> 8 -> 8 -> softmax over classes
net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)

# define model and setup tensor board
model = tflearn.DNN(net, tensorboard_dir='tflearn_logs')
# start training (apply gradient descent algorithm)
model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
model.save('model.tflearn')

# Persist the vocabulary, classes and training arrays for the inference
# script to rebuild identical bag-of-words vectors.
import pickle
pickle.dump( {'words':words, 'classes':classes, 'train_x':training, 'train_y':output}, open( "training_data", "wb" ) )
<reponame>vqboy/VQTools
Pod::Spec.new do |s|
  s.name = "VQTools"
  s.version = "0.0.1"
  s.summary = "一些常用的工具方法的整合."
  s.description = <<-DESC
  整合了一些系统的拍照录音录像功能,及相册存取和其他相关便捷方法。
  DESC
  s.homepage = "https://github.com/vqboy/VQTools"
  s.license = { :type => "MIT", :file => "LICENSE" }
  s.author = { "VQBoy" => "<EMAIL>" }
  s.platform = :ios, "8.0"
  s.source = { :git => "https://github.com/vqboy/VQTools.git", :tag => "#{s.version}" }
  s.source_files = "VQTools/VQTools.{h,m}","VQTools/Lib/lame/lame.h"
  s.public_header_files = "VQTools/VQTools.h"
  s.preserve_paths = "VQTools/**/*.{h,m,a}"
  s.frameworks = "UIKit", "Foundation", "CoreTelephony", "AudioToolbox", "AVFoundation", "AssetsLibrary", "Photos", "CoreLocation", "MediaPlayer"
  # Prebuilt LAME static library for MP3 encoding.
  s.ios.vendored_libraries = "VQTools/Lib/lame/libmp3lame.a"
  s.requires_arc = true
  # Bitcode disabled — presumably because libmp3lame.a was built without
  # bitcode; confirm before re-enabling.
  s.xcconfig = { "ENABLE_BITCODE" => "NO" }
  s.dependency "AFNetworking", "~> 3.0.0"
end
|
/*
* Copyright 2016 Realm Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.realm.entities.migration;
import io.realm.RealmObject;
import io.realm.annotations.PrimaryKey;
import io.realm.migration.MigrationPrimaryKey;
// Class used to test what happens if you rename a primary field in a migration.
public class MigrationFieldRenamed extends RealmObject implements MigrationPrimaryKey {

    // Schema-description constants used by the migration tests.
    // Declared final so test code cannot accidentally reassign them
    // (they were previously mutable public statics).
    public static final String CLASS_NAME = "MigrationFieldRenamed";
    public static final long DEFAULT_FIELDS_COUNT = 5;
    public static final long DEFAULT_PRIMARY_INDEX = 2;
    public static final String FIELD_PRIMARY = "fieldRenamedPrimary";

    private Byte fieldFirst;
    private Short fieldSecond;

    // PK is placed in the middle to check if prior/posterior fields' removal is properly reflected
    // during migration step. The MigrationPrimaryKey interface' PK field name is `fieldPrimary`.
    @PrimaryKey
    private String fieldRenamedPrimary;

    private Integer fieldFourth;
    private Long fieldFifth;

    public void setFieldFirst(Byte fieldFirst) {
        this.fieldFirst = fieldFirst;
    }

    public Byte getFieldFirst() {
        return this.fieldFirst;
    }

    public void setFieldSecond(Short fieldSecond) {
        this.fieldSecond = fieldSecond;
    }

    public Short getFieldSecond() {
        return this.fieldSecond;
    }

    public void setFieldRenamedPrimary(String fieldRenamedPrimary) {
        this.fieldRenamedPrimary = fieldRenamedPrimary;
    }

    public String getFieldRenamedPrimary() {
        return this.fieldRenamedPrimary;
    }

    public void setFieldFourth(Integer fieldFourth) {
        this.fieldFourth = fieldFourth;
    }

    public Integer getFieldFourth() {
        return this.fieldFourth;
    }

    public void setFieldFifth(Long fieldFifth) {
        this.fieldFifth = fieldFifth;
    }

    public Long getFieldFifth() {
        return this.fieldFifth;
    }
}
|
#!/bin/sh
# Generated file, master is Makefile.am
. ${srcdir:-.}/common.sh
infile="$srcdir/images/minisblack-1c-16b.tiff"
outfile="o-tiffcrop-extractz14-minisblack-1c-16b.tiff"
# Extract the left edge plus zones 1 and 2 (of 4), then validate the
# result with tiffinfo. (Edit Makefile.am, not this generated file.)
f_test_convert "$TIFFCROP -E left -Z1:4,2:4" $infile $outfile
f_tiffinfo_validate $outfile
|
/**
 * Return a copy of $data in which the named column of every row has been
 * replaced by the callback's result. Rows are stdClass objects; the input
 * rows are left untouched (each output row is a fresh object).
 *
 * @param array    $data       list of stdClass rows
 * @param string   $columnName property to transform on each row
 * @param callable $callback   receives the old value, returns the new one
 * @return array list of new stdClass rows
 */
function modifyColumn($data, $columnName, $callback) {
    $transform = function ($row) use ($columnName, $callback) {
        $rowCopy = (array) $row;
        $rowCopy[$columnName] = $callback($row->$columnName);
        return (object) $rowCopy;
    };
    // array_values keeps the original behavior of returning a 0-indexed list.
    return array_values(array_map($transform, $data));
}
// Sample usage
$data = [
    (object)['id' => 1, 'name' => 'Alice', 'age' => 25],
    (object)['id' => 2, 'name' => 'Bob', 'age' => 30],
    (object)['id' => 3, 'name' => 'Charlie', 'age' => 28]
];
// Double every 'age'; $data itself is not modified.
$modifiedData = modifyColumn($data, 'age', function($value) {
    return $value * 2;
});
// $modifiedData will be:
// [
// (object)['id' => 1, 'name' => 'Alice', 'age' => 50],
// (object)['id' => 2, 'name' => 'Bob', 'age' => 60],
// (object)['id' => 3, 'name' => 'Charlie', 'age' => 56]
// ] |
#!/bin/bash
# Archived program command-line for experiment
# Copyright 2016 Xiang Zhang
#
# Usage: bash {this_file} [additional_options]
set -x;
set -e;
# Run the small temporal model on the JD full-sentiment pinyin-byte data
# (alphabet size 257, shift 1); any extra CLI options are passed through.
qlua main.lua -driver_location models/jdfull/temporal8length486feature256roman -driver_variation small -driver_dimension 257 -train_data_file data/jd/sentiment/full_train_pinyin_byte.t7b -train_data_replace 257 -train_data_shift 1 -test_data_file data/jd/sentiment/full_test_pinyin_byte.t7b -test_data_replace 257 -test_data_shift 1 "$@";
|
/**
 * Build the HTML for a dismissible Bootstrap info alert containing the
 * given message. The message is HTML-escaped (quotes included) before it
 * is embedded, so untrusted input is safe to pass.
 *
 * @param string $message text to show inside the alert
 * @return string alert markup
 */
function formatAlertBox($message) {
    $safeMessage = htmlspecialchars($message, ENT_QUOTES, 'UTF-8');
    return '<div class="alert alert-info alert-dismissible">'
        . '<a href="#" class="close" data-dismiss="alert">×</a>'
        . '<strong>Information!</strong> ' . $safeMessage
        . '</div>';
}
// Test the function
$message = "This is an important message!";
// Prints the escaped, dismissible alert markup.
echo formatAlertBox($message);
#!/bin/bash
# Need to install:
# texlive (obviously !)
# nkf
# bzip2
# fontforge
# mftrace
TEXDIR=/usr/share/texmf
TEMPDIR=/tmp/mahjong-tile-font
[[ -d "$TEMPDIR" ]] || mkdir -p $TEMPDIR
cd $TEMPDIR
# Fetch and unpack the piemf (mahjong tile) Metafont sources.
wget http://riichi.dynaman.net/RiichiBooks/piemf-2.0.2.tar.bz2
tar xjvf piemf-2.0.2.tar.bz2
cd piemf-2.0.2/macros
# Re-encode the style files with nkf (in-place via a work dir).
mkdir work
for f in *.sty
do
	nkf $f > work/$f
	mv work/$f .
done
cd ..
# Install Metafont sources and LaTeX macros into the TeX tree.
mkdir -p $TEXDIR/{fonts/{source,tfm,type1,map/dvips},tex/latex}/piemf
cp src/*.mf $TEXDIR/fonts/source/piemf/
cp macros/*.{sty,fd} $TEXDIR/tex/latex/piemf/
mktexlsr
# Trace the Metafont glyphs into a Type1 font and register its map file.
mkdir -p type1
cd type1
mftrace --magnification=4000 --encoding=tex256.enc pie
t1binary pie.pfa pie.pfb
cat > pie.map <<EOF
pie pie <pie.pfb
EOF
cp pie.pfb $TEXDIR/fonts/type1/piemf/
cp pie.map $TEXDIR/fonts/map/dvips/piemf/
mktexlsr
updmap-sys --enable Map=pie.map
# NOTE(review): `cd -` returns into a directory inside $TEMPDIR, which the
# next line deletes — the shell is left in a removed cwd. Confirm intended.
cd -
rm -rf "$TEMPDIR"
|
echo "hi"
# Move two directories up, list the contents, then exit.
cd ../../
ls
exit
|
// Standard library headers first, then project headers.
#include <algorithm>
#include <string>
#include <vector>

#include "../include/node.h"
#include "../include/variables.hpp"
#include "../include/main.hpp"
// Allocate a fresh node, append it to `tree`'s children and return it.
Node *add_child(Node *tree)
{
    Node *child = new Node;
    tree->children.push_back(child);
    return child;
}
// Append the existing node `c` to `tree`'s child list.
void push_child(Node *tree, Node *c)
{
    tree->children.push_back(c);
}
// Render the AST rooted at `ast_t` as an indented tree, one node per line:
// "<indent> <value> (<type>)\t<ref>". `tab` is the current nesting depth.
// Uses std::remove (erase-remove idiom) — <algorithm> is now included
// explicitly instead of relying on a transitive include.
string ast_t_rep(Node *ast_t, int tab)
{
    // Indentation prefix: one marker per level, plus a branch marker
    // for every non-root node.
    string t;
    for (int i = 0; i < tab; ++i)
    {
        t += "├\t";
    }
    if (tab != 0)
    {
        t += "├─ ";
    }
    // Strip embedded newlines so a node always occupies a single line.
    string str = ast_t->value;
    str.erase(std::remove(str.begin(), str.end(), '\n'), str.end());
    string res = t + str + " (" + ast_t->type + ")\t" + ast_t->ref + "\n";
    // Recurse into children one level deeper (range-for avoids the
    // signed/unsigned comparison of the old int-indexed loop).
    for (Node *child : ast_t->children)
    {
        res += ast_t_rep(child, tab + 1);
    }
    return res;
}
// Collect the `value` field of each direct child of `trunc`.
// Range-for replaces the int-indexed loop (signed/unsigned mismatch);
// reserve avoids repeated reallocation.
vector<string> childs_value(Node *trunc)
{
    vector<string> values;
    values.reserve(trunc->children.size());
    for (Node *child : trunc->children)
    {
        values.push_back(child->value);
    }
    return values;
}
// Collect the `type` field of each direct child of `trunc`.
// Range-for replaces the int-indexed loop (signed/unsigned mismatch);
// reserve avoids repeated reallocation.
vector<string> childs_type(Node *trunc)
{
    vector<string> types;
    types.reserve(trunc->children.size());
    for (Node *child : trunc->children)
    {
        types.push_back(child->type);
    }
    return types;
}
// Collect the `ref` field of each direct child of `trunc`.
// Range-for replaces the int-indexed loop (signed/unsigned mismatch);
// reserve avoids repeated reallocation.
vector<string> childs_ref(Node *trunc)
{
    vector<string> ref;
    ref.reserve(trunc->children.size());
    for (Node *child : trunc->children)
    {
        ref.push_back(child->ref);
    }
    return ref;
}
<filename>trustconnector/src/main/java/libs/trustconnector/scdp/util/tlv/simpletlv/LocationStatus.java<gh_stars>1-10
package libs.trustconnector.scdp.util.tlv.simpletlv;
import libs.trustconnector.scdp.util.tlv.*;
import libs.trustconnector.scdp.util.tlv.bertlv.*;
import libs.trustconnector.scdp.util.*;
import libs.trustconnector.scdp.util.ByteArray;
import libs.trustconnector.scdp.util.tlv.Length;
import libs.trustconnector.scdp.util.tlv.Tag;
import libs.trustconnector.scdp.util.tlv.bertlv.BERLength;
/**
 * SIMPLE-TLV wrapper for a one-byte Location Status value.
 * Uses tag 27 — presumably the Location Status tag of the (U)SAT
 * specification; confirm against ETSI TS 102 223.
 */
public class LocationStatus extends SimpleTLV
{
    // Possible status byte values.
    public static final byte STATUS_NORMAL_SERVICE = 0;
    public static final byte STATUS_LIMITED_SERVICE = 1;
    public static final byte STATUS_NO_SERVICE = 2;
    // Parsing constructor: wraps an already-decoded tag/length/value triple.
    public LocationStatus(final Tag tag, final Length len, final byte[] v, final int vOff) {
        super(tag, len, v, vOff);
    }
    // Building constructor: encodes the given status under SIMPLE tag 27
    // with a one-byte BER length.
    public LocationStatus(final byte status) {
        this.tag = new SimpleTag(27);
        this.len = new BERLength(1);
        (this.value = new ByteArray(1)).setByte(0, status);
    }
    // Returns the encoded status byte (first and only value byte).
    public byte getStatus() {
        return this.value.getByte(0);
    }
}
|
import os
import sys
from distutils.core import setup
import py2exe, numpy
def numpy_dll_paths_fix():
    """Add every directory under numpy's package tree that contains a
    ``.dll`` to ``sys.path`` so py2exe can find numpy's bundled DLLs.

    BUG FIX: the original ended with ``sys.path.append(*list(paths))``,
    which raises TypeError whenever ``paths`` holds zero entries (append
    called with no argument) or two or more entries (append takes exactly
    one argument). ``extend`` handles any number of directories.
    """
    dll_dirs = set()
    np_path = numpy.__path__[0]
    for dirpath, _, filenames in os.walk(np_path):
        if any(name.endswith('.dll') for name in filenames):
            dll_dirs.add(dirpath)
    sys.path.extend(dll_dirs)
numpy_dll_paths_fix()
# Keep a reference to py2exe's original DLL classifier so we can delegate.
origIsSystemDLL = py2exe.build_exe.isSystemDLL
def isSystemDLL(pathname):
    """Force py2exe to bundle the listed SDL/freetype DLLs instead of
    treating them as system DLLs (returning 0 means "not a system DLL")."""
    dlls = ("libfreetype-6.dll", "libogg-0.dll", "sdl_ttf.dll")
    if os.path.basename(pathname).lower() in dlls:
        return 0
    return origIsSystemDLL(pathname)
py2exe.build_exe.isSystemDLL = isSystemDLL
# Run as if "py2exe" had been passed on the command line.
sys.argv.append('py2exe')
setup(
    name = 'Flappy Bird',
    version = '1.0',
    author = '<NAME>',
    options = {
        'py2exe': {
            'bundle_files': 1, # doesn't work on win64
            'compressed': True,
        }
    },
    windows = [{
        'script': "flappy.py",
        'icon_resources': [
            (1, 'flappy.ico')
        ],
        'uac_info': "requireAdministrator"
    }],
    zipfile=None,
)
|
#!/bin/sh
# Build the app package for each supported WD My Cloud model, then collect
# the produced binaries and a source tarball under ../../packages/<app>.
set -e

# Quote command substitutions and variables so paths with spaces survive.
APP_NAME="$(basename "$(pwd)")"
VERSION="$(awk '/Version/{print $NF}' apkg.rc)"
echo "Building ${APP_NAME} version ${VERSION}"

MODELS="MyCloudEX2Ultra"
for model in $MODELS; do
    make
    ../../mksapkg-OS5 -E -s -m "$model" > /dev/null
done

echo "Move binaries"
RELEASE_DIR="../../packages/${APP_NAME}"
mkdir -p "${RELEASE_DIR}"
# Trim anything mksapkg appended after ".bin" in the generated file names.
# read -r keeps backslashes in file names intact.
find .. -maxdepth 1 -name "*.bin*" | while read -r f; do mv "$f" "${f%.bin*}.bin"; done
mv ../*_${APP_NAME}_* "${RELEASE_DIR}"

echo "Bundle sources"
SRC_TAR="${RELEASE_DIR}/${APP_NAME}_src_${VERSION}.tar.gz"
tar -czf "$SRC_TAR" .
|
require('dotenv').config()
const express = require('express')
const path = require('path')
const fs = require('fs')
const formidable = require('formidable')
const helper = require('sendgrid').mail
const app = express()
const sg = require('sendgrid')(process.env.SENDGRID_API_KEY)
const fetch = require('node-fetch')
const PORT = process.env.SERVER_PORT || 9000
// const CLIENT_PORT = process.env.PORT || 3000
const PROTOCOL = process.env.PROTOCOL || 'http'
const HOSTNAME = process.env.HOST || 'localhost'
const UPLOAD_DIR = path.join(__dirname, 'uploads/')
// In production only the site's own origin is allowed; otherwise any origin.
const CORS =
  process.env.NODE_ENV === 'production' ? `${PROTOCOL}://${HOSTNAME}` : `*`
// Both integrations are always on in production and opt-in elsewhere.
const ENABLE_SEND_EMAILS =
  process.env.NODE_ENV === 'production' ||
  process.env.ENABLE_SEND_EMAILS === 'true'
const ENABLE_WRIKE =
  process.env.NODE_ENV === 'production' || process.env.ENABLE_WRIKE === 'true'
if (ENABLE_SEND_EMAILS) {
  console.info('Sending emails is enabled')
} else {
  console.info('Sending emails is disabled')
}
if (ENABLE_WRIKE) {
  console.info('Wrike integration is enabled')
} else {
  console.info('Wrike integration is disabled')
}
// Wrap a Sendgrid Mail object in a v3 send-mail API request.
const makeSgRequest = mail => {
  const request = {
    method: 'POST',
    path: '/v3/mail/send',
    body: mail.toJSON()
  }
  return sg.emptyRequest(request)
}
// This converts {a:1, b:2} into 'a=1&b=2' (keys and values URI-encoded).
const queryParams = obj =>
  Object.keys(obj)
    .map(
      key => encodeURIComponent(key) + '=' + encodeURIComponent(obj[key])
    )
    .join('&')
// Create a Wrike folder titled `name` with `content` as its description,
// shared with / owned by the configured Wrike IDs. Resolves with Wrike's
// parsed JSON response.
const wrikeMkFolder = (name, content) =>
  fetch(process.env.WRIKE_URL, {
    body: queryParams({
      title: name,
      description: content,
      shareds: process.env.WRIKE_SHARE_ID,
      project: process.env.WRIKE_OWNER_ID
    }),
    method: 'post',
    headers: {
      Authorization: `bearer ${process.env.WRIKE_TOKEN}`,
      'Content-Type': 'application/x-www-form-urlencoded'
    }
  }).then(res => res.json())
// Attach `file` (a Buffer or ReadStream) to the Wrike folder `id`.
// Resolves with Wrike's parsed JSON response.
const wrikeAddAttachment = (id, file, name, type) =>
  fetch(`https://www.wrike.com/api/v3/folders/${id}/attachments`, {
    body: file,
    method: 'post',
    headers: {
      Authorization: `bearer ${process.env.WRIKE_TOKEN}`,
      'x-requested-with': 'XMLHttpRequest',
      'x-file-name': name,
      'content-type': type,
      'cache-control': 'no-cache'
    }
  }).then(res => res.json())
if (!fs.existsSync(UPLOAD_DIR)) {
  console.warn('Creating uploads folder...')
  fs.mkdirSync(UPLOAD_DIR)
}
console.info(`Uploads will be saved in ${UPLOAD_DIR}`)
// Serve the built SPA; every unknown GET falls through to index.html.
app.use(express.static(path.join(__dirname, 'build')))
app.get('/*', function (req, res) {
  res.sendFile(path.join(__dirname, 'build', 'index.html'))
})
// Handle the service-request form: save uploaded files to disk, email the
// submitted fields via Sendgrid, and mirror everything into Wrike.
app.post('/uploads', function (req, res) {
  const form = new formidable.IncomingForm()
  // In any case send the cors headers (even on error)
  res.header('Access-Control-Allow-Origin', CORS)
  res.header(
    'Access-Control-Allow-Headers',
    'Origin, X-Requested-With, Content-Type, Accept'
  )
  form.parse(req)
  // The events we subscribe to in the form occur in the following order
  // field - multiple times
  // fileBegin then file - once per file
  // error - only if there was a parsing error
  // end - when all other events have been handled and the files have
  //       finished being written to the disk, this event happens even
  //       if there was an error
  form.on('fileBegin', function (name, file) {
    file.path = path.join(UPLOAD_DIR, file.name)
  })
  form.on('file', function (name, file) {
    console.log('Uploaded ' + file.name)
  })
  const files = []
  form.on('file', function (name, file) {
    files.push(file)
  })
  const fields = {}
  let fieldsString = ''
  form.on('field', (name, value) => {
    fields[name] = value
    fieldsString = fieldsString + `${name}: ${value}<br />`
  })
  // Handle a possible error while parsing the request
  // We need a variable in this scope to hold whether there was an error
  // because we need to know that in a different callback
  let error = false
  form.on('error', err => {
    error = true
    console.log('Error while parsing request to /uploads: ' + err)
    res
      .status(400) // Bad request
      .json({ success: false, status: 'Error parsing the request' })
  })
  form.on('end', () => {
    // The end event is fired even if an error occurs, so we
    // need to prevent from sending a second response, otherwise the
    // server crashes
    if (error) return
    console.log('Received fields:\n' + JSON.stringify(fields, null, 2))
    // TODO: Validate fields
    // Here is a good place to send the emails since we have the fields
    // We don't want to actually send emails during testing since it
    // would send a test email on every single commit
    if (ENABLE_SEND_EMAILS) {
      const toEmail = new helper.Email('<EMAIL>')
      const fromEmail = new helper.Email('<EMAIL>')
      const subject = 'New Service Request Form Submission'
      const content = new helper.Content('text/html', fieldsString)
      const mail = new helper.Mail(fromEmail, subject, toEmail, content)
      const request = makeSgRequest(mail)
      console.log('Sending email...')
      // Fire-and-forget: the HTTP response below does not wait for Sendgrid.
      sg.API(request, function (error, response) {
        if (error) {
          console.log('Error response received')
        }
        console.log(response.statusCode)
        console.log(response.body)
        console.log(response.headers)
      })
    }
    // Create project and attach files in wrike
    if (ENABLE_WRIKE) {
      wrikeMkFolder(fields['email'], fieldsString)
        .then(status => {
          const folderId = status.data[0].id
          for (const file of files) {
            // Formidable files are just metadata, not the actual file
            // Use the file name to create a ReadStream and pass it to
            // node-fetch which can handle ReadStreams
            // To pass a ReadStream is something like piping the file
            // instead of reading the whole file and passing it
            const readStream = fs.createReadStream(file.path)
            wrikeAddAttachment(
              folderId,
              readStream,
              file.name,
              file.type
            ).catch(err => {
              console.log(
                'Error while reading file for upload to Wrike: ' + err
              )
              console.log('Filename: ' + file.path)
            })
          }
        })
        .catch(err => {
          console.log('Error while creating a project in Wrike: ' + err)
        })
    }
    // Send the success response
    res
      .status(200)
      .json({ success: true, status: 'Form successfully submitted' })
  })
})
app.listen(PORT, _ => console.info(`Server listening on PORT ${PORT}...`))
|
<reponame>XBigTK13X/snowgloo<filename>android-client/src/com/simplepathstudios/snowgloo/api/model/SearchResults.java
package com.simplepathstudios.snowgloo.api.model;
import java.util.ArrayList;
// DTO for search responses; public field names match the server's JSON keys.
public class SearchResults {
    // Presumably the combined size of the three lists below — confirm
    // against the server contract.
    public int ItemCount;
    public ArrayList<MusicFile> Songs;
    public ArrayList<MusicAlbum> Albums;
    public ArrayList<MusicArtist> Artists;
}
|
#!/bin/bash
# Sweep both modules' buffer sizes across several packet sizes and record
# the measured cycle counts as one TSV file per packet size.
CWD=$(pwd)
BENCHMARK=$(echo ${CWD} | rev | cut -d'/' -f1 | rev)
PACKET_SIZE=(64 256 512 1400)
SWEEP_BUFFERS=(1 2 4 8 16 32 64 128 256 512 1024 2048 4096)
OUTDIR=${CWD}/output
mkdir -p ${OUTDIR}
cd ${CWD}/../../
rm main
# The benchmark directory is named "<mod1>-<mod2>"; split it for the header.
PIPELINE=$(echo $CWD | rev | cut -d'/' -f1 | rev)
MOD1=$(echo $PIPELINE | cut -d'-' -f1)
MOD2=$(echo $PIPELINE | cut -d'-' -f2)
for pkt in ${PACKET_SIZE[@]}; do
    printf "$MOD1\t$MOD2\tPACKET_SIZE\tCYCLES\n"
    for mod1 in ${SWEEP_BUFFERS[@]}; do
        for mod2 in ${SWEEP_BUFFERS[@]}; do
            printf "$mod1\t$mod2\t$pkt\t"
            # Extract the last "(...)" cycles figure from the profile output.
            make profile-run BENCHMARK=${BENCHMARK} EXTRA="-DREPEAT=200 -DPACKET_SIZE=${pkt} -DMOD_BUFFER_SIZE_2=${mod2} -DMOD_BUFFER_SIZE_1=${mod1}" | grep cycles | rev | cut -d' ' -f1 | sed -e 's/[()]//g' | rev
        done
    done | tee "${OUTDIR}/$pkt.tsv"
done
cd ${CWD}
|
#!/bin/bash
# Download the prostateCancerGrasso data package from the first reachable
# mirror, verify its md5, and install it into the conda R library.
FN="prostateCancerGrasso_1.22.0.tar.gz"
URLS=(
  "https://bioconductor.org/packages/3.14/data/experiment/src/contrib/prostateCancerGrasso_1.22.0.tar.gz"
  "https://bioarchive.galaxyproject.org/prostateCancerGrasso_1.22.0.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-prostatecancergrasso/bioconductor-prostatecancergrasso_1.22.0_src_all.tar.gz"
)
MD5="d20d47c6801e10e3e6973a6712c4320a"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
  curl $URL > $TARBALL
  [[ $? == 0 ]] || continue
  # Platform-specific md5sum checks.
  if [[ $(uname -s) == "Linux" ]]; then
    if md5sum -c <<<"$MD5  $TARBALL"; then
      SUCCESS=1
      break
    fi
  # NOTE(review): this is `else` + a nested `if ... fi`, not `elif` — it
  # parses, but the extra `fi` below is easy to misread; confirm template.
  else if [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
  fi
done
if [[ $SUCCESS != 1 ]]; then
  echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
  printf '%s\n' "${URLS[@]}"
  exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
<reponame>leongaban/redux-saga-exchange<filename>src/features/users/view/containers/OpenOrdersReport/OpenOrdersReport.tsx
import * as React from 'react';
import block from 'bem-cn';
import { connect } from 'react-redux';
import { bindActionCreators, Dispatch } from 'redux';
import { bind } from 'decko';
import { containersProvider, IContainerTypes } from 'core';
import { IAppReduxState } from 'shared/types/app';
import { IActiveOrder, OrderValueFormatter, IPaginatedData } from 'shared/types/models';
import { defaultRecordsPerPageSelectConfig } from 'shared/constants';
import { selectors as configSelectors } from 'services/config';
import { selectors, actions } from './../../../redux';
import './OpenOrdersReport.scss';
interface IOwnProps {
  userID: string;
}
interface IStateProps {
  formatPrice: OrderValueFormatter;
  formatVolume: OrderValueFormatter;
  areOpenOrdersRequesting: boolean;
  openOrders: IPaginatedData<IActiveOrder[]>;
}
interface IActionProps {
  loadOpenOrders: typeof actions.loadOpenOrders;
}
interface IProviderProps {
  OpenOrdersTable: IContainerTypes['OpenOrdersTable'];
}
type IProps = IStateProps & IActionProps & IProviderProps & IOwnProps;
// Map store state to props: value formatters, the in-flight flag for the
// load request, and the paginated order list.
function mapState(state: IAppReduxState): IStateProps {
  return {
    formatPrice: configSelectors.selectOrderPriceFormatter(state),
    formatVolume: configSelectors.selectOrderVolumeFormatter(state),
    areOpenOrdersRequesting: selectors.selectCommunication(state, 'loadOpenOrders').isRequesting,
    openOrders: selectors.selectOpenOrders(state),
  };
}
function mapDispatch(dispatch: Dispatch<IAppReduxState>): IActionProps {
  return bindActionCreators({
    loadOpenOrders: actions.loadOpenOrders,
  }, dispatch);
}
const b = block('open-orders-report');
// Server-paginated table of a user's open orders (admin report view).
class OpenOrdersReport extends React.PureComponent<IProps> {
  public componentDidMount() {
    const { userID, loadOpenOrders } = this.props;
    const { initialOption } = defaultRecordsPerPageSelectConfig;
    // Fetch the first page with the default page size on mount.
    loadOpenOrders({ userID, page: 1, perPage: initialOption });
  }
  public render() {
    const {
      openOrders: { data, pagination: { total, page } }, areOpenOrdersRequesting,
      OpenOrdersTable,
    } = this.props;
    return (
      <div className={b()}>
        <OpenOrdersTable
          records={data}
          renderHeader={this.renderHeader}
          recordsPerPageSelectConfig={defaultRecordsPerPageSelectConfig}
          serverPaginationProps={{
            isRequesting: areOpenOrdersRequesting,
            onPageRequest: this.handlePageRequest,
            activePage: page,
            totalPages: total,
          }}
        />
      </div>
    );
  }
  @bind
  private handlePageRequest(page: number, perPage: number) {
    const { userID, loadOpenOrders } = this.props;
    loadOpenOrders({ userID, perPage, page });
  }
  @bind
  private renderHeader(renderPaginationControls: () => JSX.Element | null) {
    return (
      <div className={b('header')()}>
        {renderPaginationControls()}
      </div>
    );
  }
}
export default containersProvider(['OpenOrdersTable'], <div />)(connect(mapState, mapDispatch)(OpenOrdersReport));
|
/* eslint-disable import/extensions */
import displayUtils from '../utils/displayUtils.js';
import plotUtils from '../utils/plotUtils.js';
// Visualization step: write the step label, draw the axes, then plot the
// projected 2-D data with the given palette. Call order matters — the
// label and axes must be drawn before the data points.
export default function projectedDataStep({ projectedData, palette }) {
  displayUtils.labelStep('Data plotted on\n\'Principal Axes\'');
  plotUtils.drawAxes();
  plotUtils.plot2d(projectedData, palette);
}
|
#!/bin/bash
# Time both MPI mandelbrot variants (30 runs each) for 1/2/4/8 processes,
# appending all output to ./output/mandelbrot.txt.
echo Q4 - Tempos > ./output/mandelbrot.txt 2>&1
for num_procs in 1 2 4 8
do
    (
        echo mandelbrot_mpi2_nox_dist_n$num_procs
        for i in `seq 30`
        do
            time mpirun -n $num_procs ./bin/mandelbrot_mpi2_nox_dist
        done
    ) >> ./output/mandelbrot.txt 2>&1
    (
        echo mandelbrot_mpi1_nox_dist_n$num_procs
        for i in `seq 30`
        do
            time mpirun -n $num_procs ./bin/mandelbrot_mpi1_nox_dist
        done
    ) >> ./output/mandelbrot.txt 2>&1
done
|
# Set up Vagrant.
date > /etc/vagrant_box_build_time
#Setting up sudo (passwordless sudo for the vagrant user; original kept as .orig)
cp /etc/sudoers /etc/sudoers.orig
sed -i -e 's/vagrant ALL=(ALL) ALL/vagrant ALL=NOPASSWD:ALL/g' /etc/sudoers
#Installing vagrant keys
mkdir /home/vagrant/.ssh
chmod 700 /home/vagrant/.ssh
cd /home/vagrant/.ssh
# Fetch the well-known insecure vagrant public key (replaced on first `vagrant up`).
wget --no-check-certificate 'https://raw.github.com/mitchellh/vagrant/master/keys/vagrant.pub' -O authorized_keys
chmod 600 /home/vagrant/.ssh/authorized_keys
chown -R vagrant /home/vagrant/.ssh
# Install NFS client
apt-get -y install nfs-common
|
#!/bin/bash
# Post-deploy steps for the ipost Laravel app.
cd /var/web/html_new/ipost
#aws s3 cp s3://ipost-codedeploy-deployments/development/.env /var/web/html_new/ipost
# NOTE(review): `composer update` re-resolves dependency versions on every
# deploy; `composer install` (honoring composer.lock) is the usual choice
# for deployments — confirm this is intentional.
composer update
php artisan cache:clear
php artisan config:cache
composer dump-autoload
import Mali from "mali";
/**
 * Test fixture handler: fills ctx.res with one sample value for every
 * scalar protobuf field type, echoing the incoming request back as a
 * JSON string in fieldString.
 * @param {any} ctx
 */
function DoSomething(ctx) {
  const echoedRequest = JSON.stringify(ctx.req);
  const response = {
    fieldDouble: 0.1,
    fieldFloat: 0.2,
    fieldInt32: -3,
    fieldInt64: -4,
    fieldUint32: 5,
    fieldUint64: 6,
    fieldSint32: -7,
    fieldSint64: -8,
    fieldFixed32: 9,
    fieldFixed64: 10,
    fieldSfixed32: -11,
    fieldSfixed64: -12,
    fieldBool: true,
    fieldString: echoedRequest,
    fieldBytes: "1234",
    fieldStrings: ["a", "b", "c"],
    fieldEnum: "two",
    fieldNestedEnum: "four",
    fieldChild: { foo: "bar" },
    fieldNestedChild: { bar: "baz" },
    fieldRecursive: { depth: 1, recursive: { depth: 2 } },
  };
  ctx.res = response;
}
/**
 * Start a Mali gRPC server for the KitchenSink service on localhost.
 * @param {number} port
 * @returns a zero-argument function that shuts the server down.
 */
export async function run(port) {
  const app = new Mali("test/__fixtures__/kitchensink.proto", "KitchenSink");
  app.use({ DoSomething });
  await app.start(`localhost:${port}`);
  return () => app.close();
}
|
import { RenderContext } from '../render';
import { Painter } from '../Painter';
import { RenderObject } from '../RenderObject';
import { Vector } from '../Vector';
// Painter that strokes a polyline through `positions`. Positions are
// interpreted relative to the owning render object's world bounding-box
// minimum and translated to world space before stroking.
export class LinePainter extends Painter {
  public positions: Vector[] = [];
  public strokeColor = '#000';
  public paint(context: RenderContext, renderObject: RenderObject): void {
    // Nothing to stroke without at least one point.
    if (this.positions.length === 0) {
      return;
    }
    const { min } = renderObject.getWorldBoundingBox();
    const worldPositions = this.positions.map((position) => {
      // clone() keeps `this.positions` unmutated while offsetting into world space.
      return position.clone().add(min);
    });
    context.strokePath(worldPositions, this.strokeColor);
  }
}
|
# frozen_string_literal: true
require_relative 'asset_exchange_controller'
|
#include <iostream>
#include <cstring>
// Line parser that distinguishes the first parsed line from all later ones.
class Parser {
private:
    // True until the first line has been consumed; reset() re-arms it.
    bool m_first_line;

public:
    /// Construct in the initial (first-line) state.
    /// BUG FIX: m_first_line was previously left uninitialized until
    /// reset() was called, so calling parse() on a fresh Parser read an
    /// indeterminate value (undefined behavior).
    Parser()
    {
        reset();
    }

    /// Reset to initial parser state.
    void reset()
    {
        m_first_line = true;
    }

    /// Consume one line of `size` bytes starting at `data`.
    /// Returns true (placeholder status).
    bool parse(const char* data, size_t size)
    {
        if (m_first_line) {
            std::cout << "Processing first line: " << std::string(data, size) << std::endl;
            m_first_line = false;
        } else {
            std::cout << "Processing subsequent line: " << std::string(data, size) << std::endl;
        }
        return true; // Placeholder return value
    }
};
int main() {
    Parser parser;
    // Demonstrate the first-line / subsequent-line distinction, then
    // reset and show that the first-line state is re-armed.
    parser.reset();
    parser.parse("First line of text", 17);
    parser.parse("Second line of text", 18);
    parser.reset();
    parser.parse("Another first line", 19);
    parser.parse("Another subsequent line", 24);
    return 0;
}
<reponame>bizmaercq/eda-reporting<gh_stars>0
-- Month-end (31-JAN-2021) general-ledger balance extract for agency 00010.
-- First branch: all accounts from cerber_bal except six special GL codes;
-- second branch: those six codes, taken per-currency from xafnfc.gltb_gl_bal.
select '31-JAN-2021' dar,
'00010' age,
cb.glcode com,
'01' cle,
'XAF' dev,
'999999' cli,
cb.glcode_5_digit chap,
(cb.cr_open - cb.dr_open) sldd,
(cb.cr_open - cb.dr_open) sldcvd,
cb.cr_mov cumcd,
cb.cr_mov cumccv,
cb.dr_mov cumdd,
cb.dr_mov cumdcv,
(cb.cr_end_balance - cb.dr_end_balance) sldf,
(cb.cr_end_balance - cb.dr_end_balance) sldcvf,
1 txb,
'31-JAN-2021' dcre,
'31-JAN-2021' dmod,
'CERBER' uticre,
'CERBER' utimod
from cerber_bal cb
WHERE cb.glcode not in ('475001000','475002000','475005000','475007000','560422000','560411000')
union
-- Special accounts: aggregated by GL code and currency, with the booking
-- rate (txb) derived from the LCY/FCY balance ratio.
select
'31-JAN-2021' dar,
'00010' age,
gb.gl_code com,
'01' cle,
gb.ccy_code dev,
'999999' cli,
substr(gb.gl_code,1,5) chap,
sum((gb.open_cr_bal-gb.open_dr_bal)) sldd,
sum((gb.open_cr_bal_lcy-gb.open_dr_bal_lcy)) sldcvd,
sum(gb.cr_mov) cumcd,
sum(gb.cr_mov_lcy) cumccv,
sum(gb.dr_mov) cumdd,
sum(gb.dr_mov_lcy) cumdcv,
(sum(gb.cr_bal-gb.dr_bal)) sldf,
sum((gb.cr_bal_lcy-gb.dr_bal_lcy)) sldcvf,
round(max((gb.cr_bal_lcy-gb.dr_bal_lcy)/(gb.cr_bal-gb.dr_bal))) txb,
'31-JAN-2021' dcre,
'31-JAN-2021' dmod,
'CERBER' uticre,
'CERBER' utimod
from xafnfc.gltb_gl_bal gb
WHERE gb.gl_code in ('475001000','475002000','475005000','475007000','560422000','560411000')
and (gb.cr_bal-gb.dr_bal)<>0
and gb.fin_year ='FY2021' and gb.period_code='M01'
group by gb.gl_code ,gb.ccy_code ;
|
<html>
<head>
    <title>Countries and Flags</title>
</head>
<body>
    <h1>Countries and Flags</h1>
    <!-- One list item per country: flag image plus name and population. -->
    <ul>
        <li>
            <figure>
                <img src="flag_of_united_states.png" alt="Flag Of United States" />
                <figcaption>
                    <span>United States of America</span>
                    <span>Population: 328.2 Million</span>
                </figcaption>
            </figure>
        </li>
        <li>
            <figure>
                <img src="flag_of_australia.png" alt="Flag Of Australia" />
                <figcaption>
                    <span>Australia</span>
                    <span>Population: 25.3 Million</span>
                </figcaption>
            </figure>
        </li>
    </ul>
</body>
</html>
#!/usr/bin/env bash
koopa_linux_os_version() {
    # """
    # Linux OS version.
    # @note Updated 2021-11-16.
    # """
    local app x
    koopa_assert_has_no_args "$#"
    # Resolve uname through koopa's locator helper.
    declare -A app=(
        [uname]="$(koopa_locate_uname)"
    )
    # Kernel release string (uname -r); fail if it comes back empty.
    x="$("${app[uname]}" -r)"
    [[ -n "$x" ]] || return 1
    koopa_print "$x"
    return 0
}
|
#!/bin/bash
# Forward the environment-provided arguments to run.sh.
# NOTE(review): FILES_LIST/DEST_DIR/UNCOMPRESS are unquoted, so values
# containing spaces will word-split — confirm that is intended.
sh run.sh ${FILES_LIST} ${DEST_DIR} ${UNCOMPRESS}
|
<reponame>Boatdude55/staging-website<gh_stars>0
/**
* @fileoverview
*
* Structor Component
*/
goog.provide('app.lib.structor.Structor');
goog.require('app.lib.interaction.ComponentInteraction');
goog.require('goog.ui.Component');
/**
 * @constructor
 * @param {string} name
 * @extends {goog.ui.Component}
 */
app.lib.structor.Structor = function(name)
{
	goog.base(this);
	/**
	 * Name of the current view.
	 *
	 * @type {string}
	 */
	this.name = name;
	/**
	 * View state
	 *
	 * @type {Object}
	 * @protected
	 */
	this.state = null;
	// Sandbox reference; null until setSandbox() is called.
	this.sandbox = null;
	/**
	 * Dynamically initialized components in the view
	 *
	 * @type {app.lib.interaction.ComponentInteraction}
	 * @protected
	 */
	this.componentController = new app.lib.interaction.ComponentInteraction();
	// Bubble controller events through this component, and let the
	// component's dispose take the controller down with it.
	this.componentController.setParentEventTarget(this);
	this.registerDisposable(this.componentController);
};
goog.inherits(app.lib.structor.Structor, goog.ui.Component);
/**
 * [headerContainer description]
 * @type {Element}
 */
app.lib.structor.Structor.prototype.headerContainer = null;
/**
 * [headerElem description]
 * @type {Element}
 */
app.lib.structor.Structor.prototype.headerElem = null;
/**
 * [contentContainer description]
 * @type {Element}
 */
app.lib.structor.Structor.prototype.contentContainer = null;
/**
 * [contentElem description]
 * @type {Element}
 */
app.lib.structor.Structor.prototype.contentElem = null;
// Base CSS class used for all generated sub-element class names.
app.lib.structor.Structor.CSS_CLASS = goog.getCssName('ui-structor');
app.lib.structor.Structor.prototype.getCssClass = function() {
	return app.lib.structor.Structor.CSS_CLASS;
};
// Children are rendered into the content container, not the root element.
app.lib.structor.Structor.prototype.getContentElement = function () {
	return this.contentContainer;
};
/** @inheritDoc */
app.lib.structor.Structor.prototype.createDom = function()
{
	var dom = this.getDomHelper();
	// Build: root > (header > h1 with the view name) + (content > container).
	// The element references are captured via assignment expressions inline.
	var el = dom.createDom(goog.dom.TagName.DIV, 'ui-structor',
			this.headerElem = dom.createDom(
				goog.dom.TagName.DIV,
				goog.getCssName(this.getCssClass(), 'header'),
				this.headerContainer = dom.createDom(
						goog.dom.TagName.H1,
						goog.getCssName(this.getCssClass(), 'header-container'),
						this.name
					)
			),
			this.contentElem = dom.createDom(
				goog.dom.TagName.DIV,
				goog.getCssName(this.getCssClass(), 'content'),
				this.contentContainer = dom.createDom(
						goog.dom.TagName.DIV,
						goog.getCssName(this.getCssClass(), 'content-container')
					)
			)
	);
	this.decorateInternal(el);
};
/** @inheritDoc */
app.lib.structor.Structor.prototype.decorateInternal = function(el)
{
	goog.base(this, 'decorateInternal', el);
	// Auto-instantiate any descendant marked with the .cmp selector.
	this.componentController.initialize({
		element: this.getElement(),
		selector: '.cmp'
	});
};
/** @inheritDoc */
app.lib.structor.Structor.prototype.enterDocument = function()
{
	goog.base(this, 'enterDocument');
	// Propagate document entry to auto-initialized components, which are
	// not regular goog.ui children and so are not handled by the base call.
	this.componentController.getAll().forEach(function(child) {
		if (!child.isInDocument())
		{
			child.enterDocument();
		}
	}, this);
};
/** @inheritDoc */
app.lib.structor.Structor.prototype.exitDocument = function()
{
	goog.base(this, 'exitDocument');
	// Mirror image of enterDocument for the auto-initialized components.
	this.componentController.getAll().forEach(function(child) {
		if (child.isInDocument())
		{
			child.exitDocument();
		}
	}, this);
};
/**
 * Returns sub component by specified name, which was initialized automatically
 * (through .cmp selector)
 *
 * @param {string} name
 * @return {goog.ui.Component}
 */
app.lib.structor.Structor.prototype.getComponent = function(name)
{
	return this.componentController.getComponentByName(name);
};
/**
 * Sets whether component is active, forwarding the flag to every child
 * component that implements a setActive method.
 *
 * @param {boolean} isActive
 */
app.lib.structor.Structor.prototype.setActive = function(isActive)
{
	this.forEachChild(function(child) {
		// Duck-typed check; the old `child.setActive && typeof ... == 'function'`
		// was redundant (typeof covers the falsy case) and used loose equality.
		if (typeof child.setActive === 'function')
		{
			child.setActive(isActive);
		}
	}, this);
	this.isActive = isActive;
};
/**
* Sets the state of the view.
*
* @param {Object} state
*/
app.lib.structor.Structor.prototype.setState = function(state)
{
this.state = state;
};
/**
* [customRender description]
* @param {Element=} opt_parentElement opt_parentElement Optional parent element to render the component into.
* @return {Element|Node} [description]
*/
app.lib.structor.Structor.prototype.customRender = function(opt_parentElement) {
return this.customRender_(opt_parentElement);
};
/**
 * Renders the component: creates its DOM if needed and enters the document
 * unless a not-yet-rendered parent will do that later.
 *
 * @param {Element=} opt_parentElement opt_parentElement Optional parent element to render the component into.
 * @param {Node=} opt_beforeNode Node before which the component is to be rendered. If left out the node is appended to the parent element.
 * @return {Element|Node} The component's root element.
 */
app.lib.structor.Structor.prototype.customRender_ = function(
    opt_parentElement, opt_beforeNode) {
  if (this.isInDocument()) {
    throw new Error(goog.ui.Component.Error.ALREADY_RENDERED);
  }
  if (!this.getElement()) {
    this.createDom();
  }
  // If this component has a parent component that isn't in the document yet,
  // we don't call enterDocument() here: when the parent enters the document,
  // the enterDocument() call propagates to its children, including this one.
  var parent = this.getParent();
  if (!parent || parent.isInDocument()) {
    this.enterDocument();
  }
  return this.getElement();
};
/**
 * Stores the sandbox this structor communicates through.
 * @param {Object} sandbox
 */
app.lib.structor.Structor.prototype.setSandbox = function (sandbox) {
  this.sandbox = sandbox;
};

/**
 * @return {Object} The sandbox previously set via setSandbox().
 */
app.lib.structor.Structor.prototype.getSandbox = function () {
  return this.sandbox;
};
|
#!/bin/bash
set -x
# Watch the Consul KV prefix and POST each change notification (the JSON
# that consul pipes to the handler on stdin) to this pod's /notify endpoint.
# Expansions are quoted so IPs/prefixes with unexpected characters cannot
# undergo word splitting or globbing.
consul watch -http-addr="http://${CONSUL_IP}:8500" -type keyprefix -prefix "${CONSUL_KEY_PREFIX}" curl -H 'Content-Type:application/json' -X POST --data-binary @- "http://${POD_IP}:8080/notify"
#!/usr/bin/env bash
#
# Copyright (c) 2020 The Wazzle Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Check for SIGNAL/SLOT connect style, removed since Qt4 support drop.
export LC_ALL=C

EXIT_CODE=0
# Any hit in src/qt means old Qt4-style connect syntax is still present.
OUTPUT=$(git grep -E '(SIGNAL|, ?SLOT)\(' -- src/qt)
if [[ -n ${OUTPUT} ]]; then
    echo "Use Qt5 connect style in:"
    echo "$OUTPUT"
    EXIT_CODE=1
fi
exit ${EXIT_CODE}
#include <stdio.h>

/*
 * Matrix Addition: read two N x N integer matrices from stdin and print
 * their element-wise sum.
 *
 * Fix: the original source contained raw (unescaped) newlines inside
 * string literals, which is invalid C; they are replaced with \n escapes,
 * and the stray "//" inside the result banner string is removed.
 */

/* Number of rows and columns */
#define N 3

int main(void)
{
    int A[N][N], B[N][N], C[N][N];
    int i, j;

    /* Read matrix A. */
    printf("Input elements in matrix A:\n");
    for (i = 0; i < N; ++i) {
        for (j = 0; j < N; ++j) {
            printf("A[%d][%d] = ", i, j);
            scanf("%d", &A[i][j]);
        }
    }

    /* Read matrix B. */
    printf("Input elements in matrix B:\n");
    for (i = 0; i < N; ++i) {
        for (j = 0; j < N; ++j) {
            printf("B[%d][%d] = ", i, j);
            scanf("%d", &B[i][j]);
        }
    }

    /* Calculate matrix addition. */
    for (i = 0; i < N; ++i) {
        for (j = 0; j < N; ++j) {
            C[i][j] = A[i][j] + B[i][j];
        }
    }

    /* Output result. */
    printf("\nMatrix addition result\n");
    for (i = 0; i < N; ++i) {
        for (j = 0; j < N; ++j) {
            printf("C[%d][%d] = %d\n", i, j, C[i][j]);
        }
    }
    return 0;
}
<filename>js/ui/filter_builder.js
"use strict";
/**
 * @name dxfilterbuilder
 * @publicName dxFilterBuilder
 * @inherits Widget
 * @module ui/filter_builder
 * @export default
 */
// Thin facade: re-export the implementation module so consumers can
// require("ui/filter_builder") directly.
module.exports = require("./filter_builder/filter_builder");
|
# Generated by nuclio.export.NuclioExporter
import numpy as np
import pandas as pd
import numbers
import sklearn
from sklearn.base import clone
from sklearn.utils import check_random_state
import matplotlib.pyplot as plt
import seaborn as sns
from cloudpickle import load
from mlrun.execution import MLClientCtx
from mlrun.datastore import DataItem
from mlrun.artifacts import get_model, PlotArtifact
from typing import Union, Callable, List
def _get_n_samples_bootstrap(n_samples, max_samples) -> int:
"""get the number of samples in a bootstrap sample
returns the total number of samples to draw for the bootstrap sample
private api in sklearn >= v0.24, taken from sklearn.ensemble._forest.py
:param n_samples: Number of samples in the dataset.
:param max_samples:
The maximum number of samples to draw from the total available:
- if float, this indicates a fraction of the total and should be
the interval `(0, 1)`;
- if int, this indicates the exact number of samples;
- if None, this indicates the total number of samples.
"""
if max_samples is None:
return n_samples
if isinstance(max_samples, numbers.Integral):
if not (1 <= max_samples <= n_samples):
msg = "`max_samples` must be in range 1 to {} but got value {}"
raise ValueError(msg.format(n_samples, max_samples))
return max_samples
if isinstance(max_samples, numbers.Real):
if not (0 < max_samples < 1):
msg = "`max_samples` must be in range (0, 1) but got value {}"
raise ValueError(msg.format(max_samples))
return int(round(n_samples * max_samples))
msg = "`max_samples` should be int or float, but got type '{}'"
raise TypeError(msg.format(type(max_samples)))
def _get_unsampled_ix(random_state, n_samples: int) -> np.array:
    """Return indices of the rows a bootstrap draw never sampled.

    Draws a full-size bootstrap sample with the given seed and reports the
    positions whose draw count is zero (the out-of-bag rows).
    """
    n_bootstrap = _get_n_samples_bootstrap(n_samples, n_samples)
    rng = check_random_state(random_state)
    drawn = rng.randint(0, n_samples, n_bootstrap)
    draw_counts = np.bincount(drawn, minlength=n_samples)
    return np.arange(n_samples)[draw_counts == 0]
def _oob_classifier_accuracy(rf, X_train, y_train) -> float:
    """
    Compute out-of-bag (OOB) accuracy for a scikit-learn forest classifier.

    Each tree votes (via predict_proba) only on the rows it did not see
    during its bootstrap fit; the majority class over the accumulated votes
    is then compared against the ground truth.

    https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/ensemble/forest.py#L425
    """
    # Accept either pandas containers or raw numpy arrays.
    X = X_train.values if isinstance(X_train, pd.DataFrame) else X_train
    y = y_train.values if isinstance(y_train, pd.Series) else y_train
    n_samples = len(X)
    n_classes = len(np.unique(y))
    # One row of accumulated class-probability votes per training sample.
    predictions = np.zeros((n_samples, n_classes))
    for tree in rf.estimators_:
        # Rows this tree never sampled -> only they receive its vote.
        unsampled_indices = _get_unsampled_ix(tree.random_state, n_samples)
        tree_preds = tree.predict_proba(X[unsampled_indices, :])
        predictions[unsampled_indices] += tree_preds
    # Majority vote over the accumulated probabilities.
    predicted_class_indexes = np.argmax(predictions, axis=1)
    predicted_classes = [rf.classes_[i] for i in predicted_class_indexes]
    oob_score = np.mean(y == predicted_classes)
    return oob_score
def permutation_importances(
    context: MLClientCtx,
    model: DataItem,
    dataset: DataItem,
    labels: str,
    figsz=(10, 5),
    plots_dest: str = "plots",
    fitype: str = "permute",
) -> pd.DataFrame:
    """Calculate the change in the OOB-accuracy metric per feature.

    type 'permute' uses a pre-estimated model,
    type 'dropcol' re-estimates the model without the column.

    :param context:    the function's execution context
    :param model:      a trained model
    :param dataset:    features and ground truths, regression targets
    :param labels:     name of the ground truths column
    :param figsz:      matplotlib figure size
    :param plots_dest: path within artifact store
    :param fitype:     importance type, "permute" or "dropcol"
    :returns: DataFrame of (importance, feature), sorted descending
    """
    model_file, model_data, _ = get_model(model.url, suffix=".pkl")
    model = load(open(str(model_file), "rb"))

    X = dataset.as_df()
    y = X.pop(labels)
    header = X.columns

    metric = _oob_classifier_accuracy
    baseline = metric(model, X, y)

    imp = []
    for col in X.columns:
        # Use `==` for string comparison: the original used `is`, an identity
        # check that is not guaranteed to hold for equal strings.
        if fitype == "permute":
            # Shuffle one column, re-score, then restore the column.
            save = X[col].copy()
            X[col] = np.random.permutation(X[col])
            m = metric(model, X, y)
            X[col] = save
            imp.append(baseline - m)
        elif fitype == "dropcol":
            X_ = X.drop(col, axis=1)
            model_ = clone(model)
            # Reuse the fitted model's seed; the original referenced an
            # undefined `random_state` name, raising NameError here.
            model_.random_state = getattr(model, "random_state", None)
            model_.fit(X_, y)
            o = model_.oob_score_
            imp.append(baseline - o)
        else:
            raise ValueError("unknown fitype, only 'permute' or 'dropcol' permitted")

    zipped = zip(imp, header)
    feature_imp = pd.DataFrame(sorted(zipped), columns=["importance", "feature"])
    feature_imp.sort_values(by="importance", ascending=False, inplace=True)

    plt.clf()
    plt.figure(figsize=figsz)
    sns.barplot(x="importance", y="feature", data=feature_imp)
    plt.title(f"feature importances-{fitype}")
    plt.tight_layout()

    context.log_artifact(
        PlotArtifact(f"feature importances-{fitype}", body=plt.gcf()),
        local_path=f"{plots_dest}/feature-permutations.html",
    )
    context.log_dataset(
        f"feature-importances-{fitype}-tbl", df=feature_imp, index=False
    )
    # Return the table to honor the declared -> pd.DataFrame annotation
    # (the original returned None).
    return feature_imp
|
'use strict';

// Comment out the following instruction to see more debugging logs
//process.env.DEBUG = 'actions-on-google:*';

const App = require('actions-on-google').DialogflowApp;
const functions = require('firebase-functions');
const admin = require('firebase-admin');
const requestLib = require('request');
const cors = require('cors')();

// Authentication Client Information (from Auth0)
//
// Note: This project's Google client information from the Google Cloud Console was added to Auth0 to
// connect the two providers. By using Auth0's client information, this allows us to use Auth0 for handling
// the authentication process and providing the approved user access to Google's APIs.
//
// Reference: https://auth0.com/docs/connections/social/google#3-enable-the-connection-in-auth0
//
// SECURITY NOTE(review): client IDs/secrets and tokens are hard-coded below;
// move them to functions config or a secret manager before shipping.
const DOMAIN = "cellbots-ai.auth0.com";
const CLIENT_ID = "ZYqLm1MwEvYhQ5OJpqKWeZoQpRhSPux0";
const CLIENT_SECRET = "<KEY>";
const REDIRECT_URL = "";

// Auth0 Client
var AuthenticationClient = require('auth0').AuthenticationClient;
var auth0Client = new AuthenticationClient({
  domain: DOMAIN,
  clientId: CLIENT_ID
});

// Robot Actions (Dialogflow action names dispatched in googleHome below).
const ROBOT_ACTION_START = 'Robot.START';
const ROBOT_ACTION_STOP = 'Robot.STOP';
const ROBOT_ACTION_GOTO = 'Robot.GOTO';
const ROBOT_ACTION_COME_HERE = 'Robot.COME_HERE';
const ROBOT_ACTION_ANIMATION = 'Robot.ANIMATION';

// Variables for storing data parameters
// NOTE(review): module-level mutable state is shared across concurrent
// invocations of the exported functions — confirm this is acceptable.
let currentRobotName = null;
let currentDestinationName = null;
let currentDuration = null;
let currentAnimation = null;

// For message generation
let firebaseSequenceID = 1;

// initialize the SDK to talk to firebase
admin.initializeApp({
  credential: admin.credential.applicationDefault(),
  apiKey: "<KEY>",
  authDomain: "cellbots-robot-app.firebaseapp.com",
  databaseURL: "https://cellbots-robot-app.firebaseio.com",
  storageBucket: "cellbots-robot-app.appspot.com",
});

// Twilio (used by getWebRTCIceServers below)
// SECURITY NOTE(review): account SID and auth token are hard-coded.
const TWILIO_ACCOUNT_SID = "AC0e267eb539e2fb2b44d8a455b6b3b030";
const TWILIO_AUTH_TOKEN = "<PASSWORD>";
/**
 * Express-style middleware: verifies the Firebase ID token carried in the
 * `Authorization: Bearer <token>` header, attaches the decoded token to
 * `req.user`, then calls `next()`. Responds 403 when the header is
 * missing/malformed or verification fails.
 */
const validateFirebaseIdToken = (req, res, next) => {
  cors(req, res, () => {
    const authHeader = req.headers.authorization;
    if (!authHeader || authHeader.split('Bearer ').length <= 1) {
      console.log("Unauthenticated request: " + authHeader);
      res.status(403).send('Unauthorized');
      return;
    }
    const idToken = authHeader.split('Bearer ')[1];
    admin.auth().verifyIdToken(idToken)
      .then((decodedIdToken) => {
        console.log('ID Token correctly decoded', decodedIdToken);
        req.user = decodedIdToken;
        next();
      })
      .catch((error) => {
        console.error('Error while verifying Firebase ID token:', error);
        res.status(403).send('Unauthorized');
      });
  });
};
exports.getWebRTCIceServers = functions.https.onRequest((request, response) => {
validateFirebaseIdToken(request, response, () => {
var post_data = "";
var post_options = {
url: 'https://api.twilio.com/2010-04-01/Accounts/' + TWILIO_ACCOUNT_SID + '/Tokens.json',
body: '',
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
'Content-Length': Buffer.byteLength(post_data),
'Authorization': 'Basic ' + new Buffer(TWILIO_ACCOUNT_SID + ':' + TWILIO_AUTH_TOKEN).toString("base64")
}
};
var post_req = requestLib.post(post_options, function (error, response2, body) {
response.setHeader('Content-Type', 'application/json');
console.log('Response: ' + body);
response.status(200);
response.send(body);
});
post_req.write(post_data);
post_req.end();
});
});
exports.googleHome = functions.https.onRequest((request, response) => {
// To manage all the intents we create an object which has the
// collection of intents and it’s callback functions.
const ACTIONS = {
'Robot.START': {callback: makeRobotActionHandler(startIntentHandler)},
'Robot.STOP': {callback: makeRobotActionHandler(stopIntentHandler)},
'Robot.GOTO': {callback: makeRobotActionHandler(moveRobot)},
'Robot.COME_HERE': {callback: makeRobotActionHandler(comeHereHandler)},
'Robot.ANIMATION': {callback: makeRobotActionHandler(animationIntentHandler)},
'input.unknown': {callback: defaultFallbackIntentHandler},
'input.welcome': {callback: defaultWelcomeIntentHandler}
};
// Assign agent parameters
let robotGivenName = request.body.result.parameters.RobotGivenName;
let robotLastName = request.body.result.parameters.RobotLastName;
console.log("Given Name: " + robotGivenName + ", Last Name: " + robotLastName);
if (robotLastName) {
// Full Name (given + last)
currentRobotName = robotGivenName + " " + robotLastName;
} else {
// Invalid last name. So, use Given Name only.
currentRobotName = robotGivenName;
}
console.log("Full Name: " + currentRobotName);
currentDestinationName = request.body.result.parameters.Place;
currentDuration = request.body.result.parameters.duration;
currentAnimation = request.body.result.parameters.Animation;
// What the user says...
console.log('HEARD > ' + request.body.result.resolvedQuery);
// Maps the incoming intent to its corresponding handler function.
let app = new App({request: request, response: response});
let intent = app.getIntent();
try {
// In order to return a response we’ll get the data and build the statement.
app.handleRequest(ACTIONS[intent].callback);
} catch(e) {
generateErrorHeader("Uh oh! Something broke trying to handle the request. Please try again later.");
}
////////////////////////////////////////////////////////////////////////////
// Authorization functions
/**
 * Resolves the Firebase user record that matches an Auth0 access token:
 * looks up the email via Auth0, then queries Firebase Auth by that email.
 *
 * @param {string} token Auth0 access token
 * @return {Promise} resolves with the Firebase user record
 */
function findFirebaseUser(token) {
  console.log("Access Token: " + token);
  return findUserEmail(token)
    .then((email) => admin.auth().getUserByEmail(email))
    .catch((e) => Promise.reject(e));
}
/**
 * Resolves the email address associated with an Auth0 access token.
 *
 * Fix: on Auth0 error the original neither resolved nor rejected, leaving
 * the promise pending forever; it now rejects so callers' .catch() runs.
 *
 * @param {string} token Auth0 access token
 * @return {Promise<string>} resolves with the user's email
 */
function findUserEmail(token) {
  return new Promise((resolve, reject) => {
    auth0Client.getProfile(token, function(error, userInfo) {
      if (error) {
        console.log("Something broke trying to retrieve the user\'s email using auth0.");
        reject(error);
      } else {
        console.log("Success! User Info: " + JSON.stringify(userInfo, null, 4));
        resolve(userInfo.email);
      }
    });
  });
}
////////////////////////////////////////////////////////////////////////////
/**
 * Wraps a concrete robot action into a DialogflowApp intent handler.
 *
 * The returned handler:
 *  1. rejects requests with no identifiable user,
 *  2. resolves the Firebase user from the Auth0 access token,
 *  3. determines the target robot (explicit name from the utterance, or the
 *     user's most recently updated robot as a fallback),
 *  4. delegates to `action` via executeAction().
 *
 * @param {function} action intent-specific handler, e.g. startIntentHandler
 * @return {function} callback suitable for app.handleRequest()
 */
function makeRobotActionHandler(action) {
  return function(app) {
    // If the Companion app fires up this Cloud Function, it will not stop.
    if (app.getUser() == null) {
      console.log("User could not be identified. Closing app.");
      generateSpeechHeader("Sorry, I could not identify your user account. Either sign in again or try again later.");
      generateErrorHeader("Sorry, I could not identify your user account. Either sign in again or try again later.");
      return;
    };
    // Log user's info for debugging purposes.
    console.log("User: " + JSON.stringify(app.getUser(), null, 4));
    // Accessing user information (UID)
    findFirebaseUser(app.getUser().accessToken).then(user => {
      admin.database().ref().child('robots').child(user.uid)
        .once('value', function(snapshot) {
          var ac = request.body.result.action;
          console.log("ACTION: " + ac);
          // Execute action using the user's current robot
          // Length 1 is due to the possible case of <empty given name> + " " + <empty last name>.
          if (currentRobotName && currentRobotName.length > 1) {
            executeAction(ac, action, user, generateErrorHeader);
          } else {
            // No robot named in the utterance: fall back to the most
            // recently updated robot on the account.
            lookUpCurrentRobot(user.uid, function(robotName) {
              if (robotName) {
                currentRobotName = robotName;
                executeAction(ac, action, user, generateErrorHeader);
              } else {
                generateErrorHeader("No working robots were found on your account. Try adding one in the RobotApp and try again later.");
                return;
              }
            });
          };
        }, function (errorObject) {
          generateErrorHeader("An unexpected error occured when trying to process the user's information.");
        });
    }).catch(e => {
      generateErrorHeader("It seems like you didn't complete the sign in process correctly. Try signing in with the correct account and password, or create a new account.");
    });
  }
}
////////////////////////////////////////////////////////////////////////////
// Start & stop
/**
 * Send "Start" or "Stop" command to Firebase.
 *
 * Writes a one-shot movement command keyed by a fresh uuid under
 * robot_goals/<userID>/<robotID>/curret_command.
 *
 * @param {string} userID  Firebase UID owning the robot
 * @param {string} robotID robot uuid
 * @param {string} action  "start_command" or "stop_command"
 */
function sendRobotMovementCommand(userID, robotID, action) {
  console.log("sendRobotMovementCommand()");
  // get time stamp from server
  const dbTime = admin.database.ServerValue.TIMESTAMP;
  var writeObject = {
    'uuid': guid(),
    'timestamp': dbTime,
    'sequence': firebaseSequenceID++,
    // Command payload
    'value': action,
    'was_executed': false,
    'sNextSequenceNumber': firebaseSequenceID
  };
  // NOTE(review): "curret_command" looks like a typo for "current_command" —
  // confirm against the robot client's database schema before renaming.
  var refString = "robot_goals/" + userID + "/" + robotID + "/curret_command";
  admin.database().ref(refString).child(writeObject.uuid).set(writeObject);
  console.log('Start/stop order executed correctly');
}
/**
 * Handles Robot.START: resolves the robot by name, writes a start command,
 * and confirms via speech.
 */
function startIntentHandler(userID, robotName) {
  console.log('Start intent');
  console.log("userID = " + userID);
  console.log("robotName = " + robotName);
  // Looks for robot name
  lookUpRobotByName(userID, robotName, (robotData) => {
    sendRobotMovementCommand(userID, robotData.uuid, "start_command");
    generateSpeechHeader(robotName + " is starting");
  });
}
/**
 * Handles Robot.STOP: resolves the robot by name, writes a stop command,
 * and confirms via speech.
 */
function stopIntentHandler(userID, robotName) {
  console.log('Stop intent');
  console.log("userID = " + userID);
  console.log("robotName = " + robotName);
  // Looks for robot name
  lookUpRobotByName(userID, robotName, (robotData) => {
    sendRobotMovementCommand(userID, robotData.uuid, "stop_command");
    generateSpeechHeader(robotName + " is stopping");
  });
}
////////////////////////////////////////////////////////////////////////////
// Go to
/**
 * Handles Robot.GOTO: resolves the robot and the destination POI on the
 * robot's map, writes a drivePOI goal, and confirms via speech.
 */
function moveRobot(userID, robotName, destinationName) {
  console.log("moveRobot()");
  console.log("userID = " + userID);
  console.log("robotName = " + robotName);
  console.log("destinationName = " + destinationName);
  // Looks for robot name
  lookUpRobotByName(userID, robotName, (robotData) => {
    // Looks for POI name on the robot's current map
    lookUpPlaceIDByName(userID, robotData.map, destinationName, (destination) => {
      // Send command to Firebase
      sendRobotToPlaceCommand(userID, robotData.uuid, "drivePOI", destination.uuid);
      generateSpeechHeader("The robot " + robotName + " is going to the " + destinationName);
    });
  });
}
/**
 * Writes a "go to place" goal for the given robot to Firebase.
 *
 * @param {string} userID        Firebase UID owning the robot
 * @param {string} robotID       robot uuid
 * @param {string} action        goal name (callers pass "drivePOI")
 * @param {string} destinationID uuid of the target point of interest
 */
function sendRobotToPlaceCommand(userID, robotID, action, destinationID) {
  console.log("sendRobotToPlaceCommand()");
  // get time stamp from server
  const dbTime = admin.database.ServerValue.TIMESTAMP;
  const goal = {
    'uuid': guid(),
    'sequence': firebaseSequenceID++,
    'timestamp': dbTime,
    'name': action,
    'version': "1.0.0",
    'parameters': {"target": destinationID}, // destination
    'priority': 100
  };
  admin.database()
    .ref("robot_goals/" + userID + "/" + robotID + "/goals")
    .child(goal.uuid)
    .set(goal);
}
////////////////////////////////////////////////////////////////////////////
// Come here
/**
 * Handles Robot.COME_HERE: drives the robot to the POI representing the
 * Google Home device's position (callers pass "google home" as the
 * destination name).
 */
function comeHereHandler(userID, robotName, destinationName) {
  console.log("comeHereHandler()");
  console.log("userID = " + userID);
  console.log("robotName = " + robotName);
  console.log("destinationName = " + destinationName);
  // Looks for robot name
  lookUpRobotByName(userID, robotName, (robotData) => {
    // Looks for POI name
    lookUpPlaceIDByName(userID, robotData.map, destinationName, (destination) => {
      // Come here actually goes to a POI named "google home"
      sendRobotToPlaceCommand(userID, robotData.uuid, "drivePOI", destination.uuid);
      generateSpeechHeader(robotName + " is going to Google Home's position");
    });
  });
}
////////////////////////////////////////////////////////////////////////////
// Animations
/**
 * Handles Robot.ANIMATION: resolves the robot by name and forwards the
 * animation request to Firebase.
 *
 * @param {string} userID Firebase UID
 * @param {string} currentRobotName robot display name spoken by the user
 * @param {string} animationName animation to play
 */
function animationIntentHandler(userID, currentRobotName, animationName) {
  lookUpRobotByName(userID, currentRobotName, function(robotData) {
    const robotID = robotData.uuid;
    sendAnimationCommand(userID, robotID, "animation", animationName);
  })
}
/**
 * Writes an "animation" goal for the given robot to Firebase and confirms
 * via speech.
 *
 * Fixes vs. the original:
 * - accepts the animation name as a 4th parameter, matching the only call
 *   site (animationIntentHandler passes 4 args; the 3-param original bound
 *   the literal "animation" and silently dropped the real animation name);
 * - adds a 'uuid' field — the original referenced writeObject.uuid without
 *   ever setting it, so the write targeted .child(undefined);
 * - the speech confirmation now names the animation instead of echoing
 *   currentDestinationName (the go-to place).
 *
 * @param {string} userID    Firebase UID owning the robot
 * @param {string} robotID   robot uuid
 * @param {string} action    goal name (callers pass "animation")
 * @param {string} animation name of the animation to run
 */
function sendAnimationCommand(userID, robotID, action, animation) {
  // get time stamp from server
  const dbTime = admin.database.ServerValue.TIMESTAMP;
  var writeObject = {
    'uuid': guid(),
    'name': action,
    'parameters': {"animation": animation}, // animation
    'timestamp': dbTime,
    'version': "1.0.0",
    'priority': 100
  };
  var refString = "robot_goals/" + userID + "/" + robotID + "/goals";
  admin.database().ref(refString).child(writeObject.uuid).set(writeObject);
  generateSpeechHeader(currentRobotName + " is executing the " + animation + " animation");
}
////////////////////////////////////////////////////////////////////////////
// Other functions
/**
 * Generates a pseudo-random identifier in 8-4-4-4-12 lowercase-hex form.
 * Uses Math.random, so it is NOT cryptographically secure.
 */
function guid() {
  const s4 = () =>
    Math.floor((1 + Math.random()) * 0x10000).toString(16).substring(1);
  return [s4() + s4(), s4(), s4(), s4(), s4() + s4() + s4()].join('-');
}
/**
 * Looks up the name of the user's most recently updated robot and passes it
 * to `callback` (undefined when the user has no usable robots).
 */
function lookUpCurrentRobot(userID, callback) {
  console.log("Requested robot is null or invalid. Looking up user's current robot");
  console.log("lookUpCurrentRobot()");
  console.log("User ID: " + userID);
  admin.database().ref('robots').child(userID).once('value', function (snapshot) {
    const robots = snapshot.val();
    let robotName;
    let latestUpdateTime = 0;
    // Keep the robot with the newest last_update_time. Covers both the
    // single-robot and the multiple-robots case.
    for (const robotID in robots) {
      const robot = robots[robotID];
      if (!robot.name || !robot.last_update_time) {
        continue;
      }
      if (robot.last_update_time >= latestUpdateTime) {
        robotName = robot.name;
        latestUpdateTime = robot.last_update_time;
        console.log("Found robot named " + robotName + " with latest update time " + latestUpdateTime);
      }
    }
    console.log("Using latest updated robot: " + robotName);
    callback(robotName);
  })
}
/**
 * Finds a robot by (case-insensitive) name under the user's account and
 * invokes `callback` with its record. Reports an error back to the
 * assistant when no robot matches or the database read fails.
 */
function lookUpRobotByName(userID, robotName, callback) {
  console.log("lookUpRobotByName()");
  console.log("User ID: " + userID);
  console.log("Robot Name: " + robotName);
  admin.database().ref('robots').child(userID)
    .once('value', function (snapshot) {
      const robots = snapshot.val();
      for (const robotID in robots) {
        const robot = robots[robotID];
        if (robot.name && robot.name.toLowerCase() == robotName.toLowerCase()) {
          console.log("Found robot " + JSON.stringify(robot, null, 4));
          callback(robot);
          return; // stop at the first match (two robots may share a name)
        }
      }
      // Robot name not found
      generateErrorHeader("No robot named " + robotName + " was found. Check if the robot name is correct and try again later.");
    }, function (errorObject) {
      generateErrorHeader("An unexpected error has occured while looking up the robot in the database. Check if the robot name is correct and try again later.");
    });
}
/**
 * Finds a point-of-interest by (case-insensitive) name on the given map and
 * invokes `callback` with its record. Reports an error back to the
 * assistant when the place is missing or the database read fails.
 */
function lookUpPlaceIDByName(userID, mapID, placeName, callback) {
  console.log("lookUpPlaceIDByName()");
  console.log("User ID: " + userID);
  console.log("Map ID: " + mapID);
  console.log("Place Name: " + placeName);
  admin.database().ref('objects').child(userID).child(mapID)
    .once('value', function (snapshot) {
      const objects = snapshot.val();
      for (const objectID in objects) {
        const object = objects[objectID];
        // Short-circuit && preserves the original nested-if semantics.
        if (object.type == 'point_of_interest' &&
            'variables' in object &&
            'name' in object.variables &&
            object.variables.name.toLowerCase() == placeName.toLowerCase()) {
          console.log("Found place " + object);
          callback(object);
          return; // stop at the first match
        }
      }
      // Place not found
      generateErrorHeader("I could not find the place " + placeName + ". Verify the place name and try again later.");
    }, function (errorObject) {
      generateErrorHeader("An unexpected error has occured while trying to find the map in the database. Check the database or try again later.");
    });
}
/**
 * Dispatches a robot action handler with the parameter list matching its
 * action type.
 *
 * @param {string}   actionType one of the ROBOT_ACTION_* constants
 * @param {function} action     handler produced for the matched intent
 * @param {Object}   user       Firebase user record (uid is used)
 * @param {function} generateErrorHeader error reporter
 */
function executeAction(actionType, action, user, generateErrorHeader) {
  switch (actionType) {
    case ROBOT_ACTION_START:
    case ROBOT_ACTION_STOP:
      action(user.uid, currentRobotName);
      break;
    case ROBOT_ACTION_GOTO:
      // Executes action if the robot has a name assigned
      action(user.uid, currentRobotName, currentDestinationName);
      break;
    case ROBOT_ACTION_COME_HERE:
      // "Come here" drives to a POI conventionally named "google home".
      var location = "google home";
      action(user.uid, currentRobotName, location);
      break;
    case ROBOT_ACTION_ANIMATION:
      action(user.uid, currentRobotName, currentAnimation);
      break;
    default:
      // The original interpolated `action.toString` (no parentheses), which
      // stringifies the function reference itself; report the type instead.
      generateErrorHeader("An unexpected error has occurred while trying to execute the action " + actionType + ". Please, try again later.");
  }
}
/*
 * Sends a JSON error payload (speech + display text) back to the caller.
 */
function generateErrorHeader(errorMessage) {
  console.log(errorMessage);
  // If there is an error let the user know
  response.setHeader('Content-Type', 'application/json');
  const payload = { 'speech': errorMessage, 'displayText': errorMessage };
  response.send(JSON.stringify(payload));
}
/*
 * Sends a JSON speech payload (speech + display text) back to the caller.
 */
function generateSpeechHeader(speechMessage) {
  console.log(speechMessage);
  response.setHeader('Content-Type', 'application/json');
  const payload = { 'speech': speechMessage, 'displayText': speechMessage };
  response.send(JSON.stringify(payload));
}
////////////////////////////////////////////////////////////////////////////
// Default fallback handlers
/**
 * Default Fallback Intent Handler
 * Invoked for 'input.unknown'; currently only logs the event.
 */
function defaultFallbackIntentHandler(app) {
  console.log('Default Fallback Intent');
}
/**
 * Default Welcome Intent Handler
 * Invoked for 'input.welcome'; currently only logs the event.
 */
function defaultWelcomeIntentHandler(app) {
  console.log('Default Welcome intent');
}
});
|
<gh_stars>0
#include "libevent_utils.h"
#include <event2/thread.h>
namespace thread_loop {
// Creates a new libevent event_base wrapped in a unique_ptr that releases it
// via event_base_free, and makes the base notifiable so other threads can
// wake its loop. Throws std::runtime_error if either step fails.
UniqPtrEventBase createBaseEvent() {
UniqPtrEventBase eventBase(event_base_new(), &event_base_free);
if (!eventBase)
throw std::runtime_error("Failed to create event_base in main loop.");
if(0 != evthread_make_base_notifiable(eventBase.get()))
throw std::runtime_error("Failed to make_base_notifiable in main loop.");
return eventBase;
}
} // namespace thread_loop
|
using System;
using System.Threading;
using System.Threading.Tasks;
/// <summary>
/// Owns at most one database transaction and exposes start/commit/rollback
/// operations. The bodies are placeholders (see the inline examples).
/// NOTE(review): IDbTransaction is declared in System.Data, which is not
/// imported here — confirm the missing using directive.
/// NOTE(review): the async methods contain no awaits yet (CS1998); they will
/// once the example calls are enabled.
/// </summary>
public class TransactionManager
{
    // The currently active transaction, or null when none is in progress.
    private IDbTransaction transaction;

    /// <summary>Starts a new database transaction (placeholder body).</summary>
    public async Task StartTransactionAsync(CancellationToken cancellationToken)
    {
        // Start a new database transaction
        // Example: transaction = await connection.BeginTransactionAsync(cancellationToken);
    }

    /// <summary>
    /// Commits the current transaction; silently no-ops when none is active.
    /// NOTE(review): asymmetric with RollbackAsync, which throws when there
    /// is no active transaction — confirm which behavior is intended.
    /// </summary>
    public async Task CommitAsync(CancellationToken cancellationToken)
    {
        if (transaction != null)
        {
            // Commit the current transaction
            // Example: await transaction.CommitAsync(cancellationToken);
            transaction = null; // Reset transaction after commit
        }
    }

    /// <summary>
    /// Rolls back the current transaction; throws InvalidOperationException
    /// when no transaction is active.
    /// </summary>
    public async Task RollbackAsync(CancellationToken cancellationToken)
    {
        ThrowIfTransactionNull();
        // Roll back the current transaction
        // Example: await transaction.RollbackAsync(cancellationToken);
        transaction = null; // Reset transaction after rollback
    }

    // Guard: raises when there is no transaction to operate on.
    private void ThrowIfTransactionNull()
    {
        if (transaction == null)
        {
            throw new InvalidOperationException("No active transaction to roll back");
        }
    }
}
The modified model should include changes to the original model's architecture, such as adding or removing layers, as well as adjustments to hyperparameters, such as the learning rate or the choice of optimizer. Additionally, data augmentation techniques — such as image cropping, rotation, and mirroring — should be applied to improve the model's performance on the new dataset.
// Copyright (c) 2015-2016, ETH Zurich, <NAME>, Zurich Eye
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the ETH Zurich, Wyss Zurich, Zurich Eye nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL ETH Zurich, <NAME>urich, Zurich Eye BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pragma once
#include <iostream>
#include <functional>
#include <ze/common/types.hpp>
#include <ze/common/manifold.hpp>
namespace ze {
//! Template function to compute numerical derivatives. See unit tests for examples.
//! The traits used for this functions are defined in common/manifold.h
//!
//! Computes the Jacobian of h at x by central finite differences on the
//! manifold: each tangent coordinate is perturbed by +/-delta via retract(),
//! and the output difference is measured in the tangent space via local().
template<class Y, class X>
typename Eigen::Matrix<real_t, traits<Y>::dimension, traits<X>::dimension>
numericalDerivative(std::function<Y(const X&)> h, const X& x, real_t delta = 1e-5)
{
typedef typename Eigen::Matrix<real_t, traits<Y>::dimension, traits<X>::dimension> Jacobian;
typedef typename traits<Y>::TangentVector TangentY;
typedef typename traits<X>::TangentVector TangentX;
// Runtime dimension of X (supports dynamically-sized manifolds).
const int N_X = traits<X>::getDimension(x);
// Get value at x.
Y hx = h(x);
const int N_Y = traits<Y>::getDimension(hx);
// Prepare a tangent vector to perturb x.
TangentX dx(N_X, 1);
dx.setZero();
// Compute numerical Jacobian column by column.
Jacobian H(N_Y, N_X);
H.setZero();
// Central-difference scaling: (f(x+d) - f(x-d)) / (2 * delta).
real_t factor = 1.0 / (2.0 * delta);
for(int i = 0; i < N_X; ++i)
{
dx(i) = delta;
TangentY dy1 = traits<Y>::local(hx, h(traits<X>::retract(x, dx)));
dx(i) = -delta;
TangentY dy2 = traits<Y>::local(hx, h(traits<X>::retract(x, dx)));
dx(i) = 0;
H.col(i) << (dy1 - dy2) * factor;
}
return H;
}
} // namespace ze
|
import os
def parse_box_file(file_path):
    """Parse a .box file and return ``(function_name, average_range)``.

    Expected line format::

        Function: <name>
        Range: <float>
        Range: <float>

    :param file_path: path to the .box file
    :return: (function name, mean of all Range values), or (None, None)
             when either piece of data is missing
    """
    function_name = None
    ranges = []
    with open(file_path, "r") as file:
        for line in file:
            if line.startswith("Function:"):
                # partition() tolerates a missing space after the colon,
                # where the original split(': ')[1] raised IndexError.
                function_name = line.partition(":")[2].strip()
            elif line.startswith("Range:"):
                value_text = line.partition(":")[2].strip()
                try:
                    ranges.append(float(value_text))
                except ValueError:
                    # Skip malformed range values instead of crashing.
                    continue
    if function_name and ranges:
        return function_name, sum(ranges) / len(ranges)
    return None, None
def generate_summary_report(directory):
    """Print a summary of average ranges for every ``*.box`` file found
    (non-recursively) in ``directory``.

    :param directory: folder to scan for .box files
    """
    function_ranges = {}
    for file_name in os.listdir(directory):
        if not file_name.endswith(".box"):
            continue
        file_path = os.path.join(directory, file_name)
        function_name, average_range = parse_box_file(file_path)
        # Compare against None explicitly: an average range of 0.0 is a
        # legitimate value, and the original truthiness check dropped it.
        if function_name and average_range is not None:
            function_ranges[function_name] = average_range
    print("Function Summary Report:")
    print("------------------------")
    for function, average_range in function_ranges.items():
        print(f"Function: {function}, Average Range: {average_range}")
# Example usage
# NOTE(review): this runs at import time with a placeholder path; guard it
# with `if __name__ == "__main__":` and a real path before deploying.
generate_summary_report('/path/to/box/files')
#!/usr/bin/env bash
# Restores an OJS site's files from a day-of-week snapshot tarball stored in
# <sitepath>/snapshots/. Restoring the database is a separate step
# (ojs_importdb.sh), as the final message reminds.
PATH=/opt/oulib/ojs/bin:/usr/local/bin:/usr/bin:/bin:/sbin:$PATH
## Require arguments
if [ -z "$1" ]; then
cat <<USAGE
ojs_restore.sh restores an existing site snapshot backup.

Usage: ojs_restore.sh \$SITEPATH \$DOW

\$SITEPATH path to OJS site to restore
\$DOW lowercase day-of-week abbreviation indicating backup
to restore. Must be one of sun, mon, tue, wed, thu, fri, or sat.
USAGE
exit 1;
fi
SITEPATH=$1
DOW=$2
SITE=$(basename "$SITEPATH")
SNAPSHOTFILE="${SITEPATH}/snapshots/${SITE}.${DOW}.tar.gz"
# Nothing to do when the site directory is absent.
if [ ! -d "$SITEPATH" ]; then
echo "${SITEPATH} doesn't exist, nothing to restore."
exit 0
fi
# With no day-of-week given, list the available snapshots and stop.
if [ -z "${DOW}" ]; then
echo "No snapshot specified."
echo "The following snapshots exist:"
ls "${SITEPATH}/snapshots/"
exit 0
fi
if [ ! -f "$SNAPSHOTFILE" ]; then
echo "No snapshot at ${SNAPSHOTFILE}"
exit 0
fi
echo "Restoring ${DOW} snapshot of ${SITEPATH}."
# Tarballs include the $SITE folder, so we need to strip that off
# when extracting
# (extraction runs as nginx so restored files keep the web server's ownership)
sudo -u nginx tar -xvf "${SNAPSHOTFILE}" -C "${SITEPATH}" --strip-components=1 --no-overwrite-dir
echo "Files from snapshot restored."
echo "Now run ojs_importdb.sh ${SITEPATH} to restore the db for the site."
<gh_stars>1-10
// Nuxt configuration: universal (SSR) mode with Vuetify, PWA, sitemap and
// Firebase-backed plugins.
const colors = require('vuetify/es5/util/colors').default;
module.exports = {
mode: 'universal',
srcDir: 'src',
// Default <head>: title template plus standard meta tags.
head: {
titleTemplate: '%s - ' + process.env.npm_package_name,
title: process.env.npm_package_name || '',
meta: [
{ charset: 'utf-8' },
{ name: 'viewport', content: 'width=device-width, initial-scale=1' },
{
hid: 'description',
name: 'description',
content: process.env.npm_package_description || ''
}
],
link: [{ rel: 'icon', type: 'image/x-icon', href: '/favicon.ico' }]
},
// Progress bar shown during route navigation.
loading: { color: '#0ff', continuous: true, height: '5px' },
css: [],
plugins: [
{ src: '~/plugins/fireAuth' },
// localStorage exists only in the browser, hence ssr: false.
{ src: '~/plugins/localStorage.js', ssr: false }
],
buildModules: ['@nuxtjs/vuetify'],
modules: [
// Doc: https://axios.nuxtjs.org/usage
'@nuxtjs/axios',
'@nuxtjs/pwa',
// Doc: https://github.com/nuxt-community/dotenv-module
'@nuxtjs/dotenv',
'@nuxtjs/sitemap'
],
/*
** See https://axios.nuxtjs.org/options
*/
axios: {},
/*
** https://github.com/nuxt-community/vuetify-module
*/
vuetify: {
customVariables: ['~/assets/variables.scss'],
treeShake: true,
theme: {
dark: true,
themes: {
dark: {
primary: colors.blue.darken2,
accent: colors.grey.darken3,
secondary: colors.amber.darken3,
info: colors.teal.lighten1,
warning: colors.amber.base,
error: colors.deepOrange.accent4,
success: colors.green.accent3
}
}
}
},
sitemap: {
//https://github.com/nuxt-community/sitemap-module
hostname: 'https://ssr-test-afbfd.firebaseapp.com',
gzip: true,
exclude: ['/admin/**'],
async routes() {
// NOTE(review): `db` is required but unused until real route lookups
// replace the empty return below.
const { db } = require('./services/fireInit');
return []; //remove this line once db calls are in place.
}
},
build: {
extractCSS: true,
//publicPath: '/',
extend(config, ctx) {}
}
};
|
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2789-1
#
# Security announcement date: 2013-11-01 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:44 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - strongswan:4.4.1-5.4
#
# Last versions recommended by the security team:
# - strongswan:4.4.1-5.8
#
# CVE List:
# - CVE-2013-6075
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade strongswan=4.4.1-5.8 -y
|
#!/bin/bash
#
# Copyright (c) 2014, 2022, Oracle and/or its affiliates.
#
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.

# Container entrypoint: if AdminServer.log does not exist the container is
# starting for the first time, so create the WebLogic domain from a
# supplied properties file; otherwise (container restart) just start the
# Admin Server and tail its log to keep the container in the foreground.

########### SIGTERM handler ############
function _term() {
   echo "Stopping container."
   echo "SIGTERM received, shutting down the server!"
   ${DOMAIN_HOME}/bin/stopWebLogic.sh
}

########### SIGKILL handler ############
# NOTE: SIGKILL cannot be caught by a process (POSIX), so the trap
# registered below for it is effectively a no-op; kept unchanged here.
function _kill() {
   echo "SIGKILL received, shutting down the server!"
   kill -9 $childPID
}

# Set SIGTERM handler
trap _term SIGTERM
# Set SIGKILL handler
trap _kill SIGKILL

# Define DOMAIN_HOME (DOMAIN_NAME, ADMIN_NAME, ORACLE_HOME come from the image env)
export DOMAIN_HOME=/u01/oracle/user_projects/domains/$DOMAIN_NAME
echo "Domain Home is: " $DOMAIN_HOME
mkdir -p $ORACLE_HOME/properties

# Create Domain only if 1st execution
if [ ! -e ${DOMAIN_HOME}/servers/${ADMIN_NAME}/logs/${ADMIN_NAME}.log ]; then
   echo "Create Domain"
   PROPERTIES_FILE=/u01/oracle/properties/domain.properties
   if [ ! -e "$PROPERTIES_FILE" ]; then
      echo "A properties file with the username and password needs to be supplied."
      exit
   fi

   # Get Username (first whitespace-delimited field of the username= line)
   USER=`awk '{print $1}' $PROPERTIES_FILE | grep username | cut -d "=" -f2`
   if [ -z "$USER" ]; then
      echo "The domain username is blank. The Admin username must be set in the properties file."
      exit
   fi
   # Get Password
   PASS=`awk '{print $1}' $PROPERTIES_FILE | grep password | cut -d "=" -f2`
   if [ -z "$PASS" ]; then
      echo "The domain password is blank. The Admin password must be set in the properties file."
      exit
   fi

   # Create an empty domain
   wlst.sh -skipWLSModuleScanning -loadProperties $PROPERTIES_FILE /u01/oracle/create-wls-domain.py
   mkdir -p ${DOMAIN_HOME}/servers/${ADMIN_NAME}/security/
   chmod -R g+w ${DOMAIN_HOME}
   # boot.properties lets the Admin Server start without prompting for credentials
   echo "username=${USER}" >> $DOMAIN_HOME/servers/${ADMIN_NAME}/security/boot.properties
   echo "password=${PASS}" >> $DOMAIN_HOME/servers/${ADMIN_NAME}/security/boot.properties
   ${DOMAIN_HOME}/bin/setDomainEnv.sh
fi

# Start Admin Server and tail the logs
# NOTE(review): startWebLogic.sh is invoked in the foreground (no '&'), so
# the lines below only run after it returns; similarly $! on the childPID
# line is empty because nothing in this script was backgrounded — confirm
# whether startWebLogic.sh daemonizes the server itself.
${DOMAIN_HOME}/startWebLogic.sh
if [ -e ${DOMAIN_HOME}/servers/${ADMIN_NAME}/logs/${ADMIN_NAME}.log ]; then
   echo "${DOMAIN_HOME}/servers/${ADMIN_NAME}/logs/${ADMIN_NAME}.log"
fi
touch ${DOMAIN_HOME}/servers/${ADMIN_NAME}/logs/${ADMIN_NAME}.log
tail -f ${DOMAIN_HOME}/servers/${ADMIN_NAME}/logs/${ADMIN_NAME}.log
childPID=$!
wait $childPID
|
def is_in_range(number, lower_range, upper_range):
    """Return True if ``number`` lies in the inclusive range
    [``lower_range``, ``upper_range``], else False.

    Works for any comparable values (ints, floats, strings, ...).
    """
    # Chained comparison is the idiomatic Python spelling of a bounds
    # check and evaluates `number` only once.
    return lower_range <= number <= upper_range


# Example usage (module-level name kept for backward compatibility).
isInRange = is_in_range(10, 1, 15)
print(isInRange)
<gh_stars>0
package com.twu.biblioteca.servicetest;
import com.twu.biblioteca.entity.BookEntity;
import com.twu.biblioteca.service.BookService;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
/**
* Created by <NAME> on 2017/2/24.
*/
// Placeholder test suite for BookService: one empty test per public
// method; assertions have not been written yet.
public class BookServiceTest {
    @Before
    public void setUp() throws Exception {
        // NOTE(review): both locals are unused and discarded at the end of
        // setUp.  BookService.getInstance() may still matter for singleton
        // initialization — confirm before removing these lines.
        BookService bookService = BookService.getInstance();
        List<BookEntity> bookEntityList = new ArrayList<BookEntity>();
    }
    /**
     * Method: getBookList()
     */
    @Test
    public void testGetBookList() throws Exception {
    }
    /**
     * Method: getBookListStr()
     */
    @Test
    public void testGetBookListStr() throws Exception {
    }
    /**
     * Method: getBook(Integer id)
     */
    @Test
    public void testGetBookId() throws Exception {
    }
    /**
     * Method: getBook(String name)
     */
    @Test
    public void testGetBookName() throws Exception {
    }
    /**
     * Method: checkoutBook(Integer id)
     */
    @Test
    public void testCheckoutBookId() throws Exception {
    }
    /**
     * Method: checkoutBook(String bookName)
     */
    @Test
    public void testCheckoutBookBookName() throws Exception {
    }
    /**
     * Method: returnBook(Integer id)
     */
    @Test
    public void testReturnBookId() throws Exception {
    }
    /**
     * Method: returnBook(String bookName)
     */
    @Test
    public void testReturnBookBookName() throws Exception {
    }
    /**
     * Method: init()
     */
    @Test
    public void testInit() throws Exception {
    }
    /**
     * Method: getBook(Integer id, String name)
     */
    @Test
    public void testGetBookIsOk() throws Exception {
    }
    /**
     * Method: checkoutBook(Integer inputId, String inputBookName)
     */
    @Test
    public void testCheckoutBook() throws Exception {
    }
    /**
     * Method: returnBook(Integer inputId, String inputBookName)
     */
    @Test
    public void testReturnBook() throws Exception {
    }
}
|
/*
* MovementDetector.h
*
* Created on: Oct 7, 2015
* Author: richard
*
* Copyright 2017 <NAME>
* Licensed under the MIT License
*/
#ifndef IMAGEPROCESSING_MOVEMENTDETECTOR_H_
#define IMAGEPROCESSING_MOVEMENTDETECTOR_H_
#include <memory>
#include "../Component.h"
class Frame;
// Abstract interface for pipeline components that detect movement across a
// stream of video frames.  Concrete detectors implement process_next_frame().
class MovementDetector : public Component {
public:
    // Consume the next frame.  The shared_ptr is taken by non-const
    // reference, so an implementation may retain or replace the frame.
    virtual void process_next_frame(std::shared_ptr<Frame>& frame) = 0;
    // Virtual destructor (defined out of line) so deleting a derived
    // detector through a MovementDetector* is well-defined.
    virtual ~MovementDetector();
};
#endif // IMAGEPROCESSING_MOVEMENTDETECTOR_H_
|
#!/bin/bash
# makeImage.sh — build a bootable QEMU disk image from an extracted
# firmware root-filesystem tarball and inject the FIRMADYNE runtime
# (console shim, libnvram, preInit) into it.  Must run as root.
set -e
set -u

# firmadyne.config supplies TARBALL_DIR, SCRIPT_DIR, the check_* predicates
# and the get_* path helpers used throughout this script.
if [ -e ./firmadyne.config ]; then
    source ./firmadyne.config
elif [ -e ../firmadyne.config ]; then
    source ../firmadyne.config
else
    echo "Error: Could not find 'firmadyne.config'!"
    exit 1
fi

# NOTE(review): the check_* helpers appear to return success when the
# check FAILS (invalid ID / not root / bad arch), hence these
# inverted-looking conditions — confirm against firmadyne.config.
if check_number $1; then
    echo "Usage: makeImage.sh <image ID> [<architecture>]"
    exit 1
fi
IID=${1}

if check_root; then
    echo "Error: This script requires root privileges!"
    exit 1
fi

# Architecture: from argv[2] if given, otherwise looked up in the database.
if [ $# -gt 1 ]; then
    if check_arch "${2}"; then
        echo "Error: Invalid architecture!"
        exit 1
    fi
    ARCH=${2}
else
    echo -n "Querying database for architecture... "
    ARCH=$(psql -d firmware -U firmadyne -h 127.0.0.1 -t -q -c "SELECT arch from image WHERE id=${1};")
    # Strip the leading whitespace psql emits in tuples-only mode.
    ARCH="${ARCH#"${ARCH%%[![:space:]]*}"}"
    echo "${ARCH}"
    if [ -z "${ARCH}" ]; then
        echo "Error: Unable to lookup architecture. Please specify {armel,mipseb,mipsel} as the second argument!"
        exit 1
    fi
fi

echo "----Running----"
WORK_DIR=`get_scratch ${IID}`
IMAGE=`get_fs ${IID}`
IMAGE_DIR=`get_fs_mount ${IID}`
CONSOLE=`get_console ${ARCH}`
LIBNVRAM=`get_nvram ${ARCH}`
DEVICE=`get_device`

echo "----Copying Filesystem Tarball----"
mkdir -p "${WORK_DIR}"
chmod a+rwx "${WORK_DIR}"
chown -R "${USER}" "${WORK_DIR}"
chgrp -R "${USER}" "${WORK_DIR}"
if [ ! -e "${WORK_DIR}/${IID}.tar.gz" ]; then
    if [ ! -e "${TARBALL_DIR}/${IID}.tar.gz" ]; then
        echo "Error: Cannot find tarball of root filesystem for ${IID}!"
        exit 1
    else
        cp "${TARBALL_DIR}/${IID}.tar.gz" "${WORK_DIR}/${IID}.tar.gz"
    fi
fi

echo "----Creating QEMU Image----"
qemu-img create -f raw "${IMAGE}" 1G
chmod a+rw "${IMAGE}"

echo "----Creating Partition Table----"
# fdisk script: (o) new DOS label, (n)(p)(1) one primary partition spanning
# the disk, (w) write.
echo -e "o\nn\np\n1\n\n\nw" | /sbin/fdisk "${IMAGE}"

echo "----Mounting QEMU Image----"
# kpartx maps the image's partitions to /dev/mapper devices (-s: sync).
kpartx -a -s -v "${IMAGE}"
sleep 1

echo "----Creating Filesystem----"
mkfs.ext2 "${DEVICE}"
sync

echo "----Making QEMU Image Mountpoint----"
if [ ! -e "${IMAGE_DIR}" ]; then
    mkdir "${IMAGE_DIR}"
    chown "${USER}" "${IMAGE_DIR}"
fi

echo "----Mounting QEMU Image Partition 1----"
mount "${DEVICE}" "${IMAGE_DIR}"

echo "----Extracting Filesystem Tarball----"
tar -xf "${WORK_DIR}/$IID.tar.gz" -C "${IMAGE_DIR}"
rm "${WORK_DIR}/${IID}.tar.gz"

echo "----Creating FIRMADYNE Directories----"
mkdir "${IMAGE_DIR}/firmadyne/"
mkdir "${IMAGE_DIR}/firmadyne/libnvram/"
mkdir "${IMAGE_DIR}/firmadyne/libnvram.override/"

echo "----Patching Filesystem (chroot)----"
# Run fixImage.sh inside the firmware's own root using a statically-usable
# busybox copied in for the duration, then clean both up.
cp $(which busybox) "${IMAGE_DIR}"
cp "${SCRIPT_DIR}/fixImage.sh" "${IMAGE_DIR}"
chroot "${IMAGE_DIR}" /busybox ash /fixImage.sh
rm "${IMAGE_DIR}/fixImage.sh"
rm "${IMAGE_DIR}/busybox"

echo "----Setting up FIRMADYNE----"
cp "${CONSOLE}" "${IMAGE_DIR}/firmadyne/console"
chmod a+x "${IMAGE_DIR}/firmadyne/console"
# Character device 4,65 = ttyS1, used by the console shim.
mknod -m 666 "${IMAGE_DIR}/firmadyne/ttyS1" c 4 65
cp "${LIBNVRAM}" "${IMAGE_DIR}/firmadyne/libnvram.so"
chmod a+x "${IMAGE_DIR}/firmadyne/libnvram.so"
cp "${SCRIPT_DIR}/preInit.sh" "${IMAGE_DIR}/firmadyne/preInit.sh"
chmod a+x "${IMAGE_DIR}/firmadyne/preInit.sh"

echo "----Unmounting QEMU Image----"
sync
umount "${DEVICE}"
kpartx -d "${IMAGE}"
# Best-effort cleanup of loop/device-mapper state; errors are ignored.
losetup -d "${DEVICE}" &>/dev/null
dmsetup remove $(basename "$DEVICE") &>/dev/null
|
#!/bin/bash
# Spin up a fresh single-node Ethermint localnet: wipe old state, build the
# binaries if missing, create a genesis with aphoton denom + faucet, start
# the node and REST server, then compile and exercise a sample contract.
KEY="mykey"
TESTKEY="test"
CHAINID="ethermint-100"
MONIKER="localtestnet"

# stop and remove existing daemon and client data and process(es)
rm -rf $PWD/.ethermint*
pkill -f "ethermint*"

# Build only if the binaries are not already on PATH.
type "ethermintd" 2> /dev/null || make build-ethermint
type "ethermintcli" 2> /dev/null || make build-ethermint

# Use the unencrypted "test" keyring so no passphrase prompts block the script.
$PWD/build/ethermintcli config keyring-backend test

# Set up config for CLI
$PWD/build/ethermintcli config chain-id $CHAINID
$PWD/build/ethermintcli config output json
$PWD/build/ethermintcli config indent true
$PWD/build/ethermintcli config trust-node true

# if $KEY exists it should be deleted
$PWD/build/ethermintcli keys add $KEY

# Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer)
$PWD/build/ethermintd init $MONIKER --chain-id $CHAINID

# Change parameter token denominations to aphoton
cat $HOME/.ethermintd/config/genesis.json | jq '.app_state["staking"]["params"]["bond_denom"]="aphoton"' > $HOME/.ethermintd/config/tmp_genesis.json && mv $HOME/.ethermintd/config/tmp_genesis.json $HOME/.ethermintd/config/genesis.json
cat $HOME/.ethermintd/config/genesis.json | jq '.app_state["crisis"]["constant_fee"]["denom"]="aphoton"' > $HOME/.ethermintd/config/tmp_genesis.json && mv $HOME/.ethermintd/config/tmp_genesis.json $HOME/.ethermintd/config/genesis.json
cat $HOME/.ethermintd/config/genesis.json | jq '.app_state["gov"]["deposit_params"]["min_deposit"][0]["denom"]="aphoton"' > $HOME/.ethermintd/config/tmp_genesis.json && mv $HOME/.ethermintd/config/tmp_genesis.json $HOME/.ethermintd/config/genesis.json
cat $HOME/.ethermintd/config/genesis.json | jq '.app_state["mint"]["params"]["mint_denom"]="aphoton"' > $HOME/.ethermintd/config/tmp_genesis.json && mv $HOME/.ethermintd/config/tmp_genesis.json $HOME/.ethermintd/config/genesis.json

# Enable faucet
cat $HOME/.ethermintd/config/genesis.json | jq '.app_state["faucet"]["enable_faucet"]=true' > $HOME/.ethermintd/config/tmp_genesis.json && mv $HOME/.ethermintd/config/tmp_genesis.json $HOME/.ethermintd/config/genesis.json

# Allocate genesis accounts (cosmos formatted addresses)
# NOTE(review): $i is never set in this script, so "$KEY$i" expands to just
# "$KEY" — likely a leftover from a multi-key loop; confirm before relying on it.
$PWD/build/ethermintd add-genesis-account "$("$PWD"/build/ethermintcli keys show "$KEY$i" -a)" 100000000000000000000aphoton

# Sign genesis transaction
$PWD/build/ethermintd gentx --name $KEY --amount=1000000000000000000aphoton --keyring-backend test

# Collect genesis tx
$PWD/build/ethermintd collect-gentxs

# Run this to ensure everything worked and that the genesis file is setup correctly
$PWD/build/ethermintd validate-genesis

# Start the node (remove the --pruning=nothing flag if historical queries are not needed) in background and log to file
$PWD/build/ethermintd start --pruning=nothing --rpc.unsafe --log_level "main:info,state:info,mempool:info" --trace > ethermintd.log &
sleep 1

# Start the rest server with unlocked faucet key in background and log to file
$PWD/build/ethermintcli rest-server --laddr "tcp://localhost:8545" --unlock-key $KEY --chain-id $CHAINID --trace > ethermintcli.log &

# Compile the sample contract; rename the outputs to the fixed names the
# Go test harness expects.
solcjs --abi $PWD/tests-solidity/suites/basic/contracts/Counter.sol --bin -o $PWD/tests-solidity/suites/basic/counter
mv $PWD/tests-solidity/suites/basic/counter/*.abi $PWD/tests-solidity/suites/basic/counter/counter_sol.abi
mv $PWD/tests-solidity/suites/basic/counter/*.bin $PWD/tests-solidity/suites/basic/counter/counter_sol.bin

# Fetch the first unlocked account from the JSON-RPC endpoint.
ACCT=$(curl --fail --silent -X POST --data '{"jsonrpc":"2.0","method":"eth_accounts","params":[],"id":1}' -H "Content-Type: application/json" http://localhost:8545 | grep -o '\0x[^"]*' 2>&1)
echo $ACCT
curl -X POST --data '{"jsonrpc":"2.0","method":"personal_unlockAccount","params":["'$ACCT'", ""],"id":1}' -H "Content-Type: application/json" http://localhost:8545
PRIVKEY="$("$PWD"/build/ethermintcli keys unsafe-export-eth-key $KEY)"
echo $PRIVKEY
## need to get the private key from the account in order to check this functionality.
cd tests-solidity/suites/basic/ && go get && go run main.go $ACCT
|
package all
import (
_ "github.com/direct-connect/go-dcpp/hub/plugins/hubstats"
_ "github.com/direct-connect/go-dcpp/hub/plugins/myip"
// LUA is loaded the last
_ "github.com/direct-connect/go-dcpp/hub/plugins/lua"
_ "github.com/direct-connect/go-dcpp/hub/plugins/lua/px"
)
|
# -*- encoding: utf-8 -*-
# this is required because of the use of eval interacting badly with require_relative
require 'razor/acceptance/utils'
require 'yaml'

# Beaker acceptance test: enable HTTP basic authentication on each Razor
# server, verify bad credentials are rejected, and restore the original
# (unauthenticated) configuration afterwards.
confine :except, :roles => %w{master dashboard database frictionless}
test_name 'Configure Razor server for basic authentication'
step 'https://testrail.ops.puppetlabs.net/index.php?/cases/view/259'

config_yaml = '/opt/puppetlabs/server/apps/razor-server/config-defaults.yaml'
shiro_ini   = '/etc/puppetlabs/razor-server/shiro.ini'

agents.each do |agent|
  begin
    step "Enable authentication on #{agent}"
    # with_backup_of restores the original file when its block exits.
    with_backup_of(agent, config_yaml) do |config_tmpdir|
      # Flip auth.enabled in the server's YAML config and push it back.
      config = on(agent, "cat #{config_yaml}").output
      yaml = YAML.load(config)
      yaml['all']['auth']['enabled'] = true
      config = YAML.dump(yaml)
      File.open(File::join(config_tmpdir, 'new-config.yaml'), 'w') {|f| f.write(config) }
      step "Copy modified config.yaml to #{agent}"
      scp_to agent, File::join(config_tmpdir, 'new-config.yaml'), config_yaml
      on agent, "chmod +r #{config_yaml}"
      step "Verify shiro on #{agent}"
      verify_shiro_default(agent)
      with_backup_of(agent, shiro_ini) do |_|
        step "Restart Razor Service on #{agent}"
        # the redirect to /dev/null is to work around a bug in the init script or
        # service, per: https://tickets.puppetlabs.com/browse/RAZOR-247
        restart_razor_service(agent, "https://razor:razor@#{agent}:8151/api")
        step "Verify authentication on #{agent}"
        text = on(agent, "razor -u https://bad_username:bad_password@#{agent}:8151/api", acceptable_exit_codes: 1).output
        assert_match(/Credentials are required/, text,
                     'The request should be unauthorized')
      end
    end
  rescue => e
    # Log for the test report, then re-raise so the failure still counts.
    puts "Error: #{e}"
    raise e
  ensure
    step "Restart Razor Service to revert authentication on #{agent}"
    # the redirect to /dev/null is to work around a bug in the init script or
    # service, per: https://tickets.puppetlabs.com/browse/RAZOR-247
    restart_razor_service(agent)
    step "Verify restart was successful on #{agent}"
    # NOTE(review): this inner agents.each runs inside the ensure of the
    # per-agent loop, so ALL agents are re-verified once per outer agent and
    # the block parameter shadows the outer `agent` — confirm this is intended.
    agents.each do |agent|
      text = on(agent, "razor").output
      assert_match(/Collections:/, text,
                   'The help information should be displayed again')
    end
  end
end
|
/**
 * Core operations of a hotel management system: guest check-in/check-out,
 * room booking/cancellation, and simple listings/lookup.
 */
public interface HotelSystem {
    // Methods to check-in, check-out, and list all guests
    /** Checks the named guest into the given room. */
    public void checkIn(String name, Room room);
    /** Checks the named guest out. */
    public void checkOut(String name);
    /** Prints all current guests. */
    public void printGuests();
    // Methods to book, cancel, and list all rooms
    /** Books the given room under the given name. */
    public void bookRoom(String name, Room room);
    /** Cancels the booking held under the given name. */
    public void cancelRoom(String name);
    /** Prints all rooms. */
    public void printRooms();
    // Method to get information about a specific room
    /** Returns the room with the given number. */
    public Room getRoomInfo(int roomNumber);
}
<filename>streampipes-pipeline-management/src/main/java/org/apache/streampipes/manager/matching/ConnectionStorageHandler.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.streampipes.manager.matching;
import org.apache.streampipes.manager.util.TreeUtils;
import org.apache.streampipes.model.SpDataStream;
import org.apache.streampipes.model.base.InvocableStreamPipesEntity;
import org.apache.streampipes.model.base.NamedStreamPipesEntity;
import org.apache.streampipes.model.client.connection.Connection;
import org.apache.streampipes.model.pipeline.Pipeline;
import org.apache.streampipes.storage.management.StorageDispatcher;
/**
 * Persists the connection between a newly invoked pipeline element and the
 * element it was attached to, so the UI can later suggest previously used
 * connections.  Both collaborators are set once at construction and never
 * change, so the fields are final.
 */
public class ConnectionStorageHandler {

  private final Pipeline pipeline;
  private final InvocableStreamPipesEntity rootPipelineElement;

  public ConnectionStorageHandler(Pipeline pipeline,
                                  InvocableStreamPipesEntity rootPipelineElement) {
    this.pipeline = pipeline;
    this.rootPipelineElement = rootPipelineElement;
  }

  /**
   * Resolves the source of {@code rootPipelineElement}'s most recent
   * connection and stores the (source, target) pair in the NoSQL store.
   */
  public void storeConnection() {
    // The last entry of getConnectedTo() is the element this one was attached to.
    String fromId = rootPipelineElement.getConnectedTo().get(rootPipelineElement.getConnectedTo().size() - 1);
    NamedStreamPipesEntity sepaElement = TreeUtils.findSEPAElement(fromId, pipeline.getSepas(), pipeline.getStreams());
    String sourceId;
    if (sepaElement instanceof SpDataStream) {
      // Data streams are identified directly by their element id.
      sourceId = sepaElement.getElementId();
    } else {
      // Invocable elements are identified by the pipeline element they belong to.
      sourceId = ((InvocableStreamPipesEntity) sepaElement).getBelongsTo();
    }
    Connection connection = new Connection(sourceId, rootPipelineElement.getBelongsTo());
    StorageDispatcher.INSTANCE.getNoSqlStore().getConnectionStorageApi().addConnection(connection);
  }
}
|
'use strict';

/**
 * Mock of navigator.mozSettings for unit tests: an in-memory settings map
 * with observer support and optional manual (synchronous) request replies.
 * Installed on the provided global as window.MockNavigatorSettings.
 */
(function(window) {
  var observers = {},
      // Set default message size with 300KB
      settings = {
        'dom.mms.operatorSizeLimitation' : 300,
        'homegesture.enabled': false,
        'software-button.enabled': false
      },
      removedObservers = {},
      requests = [];

  // lock.set(obj): store every key/value, fire observers, and return a
  // request whose onsuccess fires asynchronously.
  // NOTE(review): unlike mns_mLockGet, this ignores mSyncRepliesOnly —
  // confirm whether set-requests should also be queued in that mode.
  function mns_mLockSet(obj) {
    // Set values.
    for (var key in obj) {
      settings[key] = obj[key];
    }

    // Trigger observers to mimic real mozSettings implementation.
    for (var key in obj) {
      mns_mTriggerObservers(
        key,
        { settingName: key, settingValue: obj[key] }
      );
    }

    var req = {
      onsuccess: null,
      onerror: null
    };
    setTimeout(function() {
      if (req.onsuccess) {
        req.onsuccess();
      }
    });
    return req;
  }

  // Drop all pending queued requests without replying.
  function mns_clearRequests() {
    requests = [];
  }

  // Fire onsuccess for every queued request (mSyncRepliesOnly mode), then
  // clear the queue even if a handler throws.
  function mns_mReplyToRequests() {
    try {
      requests.forEach(function(request) {
        if (request.onsuccess) {
          request.onsuccess({
            target: request
          });
        }
      });
    }
    finally {
      requests = [];
    }
  }

  // lock.get(key): return a request resolving to { key: value }; replies
  // asynchronously, or is queued when mSyncRepliesOnly is set.
  function mns_mLockGet(key) {
    var resultObj = {};
    resultObj[key] = settings[key];
    var settingsRequest = {
      result: resultObj,
      addEventListener: function(name, cb) {
        settingsRequest['on' + name] = cb;
      }
    };

    if (!MockNavigatorSettings.mSyncRepliesOnly) {
      setTimeout(function() {
        if (settingsRequest.onsuccess) {
          settingsRequest.onsuccess();
        }
      });
    } else {
      requests.push(settingsRequest);
    }

    return settingsRequest;
  }

  function mns_addObserver(name, cb) {
    observers[name] = observers[name] || [];
    observers[name].push(cb);
  }

  // Record the removal for test inspection, then detach the callback.
  function mns_removeObserver(name, cb) {
    removedObservers[name] = removedObservers[name] || [];
    removedObservers[name].push(cb);

    // Guard: removeObserver may be called for a setting that was never
    // observed; indexing into undefined would throw a TypeError.
    var list = observers[name];
    if (!list) {
      return;
    }
    var index = list.indexOf(cb);
    if (index > -1) {
      list.splice(index, 1);
    }
  }

  function mns_createLock() {
    return {
      set: mns_mLockSet,
      get: mns_mLockGet
    };
  }

  // Invoke every observer registered for `name` with `args`.
  function mns_mTriggerObservers(name, args) {
    var theseObservers = observers[name];

    if (!theseObservers) {
      return;
    }
    theseObservers.forEach(function(func) {
      func(args);
    });
  }

  // Reset all mock state between tests.
  function mns_teardown() {
    observers = {};
    settings = {};
    removedObservers = {};
    requests = [];
  }

  window.MockNavigatorSettings = {
    addObserver: mns_addObserver,
    removeObserver: mns_removeObserver,
    createLock: mns_createLock,

    mClearRequests: mns_clearRequests,
    mReplyToRequests: mns_mReplyToRequests,
    mTriggerObservers: mns_mTriggerObservers,
    mTeardown: mns_teardown,
    mSyncRepliesOnly: false,

    get mObservers() {
      return observers;
    },
    get mSettings() {
      return settings;
    },
    get mRemovedObservers() {
      return removedObservers;
    },
    get mRequests() {
      return requests;
    }
  };
})(this);
|
<reponame>PlayingIO/playing-team-services
const fp = require('mostly-func');
const { helpers } = require('mostly-feathers-mongoose');
const { createTeamActivity, membersNotifications } = require('../../helpers');
// lock team activity
const lockTeam = (context) => {
const team = helpers.getHookData(context);
const actor = helpers.getCurrentUser(context);
if (!team || !actor) return;
const notifications = membersNotifications(team.members);
const custom = {
actor: `user:${actor}`,
verb: 'group.lock',
message: 'Team is locked',
};
return [
createTeamActivity(context, team, custom),
`user:${actor}`, // add to actor's activity log
`team:${team.id}`, // add to mission's activity log
notifications // add to members' notification stream
];
};
// unlock team activity
const unlockTeam = (context) => {
const team = helpers.getHookData(context);
const actor = helpers.getCurrentUser(context);
if (!team || !actor) return;
const notifications = membersNotifications(team.members);
const custom = {
actor: `user:${actor}`,
verb: 'group.unlock',
message: 'Team is unlocked',
};
return [
createTeamActivity(context, team, custom),
`user:${actor}`, // add to actor's activity log
`team:${team.id}`, // add to mission's activity log
notifications // add to members' notification stream
];
};
module.exports = {
'group.lock': lockTeam,
'group.unlock': unlockTeam
};
|
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# CI test driver for Trax.  Runs either the core library tests
# (TRAX_TEST=lib) or the models/RL/notebook tests, accumulating failures
# instead of aborting, and exits non-zero if any step failed.
# NOTE(review): this shebang is not on line 1 (a license header precedes
# it), so it is informational only here.
set -v  # print commands as they're executed

# aliases aren't expanded in non-interactive shells by default.
shopt -s expand_aliases

# Instead of exiting on any failure with "set -e", we'll call set_status after
# each command and exit $STATUS at the end.
STATUS=0
# Record the previous command's exit code; once any command fails, STATUS
# stays non-zero for the rest of the run.
function set_status() {
    local last_status=$?
    if [[ $last_status -ne 0 ]]
    then
      echo "<<<<<<FAILED>>>>>> Exit code: $last_status"
    fi
    STATUS=$(($last_status || $STATUS))
}

# Check env vars set (the :? expansion aborts with an error if unset/empty).
echo "${TF_VERSION:?}" && \
echo "${TRAX_TEST:?}" && \
echo "${TRAVIS_PYTHON_VERSION:?}"
set_status
if [[ $STATUS -ne 0 ]]
then
  exit $STATUS
fi

# Check import.
python -c "import trax"
set_status

# # Run pytest with coverage.
# alias pytest='coverage run -m pytest'

# Check tests, separate out directories for easy triage.
if [[ "${TRAX_TEST}" == "lib" ]]
then
  ## Core Trax and Supervised Learning
  # Disabled the decoding test for now, since it OOMs.
  # TODO(afrozm): Add the decoding_test.py back again.

  # training_test and trainer_lib_test parse flags, so can't use with --ignore
  pytest \
    --ignore=trax/supervised/trainer_lib_test.py \
    --ignore=trax/supervised/training_test.py \
    --ignore=trax/supervised/decoding_test.py \
    --ignore=trax/supervised/decoding_timing_test.py \
    trax/supervised
  set_status

  # Testing these separately here.
  pytest \
    trax/supervised/trainer_lib_test.py \
    trax/supervised/training_test.py
  set_status

  pytest trax/data
  set_status

  # Ignoring acceleration_test's test_chunk_grad_memory since it is taking a
  # lot of time on OSS.
  pytest \
    --deselect=trax/layers/acceleration_test.py::AccelerationTest::test_chunk_grad_memory \
    --deselect=trax/layers/acceleration_test.py::AccelerationTest::test_chunk_memory \
    --ignore=trax/layers/initializers_test.py \
    trax/layers
  set_status

  pytest trax/layers/initializers_test.py
  set_status

  pytest trax/fastmath
  set_status

  pytest trax/optimizers
  set_status

  # Catch-all for futureproofing.
  pytest \
    --ignore=trax/trax2keras_test.py \
    --ignore=trax/data \
    --ignore=trax/fastmath \
    --ignore=trax/layers \
    --ignore=trax/models \
    --ignore=trax/optimizers \
    --ignore=trax/rl \
    --ignore=trax/supervised \
    --ignore=trax/tf_numpy
  set_status
else
  # Models, RL and misc right now.

  ## Models
  # Disabled tests are quasi integration tests.
  pytest \
    --ignore=trax/models/reformer/reformer_e2e_test.py \
    --ignore=trax/models/reformer/reformer_memory_test.py \
    --ignore=trax/models/reformer/reformer_oom_test.py \
    trax/models
  set_status

  ## RL Trax
  pytest trax/rl
  set_status

  ## Trax2Keras
  pytest trax/trax2keras_test.py
  set_status

  # Check notebooks.
  # TODO(afrozm): Add more.
  jupyter nbconvert --ExecutePreprocessor.kernel_name=python3 \
    --ExecutePreprocessor.timeout=600 --to notebook --execute \
    trax/intro.ipynb;
  set_status
fi

# TODO(traxers): Test tf-numpy separately.

exit $STATUS
|
package com.java.study.algorithm.zuo.cadvanced.advanced_class_07;
/**
 * Minimum palindrome partition count.
 * Problem: given a string str, cut it so that every part is a palindrome,
 * and return the minimum number of parts.
 * Example: str = "AA12321BB" can be split into "AA", "12321", "BB" — every
 * part is a palindrome and there are 3 parts, so the answer is 3.
 */
public class Code_06_PalindromeMinCut{
    // NOTE(review): class body is empty — the solution has not been added yet.
}
#include <catch/catch.hpp>
#if LINGO_TEST_SPLIT
#include <lingo/page/iso_8859.hpp>
#include <lingo/page/unicode.hpp>
#include <lingo/page/intermediate.hpp>
#include <lingo/page/point_mapper.hpp>
#else
#include <lingo/test/include_all.hpp>
#endif
#include <fstream>
#include <iomanip>
#include <limits>
#include <map>
#include <sstream>
#include <tuple>
#include <type_traits>
namespace
{
    // Compile-time descriptor pairing a code-page type with its ISO 8859
    // part number.
    // NOTE(review): appears unused within this translation unit — confirm
    // before removing.
    template <typename Page, std::size_t Part>
    struct page_description
    {
        using page_type = Page;
        static LINGO_CONSTEXPR11 std::size_t part_index = Part;
    };

    // All ISO 8859 parts under test (part 12 was abandoned and never
    // standardized, hence the gap between 11 and 13).
    using test_pages = std::tuple<
        lingo::page::iso_8859<1>,
        lingo::page::iso_8859<2>,
        lingo::page::iso_8859<3>,
        lingo::page::iso_8859<4>,
        lingo::page::iso_8859<5>,
        lingo::page::iso_8859<6>,
        lingo::page::iso_8859<7>,
        lingo::page::iso_8859<8>,
        lingo::page::iso_8859<9>,
        lingo::page::iso_8859<10>,
        lingo::page::iso_8859<11>,
        lingo::page::iso_8859<13>,
        lingo::page::iso_8859<14>,
        lingo::page::iso_8859<15>,
        lingo::page::iso_8859<16>>;
}
// Checks that every iso_8859<N> template and its iso_8859_N alias report
// the expected part_index.
// NOTE(review): the assertions do not reference TestType, so each template
// instantiation re-runs the identical full set of checks — confirm whether
// a plain TEST_CASE was intended instead.
TEMPLATE_LIST_TEST_CASE("iso_8859 types are correctly defined", "", test_pages)
{
    REQUIRE(lingo::page::iso_8859<1>::part_index == 1);
    REQUIRE(lingo::page::iso_8859<2>::part_index == 2);
    REQUIRE(lingo::page::iso_8859<3>::part_index == 3);
    REQUIRE(lingo::page::iso_8859<4>::part_index == 4);
    REQUIRE(lingo::page::iso_8859<5>::part_index == 5);
    REQUIRE(lingo::page::iso_8859<6>::part_index == 6);
    REQUIRE(lingo::page::iso_8859<7>::part_index == 7);
    REQUIRE(lingo::page::iso_8859<8>::part_index == 8);
    REQUIRE(lingo::page::iso_8859<9>::part_index == 9);
    REQUIRE(lingo::page::iso_8859<10>::part_index == 10);
    REQUIRE(lingo::page::iso_8859<11>::part_index == 11);
    REQUIRE(lingo::page::iso_8859<13>::part_index == 13);
    REQUIRE(lingo::page::iso_8859<14>::part_index == 14);
    REQUIRE(lingo::page::iso_8859<15>::part_index == 15);
    REQUIRE(lingo::page::iso_8859<16>::part_index == 16);

    REQUIRE(lingo::page::iso_8859_1::part_index == 1);
    REQUIRE(lingo::page::iso_8859_2::part_index == 2);
    REQUIRE(lingo::page::iso_8859_3::part_index == 3);
    REQUIRE(lingo::page::iso_8859_4::part_index == 4);
    REQUIRE(lingo::page::iso_8859_5::part_index == 5);
    REQUIRE(lingo::page::iso_8859_6::part_index == 6);
    REQUIRE(lingo::page::iso_8859_7::part_index == 7);
    REQUIRE(lingo::page::iso_8859_8::part_index == 8);
    REQUIRE(lingo::page::iso_8859_9::part_index == 9);
    REQUIRE(lingo::page::iso_8859_10::part_index == 10);
    REQUIRE(lingo::page::iso_8859_11::part_index == 11);
    REQUIRE(lingo::page::iso_8859_13::part_index == 13);
    REQUIRE(lingo::page::iso_8859_14::part_index == 14);
    REQUIRE(lingo::page::iso_8859_15::part_index == 15);
    REQUIRE(lingo::page::iso_8859_16::part_index == 16);
}
// For each ISO 8859 part, parse the official Unicode Consortium mapping
// file and verify that both the page's map_to/map_from members and the
// point_mapper produce exactly the mappings the spec file lists (and
// report no_mapping for everything else).
TEMPLATE_LIST_TEST_CASE("iso_8859 can be mapped to and from unicode", "", test_pages)
{
    using iso_page_type = TestType;
    using unicode_page_type = lingo::page::unicode_default;

    using iso_point_type = typename iso_page_type::point_type;
    using unicode_point_type = typename unicode_page_type::point_type;

    using iso_unicode_point_mapper = lingo::page::point_mapper<iso_page_type, unicode_page_type>;
    using unicode_iso_point_mapper = lingo::page::point_mapper<unicode_page_type, iso_page_type>;

    LINGO_CONSTEXPR11 std::size_t part_index = iso_page_type::part_index;

    // Locate the reference mapping file for this part, e.g. 8859-1.TXT.
    std::stringstream file_name;
    file_name << LINGO_SPEC_DIRECTORY;
    file_name << "/unicode/MAPPINGS/ISO8859/8859-";
    file_name << part_index;
    file_name << ".TXT";
    std::ifstream mapping_file(file_name.str());
    REQUIRE(mapping_file.is_open());

    // Data lines start with "0x.."; everything else (comments) is skipped.
    std::map<uint_least32_t, uint_least32_t> to_unicode;
    std::map<uint_least32_t, uint_least32_t> from_unicode;
    while (!mapping_file.eof())
    {
        if (mapping_file.peek() != '0')
        {
            mapping_file.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
            continue;
        }

        uint_least32_t iso_point, unicode_point;
        mapping_file >> std::hex >> iso_point;
        mapping_file >> std::hex >> unicode_point;
        mapping_file.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
        REQUIRE(mapping_file.good());

        to_unicode.emplace(iso_point, unicode_point);
        from_unicode.emplace(unicode_point, iso_point);
    }

    // To unicode
    for (std::size_t i = 0; i < iso_page_type::point_range; ++i)
    {
        const iso_point_type iso_point = static_cast<iso_point_type>(i);
        const auto unicode_result = iso_page_type::template map_to<unicode_page_type>(iso_point);
        const auto mapped_unicode_result = iso_unicode_point_mapper::map(iso_point);

        const auto it = to_unicode.find(iso_point);
        if (it != to_unicode.end())
        {
            REQUIRE(unicode_result.error == lingo::error::error_code::success);
            REQUIRE(unicode_result.point == static_cast<unicode_point_type>(it->second));
            REQUIRE(mapped_unicode_result.error == lingo::error::error_code::success);
            REQUIRE(mapped_unicode_result.point == static_cast<unicode_point_type>(it->second));
        }
        else
        {
            REQUIRE(unicode_result.error == lingo::error::error_code::no_mapping);
            REQUIRE(mapped_unicode_result.error == lingo::error::error_code::no_mapping);
        }
    }

    // From unicode
    for (std::size_t i = 0; i < unicode_page_type::point_range; ++i)
    {
        const unicode_point_type unicode_point = static_cast<unicode_point_type>(i);
        const auto iso_result = iso_page_type::template map_from<unicode_page_type>(unicode_point);
        const auto mapped_iso_result = unicode_iso_point_mapper::map(unicode_point);

        const auto it = from_unicode.find(unicode_point);
        if (it != from_unicode.end())
        {
            REQUIRE(iso_result.error == lingo::error::error_code::success);
            REQUIRE(iso_result.point == static_cast<iso_point_type>(it->second));
            REQUIRE(mapped_iso_result.error == lingo::error::error_code::success);
            // Fixed: mapped_iso_result.point is an ISO-page point, so the
            // expected value must be cast to iso_point_type (matching the
            // iso_result assertion above), not unicode_point_type.
            REQUIRE(mapped_iso_result.point == static_cast<iso_point_type>(it->second));
        }
        else
        {
            REQUIRE(iso_result.error == lingo::error::error_code::no_mapping);
            REQUIRE(mapped_iso_result.error == lingo::error::error_code::no_mapping);
        }
    }
}
#!/bin/bash
# This file is meant to be included by the parent cppbuild.sh script
# When run directly (PLATFORM unset), re-invoke the parent build script for
# this preset and stop; PLATFORM is only set when included by cppbuild.sh.
if [[ -z "$PLATFORM" ]]; then
pushd ..
bash cppbuild.sh "$@" spinnaker
popd
exit
fi
# Spinnaker SDK version this preset is built against.
SPINNAKER_VERSION=1.15.0.63
case $PLATFORM in
linux-arm*)
# There is no automated download for the ARM SDK: the archive must be
# placed in the downloads directory manually.
if [[ ! -f "../../downloads/spinnaker.${SPINNAKER_VERSION}_armhf.tar.gz" ]]; then
echo "Please place spinnaker.${SPINNAKER_VERSION}_armhf.tar.gz in the downloads directory"
exit 1
fi
echo "Decompressing archives..."
tar -xzf ../../downloads/spinnaker.${SPINNAKER_VERSION}_armhf.tar.gz
rm -Rf $PLATFORM
mv spinnaker.${SPINNAKER_VERSION}_armhf $PLATFORM
# Flatten the C API's lib/C and include/C subdirectories into the
# platform directory so later build steps find them directly.
mv $PLATFORM/lib/C/* $PLATFORM/lib
mv $PLATFORM/include/C/* $PLATFORM/include
;;
linux-x86*)
# On x86 the SDK must already be installed system-wide.
if [[ ! -d "/usr/include/spinnaker/" ]]; then
echo "Please install Spinnaker under the default installation directory"
exit 1
fi
;;
windows-*)
if [[ ! -d "/C/Program Files/Point Grey Research/" ]]; then
echo "Please install Spinnaker under the default installation directory"
exit 1
fi
;;
*)
# NOTE(review): unlike the branches above, this case does not `exit 1`,
# so an unsupported platform falls through with status 0 — confirm this
# is intended (an exit here would affect the sourcing parent script).
echo "Error: Platform \"$PLATFORM\" is not supported"
;;
esac
|
#!/bin/bash
# Build and export the web frontend, then sync the static output to the
# staging GCS bucket. Aborts on the first failing command.
set -e
lerna run build --scope @oyster/common --scope web
lerna run export --scope web
readonly BUCKET_DEV='momenti-staging-nft-0'
readonly DESTINATION="gs://${BUCKET_DEV}/"
# Fix: the header previously read "no-store no-transform" (missing comma),
# which is not a valid comma-separated Cache-Control directive list.
gsutil -m -h "Cache-Control:private, max-age=0, no-store, no-transform" rsync -R ./build/web "${DESTINATION}"
|
// Address of the deployed contract on the target network.
export const contractAddress = "0x9e8538B28228fc53f4824701ACb35778f9882067";

// Account address that owns/administers the contract.
export const ownerAddress = "0xe4b7fd374470267c531D2B9aDA64046ABb3E6Fdb";
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jena.tdb.base.objectfile;
import static org.apache.jena.tdb.sys.SystemTDB.ObjectFileWriteCacheSize ;
import static org.apache.jena.tdb.sys.SystemTDB.SizeOfInt ;
import java.nio.ByteBuffer ;
import java.util.Iterator ;
import org.apache.jena.atlas.iterator.Iter ;
import org.apache.jena.atlas.iterator.IteratorSlotted ;
import org.apache.jena.atlas.lib.Pair ;
import org.apache.jena.atlas.logging.Log ;
import org.apache.jena.tdb.base.block.Block ;
import org.apache.jena.tdb.base.file.BufferChannel ;
import org.apache.jena.tdb.base.file.FileException ;
import org.apache.jena.tdb.sys.SystemTDB ;
import org.slf4j.Logger ;
import org.slf4j.LoggerFactory ;
/** Variable length ByteBuffer file on disk.
* Buffering for delayed writes.
*/
/** Variable length ByteBuffer file on disk.
 * Buffering for delayed writes.
 */
public class ObjectFileStorage implements ObjectFile
{
    private static Logger log = LoggerFactory.getLogger(ObjectFileStorage.class) ;
    // Global switch for very verbose per-operation debug logging.
    public static boolean logging = false ;

    // Debug log helper: prefixes each message with the current file/buffer state.
    private void log(String fmt, Object... args)
    {
        if ( ! logging ) return ;
        log.debug(state()+" "+String.format(fmt, args)) ;
    }

    /*
     * No synchronization - assumes that the caller has some appropriate lock
     * because the combination of file and cache operations needs to be thread safe.
     *
     * The position of the channel is assumed to be the end of the file always.
     * Read operations are done with absolute channel calls,
     * which do not reset the position.
     *
     * Writing is buffered.
     */

    // The object length slot.
    private ByteBuffer lengthBuffer = ByteBuffer.allocate(SizeOfInt) ;

    // Delayed write buffer.
    private final ByteBuffer writeBuffer ;

    private final BufferChannel file ;  // Access to storage
    private long filesize ;             // Size of on-disk.

    // Two-step write - alloc, write
    private boolean inAllocWrite = false ;
    private Block allocBlock = null ;
    private long allocLocation = -1 ;

    // Old values for abort.
    int oldBufferPosn = -1 ;
    int oldBufferLimit = -1 ;

    public ObjectFileStorage(BufferChannel file)
    {
        this(file, ObjectFileWriteCacheSize) ;
    }

    /**
     * @param file       underlying storage channel
     * @param bufferSize size of the delayed-write buffer; a negative value
     *                   disables buffering (all writes go straight to disk)
     */
    public ObjectFileStorage(BufferChannel file, int bufferSize)
    {
        this.file = file ;
        filesize = file.size() ;
        this.file.position(filesize) ;  // End of file.
        log("File size: 0x%X, posn: 0x%X", filesize, file.position()) ;
        writeBuffer = (bufferSize >= 0) ? ByteBuffer.allocate(bufferSize) : null ;
    }

    /** Write the bytes of bb (position..limit) as one length-prefixed object;
     *  return the location id used to read it back. */
    @Override
    public long write(ByteBuffer bb)
    {
        log("W") ;
        if ( inAllocWrite )
            Log.fatal(this, "In the middle of an alloc-write") ;
        inAllocWrite = false ;
        if ( writeBuffer == null )
        {
            // Unbuffered mode - write through.
            long x = rawWrite(bb) ;
            log("W -> 0x%X", x);
            return x ;
        }

        int len = bb.limit() - bb.position() ;
        int spaceNeeded = len + SizeOfInt ;

        if ( writeBuffer.position()+spaceNeeded > writeBuffer.capacity() )
            // No room - flush.
            flushOutputBuffer() ;
        if ( writeBuffer.position()+spaceNeeded > writeBuffer.capacity() )
        {
            // Still no room - object is larger than the whole buffer.
            long x = rawWrite(bb) ;
            if ( logging )
                log("W -> 0x%X", x);
            return x ;
        }

        // Location is the eventual on-disk offset: buffered offset + current file size.
        long loc = writeBuffer.position()+filesize ;
        writeBuffer.putInt(len) ;
        writeBuffer.put(bb) ;
        if ( logging )
            log("W -> 0x%X", loc);
        return loc ;
    }

    /** Write (length, bytes) directly to the channel; returns the on-disk location. */
    private long rawWrite(ByteBuffer bb)
    {
        if ( logging )
            log("RW %s", bb) ;
        int len = bb.limit() - bb.position() ;
        lengthBuffer.rewind() ;
        lengthBuffer.putInt(len) ;
        lengthBuffer.flip() ;
        long location = file.position() ;
        file.write(lengthBuffer) ;
        int x = file.write(bb) ;
        if ( x != len )
            throw new FileException() ;
        filesize = filesize+x+SizeOfInt ;
        if ( logging )
        {
            log("Posn: %d", file.position());
            log("RW ->0x%X",location) ;
        }
        return location ;
    }

    /** First half of the two-step write: allocate space for an object of
     *  bytesSpace bytes, returning a Block whose buffer the caller fills. */
    @Override
    public Block allocWrite(int bytesSpace)
    {
        //log.info("AW("+bytesSpace+"):"+state()) ;
        if ( inAllocWrite )
            Log.fatal(this, "In the middle of an alloc-write") ;

        // Include space for length.
        int spaceRequired = bytesSpace + SizeOfInt ;

        // Find space.
        if ( writeBuffer != null && spaceRequired > writeBuffer.remaining() )
            flushOutputBuffer() ;

        if ( writeBuffer == null || spaceRequired > writeBuffer.remaining() )
        {
            // Too big. Have flushed buffering if buffering.
            inAllocWrite = true ;
            ByteBuffer bb = ByteBuffer.allocate(bytesSpace) ;
            allocBlock = new Block(filesize, bb) ;
            // allocLocation == -1 marks "not in the write buffer" for completeWrite.
            allocLocation = -1 ;
            //log.info("AW:"+state()+"-> ----") ;
            return allocBlock ;
        }

        // Will fit.
        inAllocWrite = true ;
        int start = writeBuffer.position() ;
        // Old values for restoration
        oldBufferPosn = start ;
        oldBufferLimit = writeBuffer.limit() ;
        // id (but don't tell the caller yet).
        allocLocation = filesize+start ;

        // Slice it.
        writeBuffer.putInt(bytesSpace) ;
        writeBuffer.position(start + SizeOfInt) ;
        writeBuffer.limit(start+spaceRequired) ;
        ByteBuffer bb = writeBuffer.slice() ;

        allocBlock = new Block(allocLocation, bb) ;
        if ( logging )
            log("AW: %s->0x%X", state(), allocLocation) ;
        return allocBlock ;
    }

    /** Second half of the two-step write: commit the block returned by allocWrite. */
    @Override
    public void completeWrite(Block block)
    {
        if ( logging )
            log("CW: %s @0x%X",block, allocLocation) ;
        if ( ! inAllocWrite )
            throw new FileException("Not in the process of an allocated write operation pair") ;
        if ( allocBlock != null && ( allocBlock.getByteBuffer() != block.getByteBuffer() ) )
            throw new FileException("Wrong byte buffer in an allocated write operation pair") ;
        inAllocWrite = false ;

        ByteBuffer buffer = block.getByteBuffer() ;

        if ( allocLocation == -1 )
        {
            // It was too big to use the buffering.
            rawWrite(buffer) ;
            return ;
        }

        // Write area is 0 -> limit
        if ( 0 != buffer.position() )
            log.warn("ObjectFileStorage: position != 0") ;   // Fixed typo: was "ObjectFleStorage"
        buffer.position(0) ;
        int actualLength = buffer.limit()-buffer.position() ;
        // Insert object length
        int idx = (int)(allocLocation-filesize) ;
        writeBuffer.putInt(idx, actualLength) ;
        // And bytes to idx+actualLength+4 are used
        allocBlock = null ;
        int newLen = idx+actualLength+4 ;
        writeBuffer.position(newLen);
        writeBuffer.limit(writeBuffer.capacity()) ;
        allocLocation = -1 ;
        oldBufferPosn = -1 ;
        oldBufferLimit = -1 ;
    }

    /** Abandon an allocWrite, restoring the write buffer to its prior state. */
    @Override
    public void abortWrite(Block block)
    {
        allocBlock = null ;
        int oldstart = (int)(allocLocation-filesize) ;
        if ( oldstart != oldBufferPosn)
            throw new FileException("Wrong reset point: calc="+oldstart+" : expected="+oldBufferPosn) ;
        writeBuffer.position(oldstart) ;
        writeBuffer.limit(oldBufferLimit) ;
        allocLocation = -1 ;
        oldBufferPosn = -1 ;
        oldBufferLimit = -1 ;
        inAllocWrite = false ;
    }

    /** Flush any buffered bytes to the channel and advance filesize accordingly. */
    private void flushOutputBuffer()
    {
        if ( logging )
            log("Flush") ;
        if ( writeBuffer == null ) return ;
        if ( writeBuffer.position() == 0 ) return ;
        if ( false )
        {
            // Disabled debugging output.
            String x = getLabel() ;
            if ( x.contains("nodes") )
            {
                long x1 = filesize ;
                long x2 = writeBuffer.position() ;
                long x3 = x1 + x2 ;
                System.out.printf("Flush(%s) : %d/0x%04X (%d/0x%04X) %d/0x%04X\n", getLabel(), x1, x1, x2, x2, x3, x3) ;
            }
        }
        // (Removed unused local "location" present in the original.)
        writeBuffer.flip();
        int x = file.write(writeBuffer) ;
        filesize += x ;
        writeBuffer.clear() ;
    }

    /** Truncate the logical file to posn, discarding anything beyond it. */
    @Override
    public void reposition(long posn)
    {
        if ( inAllocWrite )
            throw new FileException("In the middle of an alloc-write") ;
        if ( posn < 0 || posn > length() )
            throw new IllegalArgumentException("reposition: Bad location: "+posn) ;
        flushOutputBuffer() ;
        file.truncate(posn) ;
        filesize = posn ;
    }

    @Override
    public void truncate(long size)
    {
        //System.out.println("truncate: "+size+" ("+filesize+","+writeBuffer.position()+")") ;
        reposition(size) ;
    }

    /** Read the object stored at location loc, from either the write buffer
     *  (not yet flushed) or the underlying channel. */
    @Override
    public ByteBuffer read(long loc)
    {
        if ( logging )
            log("R(0x%X)", loc) ;
        if ( inAllocWrite )
            throw new FileException("In the middle of an alloc-write") ;
        if ( loc < 0 )
            throw new IllegalArgumentException("ObjectFile.read["+file.getLabel()+"]: Bad read: "+loc) ;

        // Maybe it's in the in the write buffer.
        // Maybe the write buffer should keep more structure?
        if ( loc >= filesize )
        {
            // NOTE(review): this branch dereferences writeBuffer without a null
            // check; with buffering disabled a loc >= filesize would NPE rather
            // than throw IllegalArgumentException - confirm callers never hit this.
            if ( loc >= filesize+writeBuffer.position() )
                throw new IllegalArgumentException("ObjectFileStorage.read["+file.getLabel()+"]: Bad read: location="+loc+" >= max="+(filesize+writeBuffer.position())) ;
            int x = writeBuffer.position() ;
            int y = writeBuffer.limit() ;
            int offset = (int)(loc-filesize) ;
            int len = writeBuffer.getInt(offset) ;
            int posn = offset + SizeOfInt ;
            // Slice the data bytes,
            writeBuffer.position(posn) ;
            writeBuffer.limit(posn+len) ;
            ByteBuffer bb = writeBuffer.slice() ;
            // restore buffer position/limit before returning
            writeBuffer.limit(y) ;
            writeBuffer.position(x) ;
            return bb ;
        }

        // No - it's in the underlying file storage.
        lengthBuffer.clear() ;
        int x = file.read(lengthBuffer, loc) ;
        if ( x != 4 )
            throw new FileException("ObjectFileStorage.read["+file.getLabel()+"]("+loc+")[filesize="+filesize+"][file.size()="+file.size()+"]: Failed to read the length : got "+x+" bytes") ;
        int len = lengthBuffer.getInt(0) ;
        // Sanity check.
        if ( len > filesize-(loc+SizeOfInt) )
        {
            String msg = "ObjectFileStorage.read["+file.getLabel()+"]("+loc+")[filesize="+filesize+"][file.size()="+file.size()+"]: Impossibly large object : "+len+" bytes > filesize-(loc+SizeOfInt)="+(filesize-(loc+SizeOfInt)) ;
            SystemTDB.errlog.error(msg) ;
            throw new FileException(msg) ;
        }
        ByteBuffer bb = ByteBuffer.allocate(len) ;
        if ( len == 0 )
            // Zero bytes.
            return bb ;
        x = file.read(bb, loc+SizeOfInt) ;
        bb.flip() ;
        if ( x != len )
            throw new FileException("ObjectFileStorage.read: Failed to read the object ("+len+" bytes) : got "+x+" bytes") ;
        return bb ;
    }

    /** Logical length: on-disk size plus any buffered-but-unflushed bytes. */
    @Override
    public long length()
    {
        if ( writeBuffer == null ) return filesize ;
        return filesize+writeBuffer.position() ;
    }

    @Override
    public boolean isEmpty()
    {
        if ( writeBuffer == null ) return filesize == 0 ;
        return writeBuffer.position() == 0 && filesize == 0 ;
    }

    @Override
    public void close()                 { flushOutputBuffer() ; file.close() ; }

    @Override
    public void sync()                  { flushOutputBuffer() ; file.sync() ; }

    @Override
    public String getLabel()            { return file.getLabel() ; }

    @Override
    public String toString()            { return file.getLabel() ; }

    /** Iterate over all objects: those on disk, then any still in the write buffer. */
    @Override
    public Iterator<Pair<Long, ByteBuffer>> all()
    {
        flushOutputBuffer() ;
        //file.position(0) ;
        ObjectIterator iter = new ObjectIterator(0, filesize) ;
        //return iter ;
        if ( writeBuffer == null || writeBuffer.position() == 0 ) return iter ;
        return Iter.concat(iter, new BufferIterator(writeBuffer)) ;
    }

    // One-line summary of file/buffer state, used as a logging prefix.
    private String state()
    {
        if ( writeBuffer == null )
            return String.format(getLabel()+": filesize=0x%X, file=(0x%X, 0x%X)", filesize, file.position(), file.size()) ;
        else
            return String.format(getLabel()+": filesize=0x%X, file=(0x%X, 0x%X), writeBuffer=(0x%X,0x%X)", filesize, file.position(), file.size(), writeBuffer.position(), writeBuffer.limit()) ;
    }

    /** Iterator over length-prefixed objects still sitting in the write buffer. */
    private class BufferIterator extends IteratorSlotted<Pair<Long, ByteBuffer>> implements Iterator<Pair<Long, ByteBuffer>>
    {
        private ByteBuffer buffer ;
        private int posn ;

        public BufferIterator(ByteBuffer buffer)
        {
            this.buffer = buffer ;
            this.posn = 0 ;
        }

        @Override
        protected Pair<Long, ByteBuffer> moveToNext()
        {
            if ( posn >= buffer.limit() )
                return null ;
            int x = buffer.getInt(posn) ;
            posn += SystemTDB.SizeOfInt ;
            ByteBuffer bb = ByteBuffer.allocate(x) ;
            int p = buffer.position() ;
            buffer.position(posn) ;
            buffer.get(bb.array()) ;
            buffer.position(p);
            posn += x ;
            // NOTE(review): the pair's first element here is the object *length* (x),
            // whereas ObjectIterator yields the object *location* - confirm this
            // asymmetry is intended by consumers of all().
            return new Pair<>((long)x, bb) ;
        }

        @Override
        protected boolean hasMore()
        {
            return posn < buffer.limit();
        }
    }

    /** Iterator over objects already written to the underlying channel. */
    private class ObjectIterator implements Iterator<Pair<Long, ByteBuffer>>
    {
        final private long start ;
        final private long finish ;
        private long current ;

        public ObjectIterator(long start, long finish)
        {
            this.start = start ;
            this.finish = finish ;
            this.current = start ;
        }

        @Override
        public boolean hasNext()
        {
            return ( current < finish ) ;
        }

        @Override
        public Pair<Long, ByteBuffer> next()
        {
            // read, but reserving the file position.
            long x = current ;
            long filePosn = file.position() ;
            ByteBuffer bb = read(current) ;
            file.position(filePosn) ;
            current = current + bb.limit() + 4 ;
            return new Pair<>(x, bb) ;
        }

        @Override
        public void remove()
        { throw new UnsupportedOperationException() ; }
    }
}
|
package com.siyuan.enjoyreading.api.request;
import java.io.File;
/**
 * Container for the callback interfaces used by the HTTP request layer.
 */
public class HttpRequestCallback {
    /**
     * Network request callback.
     */
    public interface Callback {
        /**
         * Called when the network request starts.
         */
        void onStart();
        /**
         * Result callback.
         *
         * @param result the response result
         */
        void onResponse(com.lzy.okgo.model.Response result);
        /**
         * Error callback.
         *
         * @param error the error message
         */
        void onError(String error);
        /**
         * Called when the request finishes.
         */
        void onFinish();
    }
    /**
     * Download callback.
     */
    public interface FileCallback {
        /**
         * Progress.
         *
         * @param progress progress, ranging 0.00 - 0.50 - 1.00
         * @param total    total file size in bytes
         */
        void onProgress(float progress, long total);
        /**
         * Error callback.
         *
         * @param error the error message
         */
        void onError(String error);
        /**
         * Result callback.
         *
         * @param file the downloaded file
         */
        void onResponse(File file);
        /**
         * Called before the request is made.
         */
        void onBefore();
    }
}
|
#!/usr/bin/env bash
# Jenkins-driven system-test runner: requires WORKSPACE (and MUCKRAKE_PEM,
# BUILD_URL) from the Jenkins environment.
if [[ -z $WORKSPACE ]]; then
echo "This should be run by Jenkins only"
exit 1
fi
set -e
# Checkout supporting scripts
# Used by add_suffix_to_latest_results function above and jenkins/upload-tests.sh
rm -rf jenkins-common
git clone git@github.com:confluentinc/jenkins-common.git
cp $MUCKRAKE_PEM muckrake.pem
. jenkins-common/resources/scripts/extract-iam-credential.sh
set -x
# Immediately flush output when running python
export PYTHONUNBUFFERED=1
TEST_PATH=tests/kafkatest/tests/client
LIBRDKAFKA_BRANCH=master
KAFKA_BRANCH=2.6.0 # Tag
REPO=https://github.com/apache/kafka.git
CACHE=$WORKSPACE/cache # Helps with reusing vagrant cluster
RESULTS=$WORKSPACE/results
KAFKA_DIR=$WORKSPACE/kafka
# Bringing up a Vagrant cluster is slow, so we may want to reuse a preexisting cluster
# These flags provide some control over caching behavior
DESTROY_BEFORE=true # Destroy cluster (if applicable) *before* test run?
# VAGRANT_CLEANUP specifies what action to take after running the tests
NO_ACTION="no_action"
DESTROY="destroy"
SHUTDOWN="shutdown"
VAGRANT_CLEANUP=$DESTROY
# Build python client wheels and deploy on vagrant workers
# Note: a virtualenv must be active.
function build_python_client {
# 169.254.169.254 is the EC2 instance metadata service; fetch our local IP
# so the docker daemon can be addressed over TCP.
local this_host=`curl http://169.254.169.254/latest/meta-data/local-ipv4`
export DOCKER_HOST="tcp://$this_host:2375"
tools/build-linux-selfcontained.sh $LIBRDKAFKA_BRANCH wheels
# Deploy wheels on workers
confluent_kafka/kafkatest/deploy.sh --prepare $KAFKA_DIR wheels
# Synchronize workers
pushd $KAFKA_DIR
vagrant rsync
popd # $KAFKA_DIR
}
# Predicate: echo "true" when $1 looks like an unmodified ducktape session
# id (YYYY-MM-DD--NNN), "false" otherwise.
function is_ducktape_session_id {
    local candidate="$1"
    if echo "$candidate" | egrep -q "^[0-9]{4}-[0-9]{2}-[0-9]{2}--[0-9]{3}$"; then
        echo "true"
    else
        echo "false"
    fi
}
# add a suffix which contains additional information such as
# github user, branch, commit id
function add_suffix_to_latest_results {
# Nothing to do when no results directory was produced.
if [[ -d "$RESULTS" ]]; then
cd $RESULTS
else
return
fi
# easier to reason about state if we get rid of symlink
rm -f latest || true
# most recently modified
latest_name="$(basename "$(ls -tr | tail -1)")"
# We only want to rename latest_name if it is an unadulterated ducktape session id
if [[ "$(is_ducktape_session_id "$latest_name")" == "false" ]]; then
return
fi
# Rename the session directory to <session>.<repo-identifier> and record
# the Jenkins build URL inside it.
suffix="$($WORKSPACE/jenkins-common/scripts/system-tests/kafka-system-test/make-repo-identifier.sh --directory $KAFKA_DIR)"
archive_name="${latest_name}.${suffix}"
mv "$latest_name" "$archive_name"
echo "$BUILD_URL" > "$archive_name/jenkins.txt"
}
# Archive/rename the latest results directory whenever the script exits,
# including early exits caused by `set -e`.
trap cleanup EXIT
function cleanup() {
add_suffix_to_latest_results
}
# Return false if at least one node is not running, else return true
# Fix: the original ran the pipeline as a bare statement and then read $?;
# under `set -e` a non-matching egrep (exit status 1) would abort the
# command-substitution subshell instead of returning "true". Testing the
# pipeline as an `if` condition is exempt from `set -e` and preserves the
# original true/false output.
function vagrant_alive() {
    if vagrant status | egrep "(poweroff)|(not)|(stopped)" > /dev/null; then
        # some node is powered off / not created / stopped
        echo false
    else
        echo true
    fi
}
# Clear results from the last run
# Do this before a test run rather than after so that jenkins can archive test output
rm -rf $RESULTS
# Get kafka and build
if [ ! -d $KAFKA_DIR ]; then
echo "Downloading kafka..."
git clone $REPO $KAFKA_DIR
fi
echo "Checking out $KAFKA_BRANCH ..."
cd $KAFKA_DIR
git pull
git checkout $KAFKA_BRANCH
./gradlew clean assemble systemTestLibs
# Cached vagrant data
if [ -d $CACHE ]; then
cd $CACHE
# Mark cluster for destruction if provisioning script has changed
# TODO - vagrant doesn't seem to deal well with any changes, so
# we might want to be more aggressive with overriding DESTROY_BEFORE
if [ -f vagrant/base.sh ]; then
if [ ! -z `diff vagrant/base.sh $KAFKA_DIR/vagrant/base.sh` ]; then
echo "Vagrant provisioning has changed, so vagrant cluster will not be reused"
DESTROY_BEFORE=true
fi
fi
# Cached VM data
if [ -d .vagrant ]; then
if [ "x$DESTROY_BEFORE" != "xtrue" ]; then
echo "Pulling in cached Vagrant data from previous test run..."
cp -r .vagrant/ $KAFKA_DIR/.vagrant/
fi
fi
fi
echo "Grabbing Vagrantfile.local"
cp $WORKSPACE/jenkins-common/scripts/system-tests/kafka-system-test/Vagrantfile.local $KAFKA_DIR
# The client system tests only need about 12 workers, rather than
# the default 30 (or so). This speeds up test start up times.
sed -i=bak 's/^num_workers.*/num_workers = 12/g' $KAFKA_DIR/Vagrantfile.local
if [ "x$DESTROY_BEFORE" == "xtrue" ]; then
echo "Destroying Vagrant cluster before running tests..."
cd $KAFKA_DIR
vagrant destroy -f || true
fi
# Bring up cluster if necessary
alive=`vagrant_alive`
if [ "x$alive" == "xtrue" ]; then
echo "Vagrant cluster is already running"
echo "Syncing contents of kafka directory to virtual machines..."
vagrant rsync
else
echo "Bringing up cluster..."
# Prefer the helper script when the kafka checkout provides one.
if [[ -e vagrant/vagrant-up.sh ]]; then
vagrant/vagrant-up.sh --aws
else
vagrant up --provider=aws --no-parallel --no-provision
echo "Provisioning cluster..."
vagrant provision
fi
fi
# Set up python dependencies
cd $KAFKA_DIR
virtualenv venv
. venv/bin/activate
cd tests
python setup.py develop
# Build Python client
cd $WORKSPACE
build_python_client
# Downgrade bcrypt since 3.2.0 no longer works with Python 2.
# Remove this when ducktape runs on Python 3.
pip install bcrypt==3.1.7
# Run the tests
cd $KAFKA_DIR
python `which ducktape` --debug $TEST_PATH \
--globals tests/confluent-kafka-python/globals.json \
--results-root $RESULTS \
--compress
|
<reponame>debersonpaula/Test1<gh_stars>0
import { Router } from 'express';
import { TNEMAServer, TModel } from 'tnema';
import * as utils from './utils';
/** Authenticated API: exposes CRUD routes for one Mongo model, each guarded by AuthRoute. */
export class TAuthAPI {
    private _router: Router;
    private _server: TNEMAServer;
    private _model: TModel;

    /**
     * Create authenticated routes for a model.
     *
     * @param server   application server providing HTTP, Mongo and auth services
     * @param document name of the document/model to expose (resolved via SearchModel)
     * @param url      base URL the router is mounted on
     */
    constructor (server: TNEMAServer, document: string, url: string) {
        this._server = server;
        this._router = server.HttpServer.Router(url);
        this._model = server.MongoServer.SearchModel(document);
        // Register the full CRUD route set.
        this.defineRouteAll();
        this.defineRouteOne();
        this.defineRoutePost();
        this.defineRoutePut();
        this.defineRouteDelete();
    }

    // GET / : list every record of the model.
    private defineRouteAll() {
        this._router.get('/', this._server.AuthServer.AuthRoute, (req, res) => {
            this._model.find({}, (result, error) => utils.sendjson2(res, result, error));
        });
    }

    // GET /:id : fetch a single record by its _id.
    private defineRouteOne() {
        this._router.get('/:id', this._server.AuthServer.AuthRoute, (req, res) => {
            this._model.find({ _id: req.params.id }, (result, error) => utils.sendjson2(res, result, error));
        });
    }

    // POST / : insert the request body as a new record.
    private defineRoutePost() {
        this._router.post('/', this._server.AuthServer.AuthRoute, (req, res) => {
            this._model.insert(req.body, (result, error) => utils.sendjson2(res, result, error));
        });
    }

    // PUT / : update the record identified by body._id; responds 400 when _id is absent.
    private defineRoutePut() {
        this._router.put('/', this._server.AuthServer.AuthRoute, (req, res) => {
            if (req.body._id) {
                this._model.updateById(req.body, req.body._id, (result, error) => utils.sendjson2(res, result, error));
            } else {
                utils.sendjson(res, 400, ['Invalid Request - Id = null']);
            }
        });
    }

    // DELETE /:_id : delete the record; responds 400 when the id param is absent.
    private defineRouteDelete() {
        this._router.delete('/:_id', this._server.AuthServer.AuthRoute, (req, res) => {
            if (req.params._id) {
                // NOTE(review): delete() is fire-and-forget here - the 200 response is
                // sent without waiting for a result/error callback; confirm intended.
                this._model.delete({_id: req.params._id});
                utils.sendjson(res, 200, [true]);
            } else {
                utils.sendjson(res, 400, ['Invalid Request - Id = null']);
            }
        });
    }
}
from typing import Union
def to_unicode_not_empty(value) -> Union[str, None]:
    """Convert *value* to its string form, treating empties as absent.

    Returns ``str(value)`` for any value that is neither ``None`` nor the
    empty string; otherwise returns ``None``.
    """
    if value is None or value == "":
        return None
    return str(value)
#!/usr/bin/env bash
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2017-09-22 17:01:38 +0200 (Fri, 22 Sep 2017)
#
# https://github.com/harisekhon/nagios-plugins
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn and optionally send me feedback to help improve or steer this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
set -euo pipefail
[ -n "${DEBUG:-}" ] && set -x
srcdir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$srcdir/..";
. ./tests/utils.sh
section "P r e s t o S Q L"
export PRESTO_TERADATA_VERSIONS="latest 0.152 0.157 0.167 0.179"
# Versions under test: CLI args override env, which overrides the Teradata list.
export PRESTO_VERSIONS="${@:-${PRESTO_VERSIONS:-$PRESTO_TERADATA_VERSIONS}}"
# Derive a bare hostname: DOCKER_HOST may be a tcp://host:port URL, so strip
# any scheme prefix and any :port suffix below.
PRESTO_HOST="${DOCKER_HOST:-${PRESTO_HOST:-${HOST:-localhost}}}"
PRESTO_HOST="${PRESTO_HOST##*/}"
PRESTO_HOST="${PRESTO_HOST%%:*}"
export PRESTO_HOST
export PRESTO_WORKER_HOST="$PRESTO_HOST"
export PRESTO_PORT_DEFAULT=8080
export HAPROXY_PORT_DEFAULT=8080
export PRESTO_PORT="${PRESTO_PORT:-$PRESTO_PORT_DEFAULT}"
# only for docker change default port
export PRESTO_WORKER_PORT_DEFAULT=8081
export PRESTO_WORKER_PORT="${PRESTO_WORKER_PORT:-$PRESTO_PORT}"
export PRESTO_ENVIRONMENT="${PRESTO_ENVIRONMENT:-development}"
if [ -z "${NODOCKER:-}" ]; then
check_docker_available
fi
trap_debug_env presto
# recent Facebook releases e.g 0.187 can take a long time eg. 70 secs to fully start up
startupwait 90
# Run the checks that only apply when a Presto worker is reachable separately
# from the coordinator (different host or different port); no-op otherwise.
presto_worker_tests(){
if ! [ "$PRESTO_HOST" != "$PRESTO_WORKER_HOST" -o "$PRESTO_PORT" != "$PRESTO_WORKER_PORT" ]; then
echo "Presto worker is not a separate host, skipping Presto Worker only checks"
return 0
fi
echo "Now starting Presto Worker tests:"
when_url_content "http://$PRESTO_HOST:$PRESTO_WORKER_PORT/v1/service/general/presto" environment # or "services" which is blank on worker
hr
# not running proxy for workers
#when_url_content "http://$PRESTO_HOST:$HAPROXY_PORT/v1/service/general/presto" environment # or "services" which is blank on worker
#hr
# this info is not available via the Presto worker API
run_fail 3 ./check_presto_version.py --expected "$version(-t.\d+.\d+)?" -P "$PRESTO_WORKER_PORT"
run_fail 2 ./check_presto_coordinator.py -P "$PRESTO_WORKER_PORT"
run ./check_presto_environment.py -P "$PRESTO_WORKER_PORT"
run ./check_presto_environment.py --expected development -P "$PRESTO_WORKER_PORT"
# endpoint only found on Presto 0.128 onwards
if [ "$version" = "latest" -o \
"$version" = "NODOCKER" ] ||
[ "${version#0.}" -ge 128 ]; then
run ./check_presto_state.py -P "$PRESTO_WORKER_PORT"
else
echo "state endpoint not available in this version $version < 0.128, expecting 'critical' 404 status:"
run_404 ./check_presto_state.py -P "$PRESTO_WORKER_PORT"
fi
# doesn't show up as registered for a while, so run this test last and iterate for a little while
max_node_up_wait=20
echo "allowing to $max_node_up_wait secs for worker to be detected as online by the Presto Coordinator:"
retry $max_node_up_wait ./check_presto_worker_nodes.py -w 1
run++
hr
run_fail 3 ./check_presto_worker_node.py --list-nodes
# pipefail disabled so a non-zero check exit doesn't kill the pipeline
set +o pipefail
worker_node="$(./check_presto_worker_node.py --list-nodes | tail -n 1)"
set -o pipefail
echo "determined presto worker from live running config = '$worker_node'"
hr
echo "lastResponseTime field is not immediately initialized in node data on coordinator, retrying for 10 secs to give node lastResponseTime a chance to be populated"
retry 10 ./check_presto_worker_node.py --node "$worker_node"
run++
hr
# strip https?:// leaving host:port
worker_node="${worker_node/*\/}"
run ./check_presto_worker_node.py --node "$worker_node"
# strip :port leaving just host
worker_node="${worker_node%:*}"
run ./check_presto_worker_node.py --node "$worker_node"
run_fail 2 ./check_presto_worker_node.py --node "nonexistentnode2"
echo "retrying worker nodes failed as this doesn't settle immediately after node addition:"
retry 10 ./check_presto_worker_nodes_failed.py
run++
hr
# will get a 404 Not Found against worker API
run_404 ./check_presto_worker_nodes_failed.py -P "$PRESTO_WORKER_PORT"
if [ "${version#0.}" = 74 ]; then
# gets "UNKNOWN: ValueError: No JSON object could be decoded." for Presto 0.74, there is just a smiley face ":)" in the returned output
run_fail 3 ./check_presto_unfinished_queries.py -P "$PRESTO_WORKER_PORT"
elif [ "$version" != "latest" -a \
"$version" != "NODOCKER" ] &&
[ "${version#0.}" -le 148 ]; then
# succeeds with zero queries on versions <= 0.148, not sure why yet - is this another Presto bug?
run ./check_presto_unfinished_queries.py -P "$PRESTO_WORKER_PORT"
else
# will get a 404 Not Found against worker API in modern versions of Presto
run_404 ./check_presto_unfinished_queries.py -P "$PRESTO_WORKER_PORT"
fi
run ./check_presto_tasks.py -P "$PRESTO_WORKER_PORT"
# will get a 404 Not Found against worker API
run_404 ./check_presto_worker_nodes.py -w 1 -P "$PRESTO_WORKER_PORT"
run ./check_presto_worker_nodes_response_lag.py
# will get a 404 Not Found against worker API
run_404 ./check_presto_worker_nodes_response_lag.py -P "$PRESTO_WORKER_PORT"
run ./check_presto_worker_nodes_recent_failure_ratio.py
# will get a 404 Not Found against worker API
run_404 ./check_presto_worker_nodes_recent_failure_ratio.py -P "$PRESTO_WORKER_PORT"
run ./check_presto_worker_nodes_recent_failures.py
# will get a 404 Not Found against worker API
run_404 ./check_presto_worker_nodes_recent_failures.py -P "$PRESTO_WORKER_PORT"
}
test_presto2(){
local version="$1"
run_count=0
if [ -z "${NODOCKER:-}" ]; then
if [ "$version" = "0.70" ]; then
echo "Presto 0.70 does not start up due to bug 'java.lang.ClassCastException: org.slf4j.impl.JDK14LoggerAdapter cannot be cast to ch.qos.logback.classic.Logger', skipping..."
return
fi
DOCKER_CONTAINER="${DOCKER_CONTAINER:-$DOCKER_CONTAINER}"
section2 "Setting up Presto $version test container"
docker_compose_pull
# reset container as we start a presto worker inside later so we don't want to start successive workers on compounding failed runs
[ -n "${KEEPDOCKER:-}" ] || VERSION="$version" docker-compose down || :
VERSION="$version" docker-compose up -d
echo "getting Presto dynamic port mapping:"
docker_compose_port PRESTO "Presto Coordinator"
DOCKER_SERVICE=presto-haproxy docker_compose_port HAProxy
fi
hr
when_ports_available "$PRESTO_HOST" "$PRESTO_PORT" "$HAPROXY_PORT"
hr
# endpoint initializes blank, wait until there is some content, eg. nodeId
# don't just run ./check_presto_state.py (this also doesn't work < 0.128)
when_url_content "http://$PRESTO_HOST:$PRESTO_PORT/v1/service/presto/general" nodeId
hr
echo "Checking HAProxy Presto Coordinator:"
when_url_content "http://$PRESTO_HOST:$PRESTO_PORT/v1/service/presto/general" nodeId
hr
expected_version="$version"
if [ "$version" = "latest" -o \
"$version" = "NODOCKER" ]; then
if [ "$teradata_distribution" = 1 ]; then
echo "latest version, fetching latest version from DockerHub master branch"
expected_version="$(dockerhub_latest_version presto)"
else
# don't want to have to pull presto versions script from Dockerfiles repo
expected_version=".*"
fi
fi
echo "expecting Presto version '$expected_version'"
hr
# presto service not found in list of endpoints initially even after it's come up, hence reason for when_url_content test above
if [ -n "${NODOCKER:-}" ]; then
# custom compiled presto has a version like 'dc91f48' which results in UNKNOWN: Presto Coordinator version unrecognized 'dc91f48'
run_fail "0 3" ./check_presto_version.py --expected "$expected_version(-t.\d+.\d+)?"
run_fail "2 3" ./check_presto_version.py --expected "fail-version"
else
run ./check_presto_version.py --expected "$expected_version(-t.\d+.\d+)?"
run_fail 2 ./check_presto_version.py --expected "fail-version"
fi
run_conn_refused ./check_presto_version.py --expected "$expected_version(-t.\d+.\d+)?"
# coordinator field not available in Presto 0.93
if [ "$version" = "latest" -o \
"$version" = "NODOCKER" ] ||
[ "${version#0.}" -ge 94 ]; then
run ./check_presto_coordinator.py
else
echo "coordinator attribute will not be available in this version $version < 0.94, expecting 'unknown' status:"
run_fail 3 ./check_presto_coordinator.py
fi
run_conn_refused ./check_presto_coordinator.py
run ./check_presto_environment.py
run ./check_presto_environment.py --expected "$PRESTO_ENVIRONMENT"
run_conn_refused ./check_presto_environment.py --expected "$PRESTO_ENVIRONMENT"
run ./check_presto_worker_nodes_failed.py
run_conn_refused ./check_presto_worker_nodes_failed.py
run ./check_presto_unfinished_queries.py
run_conn_refused ./check_presto_unfinished_queries.py
run ./check_presto_tasks.py
run_conn_refused ./check_presto_tasks.py
run_fail 2 ./check_presto_worker_nodes.py -w 1
run_conn_refused ./check_presto_worker_nodes.py -w 1
run_fail 3 ./check_presto_queries.py --list
if [ -n "${NODOCKER:-}" -o -n "${KEEPDOCKER:-}" ]; then
run_fail "0 1 2" ./check_presto_queries.py --running
run_fail "0 1 2" ./check_presto_queries.py --failed
run_fail "0 1 2" ./check_presto_queries.py --blocked
run_fail "0 1 2" ./check_presto_queries.py --queued
else
echo "checking presto queries, but in docker there will be none by this point so expecting warning:"
run_fail 1 ./check_presto_queries.py --running
run_fail 1 ./check_presto_queries.py --failed
run_fail 1 ./check_presto_queries.py --blocked
run_fail 1 ./check_presto_queries.py --queued
fi
# endpoint only found on Presto 0.128 onwards
if [ "$version" = "latest" -o \
"$version" = "NODOCKER" ] ||
[ "${version#0.}" -ge 128 ]; then
run ./check_presto_state.py
else
echo "state endpoint is not available in this version $version < $0.128, expecting 'critical' 404 status:"
run_404 ./check_presto_state.py
fi
run_conn_refused ./check_presto_state.py
run_fail 2 ./check_presto_worker_node.py --node "nonexistentnode"
run_fail 1 ./check_presto_worker_nodes_response_lag.py
run_conn_refused ./check_presto_worker_nodes_response_lag.py
run_fail 1 ./check_presto_worker_nodes_recent_failure_ratio.py
run_conn_refused ./check_presto_worker_nodes_recent_failure_ratio.py
run_fail 1 ./check_presto_worker_nodes_recent_failures.py
run_conn_refused ./check_presto_worker_nodes_recent_failures.py
if [ -n "${NODOCKER:-}" ]; then
presto_worker_tests
echo
echo "External Presto, skipping worker setup + teardown checks..."
echo
echo "Completed $run_count Presto tests"
return 0
fi
# Starting process in the same container for convenience, short lived only for tests so doesn't need separate container
echo "Now reconfiguring to run additional Presto worker:"
docker exec -i "$DOCKER_CONTAINER" bash <<EOF
# set -x
set -euo pipefail
if [ -d /etc/presto ]; then
echo "detected Teradata distribution path"
BIN_DIR=/usr/lib/presto/bin
CONF_DIR=/etc/presto
elif [ -d /presto/ ]; then
echo "detected Facebook distribution path"
BIN_DIR=/presto/bin
CONF_DIR=/presto/etc
else
echo "FAILED to detect Presto paths!"
exit 1
fi
for x in node config; do
cp -vf "\$CONF_DIR"/"\$x".properties{,.worker}
done
sed -i 's/node.id=.*/node.id=2/' "\$CONF_DIR"/node.properties.worker
sed -i 's/coordinator=true/coordinator=false/' "\$CONF_DIR"/config.properties.worker
sed -i 's/http-server.http.port=8080/http-server.http.port=8081/' "\$CONF_DIR"/config.properties.worker
"\$BIN_DIR"/launcher --config="\$CONF_DIR"/config.properties.worker --node-config "\$CONF_DIR"/node.properties.worker --pid-file /var/run/worker-launcher.pid start
echo
echo "creating some sample queries to test check_presto_queries.py against:"
presto <<EOF2
select 1+1;
select 2+2;
select 3+3;
select failure;
select failure2;
-- localfile catalog not available in older versions of Presto
--select count(*) from localfile.logs.http_request_log;
EOF2
EOF
hr
# missing key error also returns UNKNOWN, so check we actual output a query we expect
ERRCODE=3 run_grep 'select 1\+1' ./check_presto_queries.py --list
run ./check_presto_queries.py --running
run ./check_presto_queries.py --failed --exclude 'failure|localfile.logs.http_request_log|SHOW FUNCTIONS|information_schema.tables'
run ./check_presto_queries.py --queued
run ./check_presto_queries.py --blocked
echo "checking ./check_presto_queries.py with implicit --warning 0 should raise warning after failed queries above are detected:"
run_fail 1 ./check_presto_queries.py --failed # implicit -w 0
# this should be -c 1 but sometimes queries get the following error and are marked as abandoned, reducing the select failure count, seems to happen mainly on older versions of Presto < 0.130 eg 0.126, setting to -c 0 for more resilience in case only one query was actually executed to fail instead of two:
#
# [ERROR] Failed to disable litteral next character
# java.lang.InterruptedException
# at java.lang.Object.wait(Native Method)
# at java.lang.Object.wait(Object.java:502)
# at java.lang.UNIXProcess.waitFor(UNIXProcess.java:395)
# at jline.internal.TerminalLineSettings.waitAndCapture(TerminalLineSettings.java:339)
# at jline.internal.TerminalLineSettings.exec(TerminalLineSettings.java:311)
# at jline.internal.TerminalLineSettings.stty(TerminalLineSettings.java:282)
# at jline.internal.TerminalLineSettings.undef(TerminalLineSettings.java:158)
# at jline.UnixTerminal.disableLitteralNextCharacter(UnixTerminal.java:185)
# at jline.console.ConsoleReader.readLine(ConsoleReader.java:2448)
# at jline.console.ConsoleReader.readLine(ConsoleReader.java:2372)
# at com.facebook.presto.cli.LineReader.readLine(LineReader.java:51)
# at jline.console.ConsoleReader.readLine(ConsoleReader.java:2360)
# at com.facebook.presto.cli.Console.runConsole(Console.java:149)
# at com.facebook.presto.cli.Console.run(Console.java:128)
# at com.facebook.presto.cli.Presto.main(Presto.java:32)
#
run_fail 2 ./check_presto_queries.py --failed -c 0
run ./check_presto_queries.py --running --include 'select 1\+1'
run ./check_presto_queries.py --failed --include 'select 1\+1'
run ./check_presto_queries.py --blocked --include 'select 1\+1'
run ./check_presto_queries.py --queued --include 'select 1\+1'
run_fail 1 ./check_presto_queries.py --failed --include 'failure'
run ./check_presto_queries.py --running --include 'failure'
run ./check_presto_queries.py --blocked --include 'failure'
run ./check_presto_queries.py --queued --include 'failure'
run_fail 2 ./check_presto_queries.py --failed --include 'failure' -c 0
run_fail 1 ./check_presto_queries.py --running --include 'nonexistentquery'
run_fail 1 ./check_presto_queries.py --failed --include 'nonexistentquery'
run_fail 1 ./check_presto_queries.py --blocked --include 'nonexistentquery'
run_fail 1 ./check_presto_queries.py --queued --include 'nonexistentquery'
echo "getting Presto Worker dynamic port mapping:"
docker_compose_port "Presto Worker"
hr
presto_worker_tests
echo "finding presto docker container IP for specific node registered checks:"
# hostname command not installed
#hostname="$(docker exec -i "$DOCKER_CONTAINER" hostname -f)"
# registering IP not hostname
#hostname="$(docker exec -i "$DOCKER_CONTAINER" tail -n1 /etc/hosts | awk '{print $2}')"
ip="$(docker exec -i "$DOCKER_CONTAINER" tail -n1 /etc/hosts | awk '{print $1}')"
echo "determined presto container IP = '$ip'"
hr
echo "lastResponseTime field is not immediately initialized in node data on coordinator, retrying for 10 secs to give node lastResponseTime a chance to be populated:"
retry 10 ./check_presto_worker_node.py --node "http://$ip:$PRESTO_WORKER_PORT_DEFAULT"
run++
hr
run ./check_presto_worker_node.py --node "$ip:$PRESTO_WORKER_PORT_DEFAULT"
run ./check_presto_worker_node.py --node "$ip"
# query failures never hit the worker
run ./check_presto_worker_node.py --node "$ip" --max-age 20 --max-ratio 0.0 --max-failures 0.0 --max-requests 100
run_fail 1 ./check_presto_worker_node.py --node "$ip" --max-requests 1
echo "Now killing Presto Worker:"
# Presto Worker runs the same com.facebook.presto.server.PrestoServer class with a different node id
# worker doesn't show up as a failed node in coorindator API if we send a polite kill signal, must kill -9 worker
docker exec -i "$DOCKER_CONTAINER" /usr/bin/pkill -9 -f -- -Dnode.id=2
# This doesn't work because the port still responds as open, even when the mapped port is down
# must be a result of docker networking
#when_ports_down 20 "$PRESTO_HOST" "$PRESTO_WORKER_PORT"
SECONDS=0
max_kill_time=20
while docker exec "$DOCKER_CONTAINER" ps -ef | grep -q -- -Dnode.id=2; do
if [ $SECONDS -gt $max_kill_time ]; then
echo "Presto Worker process did not go down after $max_kill_time secs!"
exit 1
fi
echo "waiting for Presto Worker process to go down"
sleep 1
done
hr
# endpoint only found on Presto 0.128 onwards but will fail here regardless
run_conn_refused ./check_presto_state.py -P "$PRESTO_WORKER_PORT"
echo "re-running failed worker node check against the coordinator API to detect failure of the worker we just killed:"
# usually detects in around 5-10 secs
max_detect_secs=60
set +o pipefail
SECONDS=0
while true; do
# can't just test status code as gets 500 Internal Server Error within a few secs
if ./check_presto_worker_nodes_failed.py | tee /dev/stderr | grep -q -e 'WARNING: Presto SQL - 1 worker node failed' -e "500 Internal Server Error"; then
break
fi
if [ $SECONDS -gt $max_detect_secs ]; then
echo
echo "FAILED: Presto worker did not detect worker failure after $max_detect_secs secs!"
exit 1
fi
echo "waited $SECONDS secs, will try again until Presto coordinator detects worker failure..."
# sleeping can miss API might change and hit 500 Internal Server Error bug as it only very briefly returns 1 failed node
# sometimes misses the state change before the API breaks
# do not enable
#sleep 0.5
done
set -o pipefail
# subsequent queries to the API expose a bug in the Presto API returning 500 Internal Server Error
hr
# XXX: this still passes as worker is still found, only response time lag and recent failures / recent failure ratios will reliably detect worker failure, not drop in the number of nodes
run_fail "0 2" ./check_presto_worker_nodes.py -w 1
run_fail 2 ./check_presto_worker_node.py --node "http://$ip:$PRESTO_WORKER_PORT_DEFAULT"
run_fail 2 ./check_presto_worker_node.py --node "$ip:$PRESTO_WORKER_PORT_DEFAULT"
run_fail 2 ./check_presto_worker_node.py --node "$ip"
# XXX: must permit error state 2 on checks below to pass 500 Internal Server Error caused by Presto Bug:
#
# https://github.com/prestodb/presto/issues/9158
#
run_fail "1 2" ./check_presto_worker_nodes_failed.py
run_fail "0 2" ./check_presto_worker_nodes_response_lag.py --max-age 1
run_fail "1 2" ./check_presto_worker_nodes_recent_failure_ratio.py
run_fail "1 2" ./check_presto_worker_nodes_recent_failures.py
echo "Completed $run_count Presto tests"
hr
[ -z "${KEEPDOCKER:-}" ] || return 0
[ -n "${NODOCKER:-}" ] ||
docker-compose down
hr
echo
}
# When testing an external Presto instance (NODOCKER set), collapse the
# version test matrix down to the single sentinel value "NODOCKER".
[ -z "${NODOCKER:-}" ] || PRESTO_VERSIONS="NODOCKER"
# Dispatch Presto tests for a single version string.
#
# Version suffixes select a distribution explicitly:
#   <ver>-teradata  -> test only Teradata's distribution
#   <ver>-facebook  -> test only Facebook's release
# An unsuffixed version is additionally run against the Teradata
# distribution when it appears in $PRESTO_TERADATA_VERSIONS.
test_presto(){
    local version="$1"
    local teradata_distribution=0
    local teradata_only=0
    local facebook_only=0
    if [[ "$version" =~ .*-teradata$ ]]; then
        version="${version%-teradata}"
        teradata_distribution=1
        teradata_only=1
    elif [[ "$version" =~ .*-facebook$ ]]; then
        version="${version%-facebook}"
        facebook_only=1
    else
        # no explicit suffix - check whether this version also ships as a
        # Teradata distribution so both flavours get covered
        for teradata_version in $PRESTO_TERADATA_VERSIONS; do
            if [ "$version" = "$teradata_version" ]; then
                teradata_distribution=1
                break
            fi
        done
    fi
    # fix: join two [ ] tests with && instead of the single-bracket '-a'
    # operator, which POSIX marks obsolescent and which parses ambiguously
    # with some operand values
    if [ "$teradata_distribution" = "1" ] && [ "$facebook_only" -eq 0 ]; then
        echo "Testing Teradata's Presto distribution version:  '$version'"
        COMPOSE_FILE="$srcdir/docker/presto-docker-compose.yml" test_presto2 "$version"
        # must call this manually here as we're sneaking in an extra batch of tests that run_test_versions is generally not aware of
        let total_run_count+=$run_count
        # reset this so it can be used in test_presto to detect now testing Facebook
        teradata_distribution=0
    fi
    if [ -n "${NODOCKER:-}" ]; then
        echo "Testing External Presto:"
    else
        echo "Testing Facebook's Presto release version:  '$version'"
    fi
    if [ $teradata_only -eq 0 ]; then
        COMPOSE_FILE="$srcdir/docker/presto-dev-docker-compose.yml" test_presto2 "$version"
    fi
}
# Iterate over the configured Presto versions (or the NODOCKER sentinel set
# above) and invoke test_presto for each one; run_test_versions is provided
# by the shared test harness sourced earlier in this script.
run_test_versions Presto
|
package com.datasift.client.pylon;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.ArrayList;
import java.util.List;
/**
 * Jackson-mapped container for the result of a single Pylon task analysis:
 * the analysis type, the parameters the analysis was run with, a redaction
 * flag and the list of result buckets.
 */
public class PylonTaskAnalysisResult {

    // JSON field "analysis_type": which kind of analysis produced this result
    @JsonProperty("analysis_type")
    protected String analysisType;

    // parameters the analysis was executed with (JSON field name matches the Java field)
    @JsonProperty
    protected PylonParametersData parameters;

    // whether the result was redacted by the API -- exact semantics defined by the Pylon API docs
    @JsonProperty
    protected boolean redacted;

    // result buckets; initialised to an empty list so callers never observe null
    @JsonProperty
    protected List<PylonTaskAnalysisResultBucket> results = new ArrayList<PylonTaskAnalysisResultBucket>();

    /** No-arg constructor (used by Jackson during deserialization). */
    public PylonTaskAnalysisResult() {
    }

    /** @return the analysis type string reported by the API */
    public String getAnalysisType() {
        return this.analysisType;
    }

    /** @return the parameters the analysis was run with */
    public PylonParametersData getParameters() {
        return this.parameters;
    }

    /** @return true if the result was redacted */
    public boolean getRedacted() {
        return this.redacted;
    }

    /** @return the list of result buckets (never null, may be empty) */
    public List<PylonTaskAnalysisResultBucket> getResults() {
        return this.results;
    }
}
|
// Re-export the addon's keyword-complete component under this app-local
// module path so the host application's resolver can find it.
export { default } from 'ember-keyword-complete/components/keyword-complete';
package modele;
import javafx.scene.control.TextArea;
import java.util.ArrayList;
public class Console {
public int maxLines = 250;
private ArrayList<String> lines;
private TextArea textArea;
private static Console INSTANCE;
/*
Constructeur : Console()
------------------------------------------
Couche d'abstraction pour gérer les messages évènementiels
et les afficher dans une un objet Textarea.
Cette classe utilise le pattern de programmation Singleton
pour n'avoir qu'une seule instance de Console dans tout
le programme;
*/
private Console() { // constructeur
lines = new ArrayList<>();
}
/*
Méthode : getInstance()
------------------------------------------
Permet de récupérer l'instance de console en
suivant les principes du pattern Singleton.
returns: l'instance du singleton console
*/
public static synchronized Console getInstance() {
// si on a pas encore initialisé l'instance on le fait
if (INSTANCE == null) {
INSTANCE = new Console();
}
return INSTANCE;
}
/*
Méthode : attachTextarea()
------------------------------------------
Permet d'attancher une instance de textarea
à l'instance actuelle de console.
ta (TextArea) : l'object à attacher à l'instance
*/
public void attachTextarea(TextArea ta) { // attache la textArea javafx à la console
textArea = ta;
}
/*
Méthode : addLine(message)
------------------------------------------
Ajoute une ligne à la console. Si le nombre
maximal de lignes est atteint, on supprime
les lignes les plus anciennes.
message(String): ligne à ajouter à la console
*/
public void addLine(String message) { // ajout d'une ligne à l'invit de commande
if (lines.size() > maxLines) {
lines.remove(0);
}
lines.add(message);
update();
}
/*
Méthode : getLines()
------------------------------------------
Retourne les lignes stockées dans la console.
returns: les lignes stockées dans la console
*/
public ArrayList<String> getLines() {
return lines;
}
/*
Méthode : addLines(messages)
------------------------------------------
Ajoute une liste de ligne à la console..
messages(String []): les lignes à ajouter
*/
public void addLines(String[] messages) {
for (String msg : messages)
addLine(msg);
}
/*
Méthode : addLines(messages)
------------------------------------------
Ajoute une liste de ligne à la console..
messages(ArrayList<String>): les lignes à ajouter
*/
public void addLines(ArrayList<String> messages) {
for (String msg : messages)
addLine(msg);
}
/*
Méthode : update()
------------------------------------------
Met a jour la textarea avec les lignes si elle
est attachée à l'instance. Dans ce cas on va
scoller jusqu'en bas du container pour afficher
les messages les plus récents.
*/
public void update() {
if (textArea != null) {
// on crée le messgae à afficher
String content = "";
for (String msg : lines)
content += msg + "\n";
// on update la textarea
textArea.setText(content);
// on scrolle tout en bas
textArea.setScrollTop(Double.MAX_VALUE);
}
}
/*
Méthode : getAttachedTextArea()
------------------------------------------
Permet de récupérer la référence vers l'objet textarea
rattaché à la console.
returns: l'objet textarea attaché à l'instance
*/
public TextArea getAttachedTextArea() {
return textArea;
}
/*
Méthode : printHelloWorld()
------------------------------------------
Affiche le message d'accueil dans la console
au lancement du programme.
*/
public void printHelloWorld() {
addLine("#=====================================#");
addLine("Programme d'aide à la décision");
addLine("Imaginé et développé par :");
addLine("\t- <NAME>");
addLine("\t- <NAME>");
addLine("\t- <NAME>");
addLine("#=====================================#");
addLine("");
addLine("[+] Programme prêt à fonctionner !");
}
} |
def update_dictionary(dictionary, new_keys):
    """Ensure every key in ``new_keys`` exists in ``dictionary``.

    Missing keys are inserted with a default value of 0; existing keys and
    their values are left untouched. The dictionary is modified in place and
    also returned for call-chaining convenience.

    Args:
        dictionary (dict): Mapping to update in place.
        new_keys (iterable): Keys that must be present afterwards.

    Returns:
        dict: The same ``dictionary`` object that was passed in.
    """
    for key in new_keys:
        # setdefault is the idiomatic one-step "insert if absent"
        dictionary.setdefault(key, 0)
    return dictionary
#!/usr/bin/env bash
# Build the project jar, (re)build and start the docker-compose stack in the
# background, then hand control over to the interactive client.
set -eux
# build from the parent directory; '|| exit 1' is redundant under 'set -e'
# but kept as an explicit guard
(cd .. && ./gradlew jar) || exit 1
docker-compose up -d --build || exit 1
# replace this shell with the client so it receives signals directly
exec ./client.sh
<gh_stars>10-100
package org.shipkit.github.release;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
/**
* The plugin, ideally with zero business logic, but only the Gradle integration code
*/
public class GithubReleasePlugin implements Plugin<Project> {

    /**
     * Registers the "githubRelease" task, pre-configured with the public
     * GitHub API URL and a release tag/name derived from the project version.
     */
    public void apply(Project project) {
        project.getTasks().register("githubRelease", GithubReleaseTask.class, t -> {
            t.setGithubApiUrl("https://api.github.com");
            // tag convention: "v" prefix followed by the project version
            String tagName = "v" + project.getVersion();
            t.setReleaseTag(tagName);
            t.setReleaseName(tagName);
        });
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.