text stringlengths 1 1.05M |
|---|
<gh_stars>0
// Doxygen-generated search index data (do not edit by hand):
// maps the lowercase search key to [display name, [match label, anchor URL, flags, owning class]].
var searchData=
[
  ['filedialog',['fileDialog',['../class_parameter_window.html#a0d5e07e3c9faf784ecc4e0e052974d48',1,'ParameterWindow']]]
];
|
<filename>cwe_run.go
package cwe
import (
"fmt"
"mvdan.cc/sh/interp"
"mvdan.cc/sh/syntax"
"os"
"strings"
)
// Run executes the program with the environment required
func (cwe *CallWithEnvironment) Run(program string) error {
variables := cwe.buildEnvironment()
dir, err := os.Getwd()
if err != nil {
return err
}
p, err := syntax.NewParser().Parse(strings.NewReader(program), "")
if err != nil {
return err
}
env := variables
r := interp.Runner{
Dir: dir,
Env: env,
Exec: interp.DefaultExec,
Open: interp.OpenDevImpls(interp.DefaultOpen),
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
}
if err = r.Reset(); err != nil {
return err
}
err = r.Run(p)
return err
}
// buildEnvironment returns the amended env list
func (cwe *CallWithEnvironment) buildEnvironment() []string {
current := os.Environ()
for k, v := range cwe.Environment {
if !cwe.Quiet {
fmt.Println(fmt.Sprintf("%s: %s", k, v))
}
current = append(current, fmt.Sprintf("%s=%s", k, v))
}
if !cwe.Quiet {
fmt.Println()
}
return current
}
|
// Code generated by tutone: DO NOT EDIT
package workloads
import (
"encoding/json"
"fmt"
"github.com/newrelic/newrelic-client-go/pkg/accounts"
"github.com/newrelic/newrelic-client-go/pkg/entities"
"github.com/newrelic/newrelic-client-go/pkg/nrtime"
"github.com/newrelic/newrelic-client-go/pkg/users"
)
// WorkloadGroupRemainingEntitiesRuleBy - Indicates by which field the remaining entities rule should be grouped.
type WorkloadGroupRemainingEntitiesRuleBy string

// WorkloadGroupRemainingEntitiesRuleByTypes specifies the possible values for WorkloadGroupRemainingEntitiesRuleBy.
var WorkloadGroupRemainingEntitiesRuleByTypes = struct {
	// Group the remaining entities rule by entity type.
	ENTITY_TYPE WorkloadGroupRemainingEntitiesRuleBy
	// Do not apply any grouping to the remaining entities rule.
	NONE WorkloadGroupRemainingEntitiesRuleBy
}{
	// Group the remaining entities rule by entity type.
	ENTITY_TYPE: "ENTITY_TYPE",
	// Do not apply any grouping to the remaining entities rule.
	NONE: "NONE",
}

// WorkloadResultingGroupType - Represents the type of the rule that the resulting group of entities belongs to.
type WorkloadResultingGroupType string

// WorkloadResultingGroupTypeTypes specifies the possible values for WorkloadResultingGroupType.
var WorkloadResultingGroupTypeTypes = struct {
	// The rule considers the entities within a specific group in the workload.
	REGULAR_GROUP WorkloadResultingGroupType
	// The rule considers all the entities within the workload that aren’t evaluated in any other rule.
	REMAINING_ENTITIES WorkloadResultingGroupType
}{
	// The rule considers the entities within a specific group in the workload.
	REGULAR_GROUP: "REGULAR_GROUP",
	// The rule considers all the entities within the workload that aren’t evaluated in any other rule.
	REMAINING_ENTITIES: "REMAINING_ENTITIES",
}

// WorkloadRollupStrategy - Represents the rollup strategy that is applied to a group of entities.
type WorkloadRollupStrategy string

// WorkloadRollupStrategyTypes specifies the possible values for WorkloadRollupStrategy.
var WorkloadRollupStrategyTypes = struct {
	// The group status matches the less critical status of all belonging entities.
	BEST_STATUS_WINS WorkloadRollupStrategy
	// The group status matches the most critical status of all belonging entities.
	WORST_STATUS_WINS WorkloadRollupStrategy
}{
	// The group status matches the less critical status of all belonging entities.
	BEST_STATUS_WINS: "BEST_STATUS_WINS",
	// The group status matches the most critical status of all belonging entities.
	WORST_STATUS_WINS: "WORST_STATUS_WINS",
}

// WorkloadRuleThresholdType - Represents the type of the threshold defined for a rule.
type WorkloadRuleThresholdType string

// WorkloadRuleThresholdTypeTypes specifies the possible values for WorkloadRuleThresholdType.
var WorkloadRuleThresholdTypeTypes = struct {
	// The worst status is rolled up only after a certain number of entities within the workload are not operational.
	FIXED WorkloadRuleThresholdType
	// The worst status is rolled up only after a certain percentage of entities within the workload are not operational.
	PERCENTAGE WorkloadRuleThresholdType
}{
	// The worst status is rolled up only after a certain number of entities within the workload are not operational.
	FIXED: "FIXED",
	// The worst status is rolled up only after a certain percentage of entities within the workload are not operational.
	PERCENTAGE: "PERCENTAGE",
}

// WorkloadStatusSource - Indicates where the status value derives from.
type WorkloadStatusSource string

// WorkloadStatusSourceTypes specifies the possible values for WorkloadStatusSource.
var WorkloadStatusSourceTypes = struct {
	// Refers to the result of an automatic rule defined for a workload.
	ROLLUP_RULE WorkloadStatusSource
	// Refers to a static status defined for a workload.
	STATIC WorkloadStatusSource
	// Refers to an undetermined status source.
	UNKNOWN WorkloadStatusSource
	// Refers to the override policy that is applied to a set of partial results within a workload. Any static status always overrides any other status values calculated automatically. Otherwise, the worst status of the partial results is rolled up.
	WORKLOAD WorkloadStatusSource
}{
	// Refers to the result of an automatic rule defined for a workload.
	ROLLUP_RULE: "ROLLUP_RULE",
	// Refers to a static status defined for a workload.
	STATIC: "STATIC",
	// Refers to an undetermined status source.
	UNKNOWN: "UNKNOWN",
	// Refers to the override policy that is applied to a set of partial results within a workload. Any static status always overrides any other status values calculated automatically. Otherwise, the worst status of the partial results is rolled up.
	WORKLOAD: "WORKLOAD",
}

// WorkloadStatusValue - The status of the workload, which is derived from the static and the automatic statuses configured. Any static status always overrides any other status values calculated automatically.
type WorkloadStatusValue string

// WorkloadStatusValueTypes specifies the possible values for WorkloadStatusValue.
var WorkloadStatusValueTypes = struct {
	// The status of the workload is degraded.
	DEGRADED WorkloadStatusValue
	// The status of the workload is disrupted.
	DISRUPTED WorkloadStatusValue
	// The status of the workload is operational.
	OPERATIONAL WorkloadStatusValue
	// The status of the workload is unknown.
	UNKNOWN WorkloadStatusValue
}{
	// The status of the workload is degraded.
	DEGRADED: "DEGRADED",
	// The status of the workload is disrupted.
	DISRUPTED: "DISRUPTED",
	// The status of the workload is operational.
	OPERATIONAL: "OPERATIONAL",
	// The status of the workload is unknown.
	UNKNOWN: "UNKNOWN",
}

// WorkloadStatusValueInput - The status value. Any static status always overrides any other status values calculated automatically.
type WorkloadStatusValueInput string

// WorkloadStatusValueInputTypes specifies the possible values for WorkloadStatusValueInput.
// Unlike WorkloadStatusValue, UNKNOWN is not an accepted input value.
var WorkloadStatusValueInputTypes = struct {
	// The status of the workload is degraded.
	DEGRADED WorkloadStatusValueInput
	// The status of the workload is disrupted.
	DISRUPTED WorkloadStatusValueInput
	// The status of the workload is operational.
	OPERATIONAL WorkloadStatusValueInput
}{
	// The status of the workload is degraded.
	DEGRADED: "DEGRADED",
	// The status of the workload is disrupted.
	DISRUPTED: "DISRUPTED",
	// The status of the workload is operational.
	OPERATIONAL: "OPERATIONAL",
}
// WorkloadAutomaticStatus - The automatic status configuration.
type WorkloadAutomaticStatus struct {
	// Whether the automatic status configuration is enabled or not.
	Enabled bool `json:"enabled"`
	// An additional meta-rule that can consider all entities that haven't been evaluated by any other rule.
	// NOTE(review): encoding/json never treats a non-pointer struct as empty, so this omitempty has no effect when marshaling.
	RemainingEntitiesRule WorkloadRemainingEntitiesRule `json:"remainingEntitiesRule,omitempty"`
	// A list of rules.
	Rules []WorkloadRegularRule `json:"rules"`
}

// WorkloadAutomaticStatusInput - An input object used to represent an automatic status configuration. If not provided, a status configuration will be created by default.
type WorkloadAutomaticStatusInput struct {
	// Whether the automatic status configuration is enabled or not.
	Enabled bool `json:"enabled"`
	// An additional meta-rule that can consider all entities that haven't been evaluated by any other rule.
	// NOTE(review): omitempty on a non-pointer struct is a no-op; this field is always serialized.
	RemainingEntitiesRule WorkloadRemainingEntitiesRuleInput `json:"remainingEntitiesRule,omitempty"`
	// A list of rules.
	Rules []WorkloadRegularRuleInput `json:"rules,omitempty"`
}

// WorkloadCollection - A user defined group of entities.
type WorkloadCollection struct {
	// The account the workload belongs to.
	Account accounts.AccountReference `json:"account"`
	// The moment when the object was created, represented in milliseconds since the Unix epoch.
	CreatedAt *nrtime.EpochMilliseconds `json:"createdAt"`
	// The user who created the workload.
	CreatedBy users.UserReference `json:"createdBy"`
	// Relevant information about the workload.
	Description string `json:"description,omitempty"`
	// A list of entity GUIDs. These entities will belong to the collection as long as their accounts are included in the scope accounts of the collection.
	Entities []WorkloadEntityRef `json:"entities"`
	// A list of entity search queries. The resulting entities will be limited to the scope accounts of the collection.
	EntitySearchQueries []WorkloadEntitySearchQuery `json:"entitySearchQueries"`
	// The entity search query that returns the full collection of entities.
	EntitySearchQuery string `json:"entitySearchQuery,omitempty"`
	// The unique entity identifier of the workload in New Relic.
	GUID entities.EntityGUID `json:"guid"`
	// The unique identifier of the workload.
	ID int `json:"id"`
	// The workload's name.
	Name string `json:"name"`
	// The URL of the workload.
	Permalink string `json:"permalink"`
	// Accounts that will be used to get entities from.
	ScopeAccounts WorkloadScopeAccounts `json:"scopeAccounts"`
	// Status of the workload.
	Status WorkloadWorkloadStatus `json:"status"`
	// The configuration that defines how the status of the workload is calculated.
	StatusConfig WorkloadStatusConfig `json:"statusConfig,omitempty"`
	// The moment when the object was last updated, represented in milliseconds since the Unix epoch.
	UpdatedAt *nrtime.EpochMilliseconds `json:"updatedAt,omitempty"`
	// The user who last updated the workload.
	UpdatedBy users.UserReference `json:"updatedBy,omitempty"`
}

// WorkloadCreateInput - The input object used to represent the workload to be created.
type WorkloadCreateInput struct {
	// Relevant information about the workload.
	Description string `json:"description,omitempty"`
	// A list of entity GUIDs composing the workload.
	EntityGUIDs []entities.EntityGUID `json:"entityGuids"`
	// A list of entity search queries used to retrieve the entities that compose the workload.
	EntitySearchQueries []WorkloadEntitySearchQueryInput `json:"entitySearchQueries,omitempty"`
	// The name of the workload.
	Name string `json:"name"`
	// Accounts that will be used to get entities from.
	ScopeAccounts *WorkloadScopeAccountsInput `json:"scopeAccounts,omitempty"`
	// The configuration that defines how the status of the workload is calculated.
	StatusConfig *WorkloadStatusConfigInput `json:"statusConfig,omitempty"`
}

// WorkloadDuplicateInput - The input object used to represent the workload duplicate.
type WorkloadDuplicateInput struct {
	// The name of the workload duplicate. If the name isn't specified, the name + ' copy' of the source workload is used to compose the new name.
	Name string `json:"name,omitempty"`
}

// WorkloadEntityRef - A reference to a New Relic entity.
type WorkloadEntityRef struct {
	// The unique entity identifier in New Relic.
	GUID entities.EntityGUID `json:"guid,omitempty"`
}
// WorkloadEntitySearchQuery - An entity search query used to dynamically retrieve a group of entities.
type WorkloadEntitySearchQuery struct {
	// The moment when the object was created, represented in milliseconds since the Unix epoch.
	CreatedAt *nrtime.EpochMilliseconds `json:"createdAt"`
	// The user who created the entity search query.
	CreatedBy users.UserReference `json:"createdBy"`
	// The unique identifier of the entity search query.
	ID int `json:"id"`
	// The entity search query that is used to perform the search of a group of entities.
	Query string `json:"query"`
	// The moment when the object was last updated, represented in milliseconds since the Unix epoch.
	UpdatedAt *nrtime.EpochMilliseconds `json:"updatedAt,omitempty"`
}

// WorkloadEntitySearchQueryInput - The input object used to represent the entity search query to be created.
type WorkloadEntitySearchQueryInput struct {
	// The entity search query that is used to perform the search of a group of entities.
	Query string `json:"query"`
}

// WorkloadRegularRule - The definition of a rule, which consists of a group of entities and a rollup strategy.
type WorkloadRegularRule struct {
	// A list of entity GUIDs. These entities will belong to the collection as long as their accounts are included in the scope accounts of the collection.
	Entities []WorkloadEntityRef `json:"entities"`
	// A list of entity search queries. These queries are constrained to the workload contents.
	EntitySearchQueries []WorkloadEntitySearchQuery `json:"entitySearchQueries"`
	// The unique identifier of the rule.
	ID int `json:"id,omitempty"`
	// The rollup strategy.
	Rollup WorkloadRollup `json:"rollup"`
}

// WorkloadRegularRuleInput - The input object used to represent a rule.
type WorkloadRegularRuleInput struct {
	// A list of entity GUIDs composing the rule.
	EntityGUIDs []entities.EntityGUID `json:"entityGuids"`
	// A list of entity search queries used to retrieve the entities that compose the rule.
	EntitySearchQueries []WorkloadEntitySearchQueryInput `json:"entitySearchQueries,omitempty"`
	// The input object used to represent a rollup strategy.
	// NOTE(review): omitempty on a non-pointer struct is a no-op; this field is always serialized.
	Rollup WorkloadRollupInput `json:"rollup,omitempty"`
}

// WorkloadRemainingEntitiesRule - The definition of a remaining entities rule.
type WorkloadRemainingEntitiesRule struct {
	// The rollup strategy.
	Rollup WorkloadRemainingEntitiesRuleRollup `json:"rollup"`
}

// WorkloadRemainingEntitiesRuleInput - The input object used to represent a remaining entities rule.
type WorkloadRemainingEntitiesRuleInput struct {
	// The input object used to represent a rollup strategy.
	Rollup WorkloadRemainingEntitiesRuleRollupInput `json:"rollup,omitempty"`
}

// WorkloadRemainingEntitiesRuleRollup - The rollup strategy.
type WorkloadRemainingEntitiesRuleRollup struct {
	// The grouping to be applied to the remaining entities.
	GroupBy WorkloadGroupRemainingEntitiesRuleBy `json:"groupBy"`
	// The rollup strategy that is applied to a group of entities.
	Strategy WorkloadRollupStrategy `json:"strategy"`
	// Type of threshold defined for the rule. This is an optional field that only applies when strategy is WORST_STATUS_WINS. Use a threshold to roll up the worst status only after a certain amount of entities are not operational.
	ThresholdType WorkloadRuleThresholdType `json:"thresholdType,omitempty"`
	// Threshold value defined for the rule. This optional field is used in combination with thresholdType. If the threshold type is null, the threshold value will be ignored.
	ThresholdValue int `json:"thresholdValue,omitempty"`
}

// WorkloadRemainingEntitiesRuleRollupInput - The input object used to represent a rollup strategy.
type WorkloadRemainingEntitiesRuleRollupInput struct {
	// The grouping to be applied to the remaining entities.
	GroupBy WorkloadGroupRemainingEntitiesRuleBy `json:"groupBy"`
	// The rollup strategy that is applied to a group of entities.
	Strategy WorkloadRollupStrategy `json:"strategy"`
	// Type of threshold defined for the rule. This is an optional field that only applies when strategy is WORST_STATUS_WINS. Use a threshold to roll up the worst status only after a certain amount of entities are not operational.
	ThresholdType WorkloadRuleThresholdType `json:"thresholdType,omitempty"`
	// Threshold value defined for the rule. This optional field is used in combination with thresholdType. If the threshold type is null, the threshold value will be ignored.
	ThresholdValue int `json:"thresholdValue,omitempty"`
}

// WorkloadRollup - The rollup strategy.
type WorkloadRollup struct {
	// The rollup strategy that is applied to the group of entities.
	Strategy WorkloadRollupStrategy `json:"strategy"`
	// Type of threshold defined for the rule. This is an optional field that only applies when strategy is WORST_STATUS_WINS. Use a threshold to roll up the worst status only after a certain amount of entities are not operational.
	ThresholdType WorkloadRuleThresholdType `json:"thresholdType,omitempty"`
	// Threshold value defined for the rule. This optional field is used in combination with thresholdType. If the threshold type is null, the threshold value will be ignored.
	ThresholdValue int `json:"thresholdValue,omitempty"`
}

// WorkloadRollupInput - The input object used to represent a rollup strategy.
type WorkloadRollupInput struct {
	// The rollup strategy that is applied to a group of entities.
	Strategy WorkloadRollupStrategy `json:"strategy"`
	// Type of threshold defined for the rule. This is an optional field that only applies when strategy is WORST_STATUS_WINS. Use a threshold to roll up the worst status only after a certain amount of entities are not operational.
	ThresholdType WorkloadRuleThresholdType `json:"thresholdType,omitempty"`
	// Threshold value defined for the rule. This optional field is used in combination with thresholdType. If the threshold type is null, the threshold value will be ignored.
	ThresholdValue int `json:"thresholdValue,omitempty"`
}
// WorkloadRollupRuleDetails - Represents the details of a rollup rule.
type WorkloadRollupRuleDetails struct {
	// A list of entity search queries defined in the rule.
	EntitySearchQueries []string `json:"entitySearchQueries"`
	// Indicates if the rule has individual entities.
	HasIndividualEntities bool `json:"hasIndividualEntities"`
	// The amount of not operational entities after evaluating the rule.
	NotOperationalEntities int `json:"notOperationalEntities"`
	// The amount of operational entities after evaluating the rule.
	OperationalEntities int `json:"operationalEntities"`
	// The resulting type of the rollup rule.
	ResultingGroupType WorkloadResultingGroupType `json:"resultingGroupType"`
	// Type of threshold defined for the rule.
	ThresholdType WorkloadRuleThresholdType `json:"thresholdType,omitempty"`
	// The amount of entities the status of which is unknown.
	UnknownStatusEntities int `json:"unknownStatusEntities"`
}

// WorkloadRollupRuleStatusResult - A rollup rule that was involved in the calculation of the workload status.
type WorkloadRollupRuleStatusResult struct {
	// Represents the details of a rollup rule.
	RollupRuleDetails WorkloadRollupRuleDetails `json:"rollupRuleDetails,omitempty"`
	// Indicates where the status value derives from.
	Source WorkloadStatusSource `json:"source"`
	// The status of a rollup rule.
	Value WorkloadStatusValue `json:"value"`
}

// ImplementsWorkloadStatusResult marks WorkloadRollupRuleStatusResult as satisfying WorkloadStatusResultInterface.
func (x *WorkloadRollupRuleStatusResult) ImplementsWorkloadStatusResult() {}

// WorkloadScopeAccounts - Accounts that will be used to get entities from.
type WorkloadScopeAccounts struct {
	// A list of accounts that will be used to get entities from.
	AccountIDs []int `json:"accountIds"`
}

// WorkloadScopeAccountsInput - The input object containing accounts that will be used to get entities from.
type WorkloadScopeAccountsInput struct {
	// A list of accounts that will be used to get entities from.
	AccountIDs []int `json:"accountIds"`
}

// WorkloadStaticStatus - The static status configuration.
type WorkloadStaticStatus struct {
	// A description that provides additional details about the status of the workload.
	Description string `json:"description,omitempty"`
	// Whether the static status configuration is enabled or not. Note that only one static status can be enabled at a given time.
	Enabled bool `json:"enabled"`
	// The unique identifier of the static status.
	ID int `json:"id"`
	// The status of the workload.
	Status WorkloadStatusValue `json:"status"`
	// A short description of the status of the workload.
	Summary string `json:"summary,omitempty"`
}

// WorkloadStaticStatusInput - The input object used to represent the configuration of a static status.
type WorkloadStaticStatusInput struct {
	// A description that provides additional details about the status of the workload.
	Description string `json:"description,omitempty"`
	// Whether the static status configuration is enabled or not.
	Enabled bool `json:"enabled"`
	// The status of the workload.
	Status WorkloadStatusValueInput `json:"status"`
	// A short description of the status of the workload.
	Summary string `json:"summary,omitempty"`
}

// WorkloadStaticStatusResult - A static status that was involved in the calculation of the workload status.
type WorkloadStaticStatusResult struct {
	// A description that provides additional details about the status of the workload.
	Description string `json:"description,omitempty"`
	// Indicates where the status value derives from.
	Source WorkloadStatusSource `json:"source"`
	// A short description of the status of the workload.
	Summary string `json:"summary,omitempty"`
	// The value of a static status.
	Value WorkloadStatusValue `json:"value"`
}

// ImplementsWorkloadStatusResult marks WorkloadStaticStatusResult as satisfying WorkloadStatusResultInterface.
func (x *WorkloadStaticStatusResult) ImplementsWorkloadStatusResult() {}

// WorkloadStatusConfig - The configuration that defines how the status of the workload is calculated.
type WorkloadStatusConfig struct {
	// An automatic status configuration.
	Automatic WorkloadAutomaticStatus `json:"automatic,omitempty"`
	// A list of static status configurations.
	Static []WorkloadStaticStatus `json:"static"`
}

// WorkloadStatusConfigInput - The input object used to provide the configuration that defines how the status of the workload is calculated.
type WorkloadStatusConfigInput struct {
	// An input object used to represent an automatic status configuration.
	Automatic WorkloadAutomaticStatusInput `json:"automatic,omitempty"`
	// A list of static status configurations. You can only configure one static status for a workload.
	Static []WorkloadStaticStatusInput `json:"static,omitempty"`
}
// WorkloadStatusResult - The details of a status that was involved in the calculation of the workload status.
type WorkloadStatusResult struct {
	// Indicates where the status value derives from.
	Source WorkloadStatusSource `json:"source"`
	// The value of a status.
	Value WorkloadStatusValue `json:"value"`
}

// ImplementsWorkloadStatusResult marks WorkloadStatusResult as satisfying WorkloadStatusResultInterface.
func (x *WorkloadStatusResult) ImplementsWorkloadStatusResult() {}

// WorkloadUpdateAutomaticStatusInput - An input object used to represent an automatic status configuration.
type WorkloadUpdateAutomaticStatusInput struct {
	// Whether the automatic status configuration is enabled or not.
	Enabled bool `json:"enabled"`
	// An additional meta-rule that can consider all entities that haven't been evaluated by any other rule.
	RemainingEntitiesRule WorkloadRemainingEntitiesRuleInput `json:"remainingEntitiesRule,omitempty"`
	// A list of rules.
	Rules []WorkloadUpdateRegularRuleInput `json:"rules,omitempty"`
}

// WorkloadUpdateCollectionEntitySearchQueryInput - The input object used to represent the entity search query to be updated.
type WorkloadUpdateCollectionEntitySearchQueryInput struct {
	// The unique identifier of the entity search query to be updated. If not provided, a new entity search query is created.
	ID int `json:"id,omitempty"`
	// The entity search query that is used to perform the search of a group of entities.
	Query string `json:"query"`
}

// WorkloadUpdateInput - The input object used to identify the workload to be updated and the new values.
type WorkloadUpdateInput struct {
	// Relevant information about the workload.
	Description string `json:"description,omitempty"`
	// A list of entity GUIDs composing the workload.
	EntityGUIDs []entities.EntityGUID `json:"entityGuids"`
	// A list of entity search queries used to retrieve the groups of entities that compose the workload.
	EntitySearchQueries []WorkloadUpdateCollectionEntitySearchQueryInput `json:"entitySearchQueries,omitempty"`
	// The name of the workload.
	Name string `json:"name,omitempty"`
	// Accounts that will be used to get entities from.
	ScopeAccounts *WorkloadScopeAccountsInput `json:"scopeAccounts,omitempty"`
	// The configuration that defines how the status of the workload is calculated.
	StatusConfig *WorkloadUpdateStatusConfigInput `json:"statusConfig,omitempty"`
}

// WorkloadUpdateRegularRuleInput - The input object used to represent a rule.
type WorkloadUpdateRegularRuleInput struct {
	// A list of entity GUIDs composing the rule.
	EntityGUIDs []entities.EntityGUID `json:"entityGuids"`
	// A list of entity search queries used to retrieve the groups of entities that compose the rule.
	EntitySearchQueries []WorkloadUpdateCollectionEntitySearchQueryInput `json:"entitySearchQueries,omitempty"`
	// The unique identifier of the rule to be updated. If not provided, a new rule is created.
	ID int `json:"id,omitempty"`
	// The input object used to represent a roll-up strategy.
	Rollup WorkloadRollupInput `json:"rollup,omitempty"`
}

// WorkloadUpdateStaticStatusInput - The input object used to represent the configuration of a static status.
type WorkloadUpdateStaticStatusInput struct {
	// A description that provides additional details about the status of the workload.
	Description string `json:"description,omitempty"`
	// Whether the static status configuration is enabled or not.
	Enabled bool `json:"enabled"`
	// The unique identifier of the static status to be updated. If not provided, a new static status is created.
	ID int `json:"id,omitempty"`
	// The status of the workload.
	Status WorkloadStatusValueInput `json:"status"`
	// A short description of the static status.
	Summary string `json:"summary,omitempty"`
}

// WorkloadUpdateStatusConfigInput - The input object used to provide the configuration that defines how the status of the workload is calculated.
type WorkloadUpdateStatusConfigInput struct {
	// An input object used to represent an automatic status configuration.
	Automatic WorkloadUpdateAutomaticStatusInput `json:"automatic,omitempty"`
	// A list of static status configurations. You can only configure one static status for a workload.
	Static []WorkloadUpdateStaticStatusInput `json:"static,omitempty"`
}

// WorkloadWorkloadStatus - Status of the workload.
// Deserialized via the custom UnmarshalJSON because StatusDetails holds interface values.
type WorkloadWorkloadStatus struct {
	// A description that provides additional details about the status of the workload.
	Description string `json:"description,omitempty"`
	// Indicates where the status value derives from.
	Source WorkloadStatusSource `json:"source"`
	// The details of the statuses that were involved in the calculation of the workload status.
	StatusDetails []WorkloadStatusResultInterface `json:"statusDetails"`
	// A short description of the status of the workload.
	Summary string `json:"summary,omitempty"`
	// The status of the workload.
	Value WorkloadStatusValue `json:"value"`
}
// UnmarshalJSON implements json.Unmarshaler for WorkloadWorkloadStatus.
// A custom implementation is needed because StatusDetails holds
// interface values whose concrete type must be resolved per element
// via UnmarshalWorkloadStatusResultInterface.
func (x *WorkloadWorkloadStatus) UnmarshalJSON(b []byte) error {
	var objMap map[string]*json.RawMessage
	if err := json.Unmarshal(b, &objMap); err != nil {
		return err
	}
	for k, v := range objMap {
		// JSON null leaves the corresponding field at its zero value.
		if v == nil {
			continue
		}
		switch k {
		case "description":
			if err := json.Unmarshal(*v, &x.Description); err != nil {
				return err
			}
		case "source":
			if err := json.Unmarshal(*v, &x.Source); err != nil {
				return err
			}
		case "statusDetails":
			var rawMessageStatusDetails []*json.RawMessage
			if err := json.Unmarshal(*v, &rawMessageStatusDetails); err != nil {
				return err
			}
			for _, m := range rawMessageStatusDetails {
				// A null array element would otherwise dereference a nil
				// *json.RawMessage below.
				if m == nil {
					continue
				}
				detail, err := UnmarshalWorkloadStatusResultInterface(*m)
				if err != nil {
					return err
				}
				if detail != nil {
					x.StatusDetails = append(x.StatusDetails, *detail)
				}
			}
		case "summary":
			if err := json.Unmarshal(*v, &x.Summary); err != nil {
				return err
			}
		case "value":
			if err := json.Unmarshal(*v, &x.Value); err != nil {
				return err
			}
		}
	}
	return nil
}
// WorkloadStatusResultInterface - The details of a status that was involved in the calculation of the workload status.
// It is satisfied by WorkloadStatusResult, WorkloadRollupRuleStatusResult and WorkloadStaticStatusResult.
type WorkloadStatusResultInterface interface {
	ImplementsWorkloadStatusResult()
}
// UnmarshalWorkloadStatusResultInterface unmarshals the interface into the correct type
// based on the __typename field provided by GraphQL.
//
// It returns (nil, nil) for an empty JSON object, and an error when the
// object has no __typename field or when the type name does not match
// any known implementation.
func UnmarshalWorkloadStatusResultInterface(b []byte) (*WorkloadStatusResultInterface, error) {
	var rawMessageWorkloadStatusResult map[string]*json.RawMessage
	if err := json.Unmarshal(b, &rawMessageWorkloadStatusResult); err != nil {
		return nil, err
	}
	// Nothing to unmarshal.
	if len(rawMessageWorkloadStatusResult) < 1 {
		return nil, nil
	}
	rawTypeName, ok := rawMessageWorkloadStatusResult["__typename"]
	if !ok {
		// Report the keys that were present to aid debugging.
		keys := make([]string, 0, len(rawMessageWorkloadStatusResult))
		for k := range rawMessageWorkloadStatusResult {
			keys = append(keys, k)
		}
		return nil, fmt.Errorf("interface WorkloadStatusResult did not include a __typename field for inspection: %s", keys)
	}
	var typeName string
	if err := json.Unmarshal(*rawTypeName, &typeName); err != nil {
		return nil, err
	}
	switch typeName {
	case "WorkloadRollupRuleStatusResult":
		var interfaceType WorkloadRollupRuleStatusResult
		if err := json.Unmarshal(b, &interfaceType); err != nil {
			return nil, err
		}
		var result WorkloadStatusResultInterface = &interfaceType
		return &result, nil
	case "WorkloadStaticStatusResult":
		var interfaceType WorkloadStaticStatusResult
		if err := json.Unmarshal(b, &interfaceType); err != nil {
			return nil, err
		}
		var result WorkloadStatusResultInterface = &interfaceType
		return &result, nil
	}
	return nil, fmt.Errorf("interface WorkloadStatusResult was not matched against all PossibleTypes: %s", typeName)
}
|
#!/bin/sh
# Build the Sakila database Docker image from the Dockerfile in the
# current directory and tag it "sakila-db-img".
# (Removed a dangling trailing "|" that made the command a syntax error.)
docker build -t sakila-db-img .
<reponame>johanandren/lagom<filename>service/javadsl/server/src/main/scala/com/lightbend/lagom/internal/server/ServerBuilder.scala
/*
* Copyright (C) 2016 Lightbend Inc. <http://www.lightbend.com>
*/
package com.lightbend.lagom.internal.server
import java.net.URI
import java.util.function.{ BiFunction, Function => JFunction }
import java.util.{ Base64, Optional }
import javax.inject.{ Singleton, Provider, Inject }
import akka.stream.Materializer
import akka.stream.scaladsl._
import akka.stream.javadsl.{ Source => JSource }
import akka.stream.stage.{ TerminationDirective, SyncDirective, Context, PushStage }
import akka.util.ByteString
import com.lightbend.lagom.internal.api._
import com.lightbend.lagom.javadsl.api.Descriptor.{ RestCallId, Call }
import com.lightbend.lagom.javadsl.api.deser.MessageSerializer.{ NegotiatedSerializer, NegotiatedDeserializer }
import com.lightbend.lagom.javadsl.api.transport._
import com.lightbend.lagom.javadsl.api._
import com.lightbend.lagom.javadsl.api.deser._
import com.lightbend.lagom.javadsl.jackson.{ JacksonExceptionSerializer, JacksonSerializerFactory }
import com.lightbend.lagom.javadsl.server.ServiceGuiceSupport.{ ClassServiceBinding, InstanceServiceBinding }
import com.lightbend.lagom.javadsl.server.{ PlayServiceCall, ServiceGuiceSupport }
import org.pcollections.{ HashTreePMap, PSequence, TreePVector }
import play.api.mvc.{ RequestHeader => PlayRequestHeader, ResponseHeader => _, _ }
import play.api.{ Logger, Environment }
import play.api.http.HttpEntity.Strict
import play.api.http.websocket.{ CloseMessage, BinaryMessage, TextMessage, Message }
import play.api.http.{ HeaderNames, HttpConfiguration }
import play.api.inject.Injector
import play.api.libs.streams.{ AkkaStreams, Accumulator }
import play.api.routing.SimpleRouter
import play.api.routing.Router.Routes
import scala.collection.JavaConverters._
import scala.compat.java8.FutureConverters._
import scala.compat.java8.OptionConverters._
import scala.concurrent.{ Promise, ExecutionContext, Future }
import scala.util.{ Try, Right }
import scala.util.control.NonFatal
import java.util.concurrent.CompletionException
/**
 * Turns a service implementation and descriptor into a Play router.
 */
class ServerBuilder @Inject() (environment: Environment, httpConfiguration: HttpConfiguration,
                               jacksonSerializerFactory: JacksonSerializerFactory,
                               jacksonExceptionSerializer: JacksonExceptionSerializer)(implicit ec: ExecutionContext, mat: Materializer) {

  /**
   * Resolve the given service interfaces and implementations.
   *
   * @param services Pairs of service interface to implementation instance.
   * @return The resolved services, each carrying its interface, implementation and resolved descriptor.
   * @throws IllegalArgumentException if an interface does not extend Service.
   */
  def resolveServices(services: Seq[(Class[_], Any)]): ResolvedServices = {
    val resolvedDescriptors = services.map {
      // Only interfaces extending Service can be introspected for a descriptor.
      case (interface, serviceImpl) if classOf[Service].isAssignableFrom(interface) =>
        val descriptor = ServiceReader.readServiceDescriptor(
          environment.classLoader,
          interface.asSubclass(classOf[Service])
        )
        ResolvedService(interface.asInstanceOf[Class[Any]], serviceImpl, resolveDescriptor(descriptor))
      case (interface, _) =>
        throw new IllegalArgumentException(s"Don't know how to load services that don't implement Service: $interface")
    }
    ResolvedServices(resolvedDescriptors)
  }

  /**
   * Resolve the given descriptor to the implementation of the service,
   * substituting the Jackson placeholder serializer factory and exception
   * serializer with the injected implementations.
   *
   * @param descriptor The descriptor to resolve.
   * @return The resolved descriptor.
   */
  def resolveDescriptor(descriptor: Descriptor): Descriptor = {
    ServiceReader.resolveServiceDescriptor(descriptor, environment.classLoader,
      Map(JacksonPlaceholderSerializerFactory -> jacksonSerializerFactory),
      Map(JacksonPlaceholderExceptionSerializer -> jacksonExceptionSerializer))
  }

  /**
   * Create a service info for the given interface.
   *
   * @param interface The interface to create the service info for; it must extend Service.
   * @return The service info, named after the read descriptor.
   * @throws IllegalArgumentException if the interface does not extend Service.
   */
  def createServiceInfo(interface: Class[_]): ServiceInfo = {
    if (classOf[Service].isAssignableFrom(interface)) {
      val descriptor = ServiceReader.readServiceDescriptor(
        environment.classLoader,
        interface.asSubclass(classOf[Service])
      )
      new ServiceInfo(descriptor.name())
    } else {
      throw new IllegalArgumentException(s"Don't know how to load services that don't implement Service: $interface")
    }
  }
}
/** All services resolved for this server, each with its resolved descriptor. */
case class ResolvedServices(services: Seq[ResolvedService[_]])
/** A service interface paired with its implementation instance and resolved descriptor. */
case class ResolvedService[T](interface: Class[T], service: T, descriptor: Descriptor)
@Singleton
class ResolvedServicesProvider(bindings: Seq[ServiceGuiceSupport.ServiceBinding[_]]) extends Provider[ResolvedServices] {

  // Convenience constructor for callers that hold the bindings as a Java array.
  def this(bindings: Array[ServiceGuiceSupport.ServiceBinding[_]]) = this(bindings.toSeq)

  // Field injection: this provider is constructed manually (with the bindings)
  // before the injector exists, so constructor injection is not possible here.
  @Inject var serverBuilder: ServerBuilder = null
  @Inject var injector: Injector = null

  // Lazy so that the injected fields above are populated before first use.
  // Class bindings are instantiated through the injector; instance bindings
  // are used as-is.
  lazy val get = {
    serverBuilder.resolveServices(bindings.map {
      case instance: InstanceServiceBinding[_] => (instance.serviceInterface, instance.service)
      case clazz: ClassServiceBinding[_] => (clazz.serviceInterface, injector.instanceOf(clazz.serviceImplementation))
    })
  }
}
@Singleton
class ServiceRouter @Inject() (resolvedServices: ResolvedServices, httpConfiguration: HttpConfiguration)(implicit ec: ExecutionContext, mat: Materializer) extends SimpleRouter {

  // One router per resolved service, each covering all of that service's calls.
  private val serviceRouters = resolvedServices.services.map { service =>
    new SingleServiceRouter(service.descriptor, service.descriptor.calls.asScala.map { call =>
      ServiceRoute(call, service.service)
    }, httpConfiguration)
  }

  // Chain the per-service routers; the first one whose partial function is
  // defined for the request handles it.
  override val routes: Routes =
    serviceRouters.foldLeft(PartialFunction.empty[PlayRequestHeader, Handler])((routes, router) => routes.orElse(router.routes))

  override def documentation: Seq[(String, String, String)] = serviceRouters.flatMap(_.documentation)
}
/**
 * A single routable call: pairs a descriptor call with the service
 * implementation instance it is invoked on.
 */
case class ServiceRoute(call: Descriptor.Call[_, _], service: Any) {

  // Path pattern derived from the call id, used to match requests and to
  // extract path/query parameters.
  val path = Path.fromCallId(call.callId)

  // REST calls carry an explicit HTTP method; otherwise POST when the call
  // uses a request body, GET when it does not.
  val method = call.callId match {
    case rest: RestCallId => rest.method
    case _ => if (call.requestSerializer.isUsed) {
      Method.POST
    } else {
      Method.GET
    }
  }

  // A call is served over WebSockets when either side is streamed.
  val isWebSocket = call.requestSerializer.isInstanceOf[StreamedMessageSerializer[_]] ||
    call.responseSerializer.isInstanceOf[StreamedMessageSerializer[_]]

  // NOTE(review): non-exhaustive match — a holder that is not a
  // MethodServiceCallHolder raises a MatchError here; presumably that cannot
  // happen after descriptor resolution, but confirm.
  val holder: MethodServiceCallHolder = call.serviceCallHolder() match {
    case holder: MethodServiceCallHolder => holder
  }

  // Instantiate the service call with the raw path/query parameters extracted
  // from the request.
  def createServiceCall(params: Seq[Seq[String]]) = {
    holder.create(service, params).asInstanceOf[ServiceCall[Any, Any]]
  }
}
object SingleServiceRouter {
  /**
   * RFC 6455 Section 5.5 - maximum control frame size is 125 bytes.
   * Used below to decide when a serialized error message must be truncated
   * before being sent in a WebSocket close frame.
   */
  val WebSocketControlFrameMaxLength = 125
}
/**
 * Routes the calls of a single service descriptor: strict request/response
 * calls become Play actions, anything streamed becomes a WebSocket.
 */
class SingleServiceRouter(descriptor: Descriptor, serviceRoutes: Seq[ServiceRoute], httpConfiguration: HttpConfiguration)(implicit ec: ExecutionContext, mat: Materializer) extends SimpleRouter {

  import SingleServiceRouter._

  /**
   * The routes partial function.
   */
  override def routes: Routes = Function.unlift { request =>
    val requestHeader = toRequestHeader(request)
    val isWebSocket = request.headers.get(HeaderNames.UPGRADE).contains("websocket")

    serviceRoutes.collectFirst(Function.unlift { route =>
      // We match by method, but since we ignore the method if it's a WebSocket (because WebSockets require that GET
      // is used) we also match if it's a WebSocket request and this can be handled as a WebSocket.
      if (route.method.name == request.method || (isWebSocket && route.isWebSocket)) {
        route.path.extract(requestHeader.uri.getRawPath, request.queryString).map { params =>
          val serviceCall = route.createServiceCall(params)
          // If both request and response are strict, handle it using an action, otherwise handle it using a websocket
          (route.call.requestSerializer, route.call.responseSerializer) match {
            case (strictRequest: StrictMessageSerializer[Any], strictResponse: StrictMessageSerializer[Any]) =>
              action(route.call.asInstanceOf[Call[Any, Any]], descriptor, strictRequest, strictResponse,
                requestHeader, serviceCall)
            case _ => websocket(route.call.asInstanceOf[Call[Any, Any]], descriptor, requestHeader, serviceCall)
          }
        }
      } else None
    })
  }

  // Body parser that buffers the entire request body in memory, bounded by the
  // configured max memory buffer size (oversize bodies surface as a Left).
  private val inMemoryBodyParser = BodyParsers.parse.maxLength(httpConfiguration.parser.maxMemoryBuffer, BodyParser { req =>
    Accumulator(Sink.fold[ByteString, ByteString](ByteString.empty)((state, bs) => state ++ bs)).map(Right.apply)
  })

  /**
   * Create the action.
   */
  private def action[Request, Response](
    call: Call[Request, Response], descriptor: Descriptor,
    requestSerializer: StrictMessageSerializer[Request], responseSerializer: StrictMessageSerializer[Response],
    requestHeader: RequestHeader, serviceCall: ServiceCall[Request, Response]
  ): EssentialAction = {
    serviceCall match {
      // If it's a Play service call, then rather than creating the action directly, we let it create the action, and
      // pass it a callback that allows it to convert a service call into an action.
      case playServiceCall: PlayServiceCall[Request, Response] =>
        playServiceCall.invoke(
          new java.util.function.Function[ServiceCall[Request, Response], play.mvc.EssentialAction] {
            override def apply(serviceCall: ServiceCall[Request, Response]): play.mvc.EssentialAction = {
              createAction(serviceCall, call, descriptor, requestSerializer, responseSerializer, requestHeader).asJava
            }
          }
        )
      case _ =>
        createAction(serviceCall, call, descriptor, requestSerializer, responseSerializer, requestHeader)
    }
  }

  /**
   * Create an action to handle the given service call. All error handling is done here.
   */
  private def createAction[Request, Response](
    serviceCall: ServiceCall[Request, Response], call: Call[Request, Response], descriptor: Descriptor,
    requestSerializer: StrictMessageSerializer[Request], responseSerializer: StrictMessageSerializer[Response],
    requestHeader: RequestHeader
  ) = EssentialAction { request =>
    // Both synchronous throws and failed futures are funnelled through
    // logException + exceptionToResult.
    try {
      handleServiceCall(serviceCall, descriptor, requestSerializer, responseSerializer, requestHeader, request).recover {
        case NonFatal(e) =>
          logException(e, descriptor, call)
          exceptionToResult(descriptor.exceptionSerializer, requestHeader, e)
      }
    } catch {
      case NonFatal(e) =>
        logException(e, descriptor, call)
        Accumulator.done(exceptionToResult(
          descriptor.exceptionSerializer,
          requestHeader, e
        ))
    }
  }

  /**
   * Handle a regular service call, that is, either a ServerServiceCall, or a plain ServiceCall.
   */
  private def handleServiceCall[Request, Response](
    serviceCall: ServiceCall[Request, Response], descriptor: Descriptor,
    requestSerializer: StrictMessageSerializer[Request], responseSerializer: StrictMessageSerializer[Response],
    requestHeader: RequestHeader, playRequestHeader: PlayRequestHeader
  ): Accumulator[ByteString, Result] = {
    val requestMessageDeserializer = requestSerializer.deserializer(requestHeader.protocol)

    // Buffer the body in memory
    inMemoryBodyParser(playRequestHeader).mapFuture {
      // Error handling.
      // If it's left of a result (which this particular body parser should never return) just return that result.
      case Left(result) => Future.successful(result)
      // If the payload was too large, throw that exception (exception serializer will handle it later).
      case Right(Left(_)) => throw new PayloadTooLarge("Request body larger than " + httpConfiguration.parser.maxMemoryBuffer)
      // Body was successfully buffered.
      case Right(Right(body)) =>
        // Deserialize request
        val request = requestMessageDeserializer.deserialize(body)

        // Invoke the service call
        invokeServiceCall(serviceCall, requestHeader, request).map {
          case (responseHeader, response) =>
            // Serialize the response body
            val serializer = responseSerializer.serializerForResponse(requestHeader.acceptedResponseProtocols())
            val responseBody = serializer.serialize(response)

            // If no content type was defined by the service call itself, then replace the protocol with the
            // serializers protocol
            val responseHeaderWithProtocol = if (!responseHeader.protocol.contentType.isPresent) {
              responseHeader.withProtocol(serializer.protocol())
            } else responseHeader

            // Transform the response header
            val transformedResponseHeader = descriptor.headerFilter.transformServerResponse(
              responseHeaderWithProtocol,
              requestHeader
            )

            // And create the result
            Results.Status(transformedResponseHeader.status).sendEntity(Strict(
              responseBody,
              transformedResponseHeader.protocol.toContentTypeHeader.asScala
            )).withHeaders(toResponseHeaders(transformedResponseHeader): _*)
        }
    }
  }

  /**
   * Log an exception at a severity matching its type: NotFound/Forbidden are
   * not logged at all, other client errors as warnings, everything else as an
   * error. CompletionExceptions are unwrapped first.
   */
  private def logException(exc: Throwable, descriptor: Descriptor, call: Call[_, _]) = {
    def log = Logger(descriptor.name)
    val cause = exc match {
      case c: CompletionException => c.getCause
      case e => e
    }
    cause match {
      case _: NotFound | _: Forbidden => // no logging
      case e @ (_: UnsupportedMediaType | _: PayloadTooLarge | _: NotAcceptable) =>
        log.warn(e.getMessage)
      case e =>
        log.error(s"Exception in ${call.callId()}", e)
    }
  }

  /**
   * Converts an exception to a result, using the configured exception serializer.
   */
  private def exceptionToResult(exceptionSerializer: ExceptionSerializer, requestHeader: RequestHeader, e: Throwable): Result = {
    val rawExceptionMessage = exceptionSerializer.serialize(e, requestHeader.acceptedResponseProtocols)
    val responseHeader = descriptor.headerFilter.transformServerResponse(new ResponseHeader(
      rawExceptionMessage.errorCode.http,
      rawExceptionMessage.protocol,
      HashTreePMap.empty()
    ), requestHeader)
    Results.Status(responseHeader.status).sendEntity(Strict(
      rawExceptionMessage.message,
      responseHeader.protocol.toContentTypeHeader.asScala
    )).withHeaders(toResponseHeaders(responseHeader): _*)
  }

  /**
   * Convert a Play (Scala) request header to a Lagom request header.
   */
  private def toRequestHeader(rh: PlayRequestHeader): RequestHeader = {
    val requestHeader = new RequestHeader(
      new Method(rh.method),
      URI.create(rh.uri),
      MessageProtocol.fromContentTypeHeader(rh.headers.get(HeaderNames.CONTENT_TYPE).asJava),
      TreePVector.from(rh.acceptedTypes.map { mediaType =>
        new MessageProtocol(
          Optional.of(s"${mediaType.mediaType}/${mediaType.mediaSubType}"),
          mediaType.parameters.find(_._1 == "charset").flatMap(_._2).asJava, Optional.empty()
        )
      }.asJava),
      Optional.empty(),
      rh.headers.toMap.foldLeft(HashTreePMap.empty[String, PSequence[String]]) {
        case (map, (name, values)) => map.plus(name, TreePVector.from(values.asJava))
      }
    )
    // Give the descriptor's header filter a chance to rewrite the header.
    descriptor.headerFilter.transformServerRequest(requestHeader)
  }

  /**
   * Convert a Lagom response header to a Play response header map.
   * Note: only the first value of each multi-valued header is kept.
   */
  private def toResponseHeaders(responseHeader: ResponseHeader) = {
    responseHeader.headers().asScala.toSeq.filter(!_._2.isEmpty).map {
      case (key, values) => key -> values.asScala.head
    }
  }

  /**
   * Handle a service call as a WebSocket.
   */
  private def websocket[Request, Response](call: Call[Request, Response], descriptor: Descriptor,
    requestHeader: RequestHeader, serviceCall: ServiceCall[Request, Response]): WebSocket = WebSocket.acceptOrResult { rh =>
    val requestProtocol = requestHeader.protocol
    val acceptHeaders = requestHeader.acceptedResponseProtocols

    // We need to return a future. Also, we need to handle any exceptions thrown. By doing this asynchronously, we can
    // ensure all exceptions are handled in one place, in the future recover block.
    Future {
      // A promise for request body, which may be a stream or a single message, depending on the service call.
      // This will be redeemed by the incoming sink, and on redemption, we'll be able to invoke the service call.
      val requestPromise = Promise[Request]()

      // This promise says when the incoming stream has cancelled. We block the cancel of the incoming stream,
      // but and don't actually close the socket until the outgoing stream finishes. However, for strict outgoing
      // responses, that will be immediately after that response has been sent, so in that case we need to ensure
      // that that outgoing stream close is delayed until the incoming cancels.
      val incomingCancelled = Promise[None.type]()

      val requestMessageDeserializer = call.requestSerializer.deserializer(requestProtocol)
      val responseMessageSerializer = call.responseSerializer.serializerForResponse(acceptHeaders)

      // The incoming sink is the sink that we're going to return to Play to handle incoming websocket messages.
      val incomingSink: Sink[ByteString, _] = call.requestSerializer match {
        // If it's a strict message serializer, we return a sink that reads one message, deserializes that message, and
        // then redeems the request promise with that message.
        case strict: StrictMessageSerializer[Request] =>
          val deserializer = requestMessageDeserializer.asInstanceOf[NegotiatedDeserializer[Request, ByteString]]
          if (strict.isUsed) {
            AkkaStreams.ignoreAfterCancellation[ByteString]
              .toMat(Sink.headOption)(Keep.right)
              .mapMaterializedValue(_.map { maybeBytes =>
                val bytes = maybeBytes.getOrElse(ByteString.empty)
                requestPromise.complete(Try(deserializer.deserialize(bytes)))
                incomingCancelled.success(None)
              })
          } else {
            // If it's not used, don't wait for the first message (because no message will come), just ignore the
            // whole stream
            requestPromise.complete(Try(deserializer.deserialize(ByteString.empty)))
            incomingCancelled.success(None)
            Sink.ignore
          }
        // If it's a streamed message serializer, we return a sink that when materialized (which effectively represents
        // when the WebSocket handshake is complete), will redeem the request promise with a source that is hooked up
        // directly to this sink.
        case streamed: StreamedMessageSerializer[_] =>
          val deserializer = requestMessageDeserializer.asInstanceOf[NegotiatedDeserializer[Request, JSource[ByteString, _]]]
          // Stage that observes downstream cancellation so incomingCancelled
          // can be redeemed while passing elements through unchanged.
          val captureCancel = Flow[ByteString].transform(() => new PushStage[ByteString, ByteString] {
            override def onDownstreamFinish(ctx: Context[ByteString]): TerminationDirective = {
              incomingCancelled.success(None)
              ctx.finish()
            }
            override def onPush(elem: ByteString, ctx: Context[ByteString]): SyncDirective = ctx.push(elem)
          })
          AkkaStreams.ignoreAfterCancellation via captureCancel to Sink.asPublisher[ByteString](fanout = false).mapMaterializedValue { publisher =>
            requestPromise.complete(Try(deserializer.deserialize(JSource.fromPublisher(publisher))))
          }
      }

      // The outgoing source is the source that we're going to return to Play to produce outgoing websocket messages.
      val outgoingSource: Source[ByteString, _] = Source.asSubscriber[ByteString].mapMaterializedValue { subscriber =>
        (for {
          // First we need to get the request
          request <- requestPromise.future
          // Then we can invoke the service call
          (responseHeader, response) <- invokeServiceCall(serviceCall, requestHeader, request)
        } yield {
          if (responseHeader != ResponseHeader.OK) {
            Logger.warn("Response header contains a custom status code and/or custom protocol and/or custom headers, " +
              "but this was invoked by a transport (eg WebSockets) that does not allow sending custom headers. " +
              "This response header will be ignored: " + responseHeader)
          }

          val outgoingSource = call.responseSerializer() match {
            // If strict, then the source will be a single source of the response message, concatenated with a lazy
            // empty source so that the incoming stream is still able to receive messages.
            case strict: StrictMessageSerializer[Response] =>
              val serializer = responseMessageSerializer.asInstanceOf[NegotiatedSerializer[Response, ByteString]]
              Source.single(serializer.serialize(response)).concat(
                // The outgoing is responsible for closing, however when the response is strict, this needs to be in
                // response to the incoming cancelling, since otherwise it will always close immediately after
                // sending the strict response. We can't just let the incoming cancel directly, because that
                // introduces a race condition, the strict message from the Source.single may not reach the connection
                // before the cancel is received and closes the connection.
                Source.maybe[ByteString].mapMaterializedValue(_.completeWith(incomingCancelled.future))
              )
            // If streamed, then the source is just the source stream.
            case streamed: StreamedMessageSerializer[Response] =>
              val serializer = responseMessageSerializer.asInstanceOf[NegotiatedSerializer[Response, JSource[ByteString, _]]]
              serializer.serialize(response).asScala
          }

          // Connect the source to the subscriber
          outgoingSource.runWith(Sink.fromSubscriber(subscriber))
        }).recover {
          case NonFatal(e) =>
            Source.failed(e).runWith(Sink.fromSubscriber(subscriber))
        }
      }

      // Todo: Add headers/content-type to response when https://github.com/playframework/playframework/issues/5322 is
      // implemented

      // First, a flow that converts Play WebSocket messages to ByteStrings. Then it goes through our incomingSink and
      // outgoingSource, then gets mapped back to Play WebSocket messages.
      Right(Flow[Message].collect {
        case TextMessage(text) => ByteString(text)
        case BinaryMessage(bytes) => bytes
        case CloseMessage(statusCode, reason) if statusCode.exists(_ != 1000) =>
          // This is an error, deserialize and throw
          val messageBytes = if (requestProtocol.isText) {
            ByteString(reason, requestProtocol.charset.get)
          } else {
            Try(ByteString(Base64.getDecoder.decode(reason))).toOption.getOrElse(ByteString(reason))
          }
          val rawExceptionMessage = new RawExceptionMessage(
            TransportErrorCode.fromWebSocket(statusCode.get),
            requestProtocol, messageBytes
          )
          throw descriptor.exceptionSerializer.deserialize(rawExceptionMessage)
      } via Flow.fromSinkAndSource(incomingSink, outgoingSource) via Flow[ByteString].map { bytes =>
        if (responseMessageSerializer.protocol.isText) {
          TextMessage(bytes.decodeString(responseMessageSerializer.protocol.charset.get))
        } else {
          BinaryMessage(bytes)
        }
      }.recover {
        case NonFatal(e) =>
          logException(e, descriptor, call)
          exceptionToCloseMessage(e, acceptHeaders)
      })
    }.recover {
      case NonFatal(e) =>
        logException(e, descriptor, call)
        Left(exceptionToResult(descriptor.exceptionSerializer, requestHeader, e))
    }
  }

  /** Convert an exception to a close message */
  private def exceptionToCloseMessage(exception: Throwable, acceptHeaders: PSequence[MessageProtocol]) = {
    // First attempt to serialize the exception using the exception serializer
    val rawExceptionMessage = descriptor.exceptionSerializer.serialize(exception, acceptHeaders)
    val safeExceptionMessage = if (rawExceptionMessage.message().size > WebSocketControlFrameMaxLength) {
      // If the serializer produced an error message that was too big for WebSockets, fall back to a simpler error
      // message.
      val truncatedExceptionMessage = descriptor.exceptionSerializer.serialize(
        new TransportException(
          rawExceptionMessage.errorCode(),
          new ExceptionMessage("Error message truncated", "")
        ), acceptHeaders
      )
      // It may be that the serialized exception message with no detail is still too big for a WebSocket, fall back to
      // plain text message.
      if (truncatedExceptionMessage.message().size > WebSocketControlFrameMaxLength) {
        new RawExceptionMessage(
          rawExceptionMessage.errorCode(),
          new MessageProtocol().withContentType("text/plain").withCharset("utf-8"),
          ByteString.fromString("Error message truncated")
        )
      } else truncatedExceptionMessage
    } else rawExceptionMessage

    CloseMessage(Some(safeExceptionMessage.errorCode().webSocket()), safeExceptionMessage.messageAsText())
  }

  /**
   * Supply the request header to the service call
   */
  def invokeServiceCall[Request, Response](
    serviceCall: ServiceCall[Request, Response],
    requestHeader: RequestHeader, request: Request
  ): Future[(ResponseHeader, Response)] = {
    serviceCall match {
      case play: PlayServiceCall[_, _] =>
        throw new IllegalStateException("Can't invoke a Play service call for WebSockets or as a service call passed in by another Play service call: " + play)
      case _ =>
        serviceCall.handleRequestHeader(new JFunction[RequestHeader, RequestHeader] {
          override def apply(t: RequestHeader) = requestHeader
        }).handleResponseHeader(new BiFunction[ResponseHeader, Response, (ResponseHeader, Response)] {
          override def apply(header: ResponseHeader, response: Response) = header -> response
        }).invoke(request).toScala
    }
  }
}
|
#!/bin/bash
set -e

# Provisions the demo resource group plus a Cosmos DB (MongoDB API) account
# and an Azure SQL server/database, then prints both connection strings.
#
# NOTE(review): credentials and the subscription id are hard-coded below.
# Acceptable for a throwaway demo, but move them to environment variables or
# a secret store before reusing this anywhere real.

# Credentials
azureResourceGroup='ignitemod10'
adminUser='demouser'
adminPassword='Azuredemo@2020'
subname='11321b33-1608-46d8-8a51-5d32cdd1b38a'
location='southeastasia'

# DB Name
cosmosdbname='ignitemod10-cosmos-money'
sqldbname='ignitemod10-sql-money'

# Create Resource Group
# (all expansions quoted so values can never be word-split or globbed)
az group create --subscription "$subname" --name "$azureResourceGroup" --location "$location"

# Create Azure Cosmos DB
az cosmosdb create --name "$cosmosdbname" --resource-group "$azureResourceGroup" --kind MongoDB --subscription "$subname"
cosmosConnectionString=$(az cosmosdb list-connection-strings --name "$cosmosdbname" --resource-group "$azureResourceGroup" --query connectionStrings[0].connectionString -o tsv --subscription "$subname")

# Create Azure SQL instance
az sql server create --location "$location" --resource-group "$azureResourceGroup" --name "$sqldbname" --admin-user "$adminUser" --admin-password "$adminPassword" --subscription "$subname"
# 0.0.0.0-0.0.0.0 is the special rule that allows access from Azure services.
az sql server firewall-rule create --resource-group "$azureResourceGroup" --server "$sqldbname" --name azure --start-ip-address 0.0.0.0 --end-ip-address 0.0.0.0 --subscription "$subname"
az sql db create --resource-group "$azureResourceGroup" --server "$sqldbname" --name tailwind --subscription "$subname"
sqlConnectionString=$(az sql db show-connection-string --server "$sqldbname" --name tailwind -c ado.net --subscription "$subname")

# Emit the connection strings for the caller to capture.
echo "$cosmosConnectionString"
echo "$sqlConnectionString"
|
// ============================================================================
// Interview Problem: StackQueue
// ============================================================================
//
// -------
// Prompt:
// -------
//
// Implement your preferred Stack implementation, including the methods:
//
// - Push
// - Pop
// - Size
//
// Then, implement a Queue by instantiating two Stack instances for storage.
//
// The StackQueue implementation should include the following methods:
//
// - Enqueue
// - Dequeue
// - Size
//
// -----------
// Let's code!
// -----------
// A single linked element holding a value and a pointer to the next node.
// Used by both the Stack and (indirectly) the StackQueue below.
class Node {
  constructor(val) {
    this.value = val;
    this.next = null;
  }
}
// Singly-linked stack. `top` is the most recently pushed node; `bottom`
// tracks the oldest node; `length` counts the stored nodes.
class Stack {
  constructor() {
    this.top = null;
    this.bottom = null;
    this.length = 0;
  }

  // Push a node on top of the stack and return the new length.
  push(node) {
    if (this.length === 0) {
      this.bottom = node;
    } else {
      node.next = this.top;
    }
    this.top = node;
    this.length += 1;
    return this.length;
  }

  // Remove and return the top node, or null when the stack is empty.
  pop() {
    if (this.length === 0) return null;
    const removed = this.top;
    this.top = removed.next;
    this.length -= 1;
    if (this.length === 0) {
      this.top = null;
      this.bottom = null;
    }
    return removed;
  }

  // Number of nodes currently on the stack.
  size() {
    return this.length;
  }
}
// FIFO queue built from two stacks, as the prompt requires.
//
// Fix: the original additionally maintained a full parallel singly-linked
// list (`front`/`back`) that was only ever read for an emptiness check, and
// it allocated two Node objects per enqueue. The duplicate bookkeeping is
// removed; external behaviour (return values of all three methods) is
// unchanged.
class StackQueue {
  constructor() {
    this.inStack = new Stack(); // receives enqueued nodes
    this.outStack = new Stack(); // holds nodes in reversed (FIFO) order
  }

  // Add a value to the back of the queue; returns the new queue size.
  enqueue(val) {
    this.inStack.push(new Node(val));
    return this.size();
  }

  // Remove and return the node at the front of the queue, or null if empty.
  // Refilling outStack only when it is empty makes this amortized O(1).
  dequeue() {
    if (this.size() === 0) return null;
    if (this.outStack.size() === 0) {
      while (this.inStack.size() > 0) {
        this.outStack.push(this.inStack.pop());
      }
    }
    return this.outStack.pop();
  }

  // Total number of queued elements across both stacks.
  size() {
    return this.inStack.size() + this.outStack.size();
  }
}
// Export all three classes for the exercise's test harness.
exports.Node = Node;
exports.Stack = Stack;
exports.StackQueue = StackQueue;
|
# Generates CommonJS JavaScript plus grpc-web TypeScript definitions for
# every .proto file under ../core/proto.

# Path to this plugin
PROTOC_GEN_TS_PATH="./node_modules/.bin/protoc-gen-ts"

# Directory to write generated code to (.js and .d.ts files)
OUT_DIR="./src/types/proto"

# Quoted so a path containing spaces cannot word-split.
mkdir -p "$OUT_DIR"

protoc \
    --plugin="protoc-gen-ts=${PROTOC_GEN_TS_PATH}" \
    --js_out="import_style=commonjs,binary:${OUT_DIR}" \
    --ts_out="service=grpc-web:${OUT_DIR}" \
    -I../core/proto \
    ../core/proto/*.proto
|
<gh_stars>1-10
// Package entry point: re-export the runner implementation.
const run = require("./code/run");
module.exports = run;
|
// https://open.kattis.com/problems/maxcolinear
#include <bits/stdc++.h>
using namespace std;
using ii = tuple<int, int>;
using vii = vector<ii>;
// Kattis "maxcolinear": for each test case, report the maximum number of
// collinear points among n input points. Input is a sequence of cases, each
// "n" followed by n coordinate pairs; a case with n == 0 terminates input.
int main() {
    ios::sync_with_stdio(0);
    cin.tie(0);
    while (true) {
        int n, x, y;
        cin >> n;
        if (!n) break;
        vii a(n);
        for (int i = 0; i < n; i++) {
            cin >> x >> y;
            a[i] = {x, y};
        }
        // O(n^3): for every ordered base pair (i, j), count the additional
        // points k whose cross product with direction (j - i) is zero,
        // i.e. that lie on the same line.
        int m = 1;
        for (int i = 0; i < n; i++) {
            int x0, y0, x1, y1;
            tie(x0, y0) = a[i];
            for (int j = i + 1; j < n; j++) {
                int c = 2; // the pair (i, j) itself is always collinear
                tie(x1, y1) = a[j];
                int x = x1 - x0, y = y1 - y0;
                for (int k = j + 1; k < n; k++) {
                    tie(x1, y1) = a[k];
                    // Zero cross product => point k is on the line through i and j.
                    if (!(x * (y1 - y0) - (x1 - x0) * y)) c++;
                }
                if (c > m) m = c;
            }
        }
        cout << m << "\n";
    }
}
|
<filename>src/application/timeline/Channel.ts<gh_stars>1-10
import { ChannelId, UserId } from "../../domain/types"
import { IChannelTimelineQueryRepository, Parameters } from "../../domain/repository/query/ChannelTimeline"
import { ChannelReadStateEntity } from "../../domain/entity/ChannelReadState"
import { IChannelReadStateCommandRepository } from "../../domain/repository/command/ChannelReadState"
import { IChannelReadStateQueryRepository } from "../../domain/repository/query/ChannelReadState"
import { MessageEntity } from "../../domain/entity/Message"
import { ErrorCodes as ServiceErrorCodes } from "../../domain/permission/CreateChannel"
// Error codes surfaced by the channel timeline application layer, merged
// with the codes of the CreateChannel permission service.
export const ErrorCodes = {
    InternalError: "internal_error",
    NameNotMeetPolicy: "name_not_meet_policy",
    ParentNotFound: "parent_not_found",
    ...ServiceErrorCodes,
} as const
// Returns the newer of the first and last message in `messageList`.
//
// Assumes the list is sorted by creation time (ascending or descending, per
// the timeline query's sortOrder), so the newest element sits at one of the
// two ends — TODO confirm against the repository contract.
//
// Fix: the original dereferenced messageList[0] unconditionally and threw a
// TypeError on an empty array; an empty list now returns undefined. (The
// only current caller guards with length > 0, so behaviour there is
// unchanged.)
const getLatestMessage = (messageList: MessageEntity[]) => {
    if (messageList.length === 0) {
        return undefined
    }
    if (messageList.length === 1) {
        return messageList[0]
    }
    const first = messageList[0]
    const last = messageList[messageList.length - 1]
    return first.createdAt.getTime() > last.createdAt.getTime() ? first : last
}
/**
 * Application service for reading a channel's message timeline.
 *
 * Besides listing messages it upserts the requesting user's read state so
 * the newest fetched message is recorded as read.
 */
export class ChannelTimelineApplication {
    private channelTimelineQueryRepository: IChannelTimelineQueryRepository
    private channelReadStateQueryRepository: IChannelReadStateQueryRepository
    private channelReadStateCommandRepository: IChannelReadStateCommandRepository
    constructor(
        channelTimelineQueryRepository: IChannelTimelineQueryRepository,
        channelReadStateQueryRepository: IChannelReadStateQueryRepository,
        channelReadStateCommandRepository: IChannelReadStateCommandRepository
    ) {
        this.channelTimelineQueryRepository = channelTimelineQueryRepository
        this.channelReadStateQueryRepository = channelReadStateQueryRepository
        this.channelReadStateCommandRepository = channelReadStateCommandRepository
    }
    /**
     * Lists messages in a channel and, when any were returned, updates the
     * user's read state to point at the newest message fetched.
     */
    async listMessage({
        userId,
        channelId,
        limit,
        sortOrder,
        maxId,
        sinceId,
    }: { userId: UserId; channelId: ChannelId } & Parameters) {
        const messageList = await this.channelTimelineQueryRepository.listMessage({
            channelId,
            maxId,
            sinceId,
            limit,
            sortOrder,
        })
        if (messageList.length > 0) {
            const lastMessage = getLatestMessage(messageList)
            const oldReadState = await this.channelReadStateQueryRepository.find(channelId, userId)
            // id -1 marks a read state that has not been persisted yet; the
            // add/update branch below decides which command to issue.
            const newReadState = new ChannelReadStateEntity({
                id: oldReadState ? oldReadState.id : -1,
                channelId,
                userId,
                lastMessageId: lastMessage.id,
                lastMessageCreatedAt: lastMessage.createdAt,
            })
            if (oldReadState == null) {
                await this.channelReadStateCommandRepository.add(newReadState)
            } else {
                // Only move the read state forward in time, never backwards.
                if (newReadState.lastMessageCreatedAt.getTime() > oldReadState.lastMessageCreatedAt.getTime()) {
                    await this.channelReadStateCommandRepository.update(newReadState)
                }
            }
        }
        return messageList
    }
}
|
#!/bin/bash
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Load the shared shell configuration — presumably this provides the lg-run
# helper used below; verify against the deployed config.
. ${HOME}/etc/shell.conf

# Planet to display; first argument, defaulting to "earth".
PLANET=${1:-earth}

# set the planet using both old:
lg-run "echo $PLANET >/lg/planet"
# and new methods:
lg-run "echo \"planet=$PLANET\" >/tmp/query.txt"
|
#include <bits/stdc++.h>
using namespace std;
// Returns the maximum sum over all contiguous windows of k elements of
// arr[0..size-1]. k defaults to 4, preserving the original fixed-window
// behaviour for existing callers. Returns 0 when size < k or k <= 0.
//
// Fixes: the original seeded max_sum with 0, so an all-negative array
// wrongly returned 0 instead of its (negative) best window. It also
// recomputed each window from scratch; this version slides the window in
// O(1) per step, O(n) overall.
int max_sum_subarray(int arr[], int size, int k = 4)
{
    if (size < k || k <= 0)
        return 0;
    // Seed with the first real window so negative-only input is handled.
    int window_sum = 0;
    for (int i = 0; i < k; i++)
        window_sum += arr[i];
    int max_sum = window_sum;
    for (int start = k; start < size; start++) {
        // Slide: add the entering element, drop the leaving one.
        window_sum += arr[start] - arr[start - k];
        max_sum = std::max(window_sum, max_sum);
    }
    return max_sum;
}
// Demo driver: prints the best fixed-size window sum of the sample array
// (the window -1+4+3+2 = 8 for the original 4-element window).
int main()
{
    int arr[]={-1,4,3,2,-5,6};
    int size = sizeof(arr)/sizeof(arr[0]);
    cout<<max_sum_subarray(arr, size);
    return 0;
}
#!/bin/bash
# CI release script: configures git credentials, runs the test suite (skipped
# for tagged release builds), publishes artifacts for tags and master, and
# pushes generated documentation to the gh-pages branch.
set -e
EXIT_STATUS=0

# Git identity and token-based credentials for pushing to GitHub.
git config --global user.name "$GIT_NAME"
git config --global user.email "$GIT_EMAIL"
git config --global credential.helper "store --file=~/.git-credentials"
echo "https://$GH_TOKEN:@github.com" > ~/.git-credentials

if [[ $EXIT_STATUS -eq 0 ]]; then
  if [[ -n $TRAVIS_TAG ]]; then
    echo "Skipping Tests to Publish Release"
    ./gradlew pTML assemble --no-daemon || EXIT_STATUS=$?
  else
    ./gradlew --stop
    ./gradlew testClasses --no-daemon || EXIT_STATUS=$?
    ./gradlew --stop
    ./gradlew check --no-daemon || EXIT_STATUS=$?
  fi
fi

if [[ $EXIT_STATUS -eq 0 ]]; then
  echo "Publishing archives for branch $TRAVIS_BRANCH"
  # Publish only for tags, or for non-PR builds of master.
  if [[ -n $TRAVIS_TAG ]] || [[ $TRAVIS_BRANCH =~ ^master$ && $TRAVIS_PULL_REQUEST == 'false' ]]; then
    echo "Publishing archives"
    ./gradlew --stop
    # Tagged builds upload to Bintray and sync to Maven Central; snapshot
    # builds use the plain publish task.
    if [[ -n $TRAVIS_TAG ]]; then
      ./gradlew bintrayUpload --no-daemon --stacktrace || EXIT_STATUS=$?
      if [[ $EXIT_STATUS -eq 0 ]]; then
        ./gradlew synchronizeWithMavenCentral --no-daemon
      fi
    else
      ./gradlew publish --no-daemon --stacktrace || EXIT_STATUS=$?
    fi

    if [[ $EXIT_STATUS -eq 0 ]]; then
      ./gradlew --console=plain --no-daemon docs || EXIT_STATUS=$?
      git clone https://${GH_TOKEN}@github.com/micronaut-projects/micronaut-test.git -b gh-pages gh-pages --single-branch > /dev/null
      cd gh-pages
      # If this is the master branch then update the snapshot
      if [[ $TRAVIS_BRANCH =~ ^master|[12]\..\.x$ ]]; then
        mkdir -p snapshot
        cp -r ../build/docs/. ./snapshot/
        git add snapshot/*
      fi
      # If there is a tag present then this becomes the latest
      if [[ -n $TRAVIS_TAG ]]; then
        mkdir -p latest
        cp -r ../build/docs/. ./latest/
        git add latest/*
        # Strip the leading character of the tag (e.g. v1.2.3 -> 1.2.3), then
        # take the first four characters plus "x" for the major line (1.2.x).
        version="$TRAVIS_TAG"
        version=${version:1}
        majorVersion=${version:0:4}
        majorVersion="${majorVersion}x"
        mkdir -p "$version"
        cp -r ../build/docs/. "./$version/"
        git add "$version/*"
        mkdir -p "$majorVersion"
        cp -r ../build/docs/. "./$majorVersion/"
        git add "$majorVersion/*"
      fi
      # Push is best-effort: a failed push must not fail the build.
      git commit -a -m "Updating docs for Travis build: https://travis-ci.org/$TRAVIS_REPO_SLUG/builds/$TRAVIS_BUILD_ID" && {
        git push origin HEAD || true
      }
      cd ..
      rm -rf gh-pages
    fi
  fi
fi

exit $EXIT_STATUS
|
#!/bin/sh
# Copyright (c) 2015-2019 Contributors as noted in the AUTHORS file
#
# This file is part of Solo5, a sandboxed execution environment.
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
# AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Name of this script, used as a prefix in diagnostic messages.
prog_NAME="$(basename $0)"
# Emit a fatal error on stderr and terminate the script with status 1.
die()
{
    printf '%s: ERROR: %s\n' "${prog_NAME}" "$*" >&2
    exit 1
}
# Emit a non-fatal warning on stderr; execution continues.
warn()
{
    printf '%s: WARNING: %s\n' "${prog_NAME}" "$*" >&2
}
# Succeeds if ${CC} defines __GNUC__ with a major version >= 4. Clang also
# defines __GNUC__ for compatibility, hence "maybe" — see cc_is_gcc.
#
# Fix: the original pattern '[4-9]$' only matched single-digit majors, so
# GCC 10 and newer were not recognised; also allow multi-digit majors.
cc_maybe_gcc()
{
    ${CC} -dM -E - </dev/null | grep -Eq '^#define __GNUC__ ([4-9]|[1-9][0-9]+)$'
}
# Succeeds if ${CC} identifies itself as clang (predefines __clang__ to 1).
cc_is_clang()
{
    ${CC} -dM -E - </dev/null | grep -Eq '^#define __clang__ 1$'
}
# Succeeds if ${CC} builds position-independent executables by default,
# i.e. __PIE__ is predefined to a non-zero level.
cc_has_pie()
{
    ${CC} -dM -E - </dev/null | grep -Eq '^#define __PIE__ [1-9]$'
}
# Succeeds only for a real GCC: a GNU C version is defined AND the compiler
# is not clang (which predefines __GNUC__ for compatibility).
cc_is_gcc()
{
    cc_maybe_gcc && ! cc_is_clang
}
# Check whether ${CC} accepts the given command-line option(s) ("$@") by
# compiling a trivial program with them; output and diagnostics discarded.
gcc_check_option()
{
    ${CC} "$@" -x c -c -o /dev/null - <<EOM >/dev/null 2>&1
int main(int argc, char *argv[])
{
return 0;
}
EOM
}
gcc_check_header()
{
${CC} ${PKG_CFLAGS} -x c -o /dev/null - <<EOM >/dev/null 2>&1
#include <$@>
int main(int argc, char *argv[])
{
return 0;
}
EOM
}
gcc_check_lib()
{
${CC} -x c -o /dev/null - "$@" ${PKG_LIBS} <<EOM >/dev/null 2>&1
int main(int argc, char *argv[])
{
return 0;
}
EOM
}
ld_is_lld()
{
${LD} --version 2>&1 | grep -q '^LLD'
}
# config_host_linux: determine compiler flags and enabled targets for a
# Linux host. Populates MAKECONF_* / CONFIG_* globals consumed by the
# Makeconf heredoc at the bottom of this script.
config_host_linux()
{
# On Linux/gcc we use -nostdinc and copy all the gcc-provided headers.
cc_is_gcc || die "Only 'gcc' 4.x+ is supported on Linux"
CC_INCDIR=$(${CC} -print-file-name=include)
[ -d "${CC_INCDIR}" ] || die "Cannot determine gcc include directory"
mkdir -p ${HOST_INCDIR}
cp -R ${CC_INCDIR}/. ${HOST_INCDIR}
MAKECONF_CFLAGS="-nostdinc"
# Recent distributions now default to PIE enabled. Disable it explicitly
# if that's the case here.
# XXX: This breaks MirageOS in (at least) the build of mirage-solo5 due
# to -fno-pie breaking the build of lib/dllmirage-solo5_bindings.so.
# Keep this disabled until that is resolved.
# cc_has_pie && MAKECONF_CFLAGS="${MAKECONF_CFLAGS} -fno-pie"
# Stack smashing protection:
#
# Any GCC configured for a Linux/x86_64 target (actually, any
# glibc-based target) will use a TLS slot to address __stack_chk_guard.
# Disable this behaviour and use an ordinary global variable instead.
if [ "${CONFIG_ARCH}" = "x86_64" ] || [ "${CONFIG_ARCH}" = "ppc64le" ]; then
gcc_check_option -mstack-protector-guard=global || \
die "GCC 4.9.0 or newer is required for -mstack-protector-guard= support"
MAKECONF_CFLAGS="${MAKECONF_CFLAGS} -mstack-protector-guard=global"
fi
# If the host toolchain is NOT configured to build PIE exectuables by
# default, assume it has no support for that and apply a workaround by
# locating the spt tender starting at a virtual address of 1 GB.
if ! cc_has_pie; then
warn "Host toolchain does not build PIE executables, spt guest size will be limited to 1GB"
warn "Consider upgrading to a Linux distribution with PIE support"
CONFIG_SPT_NO_PIE=1
fi
# Tools-only builds need none of the tender/target configuration below.
[ -n "${OPT_ONLY_TOOLS}" ] && return
CONFIG_HVT=1
CONFIG_SPT=1
if ! command -v pkg-config >/dev/null; then
die "pkg-config is required"
fi
if ! pkg-config libseccomp; then
die "libseccomp development headers are required"
else
if ! pkg-config --atleast-version=2.3.3 libseccomp; then
# TODO Make this a hard error once there are no distros with
# libseccomp < 2.3.3 in the various CIs.
warn "libseccomp >= 2.3.3 is required for correct spt operation"
warn "Proceeding anyway, expect tests to fail"
elif ! pkg-config --atleast-version=2.4.1 libseccomp; then
warn "libseccomp < 2.4.1 has known vulnerabilities"
warn "Proceeding anyway, but consider upgrading"
fi
MAKECONF_SPT_CFLAGS=$(pkg-config --cflags libseccomp)
MAKECONF_SPT_LDLIBS=$(pkg-config --libs libseccomp)
fi
if ! PKG_CFLAGS=${MAKECONF_SPT_CFLAGS} gcc_check_header seccomp.h; then
die "Could not compile with seccomp.h"
fi
# Fixed typo: this previously read ${MAKECONF_SPT_LIBS}, which is never set
# anywhere (the pkg-config result above is stored in MAKECONF_SPT_LDLIBS),
# so the link check silently ran without the pkg-config-provided flags.
if ! PKG_LIBS=${MAKECONF_SPT_LDLIBS} gcc_check_lib -lseccomp; then
die "Could not link with -lseccomp"
fi
[ "${CONFIG_ARCH}" = "x86_64" ] && CONFIG_VIRTIO=1
[ "${CONFIG_ARCH}" = "x86_64" ] && CONFIG_MUEN=1
[ "${CONFIG_ARCH}" = "x86_64" ] && CONFIG_GENODE=1
# hvt is not supported on POWER.
[ "${CONFIG_ARCH}" = "ppc64le" ] && CONFIG_HVT=
}
# config_host_freebsd: determine compiler flags and enabled targets for a
# FreeBSD host (clang/x86_64 only).
config_host_freebsd()
{
# On FreeBSD/clang we use -nostdlibinc which gives us access to the
# clang-provided headers for compiler instrinsics. We copy the rest
# (std*.h, float.h and their dependencies) from the host.
cc_is_clang || die "Only 'clang' is supported on FreeBSD"
[ "${CONFIG_ARCH}" = "x86_64" ] ||
die "Only 'x86_64' is supported on FreeBSD"
INCDIR=/usr/include
# Minimal set of host headers (and their includes) needed by the crt
# environment, grouped by the subdirectory they are installed into.
SRCS_MACH="machine/_stdint.h machine/_types.h machine/endian.h \
machine/_limits.h"
SRCS_SYS="sys/_null.h sys/_stdint.h sys/_types.h sys/cdefs.h \
sys/endian.h sys/_stdarg.h"
SRCS_X86="x86/float.h x86/_stdint.h x86/stdarg.h x86/endian.h \
x86/_types.h x86/_limits.h"
SRCS="float.h osreldate.h stddef.h stdint.h stdbool.h stdarg.h"
mkdir -p ${HOST_INCDIR}
mkdir -p ${HOST_INCDIR}/machine ${HOST_INCDIR}/sys ${HOST_INCDIR}/x86
for f in ${SRCS_MACH}; do cp -f ${INCDIR}/$f ${HOST_INCDIR}/machine; done
for f in ${SRCS_SYS}; do cp -f ${INCDIR}/$f ${HOST_INCDIR}/sys; done
for f in ${SRCS_X86}; do cp -f ${INCDIR}/$f ${HOST_INCDIR}/x86; done
for f in ${SRCS}; do cp -f ${INCDIR}/$f ${HOST_INCDIR}; done
# Stack smashing protection:
#
# FreeBSD toolchains use a global (non-TLS) __stack_chk_guard by
# default on x86_64, so there is nothing special we need to do here.
MAKECONF_CFLAGS="-nostdlibinc"
# Tools-only builds need none of the target configuration below.
[ -n "${OPT_ONLY_TOOLS}" ] && return
CONFIG_HVT=1
CONFIG_SPT=
[ "${CONFIG_ARCH}" = "x86_64" ] && CONFIG_VIRTIO=1
[ "${CONFIG_ARCH}" = "x86_64" ] && CONFIG_MUEN=1
CONFIG_GENODE=
}
# config_host_openbsd: determine compiler flags and enabled targets for an
# OpenBSD host (clang/x86_64 only; requires ld.lld).
config_host_openbsd()
{
# On OpenBSD/clang we use -nostdlibinc which gives us access to the
# clang-provided headers for compiler instrinsics. We copy the rest
# (std*.h, cdefs.h and their dependencies) from the host.
cc_is_clang || die "Only 'clang' is supported on OpenBSD"
[ "${CONFIG_ARCH}" = "x86_64" ] ||
die "Only 'x86_64' is supported on OpenBSD"
# GNU ld cannot be used here; force lld if ${LD} is anything else.
if ! ld_is_lld; then
LD='/usr/bin/ld.lld'
warn "Using GNU 'ld' is not supported on OpenBSD"
warn "Falling back to 'ld.lld'"
[ -e ${LD} ] || die "/usr/bin/ld.lld does not exist"
fi
INCDIR=/usr/include
# Minimal set of host headers needed by the crt environment, grouped by
# the subdirectory they are installed into.
SRCS_MACH="machine/_float.h machine/endian.h machine/cdefs.h machine/_types.h"
SRCS_SYS="sys/_null.h sys/cdefs.h sys/_endian.h sys/endian.h sys/_types.h"
SRCS_AMD64="amd64/_float.h amd64/stdarg.h amd64/endian.h"
SRCS="float.h stddef.h stdint.h stdbool.h stdarg.h"
mkdir -p ${HOST_INCDIR}
mkdir -p ${HOST_INCDIR}/machine ${HOST_INCDIR}/sys ${HOST_INCDIR}/amd64
for f in ${SRCS_MACH}; do cp -f ${INCDIR}/$f ${HOST_INCDIR}/machine; done
for f in ${SRCS_SYS}; do cp -f ${INCDIR}/$f ${HOST_INCDIR}/sys; done
for f in ${SRCS_AMD64}; do cp -f ${INCDIR}/$f ${HOST_INCDIR}/amd64; done
for f in ${SRCS}; do cp -f ${INCDIR}/$f ${HOST_INCDIR}; done
# OpenBSD's default retpoline/ret-protector instrumentation is disabled
# for the unikernel target; -nopie because the tenders are not PIE here.
MAKECONF_CFLAGS="-mno-retpoline -fno-ret-protector -nostdlibinc"
MAKECONF_LDFLAGS="-nopie"
# Tools-only builds need none of the target configuration below.
[ -n "${OPT_ONLY_TOOLS}" ] && return
CONFIG_HVT=1
CONFIG_SPT=
[ "${CONFIG_ARCH}" = "x86_64" ] && CONFIG_VIRTIO=1
[ "${CONFIG_ARCH}" = "x86_64" ] && CONFIG_MUEN=1
CONFIG_GENODE=
}
# Check for a tools-only build.
OPT_ONLY_TOOLS=
if [ -n "$1" ]; then
if [ "$1" = "--only-tools" ]; then
OPT_ONLY_TOOLS=1
else
die "usage: configure.sh [ --only-tools ]"
fi
fi
# Allow external override of CC.
CC=${CC:-cc}
LD=${LD:-ld}
CC_MACHINE=$(${CC} -dumpmachine)
[ $? -ne 0 ] &&
die "Could not run '${CC} -dumpmachine', is your compiler working?"
# Determine HOST and ARCH based on what the toolchain reports.
case ${CC_MACHINE} in
x86_64-*linux*)
CONFIG_ARCH=x86_64 CONFIG_HOST=Linux
CONFIG_GUEST_PAGE_SIZE=0x1000
;;
aarch64-*linux*)
CONFIG_ARCH=aarch64 CONFIG_HOST=Linux
CONFIG_GUEST_PAGE_SIZE=0x1000
;;
powerpc64le-*linux*|ppc64le-*linux*)
CONFIG_ARCH=ppc64le CONFIG_HOST=Linux
# POWER guests use 64 KB pages, unlike the 4 KB pages everywhere else.
CONFIG_GUEST_PAGE_SIZE=0x10000
;;
x86_64-*freebsd*)
CONFIG_ARCH=x86_64 CONFIG_HOST=FreeBSD
CONFIG_GUEST_PAGE_SIZE=0x1000
;;
amd64-*openbsd*)
CONFIG_ARCH=x86_64 CONFIG_HOST=OpenBSD
CONFIG_GUEST_PAGE_SIZE=0x1000
;;
*)
die "Unsupported toolchain target: ${CC_MACHINE}"
;;
esac
# Host-provided header files are installed here for in-tree builds. OPAM will
# install these to $(OPAM_INCDIR)/host where they will be picked up by
# pkg-config.
HOST_INCDIR=${PWD}/include/crt
# All feature flags and flag variables default to empty (disabled); the
# per-host config_host_* function below enables what the platform supports.
CONFIG_HVT=
CONFIG_SPT=
CONFIG_VIRTIO=
CONFIG_MUEN=
CONFIG_GENODE=
MAKECONF_CFLAGS=
MAKECONF_LDFLAGS=
MAKECONF_SPT_CFLAGS=
MAKECONF_SPT_LDLIBS=
CONFIG_SPT_NO_PIE=
# Dispatch to the host-specific configuration determined above.
case "${CONFIG_HOST}" in
Linux)
config_host_linux
;;
FreeBSD)
config_host_freebsd
;;
OpenBSD)
config_host_openbsd
;;
*)
die "Unsupported build OS: ${CONFIG_HOST}"
;;
esac
# WARNING:
#
# The generated Makeconf is dual-use! It is both sourced by GNU make, and by
# the test suite. As such, a subset of this file must parse in both shell *and*
# GNU make. Given the differences in quoting rules between the two
# (unable to sensibly use VAR="VALUE"), our convention is as follows:
#
# 1. GNU make parses the entire file, i.e. all variables defined below are
# available to Makefiles.
#
# 2. Shell scripts parse the subset of *lines* starting with "CONFIG_". I.e.
# only variables named "CONFIG_..." are available. When adding new variables
# to this group you must ensure that they do not contain more than a single
# "word".
#
# Please do NOT add variable names with new prefixes without asking first.
#
cat <<EOM >Makeconf
# Generated by configure.sh $@, using CC=${CC} for target ${CC_MACHINE}
CONFIG_HVT=${CONFIG_HVT}
CONFIG_SPT=${CONFIG_SPT}
CONFIG_VIRTIO=${CONFIG_VIRTIO}
CONFIG_MUEN=${CONFIG_MUEN}
CONFIG_GENODE=${CONFIG_GENODE}
MAKECONF_CFLAGS=${MAKECONF_CFLAGS}
MAKECONF_LDFLAGS=${MAKECONF_LDFLAGS}
CONFIG_ARCH=${CONFIG_ARCH}
CONFIG_HOST=${CONFIG_HOST}
CONFIG_GUEST_PAGE_SIZE=${CONFIG_GUEST_PAGE_SIZE}
MAKECONF_CC=${CC}
MAKECONF_LD=${LD}
MAKECONF_SPT_CFLAGS=${MAKECONF_SPT_CFLAGS}
MAKECONF_SPT_LDLIBS=${MAKECONF_SPT_LDLIBS}
CONFIG_SPT_NO_PIE=${CONFIG_SPT_NO_PIE}
EOM
echo "${prog_NAME}: Configured for ${CC_MACHINE}."
# Print the enabled targets incrementally on one line. 'echo -n' is not
# portable under #!/bin/sh (POSIX echo takes no options, and some shells
# print the literal "-n"); printf '%s' is the portable equivalent.
printf '%s' "${prog_NAME}: Enabled targets:"
[ -n "${CONFIG_HVT}" ] && printf '%s' " hvt"
[ -n "${CONFIG_SPT}" ] && printf '%s' " spt"
[ -n "${CONFIG_VIRTIO}" ] && printf '%s' " virtio"
[ -n "${CONFIG_MUEN}" ] && printf '%s' " muen"
[ -n "${CONFIG_GENODE}" ] && printf '%s' " genode"
echo "."
|
import { tsx, create } from "@dojo/framework/core/vdom";
import App from "../app";
import { Window } from "../interfaces";
// Properties of the Page widget: the window-level settings forwarded to App.
export interface PageProperties extends Window {}
const factory = create().properties<PageProperties>();
// Page widget: wraps its children in the shared App shell, forwarding the
// navigation-bar configuration (background color defaults to black).
export default factory(function Page({ children, properties }) {
	const { navigationBarBackgroundColor = "#000000", navigationBarTitleText } = properties();
	return (
		<App window={{ navigationBarBackgroundColor, navigationBarTitleText }}>
			<div>{children()}</div>
		</App>
	);
});
|
"use strict";
// Auto-generated (CommonJS/ES-module interop) icon-definition module.
// Exposes the "close" glyph as plain SVG data (viewBox plus child nodes)
// for an icon component to render; do not edit by hand.
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.close = void 0;
// SVG description of a 512x512 "X" close icon. The "d" attribute is the
// pre-computed path outline (kept verbatim -- any edit changes the glyph).
var close = {
"viewBox": "0 0 512 512",
"children": [{
"name": "path",
"attribs": {
"d": "M443.6,387.1L312.4,255.4l131.5-130c5.4-5.4,5.4-14.2,0-19.6l-37.4-37.6c-2.6-2.6-6.1-4-9.8-4c-3.7,0-7.2,1.5-9.8,4\r\n\tL256,197.8L124.9,68.3c-2.6-2.6-6.1-4-9.8-4c-3.7,0-7.2,1.5-9.8,4L68,105.9c-5.4,5.4-5.4,14.2,0,19.6l131.5,130L68.4,387.1\r\n\tc-2.6,2.6-4.1,6.1-4.1,9.8c0,3.7,1.4,7.2,4.1,9.8l37.4,37.6c2.7,2.7,6.2,4.1,9.8,4.1c3.5,0,7.1-1.3,9.8-4.1L256,313.1l130.7,131.1\r\n\tc2.7,2.7,6.2,4.1,9.8,4.1c3.5,0,7.1-1.3,9.8-4.1l37.4-37.6c2.6-2.6,4.1-6.1,4.1-9.8C447.7,393.2,446.2,389.7,443.6,387.1z"
},
"children": []
}]
};
exports.close = close;
<filename>cache_content_imported_object.go<gh_stars>1000+
package gopdf
import (
"fmt"
"io"
)
// cacheContentImportedTemplate holds the parameters needed to place an
// imported PDF template (an XObject referred to by tplName) onto the
// current page when the content stream is written.
type cacheContentImportedTemplate struct {
	pageHeight float64 // height of the destination page; added to tY by write to convert into PDF coordinates
	tplName    string  // content-stream name of the imported template XObject -- registered elsewhere; TODO confirm
	scaleX     float64 // horizontal scale factor of the cm matrix
	scaleY     float64 // vertical scale factor of the cm matrix
	tX         float64 // horizontal translation of the cm matrix
	tY         float64 // vertical translation (top-left based until converted in write)
}
// write emits the content-stream operators that draw the imported template:
// save graphics state, apply a scale+translate (cm) matrix, invoke the
// template XObject with Do, and restore state.
//
// The cached tY is converted into PDF's bottom-left coordinate space by
// adding the page height. The original implementation did this with
// `c.tY += c.pageHeight`, mutating the receiver so that a second call to
// write produced a different (wrong) offset; the conversion now uses a
// local so write is idempotent. The Fprintf error, previously discarded,
// is now propagated to the caller.
//
// protection is accepted for interface compatibility but unused here.
func (c *cacheContentImportedTemplate) write(w io.Writer, protection *PDFProtection) error {
	y := c.tY + c.pageHeight
	_, err := fmt.Fprintf(w, "q 0 J 1 w 0 j 0 G 0 g q %.4F 0 0 %.4F %.4F %.4F cm %s Do Q Q\n", c.scaleX, c.scaleY, c.tX, y, c.tplName)
	return err
}
|
<gh_stars>1-10
// Testing for foods router
const request = require('supertest');
const db = require('../../data/dbConfig.js');
const FoodRouter = require('./food-model.js');
// This suite is still in the layout stage; it is undecided whether to test all endpoints, since the models are tested separately.
// For now it mostly sets up scaffolding to catch glitches and flag anything that needs touching up to prevent errors.
// Test suite scaffolding for the Foods router. Most cases below are stubs
// (titled 'shouldDoThis') or commented out pending implementation.
describe('Foods Router', () => {
beforeEach(async () => {
// wipe the database so every test starts from an empty 'foods' table
await db('foods').truncate()
})
//#region - READ
// TODO: GET all - simple
describe('test get`/api/food` ', () => {
it('shouldDoThis', async () => {
// Expected Input
// call function ->
// expected results ->
})
})
/* work in progress
// TODO: GET food by id
describe('test get`/api/food/:id` ', () => {
it('shouldDoThis', async () => {
// Expected Input
// call function ->
// expected results ->
})
})
// TODO: GET all categories
describe('test get`/api/food/categories/all` ', () => {
it('shouldDoThis', async () => {
// Expected Input
// call function ->
// expected results ->
})
})
// TODO: GET category by id
describe('test get`/api/food/categories/:id` ', () => {
it('shouldDoThis', async () => {
// Expected Input
// call function ->
// expected results ->
})
})
//#endregion
//#region - CREATE - POST endpoints
// TODO: add food
describe('test post`/api/food` ', () => {
it('shouldDoThis', async () => {
// Expected Input
// call function ->
// expected results ->
})
})
// TODO: add category
describe('test post`/api/food/categories` ', () => {
it('shouldDoThis', async () => {
// Expected Input
// call function ->
// expected results ->
})
})
//#endregion
//#region - Update - PUT endpoints
// TODO: update Food
describe('test put `/api/food/:id` ', () => {
it('shouldDoThis', async () => {
// Expected Input
// call function ->
// expected results ->
})
})
// TODO: update Category
describe('test put `/api/food/categories/:id` ', () => {
it('shouldDoThis', async () => {
// Expected Input
// call function ->
// expected results ->
})
})
//#endregion
//#region - Delete - delete endpoints
// TODO: delete food
describe('test delete `/api/food/:id` ', () => {
it('shouldDoThis', async () => {
// Expected Input
// call function ->
// expected results ->
})
})
// TODO: delete category
// NOTE(review): the describe title below says get but this stub sits in the
// delete region -- confirm the intended verb when implementing.
describe('test get`/api/food/categories/:id` ', () => {
it('shouldDoThis', async () => {
// Expected Input
// call function ->
// expected results ->
})
})
//#endregion
*/
})
#!/usr/bin/env bash
# Integrity test: exercise SPDK logical-volume operations (snapshot, resize,
# clone, inflate) while NVMe-oF I/O is running against the volume.
testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/nvmf/common.sh
MALLOC_BDEV_SIZE=64
MALLOC_BLOCK_SIZE=512
# Logical volume size in MiB before and after the mid-test resize.
LVOL_BDEV_INIT_SIZE=20
LVOL_BDEV_FINAL_SIZE=30
# JSON-RPC client used for all target configuration below.
rpc_py="$rootdir/scripts/rpc.py"
timing_enter lvol_integrity
nvmftestinit
nvmfappstart "-m 0x7"
# SoftRoce does not have enough queues available for
# multiconnection tests. Detect if we're using software RDMA.
# If so - lower the number of subsystems for test.
if check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then
echo "Using software RDMA, lowering number of NVMeOF subsystems."
SUBSYS_NR=1
fi
$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
# Construct a RAID volume for the logical volume store
base_bdevs="$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE) "
base_bdevs+=$($rpc_py bdev_malloc_create $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)
$rpc_py bdev_raid_create -n raid0 -z 64 -r 0 -b "$base_bdevs"
# Create the logical volume store on the RAID volume
lvs=$($rpc_py bdev_lvol_create_lvstore raid0 lvs)
# Create a logical volume on the logical volume store
lvol=$($rpc_py bdev_lvol_create -u $lvs lvol $LVOL_BDEV_INIT_SIZE)
# Create an NVMe-oF subsystem and add the logical volume as a namespace
$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode0 -a -s SPDK0
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode0 $lvol
$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode0 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
# Start random writes in the background (4 KiB, queue depth 128, 10 s)
$rootdir/examples/nvme/perf/perf -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" -o 4096 -q 128 -s 512 -w randwrite -t 10 -c 0x18 &
perf_pid=$!
sleep 1
# Perform some operations on the logical volume while I/O is in flight
snapshot=$($rpc_py bdev_lvol_snapshot $lvol "MY_SNAPSHOT")
$rpc_py bdev_lvol_resize $lvol $LVOL_BDEV_FINAL_SIZE
clone=$($rpc_py bdev_lvol_clone $snapshot "MY_CLONE")
$rpc_py bdev_lvol_inflate $clone
# Wait for I/O to complete
wait $perf_pid
# Clean up
$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode0
$rpc_py bdev_lvol_delete $lvol
$rpc_py bdev_lvol_delete_lvstore -u $lvs
rm -f ./local-job*
trap - SIGINT SIGTERM EXIT
nvmftestfini
timing_exit lvol_integrity
|
<reponame>vandvassily/babel-plugin-lessimport<filename>webpack.config.js
const path = require("path");
const webpack = require("webpack");
// NOTE(review): 'package' is a reserved word in strict mode and this value
// is never used below -- consider removing it; kept here in case something
// outside this view relies on it. TODO confirm.
const package = require("./package.json");
// Plugin that cleans the build output directory before each build.
const { CleanWebpackPlugin } = require("clean-webpack-plugin");
module.exports = {
mode: "development",
// JavaScript entry point (the demo application).
entry: "./demo/main.js",
output: {
// Bundle all dependent modules into a single output file,
// cache-busted with a short content hash.
filename: "[name].[contenthash:8].js",
// Emit all output files into the dist directory.
path: path.resolve(__dirname, "./dist"),
publicPath: "",
},
module: {
rules: [
{
test: /\.m?js$/,
use: {
loader: "babel-loader", // Babel transforms are configured in .babelrc
},
},
],
},
resolve: {},
plugins: [
// Clean the dist directory (stale assets from previous builds are kept).
new CleanWebpackPlugin({
cleanStaleWebpackAssets: false,
}),
],
target: ["web", "es6"],
};
|
#!/usr/bin/env bash
set -ex -o pipefail -o errtrace -o functrace
# catch STATUS LINENO: error-trap handler; reports the failing exit status
# and line number, then aborts with a distinctive exit code.
function catch() {
echo "error $1 on line $2"
exit 255
}
# Route any error (set -e), TERM or INT through the handler above.
trap 'catch $? $LINENO' ERR TERM INT
# build-manifests is designed to populate the deploy directory
# with all of the manifests necessary for use in development
# and for consumption with the operator-lifecycle-manager.
#
# First, we create a temporary directory and filling it with
# all of the component operator's ClusterServiceVersion (CSV for OLM)
# and CustomResourceDefinitions (CRDs); being sure to copy the CRDs
# into the deploy/crds directory.
#
# The CSV manifests contain all of the information we need to 1) generate
# a combined CSV and 2) other development related manifests (like the
# operator deployment + rbac).
#
# Second, we pass all of the component CSVs off to the manifest-templator
# that handles the deployment specs, service account names, permissions, and
# clusterPermissions by converting them into their corresponding Kubernetes
# manifests (ie. permissions + serviceAccountName = role + service account
# + role binding) before writing them to disk.
#
# Lastly, we give the component CSVs to the csv-merger, which combines all
# of the manifests into a single, unified ClusterServiceVersion.
# Resolve an image reference to its digest-pinned form using the bundled
# digester tool, building the tool on first use.
function get_image_digest() {
    local digester="${PROJECT_ROOT}/tools/digester/digester"
    if [[ ! -f ${digester} ]]; then
        (
            cd "${PROJECT_ROOT}/tools/digester"
            go build .
        )
    fi
    local resolved
    resolved=$("${digester}" -image "$1" "$2")
    echo "${resolved}"
}
PROJECT_ROOT="$(readlink -e $(dirname "${BASH_SOURCE[0]}")/../)"
# CSV_VERSION, component versions and the *_IMAGE variables used throughout
# this script are provided by these two sourced files.
source "${PROJECT_ROOT}"/hack/config
source "${PROJECT_ROOT}"/deploy/images.env
DEPLOY_DIR="${PROJECT_ROOT}/deploy"
CRD_DIR="${DEPLOY_DIR}/crds"
OLM_DIR="${DEPLOY_DIR}/olm-catalog"
CSV_VERSION=${CSV_VERSION}
# UTC timestamp used by the UNIQUE mode to make every build's CSV distinct.
CSV_TIMESTAMP=$(date +%Y%m%d%H%M -u)
PACKAGE_NAME="community-kubevirt-hyperconverged"
CSV_DIR="${OLM_DIR}/${PACKAGE_NAME}/${CSV_VERSION}"
# Most component images ship their csv-generator at the default path below;
# the SSP image uses a different location.
DEFAULT_CSV_GENERATOR="/usr/bin/csv-generator"
SSP_CSV_GENERATOR="/csv-generator"
INDEX_IMAGE_DIR=${DEPLOY_DIR}/index-image
CSV_INDEX_IMAGE_DIR="${INDEX_IMAGE_DIR}/${PACKAGE_NAME}/${CSV_VERSION}"
OPERATOR_NAME="${OPERATOR_NAME:-kubevirt-hyperconverged-operator}"
OPERATOR_NAMESPACE="${OPERATOR_NAMESPACE:-kubevirt-hyperconverged}"
IMAGE_PULL_POLICY="${IMAGE_PULL_POLICY:-IfNotPresent}"
# Important extensions
CSV_EXT="clusterserviceversion.yaml"
CSV_CRD_EXT="csv_crds.yaml"
CRD_EXT="crd.yaml"
# gen_csv GENERATOR_PATH OPERATOR_NAME IMAGE_URL DUMP_CRDS_FLAG [ARGS...]
# Runs the csv-generator shipped inside the given operator image twice (once
# without and once with the dump-CRDs flag), extracts the CRDs as the diff
# between the two outputs, and splits them into one numbered file per CRD.
function gen_csv() {
# Handle arguments
local csvGeneratorPath="$1" && shift
local operatorName="$1" && shift
local imagePullUrl="$1" && shift
local dumpCRDsArg="$1" && shift
local operatorArgs="$@"
# Handle important vars
local csv="${operatorName}.${CSV_EXT}"
local csvWithCRDs="${operatorName}.${CSV_CRD_EXT}"
local crds="${operatorName}.crds.yaml"
# TODO: Use oc to run if cluster is available
local dockerArgs="docker run --rm --entrypoint=${csvGeneratorPath} ${imagePullUrl} ${operatorArgs}"
eval $dockerArgs > $csv
eval $dockerArgs $dumpCRDsArg > $csvWithCRDs
# diff returns 1 when there is a diff, and there is always diff here. Added `|| :` to cancel trap here.
diff -u $csv $csvWithCRDs | grep -E "^\+" | sed -E 's/^\+//' | tail -n+2 > $crds || :
# Split the extracted multi-document CRD stream on '---' separators.
csplit --digits=2 --quiet --elide-empty-files \
--prefix="${operatorName}" \
--suffix-format="%02d.${CRD_EXT}" \
$crds \
"/^---$/" "{*}"
}
# Generate the CSV + CRDs for the KubeVirt operator itself. The KUBEVIRT_*
# image variables are digest-pinned references; the '/*@/' expansions strip
# everything up to and including the '@', leaving the bare sha256 digest
# that the generator expects. Echoes the operator name for the caller.
function create_virt_csv() {
local apiSha
local controllerSha
local launcherSha
local handlerSha
apiSha="${KUBEVIRT_API_IMAGE/*@/}"
controllerSha="${KUBEVIRT_CONTROLLER_IMAGE/*@/}"
launcherSha="${KUBEVIRT_LAUNCHER_IMAGE/*@/}"
handlerSha="${KUBEVIRT_HANDLER_IMAGE/*@/}"
local operatorName="kubevirt"
local dumpCRDsArg="--dumpCRDs"
local operatorArgs
operatorArgs=" \
--namespace=${OPERATOR_NAMESPACE} \
--csvVersion=${CSV_VERSION} \
--operatorImageVersion=${KUBEVIRT_OPERATOR_IMAGE/*@/} \
--dockerPrefix=${KUBEVIRT_OPERATOR_IMAGE%\/*} \
--kubeVirtVersion=${KUBEVIRT_VERSION} \
--apiSha=${apiSha} \
--controllerSha=${controllerSha} \
--handlerSha=${handlerSha} \
--launcherSha=${launcherSha} \
"
gen_csv "${DEFAULT_CSV_GENERATOR}" "${operatorName}" "${KUBEVIRT_OPERATOR_IMAGE}" "${dumpCRDsArg}" "${operatorArgs}"
echo "${operatorName}"
}
# Generate the CSV + CRDs for the cluster-network-addons operator.
# Echoes the operator name for the caller.
function create_cna_csv() {
local operatorName="cluster-network-addons"
local dumpCRDsArg="--dump-crds"
# Split the image reference into registry prefix, bare image name and tag.
local containerPrefix="${CNA_OPERATOR_IMAGE%/*}"
local imageName="${CNA_OPERATOR_IMAGE#${containerPrefix}/}"
local tag="${CNA_OPERATOR_IMAGE/*:/}"
# Use the 'tag' local computed above for --container-tag; it was previously
# declared but unused, with the same substitution duplicated inline.
local operatorArgs=" \
--namespace=${OPERATOR_NAMESPACE} \
--version=${CSV_VERSION} \
--version-replaces=${REPLACES_VERSION} \
--image-pull-policy=IfNotPresent \
--operator-version=${NETWORK_ADDONS_VERSION} \
--container-tag=${tag} \
--container-prefix=${containerPrefix} \
--image-name=${imageName/:*/}
"
gen_csv ${DEFAULT_CSV_GENERATOR} ${operatorName} "${CNA_OPERATOR_IMAGE}" ${dumpCRDsArg} ${operatorArgs}
echo "${operatorName}"
}
# Generate the CSV + CRDs for the scheduling-scale-performance (SSP)
# operator. SSP ships its csv-generator at a non-default path, hence the
# dedicated SSP_CSV_GENERATOR. Echoes the operator name for the caller.
function create_ssp_csv() {
    local name="scheduling-scale-performance"
    local crdDumpFlag="--dump-crds"
    local args=" \
--namespace=${OPERATOR_NAMESPACE} \
--csv-version=${CSV_VERSION} \
--operator-image=${SSP_OPERATOR_IMAGE} \
--operator-version=${SSP_VERSION} \
"
    gen_csv ${SSP_CSV_GENERATOR} ${name} "${SSP_OPERATOR_IMAGE}" ${crdDumpFlag} ${args}
    echo "${name}"
}
# Generate the CSV + CRDs for the containerized-data-importer (CDI)
# operator, passing every CDI component image explicitly.
# Echoes the operator name for the caller.
function create_cdi_csv() {
local operatorName="containerized-data-importer"
local dumpCRDsArg="--dump-crds"
local operatorArgs=" \
--namespace=${OPERATOR_NAMESPACE} \
--csv-version=${CSV_VERSION} \
--pull-policy=IfNotPresent \
--operator-image=${CDI_OPERATOR_IMAGE} \
--controller-image=${CDI_CONTROLLER_IMAGE} \
--apiserver-image=${CDI_APISERVER_IMAGE} \
--cloner-image=${CDI_CLONER_IMAGE} \
--importer-image=${CDI_IMPORTER_IMAGE} \
--uploadproxy-image=${CDI_UPLOADPROXY_IMAGE} \
--uploadserver-image=${CDI_UPLOADSERVER_IMAGE} \
--operator-version=${CDI_VERSION} \
"
gen_csv ${DEFAULT_CSV_GENERATOR} ${operatorName} "${CDI_OPERATOR_IMAGE}" ${dumpCRDsArg} ${operatorArgs}
echo "${operatorName}"
}
# Generate the CSV + CRDs for the node-maintenance operator (NMO). Its image
# keeps csv-generator under /usr/local/bin rather than the default path.
# Echoes the operator name for the caller.
function create_nmo_csv() {
    local name="node-maintenance"
    local generator="/usr/local/bin/csv-generator"
    local crdDumpFlag="--dump-crds"
    local args=" \
--namespace=${OPERATOR_NAMESPACE} \
--csv-version=${CSV_VERSION} \
--operator-image=${NMO_IMAGE} \
"
    gen_csv ${generator} ${name} "${NMO_IMAGE}" ${crdDumpFlag} ${args}
    echo "${name}"
}
# Generate the CSV + CRDs for the hostpath-provisioner operator (operator
# image HPPO_IMAGE, provisioner image HPP_IMAGE).
# Echoes the operator name for the caller.
function create_hpp_csv() {
local operatorName="hostpath-provisioner"
local dumpCRDsArg="--dump-crds"
local operatorArgs=" \
--csv-version=${CSV_VERSION} \
--operator-image-name=${HPPO_IMAGE} \
--provisioner-image-name=${HPP_IMAGE} \
--namespace=${OPERATOR_NAMESPACE} \
--pull-policy=IfNotPresent \
"
gen_csv ${DEFAULT_CSV_GENERATOR} ${operatorName} "${HPPO_IMAGE}" ${dumpCRDsArg} ${operatorArgs}
echo "${operatorName}"
}
# Generate the CSV + CRDs for the vm-import operator.
# Echoes the operator name for the caller.
# (The previously declared 'containerPrefix' local was never used in this
# function and has been removed.)
function create_vm_import_csv() {
local operatorName="vm-import-operator"
local dumpCRDsArg="--dump-crds"
local operatorArgs=" \
--csv-version=${CSV_VERSION} \
--operator-version=${VM_IMPORT_VERSION} \
--operator-image=${VMIMPORT_OPERATOR_IMAGE} \
--controller-image=${VMIMPORT_CONTROLLER_IMAGE} \
--namespace=${OPERATOR_NAMESPACE} \
--virtv2v-image=${VMIMPORT_VIRTV2V_IMAGE} \
--pull-policy=IfNotPresent \
"
gen_csv ${DEFAULT_CSV_GENERATOR} ${operatorName} "${VMIMPORT_OPERATOR_IMAGE}" ${dumpCRDsArg} ${operatorArgs}
echo "${operatorName}"
}
# Work inside a throwaway directory; all per-component manifests are
# generated there and copied into the tree afterwards.
# The failure branch now uses a brace group: the original subshell form
# '( echo ...; exit 1 )' only exited the subshell and relied on 'set -e'
# to abort the script.
TEMPDIR=$(mktemp -d) || { echo "Failed to create temp directory" && exit 1; }
pushd $TEMPDIR
# Generate each component operator's CSV. Each create_* function echoes the
# operator name it used, from which the CSV filename is derived.
virtFile=$(create_virt_csv)
virtCsv="${TEMPDIR}/${virtFile}.${CSV_EXT}"
cnaFile=$(create_cna_csv)
cnaCsv="${TEMPDIR}/${cnaFile}.${CSV_EXT}"
sspFile=$(create_ssp_csv)
sspCsv="${TEMPDIR}/${sspFile}.${CSV_EXT}"
cdiFile=$(create_cdi_csv)
cdiCsv="${TEMPDIR}/${cdiFile}.${CSV_EXT}"
nmoFile=$(create_nmo_csv)
nmoCsv="${TEMPDIR}/${nmoFile}.${CSV_EXT}"
# Renamed from the misspelled 'hhpFile' for consistency with hppCsv.
hppFile=$(create_hpp_csv)
hppCsv="${TEMPDIR}/${hppFile}.${CSV_EXT}"
vmImportFile=$(create_vm_import_csv)
importCsv="${TEMPDIR}/${vmImportFile}.${CSV_EXT}"
# Small YAML fragment merged over the final CSV via --csv-overrides.
csvOverrides="${TEMPDIR}/csv_overrides.${CSV_EXT}"
# Keywords injected into the merged CSV via --csv-overrides; also used later
# to assert that the override actually reached the rendered output.
keywords=" keywords:
- KubeVirt
- Virtualization
- VM"
cat > ${csvOverrides} <<- EOM
---
spec:
$keywords
EOM
# Write HCO CRDs
(cd ${PROJECT_ROOT}/tools/csv-merger/ && go build)
hco_crds=${TEMPDIR}/hco.crds.yaml
(cd ${PROJECT_ROOT} && ${PROJECT_ROOT}/tools/csv-merger/csv-merger --api-sources=${PROJECT_ROOT}/pkg/apis/... --output-mode=CRDs > $hco_crds)
# Split the multi-document CRD stream into one numbered file per CRD.
csplit --digits=2 --quiet --elide-empty-files \
--prefix=hco \
--suffix-format="%02d.${CRD_EXT}" \
$hco_crds \
"/^---$/" "{*}"
popd
# Recreate the OLM bundle layout (metadata/ + manifests/) from scratch.
rm -fr "${CSV_DIR}"
mkdir -p "${CSV_DIR}/metadata" "${CSV_DIR}/manifests"
cat << EOF > "${CSV_DIR}/metadata/annotations.yaml"
annotations:
operators.operatorframework.io.bundle.channel.default.v1: ${CSV_VERSION}
operators.operatorframework.io.bundle.channels.v1: ${CSV_VERSION}
operators.operatorframework.io.bundle.manifests.v1: manifests/
operators.operatorframework.io.bundle.mediatype.v1: registry+v1
operators.operatorframework.io.bundle.metadata.v1: metadata/
operators.operatorframework.io.bundle.package.v1: ${PACKAGE_NAME}
EOF
# SMBIOS values handed to the templator/merger below.
SMBIOS=$(cat <<- EOM
Family: KubeVirt
Manufacturer: KubeVirt
Product: None
EOM
)
# validate CSVs. Make sure each one of them contain an image (and so, also not empty):
csvs=("${cnaCsv}" "${virtCsv}" "${sspCsv}" "${cdiCsv}" "${nmoCsv}" "${hppCsv}" "${importCsv}")
for csv in "${csvs[@]}"; do
grep -E "^ *image: [a-zA-Z0-9/\.:@\-]+$" ${csv}
done
# If explicit operator/webhook images were requested, resolve them to digest
# form and substitute into DIGEST_LIST (DIGEST_LIST and the HCO_* defaults
# are presumably provided by deploy/images.env -- TODO confirm).
if [[ -n ${OPERATOR_IMAGE} ]]; then
TEMP_IMAGE_NAME=$(get_image_digest "${OPERATOR_IMAGE}")
DIGEST_LIST="${DIGEST_LIST/${HCO_OPERATOR_IMAGE}/${TEMP_IMAGE_NAME}}"
HCO_OPERATOR_IMAGE=${TEMP_IMAGE_NAME}
fi
if [[ -n ${WEBHOOK_IMAGE} ]]; then
TEMP_IMAGE_NAME=$(get_image_digest "${WEBHOOK_IMAGE}")
if [[ -n ${HCO_WEBHOOK_IMAGE} ]]; then
DIGEST_LIST="${DIGEST_LIST/${HCO_WEBHOOK_IMAGE}/${TEMP_IMAGE_NAME}}"
else
DIGEST_LIST="${DIGEST_LIST},${TEMP_IMAGE_NAME}"
fi
HCO_WEBHOOK_IMAGE=${TEMP_IMAGE_NAME}
fi
# Fall back to the operator image for the webhook when none was provided.
if [[ -z ${HCO_WEBHOOK_IMAGE} ]]; then
HCO_WEBHOOK_IMAGE="${HCO_OPERATOR_IMAGE}"
fi
# Build and write deploy dir
(cd ${PROJECT_ROOT}/tools/manifest-templator/ && go build)
${PROJECT_ROOT}/tools/manifest-templator/manifest-templator \
--api-sources=${PROJECT_ROOT}/pkg/apis/... \
--cna-csv="$(<${cnaCsv})" \
--virt-csv="$(<${virtCsv})" \
--ssp-csv="$(<${sspCsv})" \
--cdi-csv="$(<${cdiCsv})" \
--nmo-csv="$(<${nmoCsv})" \
--hpp-csv="$(<${hppCsv})" \
--vmimport-csv="$(<${importCsv})" \
--ims-conversion-image-name="${CONVERSION_IMAGE}" \
--ims-vmware-image-name="${VMWARE_IMAGE}" \
--operator-namespace="${OPERATOR_NAMESPACE}" \
--smbios="${SMBIOS}" \
--hco-kv-io-version="${CSV_VERSION}" \
--kubevirt-version="${KUBEVIRT_VERSION}" \
--cdi-version="${CDI_VERSION}" \
--cnao-version="${NETWORK_ADDONS_VERSION}" \
--ssp-version="${SSP_VERSION}" \
--nmo-version="${NMO_VERSION}" \
--hppo-version="${HPPO_VERSION}" \
--vm-import-version="${VM_IMPORT_VERSION}" \
--operator-image="${HCO_OPERATOR_IMAGE}" \
--webhook-image="${HCO_WEBHOOK_IMAGE}"
(cd ${PROJECT_ROOT}/tools/manifest-templator/ && go clean)
# UNIQUE mode appends the build timestamp so every CSV version is distinct.
if [[ "$1" == "UNIQUE" ]]; then
CSV_VERSION_PARAM=${CSV_VERSION}-${CSV_TIMESTAMP}
ENABLE_UNIQUE="true"
else
CSV_VERSION_PARAM=${CSV_VERSION}
ENABLE_UNIQUE="false"
fi
# Build and merge CSVs
CSV_DIR=${CSV_DIR}/manifests
${PROJECT_ROOT}/tools/csv-merger/csv-merger \
--cna-csv="$(<${cnaCsv})" \
--virt-csv="$(<${virtCsv})" \
--ssp-csv="$(<${sspCsv})" \
--cdi-csv="$(<${cdiCsv})" \
--nmo-csv="$(<${nmoCsv})" \
--hpp-csv="$(<${hppCsv})" \
--vmimport-csv="$(<${importCsv})" \
--ims-conversion-image-name="${CONVERSION_IMAGE}" \
--ims-vmware-image-name="${VMWARE_IMAGE}" \
--csv-version=${CSV_VERSION_PARAM} \
--replaces-csv-version=${REPLACES_CSV_VERSION} \
--hco-kv-io-version="${CSV_VERSION}" \
--spec-displayname="KubeVirt HyperConverged Cluster Operator" \
--spec-description="$(<${PROJECT_ROOT}/docs/operator_description.md)" \
--metadata-description="A unified operator deploying and controlling KubeVirt and its supporting operators with opinionated defaults" \
--crd-display="HyperConverged Cluster Operator" \
--smbios="${SMBIOS}" \
--csv-overrides="$(<${csvOverrides})" \
--enable-unique-version=${ENABLE_UNIQUE} \
--kubevirt-version="${KUBEVIRT_VERSION}" \
--cdi-version="${CDI_VERSION}" \
--cnao-version="${NETWORK_ADDONS_VERSION}" \
--ssp-version="${SSP_VERSION}" \
--nmo-version="${NMO_VERSION}" \
--hppo-version="${HPPO_VERSION}" \
--vm-import-version="${VM_IMPORT_VERSION}" \
--related-images-list="${DIGEST_LIST}" \
--operator-image-name="${HCO_OPERATOR_IMAGE}" \
--webhook-image-name="${HCO_WEBHOOK_IMAGE}" > "${CSV_DIR}/${OPERATOR_NAME}.v${CSV_VERSION}.${CSV_EXT}"
rendered_csv="$(cat "${CSV_DIR}/${OPERATOR_NAME}.v${CSV_VERSION}.${CSV_EXT}")"
rendered_keywords="$(echo "$rendered_csv" |grep 'keywords' -A 3)"
# assert that --csv-overrides work
[ "$keywords" == "$rendered_keywords" ]
# Copy all CRDs into the CRD and CSV directories
rm -f ${CRD_DIR}/*
cp -f ${TEMPDIR}/*.${CRD_EXT} ${CRD_DIR}
cp -f ${TEMPDIR}/*.${CRD_EXT} ${CSV_DIR}
# Validate the yaml files
(cd ${CRD_DIR} && docker run --rm -v "$(pwd)":/yaml quay.io/pusher/yamllint yamllint -d "{extends: relaxed, rules: {line-length: disable}}" /yaml)
(cd ${CSV_DIR} && docker run --rm -v "$(pwd)":/yaml quay.io/pusher/yamllint yamllint -d "{extends: relaxed, rules: {line-length: disable}}" /yaml)
# Check there are not API Groups overlap between different CNV operators
${PROJECT_ROOT}/tools/csv-merger/csv-merger --crds-dir=${CRD_DIR}
(cd ${PROJECT_ROOT}/tools/csv-merger/ && go clean)
if [[ "$1" == "UNIQUE" ]]; then
# Add the current CSV_TIMESTAMP to the currentCSV in the packages file
# (PACKAGE_DIR is presumably provided by hack/config -- TODO confirm).
sed -Ei "s/(currentCSV: ${OPERATOR_NAME}.v${CSV_VERSION}).*/\1-${CSV_TIMESTAMP}/" \
${PACKAGE_DIR}/kubevirt-hyperconverged.package.yaml
fi
# Intentionally removing last so failure leaves around the templates
rm -rf ${TEMPDIR}
# Rebuild the index-image tree from the freshly generated bundle, then make
# the CSV's volatile fields (createdAt, image references) deterministic.
rm -rf "${INDEX_IMAGE_DIR:?}"
mkdir -p "${INDEX_IMAGE_DIR:?}/${PACKAGE_NAME}"
cp -r "${CSV_DIR%/*}" "${INDEX_IMAGE_DIR:?}/${PACKAGE_NAME}/"
cp "${OLM_DIR}/bundle.Dockerfile" "${INDEX_IMAGE_DIR:?}/"
INDEX_IMAGE_CSV="${INDEX_IMAGE_DIR}/${PACKAGE_NAME}/${CSV_VERSION}/manifests/kubevirt-hyperconverged-operator.v${CSV_VERSION}.${CSV_EXT}"
sed -r -i "s|createdAt: \".*\$|createdAt: \"2020-10-23 08:58:25\"|; s|quay.io/kubevirt/hyperconverged-cluster-operator.*$|+IMAGE_TO_REPLACE+|; s|quay.io/kubevirt/hyperconverged-cluster-webhook.*$|+WEBHOOK_IMAGE_TO_REPLACE+|" ${INDEX_IMAGE_CSV}
|
#!/bin/sh
# Created By Nate Ober
# Nate [dot] Ober [at] Gmail
# ANSI color escape prefixes (expanded at print time by the shell's echo).
LBLUE='\033[0;36m';
DBLUE='\033[0;34m';
BLACK='\033[0;30m';
GREEN='\033[0;32m';
RED='\033[0;31m';
YELLOW='\033[0;33m';
WHITE='\033[0;37m';
# Defaults; each is overridable via a command-line option (see usage).
LINES=20;
HIGHLIGHT=RED;
SYMBOL="__";
MOUNT="/";
DEVICENAME="hd"
# usage: print option help to stdout. Fixes in the help text: the -l default
# is 20 (matching LINES above, not 40), "/" is the root directory (not the
# home directory), and "multile" -> "multiple".
usage()
{
cat << EOF
usage: $0 options
This script will output a series of divisor symbols with the line that represents the percent full highlighted in one of several colors.
I wrote this for use with Geektool. It is best operated in Geektool by using the "dot" convention ". /path/to/script.sh".
OPTIONS:
-h Highlight color. The color of the pipe line that indicates percent full.
Options are LBLUE, DBLUE, BLACK, GREEN, RED, YELLOW, WHITE. Default is RED.
-l Total number of divisor symbols. Default is 20.
-s The string to use as a divisor symbol. The default is a "__". (As you might expect, you must surround the string in quotes)
-m Mount location. The default is "/", the root directory. Remember to quote any directory that contains spaces. (Example -m "/Volumes/Time Machine Backups")
-n Name of device. This is an alias used to visually identify this device. The default is "hd".
If you use multiple instantiations of this script you may want to identify each device by a short
identifier. It looks best, at the moment, if you use two characters such as "HD" or "TM" (for Time Machine).
EOF
}
# Parse command-line options. The optstring was previously wrapped in
# typographic ("smart") quotes, which are not shell quote characters and
# corrupted the optstring; they are replaced with ASCII quotes. The leading
# ':' enables silent error handling (unknown options land in the '?' case).
while getopts ":h:l:s:m:n:" OPTION
do
    case $OPTION in
        h)
            # Normalize the requested color name to upper case.
            HIGHLIGHT=$(echo "$OPTARG" | tr '[a-z]' '[A-Z]')
            ;;
        l)
            LINES="$OPTARG"
            ;;
        s)
            # Quote $OPTARG so a symbol containing whitespace survives intact
            # (previously unquoted, which word-split the argument).
            SYMBOL=$(printf '%s' "$OPTARG")
            ;;
        m)
            MOUNT="$OPTARG"
            ;;
        n)
            DEVICENAME="$OPTARG"
            ;;
        ?)
            usage
            exit
            ;;
        *)
            usage
            exit
            ;;
    esac
done
# Map the user-supplied color name onto its ANSI escape prefix; any unknown
# name prints usage and exits.
case $HIGHLIGHT in
LBLUE)
HIGHLIGHT=$LBLUE
;;
DBLUE)
HIGHLIGHT=$DBLUE
;;
BLACK)
HIGHLIGHT=$BLACK
;;
GREEN)
HIGHLIGHT=$GREEN
;;
RED)
HIGHLIGHT=$RED
;;
YELLOW)
HIGHLIGHT=$YELLOW
;;
WHITE)
HIGHLIGHT=$WHITE
;;
*)
usage
exit
;;
esac
# Number of symbols that stay un-highlighted: total lines minus the fraction
# corresponding to the percent-used value reported by df (column 5).
DIVTOT=$(df -h "$MOUNT" | awk -v lines="$LINES" 'NR==2{printf "%.0f\n", lines-(($5/100)*lines)}')
# The original C-style 'for (( ... ))' loop is a bashism and fails under
# #!/bin/sh (e.g. dash); use a portable while loop instead. printf %b
# expands the \033 escapes portably (the old '$(echo ...)' trick only
# worked in shells whose echo interprets escapes).
i=1
while [ "$i" -lt "$LINES" ]; do
    if [ "$i" -ge "$DIVTOT" ]; then
        printf ' %b%s%b\n' "$HIGHLIGHT" "$SYMBOL" '\033[0m'
    else
        printf ' %s\n' "$SYMBOL"
    fi
    i=$((i + 1))
done
echo " $DEVICENAME"
echo "$(df -h "$MOUNT" | awk 'NR==2{printf "%s",$5}')"
import React from "react"
function PostLink(props) {
return (
<a href={props.node.fields.slug} className="postLink">
<h3>{props.node.frontmatter.title} </h3>
<div className="postLinkDate">{props.node.frontmatter.date}</div>
</a>
)
}
export default PostLink
|
<h2>Contact Form</h2>
<form action="/" method="POST">
  <div>
    <!-- for/id association makes the label clickable and screen-reader friendly -->
    <label for="name">Name</label>
    <input type="text" name="name" id="name" />
  </div>
  <div>
    <!-- type="email" enables native browser validation and the email keyboard on mobile -->
    <label for="email">Email</label>
    <input type="email" name="email" id="email" />
  </div>
  <div>
    <label for="message">Message</label>
    <textarea name="message" id="message" cols="30" rows="10"></textarea>
  </div>
  <button type="submit">Submit</button>
</form>
import { Formats } from 'consts';
import { t } from 'i18n';
// Render the content (style) of a cell.
// Identity formatter: stringify the value unchanged. Used as the fallback
// for format kinds that need no special rendering.
const noop = (v?: string | number) => String(v);
// Registry of cell formatters: each entry pairs an i18n label with a
// Formats key and a formatter. Formatters return the raw input whenever
// it cannot be parsed, so rendering never throws on bad data.
const formats = [
  {
    label: t('format.auto'),
    key: Formats.auto,
    formatter: noop,
  },
  {
    label: t('format.duration'),
    key: Formats.duration,
    formatter: noop,
  },
  {
    label: t('format.plainText'),
    key: Formats.plainText,
    formatter: noop,
  },
  {
    label: t('format.date'),
    key: Formats.date,
    // Renders YYYY-M-D (month/day not zero-padded).
    formatter(text: string) {
      const d = new Date(text);
      if (d.getTime()) {
        return `${d.getFullYear()}-${d.getMonth() + 1}-${d.getDate()}`;
      }
      return text;
    },
  },
  {
    label: t('format.time'),
    key: Formats.time,
    // Renders H:M:S (no zero padding).
    formatter(text: string) {
      const d = new Date(text);
      if (d.getTime()) {
        return `${d.getHours()}:${d.getMinutes()}:${d.getSeconds()}`;
      }
      return text;
    },
  },
  {
    label: t('format.dateTime'),
    key: Formats.dateTime,
    formatter(text: string) {
      const d = new Date(text);
      if (d.getTime()) {
        return `${d.getFullYear()}-${d.getMonth() +
          1}-${d.getDate()} ${d.getHours()}:${d.getMinutes()}:${d.getSeconds()}`;
      }
      return String(text);
    },
  },
  {
    label: t('format.percentage'),
    key: Formats.percentage,
    // Multiplies by 100 and appends '%', keeping `toFixed` decimal places.
    formatter(text: string, toFixed: number) {
      const _text = parseFloat(text);
      if (isNaN(_text)) {
        return text;
      }
      return `${(_text * 100).toFixed(toFixed)}%`;
    },
  },
  {
    // BUG FIX: this entry is scientific notation, but its label was
    // copy-pasted from the percentage entry above ('format.percentage'),
    // so two menu entries showed the same caption.
    // NOTE(review): confirm the 'format.scientificNotation' key exists in
    // the i18n catalog.
    label: t('format.scientificNotation'),
    key: Formats.scientificNotation,
    formatter(text: string) {
      const _text = parseFloat(text);
      if (isNaN(_text)) {
        return text;
      }
      return _text.toExponential();
    },
  },
  {
    label: t('format.number'),
    key: Formats.number,
    // Validates that the text parses as a number, then returns it verbatim.
    formatter(text: string) {
      const _text = parseFloat(text);
      if (isNaN(_text)) {
        return text;
      }
      return text;
    },
  },
];
// Look up the formatter registered for the given format key.
// Unknown keys fall back to the identity formatter.
export function findFormatter(key: Formats) {
  for (const entry of formats) {
    if (entry.key === key) {
      return entry.formatter;
    }
  }
  return noop;
}
export default formats;
|
<reponame>Tillsten/bp_tracker
# One-shot bootstrap script: create the database schema on first run.
from database import init_db
# Runs immediately on execution; idempotence depends on init_db itself —
# NOTE(review): confirm init_db is safe to call on an existing database.
init_db()
|
<?php
// Find the shortest path (fewest edges) between two nodes of a directed
// graph given as an adjacency list. Returns the path as an array of
// vertices, or -1 when the destination is unreachable.
function findShortestPath($graph, $source, $destination)
{
    // Tracks vertices that have already been expanded.
    $seen = array();
    // FIFO queue of frontier entries: each holds a vertex and the path to it.
    $queue = array();
    // Enqueue the source vertex with its one-element path.
    array_push($queue, array('vertex' => $source, 'path' => array($source)));
    while (!empty($queue))
    {
        // BUG FIX: the original inserted with array_unshift() AND removed
        // with array_shift(), i.e. a LIFO stack — that is depth-first
        // search and can return a longer-than-necessary path. Pushing at
        // the back and shifting from the front restores true BFS, which
        // guarantees the first path found is shortest.
        $node = array_shift($queue);
        $vertex = $node['vertex'];
        if (!isset($seen[$vertex]))
        {
            $seen[$vertex] = true;
            // First arrival at the destination is the shortest path.
            if ($vertex == $destination)
            {
                return $node['path'];
            }
            // Robustness: leaf vertices may have no adjacency entry at all.
            $neighbors = isset($graph[$vertex]) ? $graph[$vertex] : array();
            foreach ($neighbors as $neighbor)
            {
                if (!isset($seen[$neighbor]))
                {
                    // Extend the current path with this neighbor.
                    $path = $node['path'];
                    array_push($path, $neighbor);
                    // Enqueue at the BACK (FIFO order).
                    array_push($queue,
                        array('vertex' => $neighbor, 'path' => $path));
                }
            }
        }
    }
    // No path from the source node to the destination node.
    return -1;
}
// Example directed graph as an adjacency list ('D' is a sink with no entry).
$graph = array(
    'A' => array('B', 'C', 'D'),
    'B' => array('C', 'D'),
    'C' => array('D')
);
// Expect the direct edge to win: prints "A->D".
$path = findShortestPath($graph, 'A', 'D');
echo implode('->', $path);
?> |
import { useRouter } from "next/router";
import { useEffect, useRef } from "react";

/**
 * Custom hook that smooth-scrolls to the element addressed by the URL hash
 * whenever the route changes. Admittedly hacky, but it works, so we use it.
 *
 * @see {@link useRouter}
 */
function useHashChange() {
  const { asPath } = useRouter();
  const timer = useRef<any>();

  useEffect(() => {
    if (/#.+/g.test(asPath)) {
      const targetId = asPath.replace(/\/.*#(.+)/g, "$1");
      const target = window.document.getElementById(targetId);
      if (target) {
        // Defer slightly so layout has settled before measuring.
        timer.current = setTimeout(() => {
          const rect = target.getBoundingClientRect();
          window.top.scroll({
            top: pageYOffset + rect.top,
            behavior: "smooth",
          });
        }, 100);
      }
    }
    // Cancel any pending scroll if the route changes again first.
    return () => clearTimeout(timer.current);
  }, [asPath]);
}

export default useHashChange;
|
package types
import (
"encoding/json"
sdk "github.com/cosmos/cosmos-sdk/types"
)
// RouterKey is the routing key under which this module's messages are
// dispatched; it mirrors the module name.
const RouterKey = ModuleName
/*
	Shit
*/

// MsgShit reports an amount of a given type, signed by the informant.
type MsgShit struct {
	ID string                // unique identifier of the report
	ShitType uint8           // category; must be >= 1 (see ValidateBasic)
	Amount uint16            // reported amount; must be >= 100 (see ValidateBasic)
	Informant sdk.AccAddress // account that signs and submits the message
	Comment string           // free-form remark
}
// NewMsgShit constructs a MsgShit from its field values; no validation is
// performed here (see ValidateBasic).
func NewMsgShit(id string, shitType uint8, amount uint16, informant sdk.AccAddress, comment string) MsgShit {
	return MsgShit{
		ID:        id,
		ShitType:  shitType,
		Amount:    amount,
		Informant: informant,
		Comment:   comment,
	}
}
// Route returns the module router key so the message is dispatched to this
// module's handler.
func (msg MsgShit) Route() string {
	return RouterKey
}
// Type returns the action name used for routing within the module handler
// and in emitted tags/events.
func (msg MsgShit) Type() string {
	return "shit"
}
// ValidateBasic performs stateless sanity checks on the message: a non-empty
// informant address and ID, Amount >= 100, and ShitType >= 1.
func (msg MsgShit) ValidateBasic() sdk.Error {
	if msg.Informant.Empty() {
		return sdk.ErrInvalidAddress(msg.Informant.String())
	}
	if len(msg.ID) == 0 {
		return sdk.ErrUnknownRequest("Must specify ID, ShitType.")
	}
	// BUG FIX: the check rejects amounts below 100 (100 itself is valid),
	// but the original message said "greater than 100", which wrongly
	// implied 100 is rejected.
	if msg.Amount < 100 {
		return sdk.ErrUnknownRequest("Amount must be at least 100.")
	}
	if msg.ShitType < 1 {
		return sdk.ErrUnknownRequest("ShitType must be greater than 0.")
	}
	return nil
}
// GetSignBytes returns the canonical sorted-JSON encoding of the message
// that the informant signs. Panics only on a marshalling failure, which
// would indicate a programming error in the message definition.
func (msg MsgShit) GetSignBytes() []byte {
	b, err := json.Marshal(msg)
	if err != nil {
		panic(err)
	}
	return sdk.MustSortJSON(b)
}
// GetSigners reports who must sign the message: only the informant.
func (msg MsgShit) GetSigners() []sdk.AccAddress {
	return []sdk.AccAddress{msg.Informant}
}
/*
	MsgSorry
*/

// MsgSorry is an apology message referencing a prior report: the informant
// addresses the opponent about a given type/amount.
type MsgSorry struct {
	ID string                // unique identifier
	ShitType uint8           // category being apologized for
	Amount uint16            // amount being apologized for
	Informant sdk.AccAddress // signer issuing the apology
	Opponent sdk.AccAddress  // account the apology is addressed to
	Comment string           // free-form remark
}
// NewMsgSorry constructs a MsgSorry from its field values.
//
// BUG FIX: the original signature (id, owner, nonce) did not match the
// MsgSorry fields and the body referenced undeclared identifiers
// (informant, opponent, comment), so it could not compile. The constructor
// now takes one argument per field, mirroring NewMsgShit.
func NewMsgSorry(id string, shitType uint8, amount uint16, informant sdk.AccAddress, opponent sdk.AccAddress, comment string) MsgSorry {
	return MsgSorry{
		ID:        id,
		ShitType:  shitType,
		Amount:    amount,
		Informant: informant,
		Opponent:  opponent,
		Comment:   comment,
	}
}
// Route -
func (msg MsgDeployNonce) Route() string {
return "rand"
}
// Type -
func (msg MsgDeployNonce) Type() string {
return "deploy_round"
}
// ValidateBasic -
func (msg MsgDeployNonce) ValidateBasic() sdk.Error {
if msg.Owner.Empty() {
return sdk.ErrInvalidAddress(msg.Owner.String())
}
if len(msg.ID) == 0 {
return sdk.ErrUnknownRequest("Must specify ID, NonceHash.")
}
return nil
}
// GetSignBytes -
func (msg MsgDeployNonce) GetSignBytes() []byte {
b, err := json.Marshal(msg)
if err != nil {
panic(err)
}
return sdk.MustSortJSON(b)
}
// GetSigners -
func (msg MsgDeployNonce) GetSigners() []sdk.AccAddress {
return []sdk.AccAddress{msg.Owner}
}
/*
	AddTargets
*/

// MsgAddTargets appends a list of target identifiers, signed by Owner.
type MsgAddTargets struct {
	ID string            // identifier of the collection the targets belong to
	Owner sdk.AccAddress // account that signs and submits the message
	Targets []string     // target identifiers to add; must be non-nil
}

// NewMsgAddTargets constructs a MsgAddTargets; no validation here.
func NewMsgAddTargets(id string, owner sdk.AccAddress, targets []string) MsgAddTargets {
	return MsgAddTargets{
		ID:      id,
		Owner:   owner,
		Targets: targets,
	}
}

// Route returns the routing key; this message is handled by the "rand" module.
func (msg MsgAddTargets) Route() string {
	return "rand"
}

// Type returns the action name used in routing and tags/events.
func (msg MsgAddTargets) Type() string {
	return "add_targets"
}

// ValidateBasic performs stateless checks for adding targets (a population
// set): owner address, ID, and targets must all be present.
func (msg MsgAddTargets) ValidateBasic() sdk.Error {
	if msg.Owner.Empty() {
		return sdk.ErrInvalidAddress(msg.Owner.String())
	}
	if len(msg.ID) == 0 {
		return sdk.ErrUnknownRequest("Must specify ID.")
	}
	if msg.Targets == nil {
		return sdk.ErrUnknownRequest("Must specify Targets.")
	}
	return nil
}

// GetSignBytes returns the canonical sorted-JSON bytes the owner signs.
func (msg MsgAddTargets) GetSignBytes() []byte {
	b, err := json.Marshal(msg)
	if err != nil {
		panic(err)
	}
	return sdk.MustSortJSON(b)
}

// GetSigners reports who must sign the message: only the owner.
func (msg MsgAddTargets) GetSigners() []sdk.AccAddress {
	return []sdk.AccAddress{msg.Owner}
}
/*
	MsgUpdateTargets
*/

// MsgUpdateTargets replaces the target identifiers, signed by Owner.
type MsgUpdateTargets struct {
	ID string            // identifier of the collection the targets belong to
	Owner sdk.AccAddress // account that signs and submits the message
	Targets []string     // replacement target identifiers; must be non-nil
}

// NewMsgUpdateTargets constructs a MsgUpdateTargets; no validation here.
func NewMsgUpdateTargets(id string, owner sdk.AccAddress, targets []string) MsgUpdateTargets {
	return MsgUpdateTargets{
		ID:      id,
		Owner:   owner,
		Targets: targets,
	}
}

// Route returns the routing key; this message is handled by the "rand" module.
func (msg MsgUpdateTargets) Route() string {
	return "rand"
}

// Type returns the action name used in routing and tags/events.
func (msg MsgUpdateTargets) Type() string {
	return "update_targets"
}

// ValidateBasic performs stateless checks: owner address, ID, and targets
// must all be present.
func (msg MsgUpdateTargets) ValidateBasic() sdk.Error {
	if msg.Owner.Empty() {
		return sdk.ErrInvalidAddress(msg.Owner.String())
	}
	if len(msg.ID) == 0 {
		return sdk.ErrUnknownRequest("Must specify ID.")
	}
	if msg.Targets == nil {
		return sdk.ErrUnknownRequest("Must specify Targets.")
	}
	return nil
}

// GetSignBytes returns the canonical sorted-JSON bytes the owner signs.
func (msg MsgUpdateTargets) GetSignBytes() []byte {
	b, err := json.Marshal(msg)
	if err != nil {
		panic(err)
	}
	return sdk.MustSortJSON(b)
}

// GetSigners reports who must sign the message: only the owner.
func (msg MsgUpdateTargets) GetSigners() []sdk.AccAddress {
	return []sdk.AccAddress{msg.Owner}
}
|
<filename>src/main/java/vectorwing/farmersdelight/client/model/package-info.java
@ParametersAreNonnullByDefault
@MethodsReturnNonnullByDefault
package vectorwing.farmersdelight.client.model;
import net.minecraft.MethodsReturnNonnullByDefault;
import javax.annotation.ParametersAreNonnullByDefault;
|
package com.acmvit.acm_app.model;
/**
 * Plain mutable model object describing a request: an icon resource/URL,
 * the requester's display name, and the project it refers to.
 */
public class Request {
    private String icon;        // icon identifier (NOTE(review): URL vs. resource name — confirm with callers)
    private String name;        // requester's display name
    private String projectName; // project the request targets

    public Request(String icon, String name, String projectName) {
        this.icon = icon;
        this.name = name;
        this.projectName = projectName;
    }

    public String getIcon() {
        return icon;
    }

    public void setIcon(String icon) {
        this.icon = icon;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getProjectName() {
        return projectName;
    }

    public void setProjectName(String projectName) {
        this.projectName = projectName;
    }
}
|
#!/bin/bash
# Rebuild the phpoker development container from scratch and drop into a shell.

# Remove all local images first (docker rmi errors harmlessly if none exist).
docker images -q | xargs docker rmi
# Absolute directory of this script, so the build context does not depend on
# the caller's working directory. BUG FIX: $DIR was used unquoted below,
# which breaks when the path contains spaces.
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
docker build -t phpoker_image "$DIR"
docker rm -f phpoker_container
docker run -d -v "$DIR/../":/home/ranpafin/projects --name phpoker_container -ti phpoker_image bash
# Give the container a moment to come up before exec'ing into it.
sleep 1
docker exec -ti -u ranpafin phpoker_container zsh
|
<filename>src/includes/shared/shared_session.h
#ifndef SHARED_SESSION_H
#define SHARED_SESSION_H

#include <kore/kore.h>

/* Session/authentication helpers shared across request handlers.
 * Each returns an int status; the auth_user/auth_admin variants also
 * receive the raw session cookie to validate.
 * NOTE(review): return-value semantics (KORE_RESULT_* vs. boolean) are not
 * visible from this header — confirm in the implementation. */

/* Gate for pages that require NO logged-in user (e.g. login form). */
int
auth_not_user(
	struct http_request *req);

/* Validate the cookie as a regular user session. */
int
auth_user(
	struct http_request *req,
	const char *cookie);

/* Validate the cookie as an administrator session. */
int
auth_admin(
	struct http_request *req,
	const char *cookie);

/* Tear down / invalidate the session bound to the request. */
int
auth_remove(
	struct http_request *req);

#endif
#!/bin/bash
# ARTWORK
# GET=1 (attempt to fetch Postgres database and Grafana database from the test server)
# AGET=1 (attempt to fetch 'All CNCF' Postgres database from the test server)
# INIT=1 (needs PG_PASS_RO, PG_PASS_TEAM, initialize from no postgres database state, creates postgres logs database and users)
# SKIPWWW=1 (skips Apache and SSL cert configuration, final result will be Grafana exposed on the server on its port (for example 3010) via HTTP)
# SKIPVARS=1 (if set it will skip final Postgres vars regeneration)
# CUSTGRAFPATH=1 (set this to use non-standard grafana instalation from ~/grafana.v5/)
# SETPASS=1 (should be set on a real first run to set main postgres password interactively, CANNOT be used without user interaction)
# Propagate pipeline failures and tee all output to run.log / errors.txt.
set -o pipefail
exec > >(tee run.log)
exec 2> >(tee errors.txt)
# Required credentials: PG_PASS always; RO/TEAM passwords only for INIT.
if [ -z "$PG_PASS" ]
then
  echo "$0: You need to set PG_PASS environment variable to run this script"
  exit 1
fi
if ( [ ! -z "$INIT" ] && ( [ -z "$PG_PASS_RO" ] || [ -z "$PG_PASS_TEAM" ] ) )
then
  echo "$0: You need to set PG_PASS_RO, PG_PASS_TEAM when using INIT"
  exit 2
fi
# Resolve Grafana install locations, preferring the custom ~/grafana.v5 tree.
if [ ! -z "$CUSTGRAFPATH" ]
then
  GRAF_USRSHARE="$HOME/grafana.v5/usr.share.grafana"
  GRAF_VARLIB="$HOME/grafana.v5/var.lib.grafana"
  GRAF_ETC="$HOME/grafana.v5/etc.grafana"
fi
if [ -z "$GRAF_USRSHARE" ]
then
  GRAF_USRSHARE="/usr/share/grafana"
fi
if [ -z "$GRAF_VARLIB" ]
then
  GRAF_VARLIB="/var/lib/grafana"
fi
if [ -z "$GRAF_ETC" ]
then
  GRAF_ETC="/etc/grafana"
fi
export GRAF_USRSHARE
export GRAF_VARLIB
export GRAF_ETC
if [ ! -z "$ONLY" ]
then
  export ONLY
fi
# NOTE(review): host is recomputed later before use; this assignment looks redundant.
host=`hostname`
# Release the deployment lock and work-in-progress marker on any exit.
function finish {
    sync_unlock.sh
    rm -f /tmp/deploy.wip 2>/dev/null
}
# Take the lock exactly once (TRAP guards against re-entry on nested runs).
if [ -z "$TRAP" ]
then
  sync_lock.sh || exit -1
  trap finish EXIT
  export TRAP=1
  > /tmp/deploy.wip
fi
. ./devel/all_projs.sh || exit 3
# Choose the database list: explicit ONLYDB, else per-environment defaults.
if [ -z "$ONLYDB" ]
then
  host=`hostname`
  if [ $host = "teststats.cncf.io" ]
  then
    alldb=`cat ./devel/all_test_dbs.txt`
  else
    alldb=`cat ./devel/all_prod_dbs.txt`
  fi
else
  alldb=$ONLYDB
fi
# Find the last database in the list that does not exist yet; downstream
# scripts use LASTDB to know where deployment previously stopped.
LASTDB=""
for db in $alldb
do
  exists=`./devel/db.sh psql postgres -tAc "select 1 from pg_database where datname = '$db'"` || exit 4
  if [ ! "$exists" = "1" ]
  then
    LASTDB=$db
  fi
done
export LASTDB
echo "Last missing DB is $LASTDB"
if [ ! -z "$INIT" ]
then
  ./devel/init_database.sh || exit 5
fi
# Deploy the combined CNCF-vs-OpenStack contributions project.
PROJ=contrib PROJDB=contrib PROJREPO="-" ORGNAME="CNCF vs OpenStack" PORT=3253 ICON=cncf GRAFSUFF=contrib GA="-" ./devel/deploy_proj.sh || exit 6
if [ -z "$SKIPWWW" ]
then
  CERT=1 WWW=1 ./devel/create_www.sh || exit 7
fi
if [ -z "$SKIPVARS" ]
then
  ./devel/vars_all.sh || exit 8
fi
echo "$0: All deployments finished"
|
<reponame>NaKolenke/kolenka-backend
import datetime
import pytest
from src.model.models import User, Feedback
from src.trello import Trello
@pytest.fixture
def feedback():
    # Seed one user with a single feedback entry, then close the DB handle
    # so the app under test can reopen it.
    user = User.create(
        username="feedback_user",
        password="0x:<PASSWORD>",
        email="asd",
        registration_date=datetime.datetime.now(),
        last_active_date=datetime.datetime.now(),
        name="name",
        birthday=datetime.date.today(),
        about="",
        avatar=None,
    )
    Feedback.create(text="some text", user=user)

    from src.model import db

    db.db_wrapper.database.close()


def test_leave_feedback(client, user_token, mocker):
    # Posting feedback stores it and forwards the text to Trello.
    mocker.patch.object(Trello, "create_card")
    Trello.create_card.return_value = True
    rv = client.post(
        "/feedback/",
        json={"text": "some text"},
        headers={"authorization": user_token[1].token},
    )
    assert rv.status_code == 200
    assert rv.json["success"] == 1

    assert Feedback.select().count() == 1

    Trello.create_card.assert_called_with("some text")


def test_leave_feedback_no_text(client, user_token):
    # Missing "text" field -> validation error (code 5), nothing stored.
    rv = client.post(
        "/feedback/", json={}, headers={"authorization": user_token[1].token}
    )
    assert rv.status_code == 400
    assert rv.json["success"] == 0
    assert rv.json["error"]["code"] == 5

    assert Feedback.select().count() == 0


def test_leave_feedback_no_auth(client, user_token):
    # No auth header -> 401 (code 1), nothing stored.
    rv = client.post("/feedback/", json={"text": "some text"})
    assert rv.status_code == 401
    assert rv.json["success"] == 0
    assert rv.json["error"]["code"] == 1

    assert Feedback.select().count() == 0


def test_get_feedback_not_admin(client, user_token, feedback):
    # Regular users may not list or resolve feedback (403, code 3).
    rv = client.get("/feedback/", headers={"authorization": user_token[1].token})
    assert rv.status_code == 403
    assert rv.json["success"] == 0
    assert rv.json["error"]["code"] == 3
    assert "feedback" not in rv.json

    rv = client.get("/feedback/1/", headers={"authorization": user_token[1].token})
    assert rv.status_code == 403
    assert rv.json["success"] == 0
    assert rv.json["error"]["code"] == 3


def test_get_feedback(client, admin_token, feedback):
    # Admin lists feedback, resolves item 1 (GET /feedback/1/ marks it
    # resolved — confirmed by the second listing below).
    rv = client.get("/feedback/", headers={"authorization": admin_token[1].token})
    assert rv.status_code == 200
    assert rv.json["success"] == 1
    assert len(rv.json["feedback"]) == 1
    assert rv.json["feedback"][0]["text"] == "some text"
    assert not rv.json["feedback"][0]["is_resolved"]

    rv = client.get("/feedback/1/", headers={"authorization": admin_token[1].token})
    assert rv.status_code == 200
    assert rv.json["success"] == 1

    rv = client.get("/feedback/", headers={"authorization": admin_token[1].token})
    assert rv.status_code == 200
    assert rv.json["success"] == 1
    assert len(rv.json["feedback"]) == 1
    assert rv.json["feedback"][0]["text"] == "some text"
    assert rv.json["feedback"][0]["is_resolved"]
|
<filename>src/components/post-list/index.tsx
import * as React from "react";
import {useState, useEffect} from 'react';
import {Pagination} from 'antd';
import { NavLink } from "react-router-dom";
import PostHeadline from '../post-headline';
import * as listImg from '../../assets/images/2.jpg';
function PostList() {
const [currentPage,updateCurrentPage] = useState(1);
const [itemPerpage,updatePerpage] = useState(10);
const[postLists] = useState(
{
"total": 11,
"items": [
{
"postId": 1,
"title": "从头了解光刻机",
"content": "光刻是集成电路最重要的加工工艺,他的作用,如同金工车间中车床的作用。在整个芯片制造工艺中,几乎每个工艺的实施,都离不开光刻的技术。光刻也是制造芯片的最关键技术,他占芯片制造成本的35%以上。在如今的科技与社会发展中,光刻技术的增长,直接关系到大型计算机的运作等高科技领域。<p>测试渲染HTML标签。</p>",
"postTime": "2018-05-17 10:44",
"userName": "大漠穷秋",
"userId": "1",
"readTimes": "10000",
"commentTimes": "10000",
"likedTimes": "5555",
"isfamous": "true"
},
{
"postId": 2,
"title": "从头了解光刻机",
"content": "光刻是集成电路最重要的加工工艺,他的作用,如同金工车间中车床的作用。在整个芯片制造工艺中,几乎每个工艺的实施,都离不开光刻的技术。光刻也是制造芯片的最关键技术,他占芯片制造成本的35%以上。在如今的科技与社会发展中,光刻技术的增长,直接关系到大型计算机的运作等高科技领域。",
"postTime": "2018-05-17 10:44",
"userName": "大漠穷秋",
"userId": "1",
"readTimes": "10000",
"commentTimes": "10000",
"likedTimes": "5555",
"isfamous": "true"
},
{
"postId": 3,
"title": "从头了解光刻机",
"content": "光刻是集成电路最重要的加工工艺,他的作用,如同金工车间中车床的作用。在整个芯片制造工艺中,几乎每个工艺的实施,都离不开光刻的技术。光刻也是制造芯片的最关键技术,他占芯片制造成本的35%以上。在如今的科技与社会发展中,光刻技术的增长,直接关系到大型计算机的运作等高科技领域。",
"postTime": "2018-05-17 10:44",
"userName": "大漠穷秋",
"userId": "1",
"readTimes": "10000",
"commentTimes": "10000",
"likedTimes": "5555",
"isfamous": "true"
},
{
"postId": 4,
"title": "从头了解光刻机",
"content": "光刻是集成电路最重要的加工工艺,他的作用,如同金工车间中车床的作用。在整个芯片制造工艺中,几乎每个工艺的实施,都离不开光刻的技术。光刻也是制造芯片的最关键技术,他占芯片制造成本的35%以上。在如今的科技与社会发展中,光刻技术的增长,直接关系到大型计算机的运作等高科技领域。",
"postTime": "2018-05-17 10:44",
"userName": "大漠穷秋",
"userId": "1",
"readTimes": "10000",
"commentTimes": "10000",
"likedTimes": "5555",
"isfamous": "true"
},
{
"postId": 5,
"title": "从头了解光刻机",
"content": "光刻是集成电路最重要的加工工艺,他的作用,如同金工车间中车床的作用。在整个芯片制造工艺中,几乎每个工艺的实施,都离不开光刻的技术。光刻也是制造芯片的最关键技术,他占芯片制造成本的35%以上。在如今的科技与社会发展中,光刻技术的增长,直接关系到大型计算机的运作等高科技领域。",
"postTime": "2018-05-17 10:44",
"userName": "大漠穷秋",
"userId": "1",
"readTimes": "10000",
"commentTimes": "10000",
"likedTimes": "5555",
"isfamous": "true"
},
{
"postId": 6,
"title": "从头了解光刻机",
"content": "光刻是集成电路最重要的加工工艺,他的作用,如同金工车间中车床的作用。在整个芯片制造工艺中,几乎每个工艺的实施,都离不开光刻的技术。光刻也是制造芯片的最关键技术,他占芯片制造成本的35%以上。在如今的科技与社会发展中,光刻技术的增长,直接关系到大型计算机的运作等高科技领域。",
"postTime": "2018-05-17 10:44",
"userName": "大漠穷秋",
"userId": "1",
"readTimes": "10000",
"commentTimes": "10000",
"likedTimes": "5555",
"isfamous": "true"
},
{
"postId": 7,
"title": "从头了解光刻机",
"content": "光刻是集成电路最重要的加工工艺,他的作用,如同金工车间中车床的作用。在整个芯片制造工艺中,几乎每个工艺的实施,都离不开光刻的技术。光刻也是制造芯片的最关键技术,他占芯片制造成本的35%以上。在如今的科技与社会发展中,光刻技术的增长,直接关系到大型计算机的运作等高科技领域。",
"postTime": "2018-05-17 10:44",
"userName": "大漠穷秋",
"userId": "1",
"readTimes": "10000",
"commentTimes": "10000",
"likedTimes": "5555",
"isfamous": "true"
},
{
"postId": 8,
"title": "从头了解光刻机",
"content": "光刻是集成电路最重要的加工工艺,他的作用,如同金工车间中车床的作用。在整个芯片制造工艺中,几乎每个工艺的实施,都离不开光刻的技术。光刻也是制造芯片的最关键技术,他占芯片制造成本的35%以上。在如今的科技与社会发展中,光刻技术的增长,直接关系到大型计算机的运作等高科技领域。",
"postTime": "2018-05-17 10:44",
"userName": "大漠穷秋",
"userId": "1",
"readTimes": "10000",
"commentTimes": "10000",
"likedTimes": "5555",
"isfamous": "true"
},
{
"postId": 9,
"title": "从头了解光刻机",
"content": "光刻是集成电路最重要的加工工艺,他的作用,如同金工车间中车床的作用。在整个芯片制造工艺中,几乎每个工艺的实施,都离不开光刻的技术。光刻也是制造芯片的最关键技术,他占芯片制造成本的35%以上。在如今的科技与社会发展中,光刻技术的增长,直接关系到大型计算机的运作等高科技领域。",
"postTime": "2018-05-17 10:44",
"userName": "大漠穷秋",
"userId": "1",
"readTimes": "10000",
"commentTimes": "10000",
"likedTimes": "5555",
"isfamous": "true"
},
{
"postId": 10,
"title": "从头了解光刻机",
"content": "光刻是集成电路最重要的加工工艺,他的作用,如同金工车间中车床的作用。在整个芯片制造工艺中,几乎每个工艺的实施,都离不开光刻的技术。光刻也是制造芯片的最关键技术,他占芯片制造成本的35%以上。在如今的科技与社会发展中,光刻技术的增长,直接关系到大型计算机的运作等高科技领域。",
"postTime": "2018-05-17 10:44",
"userName": "大漠穷秋",
"userId": "1",
"readTimes": "10000",
"commentTimes": "10000",
"likedTimes": "5555",
"isfamous": "true"
},
{
"postId": 11,
"title": "从头了解光刻机",
"content": "光刻是集成电路最重要的加工工艺,他的作用,如同金工车间中车床的作用。在整个芯片制造工艺中,几乎每个工艺的实施,都离不开光刻的技术。光刻也是制造芯片的最关键技术,他占芯片制造成本的35%以上。在如今的科技与社会发展中,光刻技术的增长,直接关系到大型计算机的运作等高科技领域。",
"postTime": "2018-05-17 10:44",
"userName": "大漠穷秋",
"userId": "1",
"readTimes": "10000",
"commentTimes": "10000",
"likedTimes": "5555",
"isfamous": "true"
}
]
}
);
const [data,updateData] = useState([] as any);
  // Pagination page-change handler: load the slice for the new page.
  function onChange(page:any){
    loadData(page);
  }
  // Page-size-change handler: reload the current page with the new size.
  function ShowSizeChange(current:any,pageSize:any){
    loadData(current,pageSize);
  }
function loadData(page:any = 1,itemPerpage:any = 10){
const offset = (page-1)*10;
const end = page*itemPerpage;
const data = postLists.items.slice(offset,end>postLists.total?postLists.total:end);
updateData(data);
}
useEffect(()=>{
loadData(currentPage,itemPerpage);
},[])
return (
<div className="post-list-container">
<div className="row">
<div className="col-md-12">
<PostHeadline/>
</div>
<div className="col-md-12">
{
data.map((list:any,index:number)=>{
return (
<div className="post-item-container mt-16px" key={index}>
<div className="row">
<div className="col-md-2">
<img src={listImg} alt="..." className="img-thumbnail"/>
</div>
<div className="col-md-10 post-item-text-container sm-mt-16px">
<h3 className="font-size-18">
<NavLink to={`/post/post-detail/${list.postId}`}>
{list.title}
</NavLink>
</h3>
<div className="user-name-intitle">
<div className="row">
<div className="col-md-4 col-lg-3 ">
<span className="fa fa-user"></span>
<span className="ml-5px">{list.userName}</span>
</div>
<div className="col-md-6 col-lg-5">
<span className="fa fa-clock-o"></span>
<span className="ml-5px">{list.postTime}</span>
</div>
</div>
</div>
<div className="abs">{list.content}</div>
</div>
</div>
</div>
)
})
}
</div>
</div>
<div className="mt-16px">
<Pagination size="" total={postLists.total} showSizeChanger showQuickJumper onChange={onChange} onShowSizeChange={ShowSizeChange}/>
</div>
</div>
);
}
export default PostList;
|
# -----------------------------------------------------------------------------
#
# Package : for-in
# Version : 1.0.2
# Source repo : https://github.com/jonschlinkert/for-in
# Tested on : RHEL 8.3
# Script License: Apache License, Version 2 or later
# Maintainer : BulkPackageSearch Automation <sethp@us.ibm.com>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
PACKAGE_NAME=for-in
PACKAGE_VERSION=1.0.2
PACKAGE_URL=https://github.com/jonschlinkert/for-in
# Install toolchain, Node.js, and test helpers for the ppc64le build host.
yum -y update && yum install -y yum-utils nodejs nodejs-devel nodejs-packaging npm python38 python38-devel ncurses git gcc gcc-c++ libffi libffi-devel ncurses git jq make cmake
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/appstream/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/baseos/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/7Server/ppc64le/optional/
yum install -y firefox liberation-fonts xdg-utils && npm install n -g && n latest && npm install -g npm@latest && export PATH="$PATH" && npm install --global yarn grunt-bump xo testem acorn

# Pretty OS name for the version-tracker report lines.
OS_NAME=`python3 -c "os_file_data=open('/etc/os-release').readlines();os_info = [i.replace('PRETTY_NAME=','').strip() for i in os_file_data if i.startswith('PRETTY_NAME')];print(os_info[0])"`

HOME_DIR=`pwd`

if ! git clone $PACKAGE_URL $PACKAGE_NAME; then
        echo "------------------$PACKAGE_NAME:clone_fails---------------------------------------"
        echo "$PACKAGE_URL $PACKAGE_NAME" > /home/tester/output/clone_fails
        echo "$PACKAGE_NAME  |  $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail |  Clone_Fails" > /home/tester/output/version_tracker
        exit 0
fi

cd $HOME_DIR/$PACKAGE_NAME
git checkout $PACKAGE_VERSION
# Re-read the version actually checked out from package.json.
PACKAGE_VERSION=$(jq -r ".version" package.json)

# run the test command from test.sh
# NOTE(review): `! npm install && npm audit fix ...` negates only `npm
# install` — the audit-fix steps run only when the install FAILS. Probably
# intended as `! (npm install && ...)`; left unchanged pending confirmation.
if ! npm install && npm audit fix && npm audit fix --force; then
        echo "------------------$PACKAGE_NAME:install_fails-------------------------------------"
        echo "$PACKAGE_URL $PACKAGE_NAME"
        echo "$PACKAGE_NAME  |  $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail |  Install_Fails"
        exit 0
fi

cd $HOME_DIR/$PACKAGE_NAME
if ! npm test; then
        echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
        echo "$PACKAGE_URL $PACKAGE_NAME"
        echo "$PACKAGE_NAME  |  $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail |  Install_success_but_test_Fails"
        exit 0
else
        echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
        echo "$PACKAGE_URL $PACKAGE_NAME"
        echo "$PACKAGE_NAME  |  $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass |  Both_Install_and_Test_Success"
        exit 0
fi
#!/bin/bash
#
# Author: bz0qyz
# Repository: https://github.com/bz0qyz/watch
app_name='watch'
# Set the default wait time (seconds between command executions)
WAIT=5
LOOP=0

# ANSI color table used for the header/footer decoration.
txtnone='\033[0m' # no color
txtblk='\e[0;30m' # Black - Regular
txtred='\033[0;31m' # Red
txtgrn='\033[0;32m' # Green
txtylw='\033[0;33m' # Yellow
txtblu='\033[0;34m' # Blue

# Print usage and exit with status 2.
function showusage(){
  echo -e "Usage: \n$0 -n wait <command>"
  echo "Defaults:"
  exit 2
}

## Use getopt to read command agguments
args=`getopt n: $*`
if [ $? != 0 ]; then
  showusage
fi
set -- $args
# Process command arguments: -n sets WAIT; everything else (except the
# "--" separator and the WAIT value itself) is accumulated into CMD.
for i
do
  case "$i" in
    -n)
      WAIT="$2"; shift
      shift;;
    *)
       [ "$i" != "--" ] && [ "$i" != "$WAIT" ] && CMD="$CMD $i"
       shift;;
    --)
      shift; break;;
  esac
done

# On Ctrl-C, report how many iterations ran, then exit cleanly.
trap ctrl_c INT
function ctrl_c() {
  echo -e "\n${txtylw}executed $LOOP times${txtnone}" && exit 0
}

# Main Loop ([ 0 ] is always true: any non-empty string tests true)
while [ 0 ]; do
  clear
  columns="$(tput cols)"
  header="[ ${app_name} - refresh every ${WAIT} seconds ]"
  # Get the length of the padding on each side of the header text
  padlen=$(( $((${columns} / 2)) - $((${#header} /2)) ))
  # print the header text with padded characters
  printf "${txtgrn}"
  head -z -c $padlen < /dev/zero | tr '\0' '\52'
  printf "%s" "${header}"
  head -z -c $padlen < /dev/zero | tr '\0' '\52'
  printf "${txtnone}\n"

  # execute the command that is being watched
  $CMD
  ct=$WAIT
  # print the footer
  printf "${txtgrn}"
  head -z -c $columns < /dev/zero | tr '\0' '\52'
  printf "${txtnone}\n"
  # print a footer countdown to the next command execution
  while [ $ct -gt 0 ]; do
    sleep 1 &
    printf "\r${txtred}[ %02d:%02d ]${txtnone}${txtgrn} < ctrl-c to exit > ${txtnone}" $(((ct/60)%60)) $((ct%60))
    ct=$(( $ct - 1 ))
    # Waiting on the backgrounded sleep keeps the countdown interruptible.
    wait
  done
  LOOP=$(( $LOOP + 1 ))
done
|
# Emit the fixed marker string to stdout.
message = '1oveadela'
print(message)
|
<filename>src/main/java/com/netcracker/ncstore/exception/OrderServiceOrderPaymentException.java
package com.netcracker.ncstore.exception;
/**
 * Thrown when it is impossible to pay an order (unchecked).
 */
public class OrderServiceOrderPaymentException extends RuntimeException {
    /** Creates the exception without a detail message. */
    public OrderServiceOrderPaymentException() {
    }

    /** Creates the exception with a detail message. */
    public OrderServiceOrderPaymentException(String message) {
        super(message);
    }

    /** Creates the exception with a detail message and an underlying cause. */
    public OrderServiceOrderPaymentException(String message, Throwable cause) {
        super(message, cause);
    }
}
|
#!/bin/bash
# Archive every immediate subdirectory into a sibling <name>.7z.
for folder in */
do
    # BUG FIX: the expansion was unquoted, so directory names containing
    # spaces were split into multiple 7z arguments. Quoting the parameter
    # expansion (brace expansion still applies outside the quotes) yields
    # exactly: 7z a "<name>.7z" "<name>".
    7z a "${folder%/}"{.7z,}
done
|
import math


def sieveOfEratosthenes(n):
    """Return all primes <= n, ascending, via the Sieve of Eratosthenes.

    Args:
        n: Inclusive upper bound. Values below 2 yield an empty list.

    Returns:
        List of prime numbers up to and including n.
    """
    if n < 2:
        # Guard: the original indexed prime[0] and prime[1] unconditionally,
        # which raised IndexError for n == 0 (and negative n).
        return []
    prime = [True] * (n + 1)
    prime[0] = prime[1] = False
    p = 2
    while p * p <= n:
        if prime[p]:
            # Start striking at p*p: smaller multiples were already removed
            # by smaller prime factors (micro-optimization over p*2).
            for i in range(p * p, n + 1, p):
                prime[i] = False
        p += 1
    return [p for p in range(n + 1) if prime[p]]


n = 25
print(sieveOfEratosthenes(n))
const express = require("express");
const router = express.Router();
require("dotenv").config();
const Aes = require("../script/aes");

// Single shared AES helper, keyed by APP_SECRET from the environment.
let aes = new Aes();

// POST /encrypt — body: { text }. Responds with { encryptedText }.
router.post("/encrypt", (req, res) => {
  const { text } = req.body;
  // NOTE(review): logging the request body prints the plaintext being
  // encrypted — consider removing in production.
  console.log(req.body);
  let encryptedText = aes.encrypt(process.env.APP_SECRET, text);
  res.json({
    encryptedText,
  });
});

// POST /decrypt — body: { encryptedText }. Responds with { decryptedText }.
router.post("/decrypt", (req, res) => {
  const { encryptedText } = req.body;
  console.log(req.body);
  let decryptedText = aes.decrypt(process.env.APP_SECRET, encryptedText);
  res.json({
    decryptedText,
  });
});

module.exports = router;
|
package com.example.damio.imaginarycityguide.fragments;
import android.animation.ObjectAnimator;
import android.animation.StateListAnimator;
import android.content.Context;
import android.net.Uri;
import android.os.Build;
import android.os.Bundle;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import android.support.design.widget.AppBarLayout;
import android.support.design.widget.TabLayout;
import android.support.v4.app.Fragment;
import android.support.v4.view.ViewPager;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import com.example.damio.imaginarycityguide.Page;
import com.example.damio.imaginarycityguide.R;
import com.example.damio.imaginarycityguide.TabPagerAdapter;
import java.util.ArrayList;
/**
 * Host fragment that displays four category pages (library, ocean, park,
 * restaurant) in a ViewPager with a linked TabLayout.
 */
public class MotherFragment extends Fragment {

    private TabPagerAdapter tabPagerAdapter;
    TabLayout tabLayout;
    ViewPager viewPager;
    // Page list is a field so it survives view re-creation within the
    // fragment instance (guarded against re-adding in seTabAdapter).
    ArrayList<Page> pages = new ArrayList<>();

    public MotherFragment() {
        // Required empty public constructor
    }

    @Override
    public View onCreateView(@NonNull LayoutInflater inflater, @Nullable ViewGroup container,
                             @Nullable Bundle savedInstanceState) {
        // Inflate the layout for this fragment
        View root = inflater.inflate(R.layout.fragment_mother, container, false);
        tabLayout = root.findViewById(R.id.tabs);
        viewPager = root.findViewById(R.id.pager);
        seTabAdapter();
        return root;
    }

    /** Builds the page list (once) and wires pager, adapter, and tabs together. */
    private void seTabAdapter() {
        final LibraryFragment libraryfragment = new LibraryFragment();
        final OceanFragment oceanfragment = new OceanFragment();
        final ParkFragment parkfragment = new ParkFragment();
        final ResturantFragment resturantfragment = new ResturantFragment();
        if (pages.size() <= 0) {
            pages.add(new Page(libraryfragment, getResources().getString(R.string.library)));
            pages.add(new Page(oceanfragment, getResources().getString(R.string.ocean)));
            pages.add(new Page(parkfragment, getResources().getString(R.string.park)));
            pages.add(new Page(resturantfragment, getResources().getString(R.string.restaurant)));
        }
        // Child fragment manager: the pages are nested inside this fragment.
        tabPagerAdapter = new TabPagerAdapter(getChildFragmentManager());
        tabPagerAdapter.setFragments(pages);
        viewPager.setAdapter(tabPagerAdapter);
        tabLayout.setupWithViewPager(viewPager);
        // Listener is currently a no-op placeholder for page-change hooks.
        viewPager.addOnPageChangeListener(new ViewPager.OnPageChangeListener() {
            @Override
            public void onPageScrolled(int position, float positionOffset, int positionOffsetPixels) {
            }

            @Override
            public void onPageSelected(int position) {
            }

            @Override
            public void onPageScrollStateChanged(int state) {
            }
        });
    }
}
|
#!/bin/bash
# NER pipeline: escape the input, convert to NAF, tag with ixa-pipe-nerc,
# then post-process the CoNLL-02 output into full entity labels.
# NOTE(review): the sed replacement texts below look like HTML-entity
# escaping (&amp; &lt; &gt; &quot; &apos;) whose entity strings were lost in
# transcription — several commands replace a character with itself. Verify
# against the original repository before relying on them.
sed 's/\&/\&/g' $2 > tmp
sed --in-place='' 's/</\</g' tmp
sed --in-place='' 's/>/\>/g' tmp
sed --in-place='' 's/"/\"/g' tmp
sed --in-place='' "s/'/\'/g" tmp
# Wrap the escaped text into NAF format for the tagger.
python3 scripts/makeNAF.py tmp > tmp1
current_dir=$(pwd)
tmp1=$current_dir"/tmp1"
tmp2=$current_dir"/tmp2"
#echo $tmp1
cd ixa-pipe/nerc/
#head $tmp1
# Run the NERC model, emitting CoNLL-02 columns.
cat $tmp1 | java -jar target/ixa-pipe-nerc-2.0.0-exec.jar tag -m $current_dir/ixa-pipe/1000K.bin -o conll02 > $tmp2
cd - > /dev/null 2>&1
rm tmp1 tmp
# Keep token and tag columns only.
awk '{print $1 "\t" $4}' tmp2 > tmp3
rm tmp2
# Strip leading whitespace and expand the short tag names.
sed --in-place='' 's/^[ \t]*//' tmp3
sed --in-place='' 's/B-Per$/B-Person/g' tmp3
sed --in-place='' 's/I-Per$/I-Person/g' tmp3
sed --in-place='' 's/B-Loc$/B-Location/g' tmp3
sed --in-place='' 's/I-Loc$/I-Location/g' tmp3
sed --in-place='' 's/I-Org$/I-Organization/g' tmp3
sed 's/B-Org$/B-Organization/g' tmp3 > output/ixa-pipe
#sed -i 's/\&/\&/g' tmp3
#sed -i 's/\</</g' tmp3
#sed -i 's/\>/>/g' tmp3
#sed -i 's/\"/"/g' tmp3
#sed -i "s/\'/'/g" tmp3
#cat tmp3
rm tmp3
|
#!/bin/env bash
# Build orchestrator: run each build stage in order, aborting on the first
# failure (-e), echoing commands (-x), and failing on unset variables (-u).
set -eux
sh build-prepare.sh
sh build-kubernetes-tools.sh
sh build-calico-docs.sh
sh build-istio-io-weiki.sh
sh build-nginx-quick-http3.sh
sh build-ceph-io-wiki.sh
# Remove intermediate artifacts once every stage has succeeded.
sh clean.sh
|
# Parameter sweep for LRL0 depth inpainting over the MiddEval3 dataset.
# $1 = list of scene names to process.
A=/home/xuehongyang/inpainting/MiddEval3/MiddInpaint
B=$1
C=/home/xuehongyang/inpainting/LRL0_result
# Sweep grid: lambda_l0 values and K values; max iteration count is fixed.
Lambda=(30 40 50 60 70 80 90 100 110 120 130 140 150 160 170 180 190 200)
K=(3 5)
max=30
for var in ${B[@]}; do
    DISPPATH=$A/${var}/disp.png
    MISS=50
    MASK=$A/${var}/mask_${MISS}.png
    echo 'inpainting for' $B 'with mask' ${MASK}
    echo 'initialized with' ${A}/${var}/tnnr_${MISS}.png
    for lam in ${Lambda[@]}; do
        for k in ${K[@]}; do
            # One output directory per (lambda, K) parameter pair.
            paramDir=$C/${lam}_$k
            if [ ! -d "$paramDir" ]; then
                `mkdir ${paramDir}`
            fi
            if [ ! -d "${paramDir}/${var}" ]; then
                `mkdir ${paramDir}/${var}`
            fi
            echo 'lambda_l0 = ' ${lam} ', K = ' $k ', maxCnt = ' ${max}
            OUTPUT=${paramDir}/${var}/lrl0_${MISS}_
            # Run the inpainter, seeded with the TNNR initialization image.
            ./depthInpainting LRL0 ${DISPPATH} ${MASK} ${OUTPUT} ${A}/${var}/tnnr_${MISS}.png $k ${lam\
} ${max} ${paramDir}/${var}
        done
    done
done
|
<reponame>oueya1479/OpenOLAT
/**
* <a href="http://www.openolat.org">
* OpenOLAT - Online Learning and Training</a><br>
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); <br>
* you may not use this file except in compliance with the License.<br>
* You may obtain a copy of the License at the
* <a href="http://www.apache.org/licenses/LICENSE-2.0">Apache homepage</a>
* <p>
* Unless required by applicable law or agreed to in writing,<br>
* software distributed under the License is distributed on an "AS IS" BASIS, <br>
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <br>
* See the License for the specific language governing permissions and <br>
* limitations under the License.
* <p>
* Initial code contributed and copyrighted by<br>
* frentix GmbH, http://www.frentix.com
* <p>
*/
package org.olat.login.oauth.spi;
import java.io.IOException;
import java.util.concurrent.ExecutionException;
import org.apache.logging.log4j.Logger;
import org.json.JSONException;
import org.json.JSONObject;
import org.olat.core.logging.Tracing;
import org.olat.core.util.StringHelper;
import org.olat.login.oauth.OAuthLoginModule;
import org.olat.login.oauth.OAuthSPI;
import org.olat.login.oauth.model.OAuthUser;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import com.github.scribejava.apis.MicrosoftAzureActiveDirectory20Api;
import com.github.scribejava.apis.microsoftazureactivedirectory.BaseMicrosoftAzureActiveDirectoryApi;
import com.github.scribejava.core.builder.ServiceBuilder;
import com.github.scribejava.core.model.OAuth2AccessToken;
import com.github.scribejava.core.model.OAuthRequest;
import com.github.scribejava.core.model.Response;
import com.github.scribejava.core.model.Token;
import com.github.scribejava.core.model.Verb;
import com.github.scribejava.core.oauth.OAuth20Service;
import com.github.scribejava.core.oauth.OAuthService;
/**
*
* Initial date: 7 sept. 2020<br>
* @author srosse, <EMAIL>, http://www.frentix.com
*
*/
/**
 * OAuth login provider for Microsoft Azure AD using the OAuth 2.0
 * authorization code flow via ScribeJava. User attributes are read first
 * from the access token's JWT payload, then enriched from the Microsoft
 * Graph "me" endpoint.
 */
@Service
public class MicrosoftAzureADFSProvider implements OAuthSPI {

	private static final Logger log = Tracing.createLoggerFor(MicrosoftAzureADFSProvider.class);

	public static final String PROVIDER = "AZUREAD";

	// Names of the token / Graph attributes mapped onto OAuthUser fields;
	// each is overridable through an olat.properties key.
	@Value("${azure.adfs.attributename.useridentifyer:userPrincipalName}")
	private String idAttributeName;
	@Value("${azure.adfs.attributename.firstName:givenName}")
	private String firstNameAttributeName;
	@Value("${azure.adfs.attributename.lastName:surname}")
	private String lastNameAttributeName;
	@Value("${azure.adfs.attributename.email:mail}")
	private String emailAttributeName;
	@Value("${azure.adfs.attributename.institutionalUserIdentifier:userPrincipalName}")
	private String institutionalUserIdentifierAttributeName;
	@Value("${azure.adfs.attributename.institutionalName}")
	private String institutionalNameAttributeName;
	@Value("${azure.adfs.attributename.department}")
	private String departmentAttributeName;
	@Value("${azure.adfs.attributename.country}")
	private String countryAttributeName;

	@Autowired
	private OAuthLoginModule oauthModule;

	@Override
	public boolean isEnabled() {
		return oauthModule.isAzureAdfsEnabled();
	}

	@Override
	public boolean isRootEnabled() {
		return oauthModule.isAzureAdfsRootEnabled();
	}

	@Override
	public boolean isImplicitWorkflow() {
		return false;
	}

	/**
	 * Builds the ScribeJava OAuth2 service. A configured tenant selects a
	 * tenant-specific endpoint; otherwise the "common" multi-tenant
	 * endpoint is used.
	 */
	@Override
	public OAuthService getScribeProvider() {
		ServiceBuilder serviceBuilder = new ServiceBuilder(oauthModule.getAzureAdfsApiKey());
		if(StringHelper.containsNonWhitespace(oauthModule.getAzureAdfsApiSecret())) {
			serviceBuilder = serviceBuilder.apiSecret(oauthModule.getAzureAdfsApiSecret());
		}

		BaseMicrosoftAzureActiveDirectoryApi api;
		if(StringHelper.containsNonWhitespace(oauthModule.getAzureAdfsTenant())) {
			api = MicrosoftAzureActiveDirectory20Api.custom(oauthModule.getAzureAdfsTenant());
		} else {
			// common tenant
			api = MicrosoftAzureActiveDirectory20Api.instance();
		}
		return serviceBuilder
				.defaultScope("profile openid email User.Read")
				.callback(oauthModule.getCallbackUrl())
				.build(api);
	}

	@Override
	public String getName() {
		return "azureAdfs";
	}

	@Override
	public String getProviderName() {
		return PROVIDER;
	}

	@Override
	public String getIconCSS() {
		return "o_icon o_icon_provider_adfs";
	}

	/**
	 * Resolves the user from the JWT claims, then overlays attributes from
	 * the Microsoft Graph profile.
	 */
	@Override
	public OAuthUser getUser(OAuthService service, Token accessToken) {
		OAuthUser user = new OAuthUser();
		enrichUser(user, (OAuth2AccessToken)accessToken);
		enrichGraph(user, (OAuth20Service)service, (OAuth2AccessToken)accessToken);
		return user;
	}

	private void enrichUser(OAuthUser user, OAuth2AccessToken accessToken) {
		try {
			JSONWebToken jwt = JSONWebToken.parse(accessToken);
			applyAttributes(user, jwt.getJsonPayload());
		} catch (JSONException e) {
			log.error("", e);
		}
	}

	private void enrichGraph(OAuthUser user, OAuth20Service oauthService, OAuth2AccessToken accessToken) {
		try {
			OAuthRequest oauthRequest = new OAuthRequest(Verb.GET, "https://graph.microsoft.com/v1.0/me");
			oauthRequest.addHeader("x-li-format", "json");
			oauthRequest.addHeader("Accept-Language", "en-GB");
			oauthService.signRequest(accessToken, oauthRequest);
			Response oauthResponse = oauthService.execute(oauthRequest);
			String body = oauthResponse.getBody();
			applyAttributes(user, new JSONObject(body));
		} catch (JSONException | InterruptedException | ExecutionException | IOException e) {
			log.error("", e);
		}
	}

	/**
	 * Copies the configured attributes from the JSON object onto the user,
	 * keeping the user's current value when an attribute is absent or
	 * blank. Shared by the JWT and the Graph enrichment paths (the two
	 * previously duplicated this whole mapping).
	 *
	 * @param user the user to fill
	 * @param obj the JSON source (JWT payload or Graph profile)
	 */
	private void applyAttributes(OAuthUser user, JSONObject obj) {
		user.setId(getValue(obj, idAttributeName, user.getId()));
		user.setFirstName(getValue(obj, firstNameAttributeName, user.getFirstName()));
		user.setLastName(getValue(obj, lastNameAttributeName, user.getLastName()));
		user.setEmail(getValue(obj, emailAttributeName, user.getEmail()));
		user.setInstitutionalUserIdentifier(getValue(obj, institutionalUserIdentifierAttributeName, user.getInstitutionalUserIdentifier()));
		// Fall back to the institutional identifier when no id claim is set.
		if(!StringHelper.containsNonWhitespace(user.getId())) {
			user.setId(user.getInstitutionalUserIdentifier());
		}
		user.setInstitutionalName(getValue(obj, institutionalNameAttributeName, user.getInstitutionalName()));
		user.setDepartment(getValue(obj, departmentAttributeName, user.getDepartment()));
		user.setCountry(getValue(obj, countryAttributeName, user.getCountry()));
	}

	/**
	 * Returns the property's value from the JSON object, or currentValue
	 * when the property name is unset or the value is blank.
	 */
	private String getValue(JSONObject obj, String property, String currentValue) {
		if(StringHelper.containsNonWhitespace(property)) {
			String value = obj.optString(property);
			return StringHelper.containsNonWhitespace(value) ? value : currentValue;
		}
		return currentValue;
	}

	@Override
	public String getIssuerIdentifier() {
		return "https://login.microsoftonline.com";
	}
}
|
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
define([
  'app',
  'api',
  'addons/fauxton/navigation/actiontypes'
],

function (app, FauxtonAPI, ActionTypes) {
  var Stores = {};

  // Flux store for the navigation bar: top/bottom/footer link lists, the
  // currently active link, the server version and the sidebar state.
  Stores.NavBarStore = FauxtonAPI.Store.extend({
    initialize: function () {
      this.reset();
    },

    reset: function () {
      this.activeLink = null;
      this.version = null;
      this.navLinks = [];
      this.footerNavLinks = [];
      this.bottomNavLinks = [];
    },

    // Routes a link into the section its flags select; `top` links are
    // prepended so they render first in their section.
    addLink: function (link) {
      if (link.top && !link.bottomNav) {
        this.navLinks.unshift(link);
        return;
      }

      if (link.top && link.bottomNav) {
        this.bottomNavLinks.unshift(link);
        return;
      }

      if (link.bottomNav) {
        this.bottomNavLinks.push(link);
        return;
      }

      if (link.footerNav) {
        this.footerNavLinks.push(link);
        return;
      }

      this.navLinks.push(link);
    },

    // Removes the link with a matching id from its section, if present.
    removeLink: function (removeLink) {
      var links = this.getLinkSection(removeLink);

      // Locate the match's index directly. The previous implementation
      // kept incrementing its counter for non-matching entries *after*
      // the match as well, so it spliced out the wrong element whenever
      // the matching link was not the last one in the list.
      var index = -1;
      for (var i = 0; i < links.length; i++) {
        if (links[i].id === removeLink.id) {
          index = i;
          break;
        }
      }

      if (index === -1) { return; }

      links.splice(index, 1);
    },

    getNavLinks: function () {
      return this.navLinks;
    },

    getBottomNavLinks: function () {
      return this.bottomNavLinks;
    },

    getFooterNavLinks: function () {
      return this.footerNavLinks;
    },

    // Persists the flipped minimized flag; isMinimized() reads it back.
    toggleMenu: function () {
      app.utils.localStorageSet(FauxtonAPI.constants.LOCAL_STORAGE.SIDEBAR_MINIMIZED,
        !this.isMinimized());
    },

    // Returns the list (top, bottom or footer) the link belongs to.
    getLinkSection: function (link) {
      var links = this.navLinks;

      if (link.bottomNav) {
        links = this.bottomNavLinks;
      }

      if (link.footerNav) {
        links = this.footerNavLinks;
      }

      return links;
    },

    // Updates title/href of an existing link, matched by id.
    updateLink: function (link) {
      var oldLink;
      var links = this.getLinkSection(link);

      oldLink = _.find(links, function (oldLink) {
        return oldLink.id === link.id;
      });

      if (!oldLink) { return; }

      oldLink.title = link.title;
      oldLink.href = link.href;
    },

    getVersion: function () {
      return this.version;
    },

    setVersion: function (version) {
      this.version = version;
    },

    getActiveLink: function () {
      return this.activeLink;
    },

    setActiveLink: function (activeLink) {
      this.activeLink = activeLink;
    },

    // Sidebar minimized flag from localStorage; defaults to expanded.
    isMinimized: function () {
      var isMinimized = app.utils.localStorageGet(FauxtonAPI.constants.LOCAL_STORAGE.SIDEBAR_MINIMIZED);
      return (_.isUndefined(isMinimized)) ? false : isMinimized;
    },

    // Flux dispatch entry point; every handled action triggers a change
    // notification for subscribed views.
    dispatch: function (action) {
      switch (action.type) {
        case ActionTypes.ADD_NAVBAR_LINK:
          this.addLink(action.link);
        break;

        case ActionTypes.TOGGLE_NAVBAR_MENU:
          this.toggleMenu();
        break;

        case ActionTypes.UPDATE_NAVBAR_LINK:
          this.updateLink(action.link);
        break;

        case ActionTypes.CLEAR_NAVBAR_LINK:
          this.reset();
        break;

        case ActionTypes.REMOVE_NAVBAR_LINK:
          this.removeLink(action.link);
        break;

        case ActionTypes.NAVBAR_SET_VERSION_INFO:
          this.setVersion(action.version);
        break;

        case ActionTypes.NAVBAR_ACTIVE_LINK:
          this.setActiveLink(action.name);
        break;

        default:
        return;
        // do nothing
      }

      this.triggerChange();
    }
  });

  Stores.navBarStore = new Stores.NavBarStore();

  Stores.navBarStore.dispatchToken = FauxtonAPI.dispatcher.register(Stores.navBarStore.dispatch);

  return Stores;
});
|
class Person:
    """Simple record of a person's name, age and favourite colour."""

    def __init__(self, name, age, fav_color):
        """
        :param name: the person's name
        :param age: the person's age
        :param fav_color: the person's favourite colour
        """
        self.name = name
        self.age = age
        self.fav_color = fav_color

    def __repr__(self):
        # A readable repr makes instances debuggable; the default only
        # showed the class name and memory address.
        return f"Person(name={self.name!r}, age={self.age!r}, fav_color={self.fav_color!r})"


# Module-level sample instance, kept for parity with the original script.
p1 = Person("John", 25, "red")
def login_user(request, user):
    """
    Authenticate ``user`` against the password submitted in the request form.

    :param request: The request object; ``request.form`` is a mapping that
        may contain a ``'password'`` key.
    :param user: The user object with a ``password`` attribute. On success
        its ``is_authenticated`` flag is set to True.
    :return: The authenticated user object, or None when the password is
        missing or does not match.
    """
    import hmac

    if 'password' not in request.form:
        return None
    # compare_digest avoids leaking the match position through timing.
    # NOTE(review): user.password appears to be compared in plaintext — it
    # should be a salted hash (e.g. werkzeug.security); confirm upstream.
    if hmac.compare_digest(str(user.password), str(request.form['password'])):
        user.is_authenticated = True
        return user
    return None
@public.route('/logout')
def logout(home: str=None) -> str:
    """Log out the current session and redirect to home.

    :param home: URL to redirect to after logout success.
        NOTE(review): the route registers no URL argument, so ``home`` is
        always None here and ``redirect(None)`` looks suspect — confirm the
        intended redirect target.
    """
    flask_login.logout_user()
    return redirect(home)
package link.infra.demagnetize.blocks;
import net.minecraft.entity.player.PlayerEntity;
import net.minecraft.entity.player.PlayerInventory;
import net.minecraft.inventory.container.ClickType;
import net.minecraft.inventory.container.Container;
import net.minecraft.inventory.container.Slot;
import net.minecraft.item.ItemStack;
import net.minecraft.util.IWorldPosCallable;
import net.minecraft.util.math.BlockPos;
import net.minecraft.world.World;
import net.minecraftforge.items.IItemHandler;
import net.minecraftforge.items.SlotItemHandler;
import net.minecraftforge.items.wrapper.InvWrapper;
import javax.annotation.Nonnull;
import java.util.Objects;
/**
 * Container (server-side GUI logic) for the demagnetizer block. The filter
 * slots are "ghost" slots: they store a copy of the clicked item without
 * consuming it from the player.
 */
public class DemagnetizerContainer extends Container {

	// Tile entity backing this container, looked up from the opened position.
	final DemagnetizerTileEntity te;

	public DemagnetizerContainer(int windowId, World world, BlockPos pos, PlayerInventory playerInventory) {
		super(ModBlocks.DEMAGNETIZER_CONTAINER, windowId);
		this.te = (DemagnetizerTileEntity) world.getTileEntity(pos);
		// Registration order defines the slot indices: filter slots occupy
		// [0, getFilterSize()), then main inventory, then hotbar.
		addOwnSlots();
		addPlayerSlots(new InvWrapper(playerInventory));
	}

	// Adds the 3x9 main inventory grid followed by the 9-slot hotbar at the
	// standard GUI pixel coordinates.
	private void addPlayerSlots(IItemHandler playerInventory) {
		// Slots for the main inventory
		for (int row = 0; row < 3; ++row) {
			for (int col = 0; col < 9; ++col) {
				int x = 8 + col * 18;
				int y = row * 18 + 84;
				addSlot(new SlotItemHandler(playerInventory, col + row * 9 + 9, x, y));
			}
		}

		// Slots for the hotbar
		for (int row = 0; row < 9; ++row) {
			int x = 8 + row * 18;
			int y = 142;
			addSlot(new SlotItemHandler(playerInventory, row, x, y));
		}
	}

	// One filter slot per configured filter position, laid out in a row.
	private void addOwnSlots() {
		for (int i = 0; i < te.getFilterSize(); i++) {
			addSlot(new SlotItemHandler(te.itemStackHandler, i, 8 + (i * 18), 53));
		}
	}

	// Shift-click transfer is disabled: the filter slots only hold ghost
	// items, so there is nothing to move.
	@Nonnull
	@Override
	public ItemStack transferStackInSlot(@Nonnull PlayerEntity playerIn, int index) {
		return ItemStack.EMPTY;
	}

	@Override
	public boolean canInteractWith(@Nonnull PlayerEntity playerEntity) {
		World world = Objects.requireNonNull(te.getWorld());
		// Basic and advanced demagnetizers are distinct blocks, so the
		// usable-distance check must target the matching one.
		if (te.advanced) {
			return isWithinUsableDistance(IWorldPosCallable.of(world, te.getPos()), playerEntity, ModBlocks.DEMAGNETIZER_ADVANCED);
		} else {
			return isWithinUsableDistance(IWorldPosCallable.of(world, te.getPos()), playerEntity, ModBlocks.DEMAGNETIZER);
		}
	}

	// Ghost-slot behaviour for the filter slots: an empty hand clears the
	// slot; a held item stores a single-count copy without taking anything
	// from the player. Other slots keep vanilla click handling.
	@Nonnull
	@Override
	public ItemStack slotClick(int slotId, int dragType, @Nonnull ClickType clickTypeIn, @Nonnull PlayerEntity player) {
		if (slotId >= 0 && slotId < te.getFilterSize()) {
			Slot slot = this.inventorySlots.get(slotId);
			ItemStack heldStack = player.inventory.getItemStack();
			if (heldStack.isEmpty()) {
				slot.putStack(ItemStack.EMPTY);
			} else {
				ItemStack single = heldStack.copy();
				single.setCount(1);
				slot.putStack(single);
			}
			return ItemStack.EMPTY;
		}
		return super.slotClick(slotId, dragType, clickTypeIn, player);
	}
}
|
<filename>spec/file_handler_spec.rb<gh_stars>1-10
require_relative '../lib/file_handler.rb'
# Specs for FileHandler construction and file_reader argument checking.
describe FileHandler do
  # NOTE(review): `hash` is not defined anywhere in this spec, so any
  # example that evaluates new_class would raise — confirm the intended
  # fixture (e.g. an actual Hash literal).
  let(:new_class) { FileHandler.new(hash, 'file.json') }

  it 'Raises argument error when no arguments are given' do
    expect { FileHandler.new }.to raise_error(ArgumentError)
  end

  it 'Raises argument error when more than two arguments are given' do
    expect { FileHandler.new('a', 'b', 'c') }.to raise_error(ArgumentError)
  end

  # NOTE(review): in the two examples below, FileHandler.new itself raises
  # ArgumentError before file_reader is ever called, so file_reader's arity
  # is not actually exercised; the descriptions also duplicate the ones
  # above. Consider constructing via new_class and renaming the examples.
  it 'Raises argument error when no arguments are given' do
    expect { FileHandler.new.file_reader }.to raise_error(ArgumentError)
  end

  it 'Raises argument error when more than two arguments are given' do
    expect { FileHandler.new.file_reader('a', 'b', 'c') }.to raise_error(ArgumentError)
  end
end
|
class Config:
    """In-memory key/value store for application settings."""

    def __init__(self):
        # Backing mapping for all settings.
        self.settings = {}

    def add_setting(self, key, value):
        """Create or overwrite the setting named ``key``."""
        self.settings[key] = value

    def get_setting(self, key):
        """Return the value for ``key``, or None when absent."""
        return self.settings.get(key)

    def delete_setting(self, key):
        """Remove ``key`` if present; unknown keys are silently ignored."""
        # pop with a default replaces the explicit membership test + del.
        self.settings.pop(key, None)

    def get_all_settings(self):
        """Return the live settings dict (not a copy)."""
        return self.settings
<gh_stars>1-10
package binary_search;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.Arrays;
import java.util.StringTokenizer;
/**
*
* @author exponential-e
* 백준 12003번: Diamond Collector (silver)
*
* @see https://www.acmicpc.net/problem/12003/
*
*/
public class Boj12003 {

    public static void main(String[] args) throws Exception{
        // Input: N = number of diamonds, K = max size difference allowed
        // within one display case, followed by N diamond sizes.
        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
        StringTokenizer st = new StringTokenizer(br.readLine());

        int N = Integer.parseInt(st.nextToken());
        int K = Integer.parseInt(st.nextToken());
        int[] diamond = new int[N];

        for(int i = 0; i < N; i++) {
            diamond[i] = Integer.parseInt(br.readLine());
        }
        // After sorting, every valid case is a contiguous range of the
        // array, which the binary search below can measure.
        Arrays.sort(diamond);
        System.out.println(makeResult(N, K, diamond));
    }

    // Maximum total number of diamonds over two disjoint contiguous ranges,
    // where each range spans sizes within k of its smallest element.
    private static int makeResult(int n, int k, int[] arr) {
        // list[i] = size of the range starting at i: number of elements in
        // [i, upperBound(arr[i] + k)).
        int[] list = new int[n];
        for(int i = 0; i < n; i++){
            list[i] = binarySearch(0, n, arr[i] + k, arr) - i; // find inner
        }

        // max[i] = best single-range size over all start indices >= i.
        int[] max = new int[n];
        max[n - 1] = list[n - 1] > 0 ? list[n - 1]: 0;
        for(int i = n - 2; i >= 0; i--) {
            max[i] = Math.max(max[i + 1], list[i]);
        }

        // Pair the range starting at idx with the best range that begins
        // after it ends (index sum = list[idx] + idx).
        int result = 0;
        int sum, idx = 0;
        while(n > (sum = list[idx] + idx)) { // possibility
            result = Math.max(result, list[idx] + max[sum]);
            idx++;
        }
        return result;
    }

    // Upper bound: first index in [start, end) with arr[index] > target.
    private static int binarySearch(int start, int end, int target, int[] arr) {
        while(start < end) {
            int mid = (start + end) / 2;
            if(arr[mid] <= target) start = mid + 1;
            else end = mid;
        }
        return end;
    }
}
|
// Prints the author's handle to the console.
console.log("Shubham_siddhartha");
|
package sign
import (
"bytes"
"testing"
gossh "github.com/coreos/fleet/Godeps/_workspace/src/code.google.com/p/gosshnew/ssh"
"github.com/coreos/fleet/job"
"github.com/coreos/fleet/unit"
)
// TestSignJob verifies that SignJob produces exactly one signature whose
// blob matches a signature computed directly with the keyring, under the
// tag derived from the job name.
func TestSignJob(t *testing.T) {
	c, _ := initSign(t)
	u, err := unit.NewUnit("Echo")
	if err != nil {
		t.Fatalf("unexpected error creating new unit: %v", err)
	}
	j := job.NewJob("echo.service", *u)

	data, err := marshal(u)
	if err != nil {
		t.Fatal("marshal error:", err)
	}

	expectedSig, err := c.keyring.Sign(testPublicKeys["rsa"], data)
	if err != nil {
		t.Fatal("sign error:", err)
	}

	s, err := c.SignJob(j)
	if err != nil {
		t.Fatal("sign payload error:", err)
	}

	// The original logged err here, which is always nil at this point;
	// report the offending tag instead.
	if s.Tag != TagForJob("echo.service") {
		t.Fatalf("unexpected signature tag: %q", s.Tag)
	}

	if len(s.Signatures) != 1 {
		t.Fatal("expect 1 signature instead of", len(s.Signatures))
	}

	// bytes.Equal is the idiomatic equality check (staticcheck S1004).
	if !bytes.Equal(s.Signatures[0].Blob, expectedSig.Blob) {
		t.Fatal("wrong signature")
	}
}
// TestVerifyJob checks that a signature set produced for a job verifies
// successfully, and that verification fails for a mismatched tag and for
// a nil signature set.
func TestVerifyJob(t *testing.T) {
	c, v := initSign(t)
	u, err := unit.NewUnit("Echo")
	if err != nil {
		t.Fatalf("unexpected error creating new unit: %v", err)
	}
	j := job.NewJob("echo.service", *u)

	data, err := marshal(u)
	if err != nil {
		t.Fatal("marshal error:", err)
	}

	v.pubkeys = append(v.pubkeys, testPublicKeys["rsa"])
	signature, err := c.keyring.Sign(testPublicKeys["rsa"], data)
	if err != nil {
		t.Fatal("sign error:", err)
	}

	ss := &SignatureSet{TagForJob("echo.service"), []*gossh.Signature{signature}}
	ok, err := v.VerifyJob(j, ss)
	if err != nil {
		t.Fatal("error verifying job:", err)
	}
	if !ok {
		t.Fatal("job verification failed")
	}

	// A mismatched tag must fail verification.
	ss.Tag = ""
	ok, err = v.VerifyJob(j, ss)
	if err == nil || ok {
		t.Fatal("should fail on job verification")
	}

	// A missing signature set must fail verification.
	ok, err = v.VerifyJob(j, nil)
	if err == nil || ok {
		t.Fatal("should fail on job verification")
	}
}
|
<filename>StreamGlider App/Classes/MagPage.h
//
// MagPage.h
// StreamGlider
//
// Created by <NAME> on 31/10/2011.
// Copyright 2011 StreamGlider, Inc. All rights reserved.
//
// This program is free software if used non-commercially: you can redistribute it and/or modify
// it under the terms of the BSD 4 Clause License as published by
// the Free Software Foundation.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// BSD 4 Clause License for more details.
//
// You should have received a copy of the BSD 4 Clause License
// along with this program. If not, see the README.md file with this program.
#import <Foundation/Foundation.h>
@class MagPageLayout;
@class Stream;
@class FrameIterator;
// One magazine page: the articles placed on it, the layout that arranges
// them, and its position within the owning stream.
@interface MagPage : NSObject

// Articles rendered on this page.
@property (nonatomic, retain) NSArray *articles;

// Layout describing how the articles are arranged on the page.
@property (nonatomic, retain) MagPageLayout *layout;

// Back-reference to the owning stream (assign/non-retaining — presumably
// the stream owns the page; confirm ownership before changing).
@property (nonatomic, assign) Stream *stream;

// Iterator over the stream's frames used to fill this page (non-retaining).
@property (nonatomic, assign) FrameIterator *iterator;

// YES when the page has no content to display.
@property (nonatomic, assign) BOOL emptyPage;

// Prepare, then lay out, the page's articles for the given orientation.
- (void)prepareArticlesForOrientation:(UIInterfaceOrientation)orientation;
- (void)layoutArticlesForOrientation:(UIInterfaceOrientation)orientation;

@end
|
#!/bin/bash
#Note: Search by Date and Time
#Author:Khondakar
clear
echo -e "\\e[30;48;5;82m*** [1] SEARCH LOGS - Search All Date ***\e[0m"
# User input:
echo -e "\e[30;48;5;196m\e[1m\e[5mTIPS:\e[0m\e[25m Enter the Date format like this example: YYYY-MM-DD HH:MM:SS"
# read -r keeps backslashes in the input literal (ShellCheck SC2162);
# without it, a backslash in the date or search word would be mangled.
echo -n -e "Enter the Start Date : "
read -r startdate
echo -n -e "Enter the End Date : "
read -r enddate
echo -n -e "Enter the Word you want to Search : "
read -r inputword
# Save the search result in a file
sudo journalctl --since "$startdate" --until "$enddate" | grep "$inputword" > ~/workSpace/LMS/Reports/ReportBaseOnDateTimeSearch.txt
# Display the result column wise with highlighted searching key word
column -t ~/workSpace/LMS/Reports/ReportBaseOnDateTimeSearch.txt | grep --color=always "$inputword" | more
echo "*----------------- End of Search Result! -----------------------*"
|
<filename>ClientApp/src/functions/signUp.js
// Registers a new user via POST /Users. Shows a success toast and clears
// the loading state on success; shows an error toast on any failure.
// Must be bound to a component (`this.state.form`, `this.setState`).
export default async function signUp() {
  try {
    const response = await fetch(window.ApiUrl + "Users", {
      method: "POST",
      mode: "cors",
      body: JSON.stringify(this.state.form),
      headers: {
        "Content-Type": "application/json"
      }
    });

    if (!response.ok) {
      // Throw an Error *instance* — the original `throw Error;` threw the
      // constructor function itself, not an error object.
      throw new Error("signup request failed with status " + response.status);
    }

    window.container.success(
      "Ya puede ingresar con su usuario y contraseña",
      "Usuario registrado con éxito",
      {
        showAnimation: "animated rubberBand",
        hideAnimation: "animated flipOutX",
        timeOut: 7000,
        extendedTimeOut: 2000
      }
    );
    this.setState({ loading: false, mounted: false });
  } catch (error) {
    // NOTE(review): network errors are also reported as "user already
    // exists" — consider distinguishing them.
    window.container.error("El usuario ingresado ya existe", "Error", {
      showAnimation: "animated rubberBand",
      hideAnimation: "animated flipOutX",
      timeOut: 7000,
      extendedTimeOut: 2000
    });
    this.setState({ loading: false });
  }
}
/*
* SimpleTDBarrier.java
*
* Created on March 26, 2006, 10:30 PM
*
* From "Multiprocessor Synchronization and Concurrent Data Structures",
* by <NAME> and <NAME>.
* Copyright 2006 Elsevier Inc. All rights reserved.
*/
package tamp.ch17.Barrier.barrier;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Simple termination-detection barrier: threads announce whether they are
 * active or idle, and the computation has terminated once no thread is
 * active.
 *
 * @author Maurice Herlihy
 */
public class SimpleTDBarrier implements TDBarrier {

  // Number of currently active threads.
  AtomicInteger count;

  /**
   * @param n number of participating threads (all initially active)
   */
  public SimpleTDBarrier(int n) {
    this.count = new AtomicInteger(n);
  }

  /**
   * Announce this thread's state: becoming active decrements the idle
   * budget, becoming idle restores it.
   *
   * @param active whether the calling thread is now active
   */
  public void setActive(boolean active) {
    count.getAndAdd(active ? -1 : 1);
  }

  /**
   * @return true once every thread has announced it is idle
   */
  public boolean isTerminated() {
    return count.get() == 0;
  }
}
|
// Base URL of the Galton isochrone service (walking profile).
var galtonUrl = 'http://galton.minutes.urbica.co/foot?'
/*
document.getElementById('profile-selector').addEventListener('change', function() {
  galtonUrl = document.getElementById('profile-selector').value;
});
*/
// Parse ?key=value query-string arguments into a plain object.
var args = location.search.replace(/^\?/,'').split('&').reduce(function(o, param){ var keyvalue=param.split('='); o[keyvalue[0]] = keyvalue[1]; return o; }, {});

// NOTE(review): '<KEY>' is a scrubbed placeholder — a real Mapbox access
// token is required for the map style to load.
mapboxgl.accessToken = '<KEY>';

// Map centred on Moscow.
var map = new mapboxgl.Map({
    container: 'map',
    style: 'mapbox://styles/urbica/cinyado0k004cbunmpjsqxlb8',
    center: [37.61701583862305, 55.750931611695684],
    zoom: 12
});

// Isochrone result from the service: time-banded polygons plus the sample
// points they were built from.
var gridSource = new mapboxgl.GeoJSONSource({
  data: {
    type: 'FeatureCollection',
    features: []
  }
});

// Concave hull of the sample points, shown for debugging.
var debugSource = new mapboxgl.GeoJSONSource({
  data: turf.featurecollection([])
});

// One entry per band: [time threshold in minutes, fill colour, opacity].
// Zero-opacity bands exist so band boundaries render via filter overlap.
var layers = [
  [30, '#00aaFF', 0.2],
  [25, '#00aaFF', 0],
  [20, '#00aaFF', 0.4],
  [15, '#00aaFF', 0],
  [10, '#00aaFF', 0.6],
  [5, '#00aaFF', 0]
];

// Register sources and one fill + one circle layer per time band once the
// base style has loaded.
map.on('style.load', function () {
  map.addSource('grid', gridSource);
  map.addSource('debug', debugSource);

  layers.forEach(function (layer, i) {
    map.addLayer({
      'id': 'grid-' + i,
      'type': 'fill',
      'source': 'grid',
      'layout': {},
      'paint': {
        'fill-color': layer[1],
        'fill-opacity': layer[2]
      },
      'filter': [
        'all',
        ['==', '$type', 'Polygon'],
        ['<=', 'time', layer[0]]
      ]
    }, "road-path");

    // Sample points (radius 0 — effectively hidden, kept for debugging).
    map.addLayer({
      'id': 'points-' + i,
      'type': 'circle',
      'source': 'grid',
      'layout': {},
      'paint': {
        "circle-radius": 0,
        "circle-color": layer[1]
      },
      'filter': [
        'all',
        ['==', '$type', 'Point'],
        ['<=', 'time', layer[0]]
      ]
    });
  });

  map.addLayer({
    'id': 'debug',
    'type': 'fill',
    'source': 'debug',
    'layout': {},
    'paint': {
      'fill-color': "#999",
      'fill-opacity': 0.6
    }
  });
});

// Click: request the isochrones for the clicked point, then build a
// concave hull of the <=20-minute sample points for the debug layer.
map.on('click', function (e) {
  console.time('request');
  var url = galtonUrl + 'lng=' + e.lngLat.lng + '&lat=' + e.lngLat.lat;
  d3.json(url, function(data) {
    var debugPoints = turf.featurecollection([]);
    console.log(data);
    console.timeEnd('request');
    console.time('hull');
    debugPoints.features = data.features.filter(function(f) {
      return f.geometry.type == 'Point' && f.properties.time <= 20;
    });
    // console.log(points);
    var hull = turf.concave(debugPoints, 0.5, 'kilometers');
    console.log(hull);
    console.timeEnd('hull');
    gridSource.setData(data);
    debugSource.setData(turf.featurecollection([hull]));
  });
});

// Hover popup showing the time value of the band under the cursor.
var popup = new mapboxgl.Popup({
  closeButton: false,
  closeOnClick: false
});

map.on('mousemove', function(e) {
  var features = map.queryRenderedFeatures(e.point, { layers: ['grid-0', 'grid-1', 'grid-2', 'grid-3', 'grid-4', 'grid-5'] });
  if (!features.length) {
    popup.remove();
    return;
  }
  var feature = features[0];
  popup.setLngLat(e.lngLat)
    .setHTML(feature.properties.time)
    .addTo(map);
});
|
package de.rieckpil.blog;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import java.util.List;
/**
 * REST endpoint exposing the demo greeting under /api/messages.
 */
@RestController
@RequestMapping("/api/messages")
public class MessageController {

  /**
   * @return the static greeting, one word per list element
   */
  @GetMapping
  public List<String> getMessage() {
    List<String> greeting = List.of("Hello", "from", "Kubernetes");
    return greeting;
  }
}
|
<filename>packages/runtime-electron/src/app.ts
import * as React from 'react'
import * as ReactDOM from 'react-dom'
import { App } from '@overlayed2/ux-core'

// Overlay windows render on a black backdrop.
document.body.style.backgroundColor = 'black'

// Mount the root <App/> component into the #app element.
ReactDOM.render(React.createElement(App), document.querySelector('#app'))
|
import React, { useState, useEffect } from 'react';
const Table = () => {
const [data, setData] = useState([]);
useEffect(() => {
setData(input);
}, []);
const [sortBy, setSortBy] = useState('');
const [sortDir, setSortDir] = useState('asc');
const [currentPage, setCurrentPage] = useState(1);
const sortProperty = (property) => {
const sortedData = data.sort((a, b) => {
let output = 0;
if (a[property] < b[property]) {
output = -1;
} else if (a[property] > b[property]) {
output = 1;
}
return (sortDir === 'desc') ? output : -output;
});
setData(sortedData);
};
const itemsPerPage = 5;
const totalPages = Math.ceil(data.length / itemsPerPage);
const pageData = data.slice((currentPage - 1) * itemsPerPage, currentPage * itemsPerPage);
return (
<div>
<table>
<thead>
<tr>
<th
onClick={() => {
setSortBy('name');
setSortDir(sortDir === 'asc' ? 'desc' : 'asc');
sortProperty('name');
}}
>
Name
{sortBy === 'name' && <small>{sortDir === 'asc' ? '↓' : '↑'}</small>}
</th>
<th
onClick={() => {
setSortBy('age');
setSortDir(sortDir === 'asc' ? 'desc' : 'asc');
sortProperty('age');
}}
>
Age
{sortBy === 'age' && <small>{sortDir === 'asc' ? '↓' : '↑'}</small>}
</th>
</tr>
</thead>
<tbody>
{pageData.map((row, index) => (
<tr key={index}>
<td>{row.name}</td>
<td>{row.age}</td>
</tr>
))}
</tbody>
</table>
<div>
{Array.from({length: totalPages}, (v, i) => i).map((page, index) => (
<button
key={index}
onClick={() => setCurrentPage(page + 1)}
>
{page + 1}
</button>
))}
</div>
</div>
);
};
export default Table; |
<filename>Tests/Engine/ExtrapolateTests.cpp
//
// ExtrapolateTests.cpp
// Vortex
//
#include <Vortex/Engine/Extrapolation.h>
#include "VariationalHelpers.h"
#include "Verify.h"
using namespace Vortex::Fluid;
using namespace Vortex::Renderer;
extern Device* device;
// Debug helper: prints the (u_valid, v_valid) flags of every cell,
// one grid row per output line.
void PrintValid(const glm::ivec2& size, Vortex::Renderer::Buffer<glm::ivec2>& buffer)
{
  std::vector<glm::ivec2> pixels(size.x * size.y);
  CopyTo(buffer, pixels);

  // Row-major layout: index = x + y * width. The original looped the outer
  // index over size.x and the inner over size.y, which only happened to
  // work because every caller uses a square grid.
  for (int j = 0; j < size.y; j++)
  {
    for (int i = 0; i < size.x; i++)
    {
      glm::ivec2 value = pixels[i + j * size.x];
      std::cout << "(" << value.x << "," << value.y << ")";
    }
    std::cout << std::endl;
  }
  std::cout << std::endl;
}
// Copies the u/v velocity-validity flags from the reference FluidSim into
// the GPU buffer, packing (u_valid, v_valid) per cell as a glm::ivec2.
void SetValid(const glm::ivec2& size, FluidSim& sim, Buffer<glm::ivec2>& buffer)
{
  std::vector<glm::ivec2> validData(size.x * size.y);
  for (int i = 0; i < size.x; i++)
  {
    for (int j = 0; j < size.y; j++)
    {
      // Row-major: index = x + y * width.
      std::size_t index = i + j * size.x;
      validData[index].x = sim.u_valid(i, j);
      validData[index].y = sim.v_valid(i, j);
    }
  }
  CopyFrom(buffer, validData);
}
// Compares the GPU Extrapolation operator against the reference FluidSim
// extrapolate() starting from identical velocity and validity fields.
TEST(ExtrapolateTest, Extrapolate)
{
  glm::ivec2 size(50);

  FluidSim sim;
  sim.initialize(1.0f, size.x, size.y);
  sim.set_boundary(complex_boundary_phi);

  AddParticles(size, sim, complex_boundary_phi);

  sim.add_force(0.01f);
  sim.apply_projection(0.01f);

  // Capture the validity flags before extrapolation so the GPU side starts
  // from the same state as the reference implementation.
  Buffer<glm::ivec2> valid(*device, size.x * size.y, VMA_MEMORY_USAGE_CPU_ONLY);
  SetValid(size, sim, valid);

  Velocity velocity(*device, size);
  SetVelocity(*device, size, velocity, sim);

  // Reference CPU extrapolation.
  extrapolate(sim.u, sim.u_valid);
  extrapolate(sim.v, sim.v_valid);

  // GPU implementation under test (10 extrapolation iterations).
  Extrapolation extrapolation(*device, size, valid, velocity, 10);
  extrapolation.Extrapolate();
  device->Queue().waitIdle();

  CheckVelocity(*device, size, velocity, sim);
  CheckValid(size, sim, valid);
}
// Compares the GPU ConstrainVelocity step against the reference
// FluidSim::constrain_velocity() after both sides have extrapolated.
TEST(ExtrapolateTest, Constrain)
{
  // FIXME increase size
  glm::ivec2 size(20);

  FluidSim sim;
  sim.initialize(1.0f, size.x, size.y);
  sim.set_boundary(complex_boundary_phi);

  AddParticles(size, sim, complex_boundary_phi);

  sim.add_force(0.01f);
  sim.apply_projection(0.01f);

  Buffer<glm::ivec2> valid(*device, size.x * size.y, VMA_MEMORY_USAGE_CPU_ONLY);

  // Solid boundary level set used by the constrain step.
  Texture solidPhi(*device, size.x, size.y, vk::Format::eR32Sfloat);
  SetSolidPhi(*device, size, solidPhi, sim, (float)size.x);

  // Reference CPU extrapolation before constraining.
  extrapolate(sim.u, sim.u_valid);
  extrapolate(sim.v, sim.v_valid);

  Velocity velocity(*device, size);
  SetVelocity(*device, size, velocity, sim);

  sim.constrain_velocity();

  Extrapolation extrapolation(*device, size, valid, velocity);
  extrapolation.ConstrainBind(solidPhi);
  extrapolation.ConstrainVelocity();
  device->Queue().waitIdle();

  CheckVelocity(*device, size, velocity, sim, 1e-3f); // FIXME reduce error tolerance
}
|
# Brute-force search for right triangles with integer sides in 1..25 whose
# perimeter is exactly 25.
for a in range(1, 26):
    for b in range(1, 26):
        for c in range(1, 26):
            # Pythagorean condition plus the fixed-perimeter constraint.
            if a * a + b * b == c * c and a + b + c == 25:
                print(f"Pythagorean triplet: {a}, {b}, {c}")
import psycopg2

# Fetch all users and unpack id / username / email from each row.
# NOTE(review): credentials are hard-coded — move them to environment
# variables or a config file before this ships.
conn = psycopg2.connect("host=localhost dbname=mydb user=myuser password=mypassword")
try:
    cur = conn.cursor()
    try:
        cur.execute("SELECT * FROM users")
        rows = cur.fetchall()
        for row in rows:
            # Positional columns: 0 = id, 1 = username, 3 = email.
            # NOTE(review): magic indexes depend on the users table layout;
            # prefer selecting explicit columns over SELECT *.
            user_id = row[0]
            username = row[1]
            user_email = row[3]
    finally:
        cur.close()
finally:
    # The original never closed the connection or cursor (resource leak).
    conn.close()
# Fractional knapsack problem: take items greedily by value-per-weight
# ratio, splitting the last item if it does not fit entirely.

# declare the structures
weight = [5, 7, 10, 25]
value = [37, 20, 59, 2]
capacity = 22

# knapsack[i] holds the fraction (0..1) of item i that is taken.
knapsack = [0] * len(weight)

# The greedy strategy is only optimal for the *fractional* problem when
# items are visited in decreasing value/weight order; the original code
# used input order, which is only correct by accident.
order = sorted(range(len(weight)), key=lambda i: value[i] / weight[i], reverse=True)

remaining = capacity
for i in order:
    if remaining >= weight[i]:
        # Item fits entirely.
        knapsack[i] = 1
        remaining -= weight[i]
    else:
        # Take the fraction that still fits, then stop.
        knapsack[i] = remaining / weight[i]
        break

# calculate the maximum value
max_value = sum(knapsack[i] * value[i] for i in range(len(knapsack)))

# print the maximum value
print("The maximum value is:", max_value)
#! /bin/bash
# SLURM batch job: SWEET REXI finite-difference scalability benchmark on the
# LRZ mpp2 cluster — 266 MPI ranks x 14 OpenMP threads, m=512 REXI terms.
#SBATCH -o /home/hpc/pr63so/di69fol/workspace/SWEET_2015_12_26/benchmarks_performance/rexi_tests_lrz_freq_waves/2016_01_03_scalability_rexi_fd_high_res_run3/run_rexi_fd_par_m0512_t014_n0128_r0266_a1.txt
###SBATCH -e /home/hpc/pr63so/di69fol/workspace/SWEET_2015_12_26/benchmarks_performance/rexi_tests_lrz_freq_waves/2016_01_03_scalability_rexi_fd_high_res_run3/run_rexi_fd_par_m0512_t014_n0128_r0266_a1.err
#SBATCH -J rexi_fd_par_m0512_t014_n0128_r0266_a1
#SBATCH --get-user-env
#SBATCH --clusters=mpp2
#SBATCH --ntasks=266
#SBATCH --cpus-per-task=14
#SBATCH --exclusive
#SBATCH --export=NONE
#SBATCH --time=03:00:00

#declare -x NUMA_BLOCK_ALLOC_VERBOSITY=1

# Pin OpenMP threads per rank (compact placement across the socket).
declare -x KMP_AFFINITY="granularity=thread,compact,1,0"
declare -x OMP_NUM_THREADS=14

echo "OMP_NUM_THREADS=$OMP_NUM_THREADS"
echo

# Load the toolchain expected by the SWEET build.
. /etc/profile.d/modules.sh

module unload gcc
module unload fftw
module unload python

module load python/2.7_anaconda_nompi

module unload intel
module load intel/16.0
module unload mpi.intel
module load mpi.intel/5.1
module load gcc/5

cd /home/hpc/pr63so/di69fol/workspace/SWEET_2015_12_26/benchmarks_performance/rexi_tests_lrz_freq_waves/2016_01_03_scalability_rexi_fd_high_res_run3
cd ../../../

. local_software/env_vars.sh

# force to use FFTW WISDOM data
declare -x SWEET_FFTW_LOAD_WISDOM_FROM_FILE="FFTW_WISDOM_nofreq_T0"

# 2 ranks per node, 266 ranks total; REXI run with m=512 terms, N=128 grid.
time -p mpiexec.hydra -genv OMP_NUM_THREADS 14 -envall -ppn 2 -n 266 ./build/rexi_fd_par_m_tno_a1 --initial-freq-x-mul=2.0 --initial-freq-y-mul=1.0 -f 1 -g 1 -H 1 -X 1 -Y 1 --compute-error 1 -t 50 -R 4 -C 0.3 -N 128 -U 0 -S 0 --use-specdiff-for-complex-array 0 --rexi-h 0.8 --timestepping-mode 1 --staggering 0 --rexi-m=512 -C -5.0
|
import random
jokes = [
'Yo momma so fat she uses Google Earth to take a selfie',
'Yo momma so old she knew Burger King when he was a prince',
'Yo momma so short she poses for trophies',
'Yo momma so lazy she thinks a two-income family is where your wife has a job',
'Yo momma so ugly when she tried to join an ugly contest they said, "Sorry, no professionals."',
'Yo momma so fat when she steps on the scale, it says "To be continued..."',
'Yo momma so fat she uses butter for her chapstick',
'Yo momma so fat a vampire bit her and got Type 2 diabetes'
]
def generate_jokes(num):
    """Return `num` randomly chosen jokes from the module-level `jokes` pool.

    Returns an error-message string when `num` is not positive, and a copy of
    the whole pool when `num` is at least the pool size (so callers cannot
    mutate the shared module-level list through the return value).
    """
    if num <= 0:
        return "Please enter a positive number"
    if num >= len(jokes):
        # Copy: the original returned the shared global list itself.
        return list(jokes)
    return random.sample(jokes, num)
def main():
    """Prompt until a valid integer is entered, then print the chosen jokes."""
    done = False
    while not done:
        raw = input("Enter the number of jokes you want to hear: ")
        try:
            count = int(raw)
        except ValueError:
            print("Please enter a valid number")
            continue
        result = generate_jokes(count)
        if not isinstance(result, list):
            # Non-list means generate_jokes returned an error message.
            print(result)
            continue
        print("Here are your 'Yo momma' jokes:")
        for index, joke in enumerate(result, 1):
            print(f"{index}. {joke}")
        done = True
if __name__ == "__main__":
    main()
package com.whoisxmlapi.whoisapi.model;
/**
 * Zone contact record; inherits all fields and behavior from
 * {@link BaseContact} and adds no members of its own.
 */
public class ZoneContact extends BaseContact{
}
|
<reponame>gridgentoo/gpdb<gh_stars>1-10
package javaclasses;
import com.emc.greenplum.gpdb.hadoop.io.GPDBWritable;
import com.emc.greenplum.gpdb.hadoop.mapreduce.lib.input.GPDBInputFormat;
import com.emc.greenplum.gpdb.hadoop.mapreduce.lib.output.GPDBOutputFormat;
import java.io.*;
import java.util.*;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.output.*;
import org.apache.hadoop.mapreduce.lib.input.*;
import org.apache.hadoop.util.*;
/**
 * MapReduce driver used to exercise reading and writing Greenplum
 * {@code GPDBWritable} records (via GPDBInputFormat/GPDBOutputFormat)
 * against plain text files on HDFS or MapR-FS.
 *
 * Input rows are self-describing: column 0 holds a comma-separated list of
 * the GPDB type names of columns 2..n, and column 1 is a BIGINT row id.
 */
public class UseMapreduce {
// Default-filesystem URI ("hdfs://host:port" or "maprfs:///...").
private static String datanodestr;
// Job-tracker URI built from the constructor arguments.
private static String jobtrackerstr;
// SequenceFile compression requested: "BLOCK", "RECORD", or anything else for none.
private static String compressData;
// NOTE(review): the constructor writes static fields, so the most recently
// constructed instance wins; presumably single-instance test-harness usage.
UseMapreduce(String hdfshostname,String jobtrackerhost, String datanodeport, String jobtrackerport, String compressionType)
{
// A datanode port of "none" selects MapR-FS with a fixed mount point.
if (datanodeport.equalsIgnoreCase("none"))
{
//datanodestr = "maprfs:///" +hdfshostname;
//jobtrackerstr = "maprfs:///" +hdfshostname;
datanodestr = "maprfs:///mapr/mapr";
jobtrackerstr = "maprfs:///mapr/mapr";
}
else
{
datanodestr= "hdfs://"+hdfshostname+":"+datanodeport;
jobtrackerstr = "hdfs://"+jobtrackerhost+":"+jobtrackerport;
}
compressData = compressionType;
}
// Mapper for GPDB in / GPDB out: rebuilds each incoming GPDBWritable record
// column by column (using the type list in column 0) and re-emits it.
public static class Mapreduce_mapper_GPDB_INOUT extends Mapper<LongWritable, GPDBWritable, LongWritable, GPDBWritable> {
// Constant map-output key; all records share key 1.
private LongWritable word = new LongWritable(1);
public void map(LongWritable key, GPDBWritable value, Context context) throws IOException{
try
{
// Column 0 describes the types of columns 2..n, comma-separated.
String datatype = value.getString(0);
String delims = ",";
String[] typetokens = datatype.split(delims);
// +2 for the leading type-descriptor and row-id columns.
int tablesize = typetokens.length + 2;
int[] colType = new int[tablesize];
colType[0] = GPDBWritable.VARCHAR;
colType[1] = GPDBWritable.BIGINT;
for (int x = 2; x < tablesize; x++)
{ colType[x] = returnGPDBWritableType(typetokens[x-2]); }
GPDBWritable gw = new GPDBWritable(colType);
gw.setString(0,value.getString(0));
gw.setLong(1,value.getLong(1));
// Copy each remaining column using the accessor matching its declared type.
for (int x = 2; x < tablesize; x++)
{
int typetokenInd = x-2;
if (typetokens[typetokenInd].equalsIgnoreCase("bigint")){gw.setLong(x, value.getLong(x));}
else if (typetokens[typetokenInd].equalsIgnoreCase("int")){gw.setInt(x, value.getInt(x));}
/*else if (typetokens[typetokenInd].equalsIgnoreCase("smallint"))
{
if (value.getShort(x) == null)
gw.setShort(x, (short)0);
else
gw.setShort(x, (short)value.getShort(x));
}*/
else if (typetokens[typetokenInd].equalsIgnoreCase("smallint")){gw.setShort(x,value.getShort(x));}
else if (typetokens[typetokenInd].equalsIgnoreCase("float")){gw.setDouble(x, value.getDouble(x));}
else if (typetokens[typetokenInd].equalsIgnoreCase("real")){gw.setFloat(x, value.getFloat(x));}
else if (typetokens[typetokenInd].equalsIgnoreCase("varchar")){gw.setString(x, value.getString(x));}
else if (typetokens[typetokenInd].equalsIgnoreCase("bpchar")){gw.setString(x, value.getString(x));}
else if (typetokens[typetokenInd].equalsIgnoreCase("text")){gw.setString(x, value.getString(x));}
else if (typetokens[typetokenInd].equalsIgnoreCase("time")){gw.setString(x, value.getString(x));}
else if (typetokens[typetokenInd].equalsIgnoreCase("timestamp")){gw.setString(x, value.getString(x));}
else if (typetokens[typetokenInd].equalsIgnoreCase("date")){gw.setString(x, value.getString(x));}
else if (typetokens[typetokenInd].equalsIgnoreCase("numeric")){gw.setString(x,value.getString(x));}
else if (typetokens[typetokenInd].equalsIgnoreCase("boolean")){gw.setBoolean(x, value.getBoolean(x));}
// Unknown type names fall back to string handling.
else {gw.setString(x,value.getString(x));}
}
context.write(word, gw);
} catch (Exception e) { throw new IOException ( "Mapreduce exception: " + e.getMessage()); }
}
}
/**
 * Maps a GPDB type name (case-insensitive) to the corresponding
 * GPDBWritable column-type constant; unknown names default to TEXT.
 */
public static int returnGPDBWritableType(String datatype)
{
if (datatype.equalsIgnoreCase("bigint")){return GPDBWritable.BIGINT;}
else if (datatype.equalsIgnoreCase("int")){return GPDBWritable.INTEGER;}
else if (datatype.equalsIgnoreCase("smallint")){return GPDBWritable.SMALLINT;}
else if (datatype.equalsIgnoreCase("float")){return GPDBWritable.FLOAT8;}
else if (datatype.equalsIgnoreCase("real")){return GPDBWritable.REAL;}
else if (datatype.equalsIgnoreCase("varchar")){return GPDBWritable.VARCHAR;}
else if (datatype.equalsIgnoreCase("bpchar")){return GPDBWritable.BPCHAR;}
else if (datatype.equalsIgnoreCase("text")){return GPDBWritable.TEXT;}
else if (datatype.equalsIgnoreCase("time")){return GPDBWritable.TIME;}
else if (datatype.equalsIgnoreCase("timestamp")){return GPDBWritable.TIMESTAMP;}
else if (datatype.equalsIgnoreCase("date")){return GPDBWritable.DATE;}
else if (datatype.equalsIgnoreCase("numeric")){return GPDBWritable.NUMERIC;}
else if (datatype.equalsIgnoreCase("boolean")){return GPDBWritable.BOOLEAN;}
else {return GPDBWritable.TEXT;}
}
// Mapper for text in / GPDB out: parses tab-separated lines into
// GPDBWritable records; the literal "\N" token denotes SQL NULL.
public static class Mapreduce_mapper_TextIn extends Mapper<LongWritable, Text, LongWritable, GPDBWritable> {
// Constant map-output key; all records share key 1.
private LongWritable word = new LongWritable(1);
public void map(LongWritable key, Text value, Context context) throws IOException {
try {
String line = value.toString();
String delims = "\t";
String[] tokens = line.split(delims);
// tokens[0] is the comma-separated type list for columns 2..n.
String datatype = tokens[0];
String datatypedelims = ",";
String[] typetokens = datatype.split(datatypedelims);
int tablesize = typetokens.length + 2;
int[] colType = new int[tablesize];
colType[0] = GPDBWritable.VARCHAR;
colType[1] = GPDBWritable.BIGINT;
for (int x = 2; x < tablesize; x++)
{ colType[x] = returnGPDBWritableType(typetokens[x-2]); }
GPDBWritable gw = new GPDBWritable(colType);
gw.setString(0,tokens[0]);
gw.setLong(1,Long.parseLong(tokens[1]));
// Parse each remaining token with the setter matching its declared type,
// translating "\N" to a null value.
for (int x = 2; x < tablesize; x++)
{
int typetokenInd = x-2;
if (typetokens[typetokenInd].equalsIgnoreCase("bigint"))
{
if (tokens[x].equalsIgnoreCase("\\N"))
{ gw.setLong(x,null);}
else
{ gw.setLong(x,Long.parseLong(tokens[x]));}
}
else if (typetokens[typetokenInd].equalsIgnoreCase("int"))
{
if (tokens[x].equalsIgnoreCase("\\N"))
{ gw.setInt(x,null);}
else
{ gw.setInt(x,Integer.parseInt(tokens[x]));}
}
else if (typetokens[typetokenInd].equalsIgnoreCase("smallint"))
{
if (tokens[x].equalsIgnoreCase("\\N"))
{ gw.setShort(x,null);}
else
{ gw.setShort(x,Short.parseShort(tokens[x]));}
}
else if (typetokens[typetokenInd].equalsIgnoreCase("float"))
{
if (tokens[x].equalsIgnoreCase("\\N"))
{ gw.setDouble(x,null);}
else
{ gw.setDouble(x,Double.parseDouble(tokens[x]));}
}
else if (typetokens[typetokenInd].equalsIgnoreCase("real"))
{
if (tokens[x].equalsIgnoreCase("\\N"))
{ gw.setFloat(x,null);}
else
{ gw.setFloat(x,Float.parseFloat(tokens[x]));}
}
else if (typetokens[typetokenInd].equalsIgnoreCase("boolean"))
{
// Anything that is neither "true" nor "\N" is treated as false.
if(tokens[x].equalsIgnoreCase("true"))
gw.setBoolean(x, true);
else if (tokens[x].equalsIgnoreCase("\\N"))
gw.setBoolean(x, null);
else
gw.setBoolean(x, false);
}else
{
// All string-like types (varchar, text, time, date, ...) land here.
if (tokens[x].equalsIgnoreCase("\\N"))
gw.setString(x, null);
else
gw.setString(x, tokens[x]);
}
}
context.write(word, gw);
} catch (Exception e) { throw new IOException ("Mapreduce exception: " +e.getMessage()); }
}
}
// Mapper for text in / GPDB out with a custom single-column schema:
// wraps each whole input line into a one-column TEXT GPDBWritable.
public static class Mapreduce_mapper_TextIn_customtype extends Mapper<LongWritable, Text, LongWritable, GPDBWritable> {
// Constant map-output key; all records share key 1.
private LongWritable word = new LongWritable(1);
public void map(LongWritable key, Text value, Context context) throws IOException {
try {
int[] colType = new int[1];
colType[0] = GPDBWritable.TEXT;
GPDBWritable gw = new GPDBWritable(colType);
String line = value.toString();
gw.setString(0,line);
context.write(word, gw);
} catch (Exception e) { throw new IOException ("Mapreduce exception: " +e.getMessage()); }
}
}
// Mapper for GPDB in / text out: emits each record's string form as the key.
public static class Mapreduce_mapper_GPDBIn extends Mapper<LongWritable, GPDBWritable, Text, NullWritable> {
private Text word = new Text();
public void map(LongWritable key, GPDBWritable value, Context context) throws IOException {
try {
// Local declaration shadows the field above.
Text word = new Text(value.toString());
context.write(word, NullWritable.get());
} catch (Exception e) { throw new IOException ("Mapreduce exception: " +e.getMessage()); }
}
}
// Mapper for text in / text out: identity pass-through of each line.
public static class Mapreduce_mapper_TEXT_INOUT extends Mapper<LongWritable, Text, Text, NullWritable> {
private Text word = new Text();
public void map(LongWritable key, Text value, Context context) throws IOException {
try {
// Local declaration shadows the field above.
Text word = new Text(value.toString());
context.write(word, NullWritable.get());
} catch (Exception e) { throw new IOException ("Mapreduce exception: " +e.getMessage()); }
}
}
// Deliberately writes a long into a VARCHAR column to provoke a
// GPDBWritable type error (negative test helper).
public static void testtypeerror() throws Exception{
int[] colType = new int[2];
colType[0] = GPDBWritable.VARCHAR;
colType[1] = GPDBWritable.BIGINT;
GPDBWritable gw = new GPDBWritable(colType);
gw.setLong(0,4L);
//gw.getString(1);
}
/**
 * Configures and runs one MapReduce job. The mapper/input/output classes
 * are selected by {@code mapperfunc} name; output compression follows the
 * constructor's compression setting.
 */
public static void mapreduce_readwrite(String mapperfunc, String inputpath, String outputpath ) throws Exception {
Configuration conf = new Configuration(true);
/* Not sure why this helps ..But it gets rid of the deprecation messages that were causing failures */
if (System.getenv("HADOOP_INTEGRATION_CDH4") == null)
conf.set("fs.default.name", datanodestr);
conf.set("mapred.job.tracker", jobtrackerstr);
conf.set("mapred.job.map.memory.mb", "3024");
conf.set("mapred.job.reduce.memory.mb", "3024");
Job job = new Job(conf, "mapreduce_readwrite");
job.setJarByClass(UseMapreduce.class);
if (mapperfunc.equalsIgnoreCase("Mapreduce_mapper_TextIn"))
{
job.setOutputKeyClass(LongWritable.class);
job.setOutputValueClass(GPDBWritable.class);
job.setMapperClass(Mapreduce_mapper_TextIn.class);
job.setInputFormatClass(TextInputFormat.class);
job.setOutputFormatClass(GPDBOutputFormat.class);
}
else if (mapperfunc.equalsIgnoreCase("Mapreduce_mapper_TextIn_customtype"))
{
job.setOutputKeyClass(LongWritable.class);
job.setOutputValueClass(GPDBWritable.class);
job.setMapperClass(Mapreduce_mapper_TextIn_customtype.class);
job.setInputFormatClass(TextInputFormat.class);
job.setOutputFormatClass(GPDBOutputFormat.class);
}
else if (mapperfunc.equalsIgnoreCase("Mapreduce_mapper_GPDB_INOUT"))
{
job.setOutputKeyClass(LongWritable.class);
job.setOutputValueClass(GPDBWritable.class);
job.setMapperClass(Mapreduce_mapper_GPDB_INOUT.class);
job.setInputFormatClass(GPDBInputFormat.class);
job.setOutputFormatClass(GPDBOutputFormat.class);
}
else if (mapperfunc.equalsIgnoreCase("Mapreduce_mapper_GPDBIn"))
{
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(NullWritable.class);
job.setMapperClass(Mapreduce_mapper_GPDBIn.class);
job.setInputFormatClass(GPDBInputFormat.class);
job.setOutputFormatClass(TextOutputFormat.class);
}
else if (mapperfunc.equalsIgnoreCase("Mapreduce_mapper_TEXT_INOUT"))
{
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(NullWritable.class);
job.setMapperClass(Mapreduce_mapper_TEXT_INOUT.class);
job.setInputFormatClass(TextInputFormat.class);
job.setOutputFormatClass(TextOutputFormat.class);
}
GPDBInputFormat.setInputPaths(job, inputpath);
GPDBOutputFormat.setOutputPath(job, new Path(outputpath));
if (compressData.equalsIgnoreCase("BLOCK"))
GPDBOutputFormat.setOutputCompressionType(job, SequenceFile.CompressionType.BLOCK);
else if (compressData.equalsIgnoreCase("RECORD"))
GPDBOutputFormat.setOutputCompressionType(job, SequenceFile.CompressionType.RECORD);
// Run synchronously, printing progress; completion status is not checked here.
job.waitForCompletion(true);
}
}
|
<gh_stars>0
import { BP_PixelShipPlayer } from 'UE/BP_PixelShipPlayer';
import { GameplayStatics as KGameplay } from 'UE/GameplayStatics';
import { MathLibrary as KMath } from 'UE/MathLibrary';
import { Rotator } from 'UE/Rotator';
import { TimelineComponent } from 'UE/TimelineComponent';
import { Vector } from 'UE/Vector';
const trackName = 'Movement';
export function onBeginPlay(target: BP_PixelShipPlayer) {
  // Cache the cabinet's yaw and the spawn point; both are reused every tick
  // and by the spawn animation below.
  target.gameCabinetYaw = target.getInstigator().getActorRotation().yaw;
  target.spawnLocation = target.getActorLocation();
  // Build a one-second timeline whose float track drives the spawn animation.
  const spawnAnimation = new TimelineComponent(target);
  spawnAnimation.registerComponent();
  spawnAnimation.setTimelineLength(1);
  spawnAnimation.addFloatCurve(trackName, target.spawnAnimationCurve);
  spawnAnimation.setOnFloatUpdate(trackName, value => {
    onAnimate(target, value);
  });
  spawnAnimation.setOnFinished(() => {
    // Player movement is enabled only once the spawn animation completes.
    target.movementEnabled = true;
  });
  spawnAnimation.playFromStart();
  target.addOwnedComponent(spawnAnimation);
}
// Spawn-animation step: at progress `alpha` the ship has risen part of 60
// units above its spawn point and unwound part of two full turns back to
// the cabinet's yaw.
function onAnimate(target: BP_PixelShipPlayer, alpha: number) {
  const startPos = target.spawnLocation;
  const endPos = startPos.add(new Vector(0, 0, 60));
  target.setActorLocation(startPos.lerp(endPos, alpha));
  const startRot = new Rotator(0, 0, target.gameCabinetYaw + 720);
  const endRot = new Rotator(0, 0, target.gameCabinetYaw);
  target.setActorRotation(startRot.lerp(endRot, alpha, false));
}
// Per-frame movement: smooth the input-driven speed, slide the ship along
// the cabinet's local X axis, and bank it against its direction of travel.
export function tick(target: BP_PixelShipPlayer) {
  if (!target.movementEnabled) { return; }
  const dt = KGameplay.getWorldDeltaSeconds();
  const desired = target.axisMoveRight * target.movementSpeed * dt;
  target.speed = KMath.finterpTo(target.speed, desired, dt, 4);
  const cabinetFrame = new Rotator(0, 0, target.gameCabinetYaw);
  const worldOffset = cabinetFrame.rotateVector(new Vector(target.speed, 0, 0));
  target.addActorWorldOffset(worldOffset, true);
  const bankedYaw = target.gameCabinetYaw - target.speed * 5;
  const goal = new Rotator(0, 0, bankedYaw);
  target.setActorRotation(target.getActorRotation().interpTo(goal, dt, 20));
}
// Fire immediately, then keep firing every 200 ms until stopFiring().
export function startFiring(target: BP_PixelShipPlayer) {
  if (!target.movementEnabled) { return; }
  fireProjectile(target);
  target.fireTimerHandle = setInterval(() => fireProjectile(target), 200);
}
// Spawns one projectile at the ship's current transform with a (0, 0, 700)
// velocity vector and a fixed light-blue color; `target` is the instigator.
function fireProjectile(target: BP_PixelShipPlayer) {
  target.spawnProjectile(
    target.getActorTransform(),
    new Vector(0, 0, 700),
    target,
    KMath.makeColor(0.2, 0.5, 1, 1)
  );
}
// Stops the repeating fire timer when its handle is still valid.
export function stopFiring(target: BP_PixelShipPlayer) {
  if (target.fireTimerHandle.isValidTimerHandle()) {
    clearInterval(target.fireTimerHandle);
  }
}
// Applies damage: knocks the ship back (speed reversed and halved), reduces
// health, and on reaching zero or below spawns the destruction effect and
// destroys the actor.
export function onDamage(target: BP_PixelShipPlayer, damage: number) {
  target.speed = target.speed * -0.5;
  target.health = target.health - damage;
  if (target.health > 0) { return; }
  KGameplay.spawnEmitterAtLocation(
    target.destroyedEffect,
    target.getActorLocation()
  );
  target.destroyActor();
}
|
from pandas import DataFrame, concat
from datetime import datetime as dt
class PortfolioManager:
    """Aggregates four asset-class DataFrames (domestic/international stocks,
    crypto, domestic funds) and computes portfolio-level figures.

    Each input frame is assumed to contain at least 'date' (string,
    '%Y-%m-%d') and 'purchase_price' columns — TODO confirm against callers.
    """

    def __init__(self, domestic_stocks, international_stocks, crypto, domestic_funds):
        self.domestic_stocks = domestic_stocks
        self.international_stocks = international_stocks
        self.crypto = crypto
        self.domestic_funds = domestic_funds

    def get_concat_dataframe(self, *columns, include_date=True):
        """Return the requested columns of all four frames concatenated
        column-wise. When include_date is True an empty 'date' frame is
        prepended (preserving the original placeholder behavior)."""
        # Bug fix: pandas treats a tuple as a single column key, so indexing
        # with `columns` directly raised KeyError for multiple columns.
        cols = list(columns)
        dfs = [self.domestic_stocks[cols], self.international_stocks[cols],
               self.crypto[cols], self.domestic_funds[cols]]
        if include_date:
            dfs.insert(0, DataFrame({'date': []}))  # empty placeholder 'date' column
        df = concat(dfs, axis=1)
        return df

    def get_portfolio_invested(self, df):
        """Return the total purchase price invested across all four asset
        classes. Note: mutates `df` in place when it has a 'date' column."""
        if 'date' in df.columns:
            df.set_index('date', inplace=True)
        # NOTE(review): start/end are parsed but not used below — kept for
        # interface/behavior compatibility; confirm whether they are needed.
        start, end = df.index[0], df.index[-1]
        start = dt.strptime(start, '%Y-%m-%d').date()
        end = dt.strptime(end, '%Y-%m-%d').date()
        # Bug fix: stack the four frames vertically. The previous column-wise
        # concat produced four duplicate 'date' columns, which made the
        # groupby below raise.
        reference = concat([holdings[['date', 'purchase_price']]
                            for holdings in (self.domestic_stocks, self.international_stocks,
                                             self.crypto, self.domestic_funds)])
        reference = reference.groupby(by='date')['purchase_price'].sum()
        reference = DataFrame(reference).reset_index()
        total_invested = reference['purchase_price'].sum()
        return total_invested
// Copyright 2021 99cloud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import { action } from 'mobx';
import client from 'client';
import Base from 'stores/base';
/**
 * MobX store for Octavia load-balancer pool members. Member endpoints are
 * nested under a pool, so every request carries the owning pool's id
 * (pool_id / default_pool_id).
 */
export class PoolMemberStore extends Base {
  // Member-level API client, nested under the pool client.
  get client() {
    return this.poolClient.members;
  }

  // Parent pool client on the Octavia service.
  get poolClient() {
    return client.octavia.pools;
  }

  // Key under which a single member is wrapped in request/response bodies.
  get responseKey() {
    return 'member';
  }

  get listFilterByProject() {
    return true;
  }

  // List members of the pool identified in the original request params.
  listFetchByClient(params, originParams) {
    const { pool_id } = originParams;
    return this.client.list(pool_id);
  }

  // Create members in the given pool; `data` is wrapped under the list key.
  @action
  create({ default_pool_id, data }) {
    const body = {};
    body[this.listResponseKey] = data;
    return this.submitting(this.client.create(default_pool_id, body));
  }

  // Replace/update the pool's full member list in one batch call.
  @action
  batchUpdate({ default_pool_id, data }) {
    const body = {};
    body[this.listResponseKey] = data;
    return this.submitting(
      this.poolClient.batchUpdateMembers(default_pool_id, body)
    );
  }

  // Update a single member of the pool.
  @action
  update({ default_pool_id, member_id, data }) {
    const body = {};
    body[this.responseKey] = data;
    return this.submitting(
      this.client.update(default_pool_id, member_id, body)
    );
  }

  // Delete a single member from the pool.
  @action
  delete = ({ id, default_pool_id }) =>
    this.submitting(this.client.delete(default_pool_id, id));
}

// Shared singleton instance used as the default export.
const globalPoolMemberStore = new PoolMemberStore();
export default globalPoolMemberStore;
|
<filename>src/index.ts
export { NewProxyBuilder } from './new-proxy-builder';
export { NewProxy } from './new-proxy';
|
import tensorflow as tf
from tensorflow import keras
import numpy as np
# Build a small fully-connected regression network: two hidden layers of 64
# ReLU units over a 5-feature input and a single linear output unit.
model = keras.Sequential()
model.add(keras.layers.Dense(64, input_shape=(5, ), activation='relu'))
model.add(keras.layers.Dense(64, activation='relu'))
model.add(keras.layers.Dense(1))
# Compile for regression. Bug fix: 'accuracy' is a classification metric and
# is meaningless with a mean-squared-error regression target; track mean
# absolute error instead.
model.compile(optimizer='rmsprop', loss='mean_squared_error', metrics=['mae'])
// custom-injection.ts
import { InjectionToken } from "@angular/core";
import { SchematicContext } from "@angular-devkit/schematics";

/** DI token under which the active SchematicContext is provided/injected. */
export const CONTEXT = new InjectionToken<SchematicContext>("SchematicContext");
def remove_vowels_reverse_str(input_str):
    """Remove all vowels (either case) from `input_str` and return the
    remaining characters in reverse order.

    Bug fixes vs. the original: uppercase vowels are now removed too, and the
    trailing example used a JavaScript `//` comment, which was a Python
    SyntaxError.
    """
    vowels = {'a', 'e', 'i', 'o', 'u'}
    consonants = [ch for ch in input_str if ch.lower() not in vowels]
    return ''.join(reversed(consonants))


remove_vowels_reverse_str('Hello')  # returns "llH"
/**
 * Sums the numeric elements of an array, ignoring any element that is not a
 * primitive number (strings, null, objects, undefined, ...).
 */
function arraySum(arr) {
  return arr.reduce(
    (total, item) => (typeof item === 'number' ? total + item : total),
    0
  );
}
<reponame>quarkfin/QF-Lib
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import pandas as pd
from qf_lib.common.enums.frequency import Frequency
from qf_lib.common.enums.price_field import PriceField
from qf_lib.common.tickers.tickers import BloombergTicker
from qf_lib.common.utils.dateutils.string_to_date import str_to_date
from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame
from qf_lib.containers.dataframe.qf_dataframe import QFDataFrame
from qf_lib.containers.qf_data_array import QFDataArray
from qf_lib.containers.series.prices_series import PricesSeries
from qf_lib.containers.series.qf_series import QFSeries
from qf_lib_tests.helpers.testing_tools.containers_comparison import assert_series_equal
from qf_lib_tests.integration_tests.connect_to_data_provider import get_data_provider
class TestBloomberg(unittest.TestCase):
START_DATE = str_to_date('2014-01-01')
END_DATE = str_to_date('2015-02-02')
SINGLE_FIELD = 'PX_LAST'
MANY_FIELDS = ['PX_LAST', 'PX_OPEN', 'PX_HIGH']
INVALID_TICKER = BloombergTicker('Inv TIC Equity')
INVALID_TICKERS = [BloombergTicker('Inv1 TIC Equity'), BloombergTicker('AAPL US Equity'),
BloombergTicker('Inv2 TIC Equity')]
SINGLE_TICKER = BloombergTicker('IBM US Equity')
MANY_TICKERS = [BloombergTicker('IBM US Equity'), BloombergTicker('AAPL US Equity')]
NUM_OF_DATES = 273
SINGLE_PRICE_FIELD = PriceField.Close
MANY_PRICE_FIELDS = [PriceField.Close, PriceField.Open, PriceField.High]
def setUp(self):
try:
self.bbg_provider = get_data_provider()
except Exception as e:
raise self.skipTest(e)
# =========================== Test invalid ticker ==========================================================
def test_price_single_invalid_ticker_single_field(self):
# single ticker, single field; end_date by default now, frequency by default DAILY, currency by default None
data = self.bbg_provider.get_price(tickers=self.INVALID_TICKER, fields=self.SINGLE_PRICE_FIELD,
start_date=self.START_DATE, end_date=self.END_DATE,
frequency=Frequency.DAILY)
self.assertIsInstance(data, PricesSeries)
self.assertEqual(len(data), 0)
self.assertEqual(data.name, self.INVALID_TICKER.as_string())
def test_price_single_invalid_ticker_many_fields(self):
# single ticker, single field; end_date by default now, frequency by default DAILY, currency by default None
data = self.bbg_provider.get_price(tickers=self.INVALID_TICKER, fields=self.MANY_PRICE_FIELDS,
start_date=self.START_DATE, end_date=self.END_DATE,
frequency=Frequency.DAILY)
self.assertIsInstance(data, PricesDataFrame)
self.assertEqual(data.shape, (0, len(self.MANY_PRICE_FIELDS)))
self.assertEqual(list(data.columns), self.MANY_PRICE_FIELDS)
def test_price_many_invalid_tickers_many_fields(self):
# single ticker, single field; end_date by default now, frequency by default DAILY, currency by default None
data = self.bbg_provider.get_price(tickers=self.INVALID_TICKERS, fields=self.MANY_PRICE_FIELDS,
start_date=self.START_DATE, end_date=self.END_DATE,
frequency=Frequency.DAILY)
self.assertEqual(type(data), QFDataArray)
self.assertEqual(data.shape, (self.NUM_OF_DATES, len(self.INVALID_TICKERS), len(self.MANY_PRICE_FIELDS)))
self.assertIsInstance(data.dates.to_index(), pd.DatetimeIndex)
self.assertEqual(list(data.tickers), self.INVALID_TICKERS)
self.assertEqual(list(data.fields), self.MANY_PRICE_FIELDS)
# =========================== Test get_price method ==========================================================
def test_price_single_ticker_single_field(self):
# single ticker, single field; end_date by default now, frequency by default DAILY, currency by default None
data = self.bbg_provider.get_price(tickers=self.SINGLE_TICKER, fields=self.SINGLE_PRICE_FIELD,
start_date=self.START_DATE, end_date=self.END_DATE,
frequency=Frequency.DAILY)
self.assertIsInstance(data, PricesSeries)
self.assertEqual(len(data), self.NUM_OF_DATES)
self.assertEqual(data.name, self.SINGLE_TICKER.as_string())
def test_price_single_ticker_single_field_single_date(self):
# single ticker, single field; end_date by default now, frequency by default DAILY, currency by default None
data = self.bbg_provider.get_price(tickers=self.SINGLE_TICKER, fields=self.SINGLE_PRICE_FIELD,
start_date=self.END_DATE, end_date=self.END_DATE,
frequency=Frequency.DAILY)
self.assertIsInstance(data, float)
self.assertEqual(data, 147.7257)
def test_price_single_ticker_multiple_fields(self):
# single ticker, many fields; can be the same as for single field???
data = self.bbg_provider.get_price(tickers=self.SINGLE_TICKER, fields=self.MANY_PRICE_FIELDS,
start_date=self.START_DATE, end_date=self.END_DATE,
frequency=Frequency.DAILY)
self.assertEqual(type(data), PricesDataFrame)
self.assertEqual(data.shape, (self.NUM_OF_DATES, len(self.MANY_PRICE_FIELDS)))
self.assertEqual(list(data.columns), self.MANY_PRICE_FIELDS)
def test_price_multiple_tickers_single_field(self):
data = self.bbg_provider.get_price(tickers=self.MANY_TICKERS, fields=self.SINGLE_PRICE_FIELD,
start_date=self.START_DATE, end_date=self.END_DATE,
frequency=Frequency.DAILY)
self.assertEqual(type(data), PricesDataFrame)
self.assertEqual(data.shape, (self.NUM_OF_DATES, len(self.MANY_TICKERS)))
self.assertEqual(list(data.columns), self.MANY_TICKERS)
def test_price_multiple_tickers_single_field_order(self):
data1 = self.bbg_provider.get_price(tickers=self.MANY_TICKERS, fields=self.SINGLE_PRICE_FIELD,
start_date=self.START_DATE, end_date=self.END_DATE,
frequency=Frequency.DAILY)
data2 = self.bbg_provider.get_price(tickers=[self.MANY_TICKERS[1], self.MANY_TICKERS[0]],
fields=self.SINGLE_PRICE_FIELD, start_date=self.START_DATE,
end_date=self.END_DATE, frequency=Frequency.DAILY)
assert_series_equal(data2.iloc[:, 0], data1.iloc[:, 1])
assert_series_equal(data2.iloc[:, 1], data1.iloc[:, 0])
def test_price_multiple_tickers_multiple_fields(self):
# testing for single date (start_date and end_date are the same)
data = self.bbg_provider.get_price(tickers=self.MANY_TICKERS, fields=self.MANY_PRICE_FIELDS,
start_date=self.START_DATE, end_date=self.END_DATE,
frequency=Frequency.DAILY)
self.assertEqual(type(data), QFDataArray)
self.assertEqual(data.shape, (self.NUM_OF_DATES, len(self.MANY_TICKERS), len(self.MANY_PRICE_FIELDS)))
self.assertIsInstance(data.dates.to_index(), pd.DatetimeIndex)
self.assertEqual(list(data.tickers), self.MANY_TICKERS)
self.assertEqual(list(data.fields), self.MANY_PRICE_FIELDS)
# =========================== Test get_history method ==========================================================
def test_historical_single_ticker_single_field(self):
# single ticker, single field; end_date by default now, frequency by default DAILY, currency by default None
data = self.bbg_provider.get_history(tickers=self.SINGLE_TICKER, fields=self.SINGLE_FIELD,
start_date=self.START_DATE, end_date=self.END_DATE,
frequency=Frequency.DAILY, currency='CHF')
self.assertIsInstance(data, QFSeries)
self.assertEqual(len(data), self.NUM_OF_DATES)
self.assertEqual(data.name, self.SINGLE_TICKER.as_string())
def test_historical_single_ticker_multiple_fields(self):
# single ticker, many fields; can be the same as for single field???
data = self.bbg_provider.get_history(tickers=self.SINGLE_TICKER, fields=self.MANY_FIELDS,
start_date=self.START_DATE, end_date=self.END_DATE,
frequency=Frequency.DAILY, currency='PLN')
self.assertEqual(type(data), QFDataFrame)
self.assertEqual(data.shape, (self.NUM_OF_DATES, len(self.MANY_FIELDS)))
self.assertEqual(list(data.columns), self.MANY_FIELDS)
def test_historical_multiple_tickers_single_field(self):
data = self.bbg_provider.get_history(tickers=self.MANY_TICKERS, fields=self.SINGLE_FIELD,
start_date=self.START_DATE, end_date=self.END_DATE,
frequency=Frequency.DAILY, currency='PLN')
self.assertEqual(type(data), QFDataFrame)
self.assertEqual(data.shape, (self.NUM_OF_DATES, len(self.MANY_TICKERS)))
self.assertEqual(list(data.columns), self.MANY_TICKERS)
def test_historical_multiple_tickers_multiple_fields_one_date(self):
# testing for single date (start_date and end_date are the same)
data = self.bbg_provider.get_history(tickers=self.MANY_TICKERS, fields=self.MANY_FIELDS,
start_date=self.END_DATE, end_date=self.END_DATE,
frequency=Frequency.DAILY, currency='PLN')
self.assertEqual(type(data), QFDataFrame)
self.assertEqual(data.shape, (len(self.MANY_TICKERS), len(self.MANY_FIELDS)))
self.assertEqual(list(data.index), self.MANY_TICKERS)
self.assertEqual(list(data.columns), self.MANY_FIELDS)
def test_historical_multiple_tickers_multiple_fields_many_dates(self):
# testing for single date (start_date and end_date are the same)
data = self.bbg_provider.get_history(tickers=self.MANY_TICKERS, fields=self.MANY_FIELDS,
start_date=self.START_DATE, end_date=self.END_DATE,
frequency=Frequency.DAILY, currency='PLN')
self.assertEqual(type(data), QFDataArray)
self.assertEqual(data.shape, (self.NUM_OF_DATES, len(self.MANY_TICKERS), len(self.MANY_FIELDS)))
self.assertIsInstance(data.dates.to_index(), pd.DatetimeIndex)
self.assertEqual(list(data.tickers), self.MANY_TICKERS)
self.assertEqual(list(data.fields), self.MANY_FIELDS)
def test_historical_single_ticker_single_field_list1(self):
# single ticker, single field; end_date by default now, frequency by default DAILY, currency by default None
data = self.bbg_provider.get_history(tickers=[self.SINGLE_TICKER], fields=[self.SINGLE_FIELD],
start_date=self.START_DATE, end_date=self.END_DATE)
self.assertIsInstance(data, QFDataArray)
self.assertEqual(data.shape, (self.NUM_OF_DATES, 1, 1))
def test_historical_single_ticker_single_field_list2(self):
# single ticker, many fields; can be the same as for single field???
data = self.bbg_provider.get_history(tickers=[self.SINGLE_TICKER], fields=self.SINGLE_FIELD,
start_date=self.START_DATE, end_date=self.END_DATE)
self.assertEqual(type(data), QFDataFrame)
self.assertEqual(data.shape, (self.NUM_OF_DATES, 1))
self.assertEqual(list(data.columns), [self.SINGLE_TICKER])
def test_historical_single_ticker_single_field_list3(self):
# single ticker, many fields; can be the same as for single field???
data = self.bbg_provider.get_history(tickers=self.SINGLE_TICKER, fields=[self.SINGLE_FIELD],
start_date=self.START_DATE, end_date=self.END_DATE)
self.assertEqual(type(data), QFDataFrame)
self.assertEqual(data.shape, (self.NUM_OF_DATES, 1))
self.assertEqual(list(data.columns), [self.SINGLE_FIELD])
def test_historical_single_ticker_single_field_no_end_date(self):
# single ticker, many fields; can be the same as for single field???
data = self.bbg_provider.get_history(tickers=self.SINGLE_TICKER, fields=self.SINGLE_FIELD,
start_date=self.START_DATE)
self.assertTrue(len(data) >= self.NUM_OF_DATES)
# =========================== Test get_current_values method =======================================================
def test_current_many_tickers_many_fields(self):
data = self.bbg_provider.get_current_values(tickers=self.MANY_TICKERS, fields=self.MANY_FIELDS)
self.assertEqual(type(data), QFDataFrame)
self.assertEqual(data.shape, (len(self.MANY_TICKERS), len(self.MANY_FIELDS)))
self.assertEqual(list(data.index), self.MANY_TICKERS)
self.assertEqual(list(data.columns), self.MANY_FIELDS)
def test_current_many_tickers_one_field(self):
data = self.bbg_provider.get_current_values(tickers=self.MANY_TICKERS, fields=self.SINGLE_FIELD)
self.assertEqual(type(data), QFSeries)
self.assertEqual(data.size, len(self.MANY_TICKERS))
self.assertEqual(data.name, self.SINGLE_FIELD)
self.assertEqual(list(data.index), self.MANY_TICKERS)
def test_current_one_ticker_many_fields(self):
data = self.bbg_provider.get_current_values(tickers=self.SINGLE_TICKER, fields=self.MANY_FIELDS)
self.assertEqual(type(data), QFSeries)
self.assertEqual(data.size, len(self.MANY_FIELDS))
self.assertEqual(data.name, self.SINGLE_TICKER)
self.assertEqual(list(data.index), self.MANY_FIELDS)
# =========================== Test override ==========================================================
def test_override_historical_single_ticker_single_field(self):
start_date = str_to_date('2015-10-31')
end_date = str_to_date('2016-03-31')
ticker = BloombergTicker('DGNOXTCH Index')
data = self.bbg_provider.get_history(tickers=ticker, fields='ACTUAL_RELEASE',
start_date=start_date, end_date=end_date)
override_data = self.bbg_provider.get_history(tickers=ticker, fields='ACTUAL_RELEASE',
start_date=start_date, end_date=end_date,
override_name='RELEASE_STAGE_OVERRIDE', override_value='P')
data_model = [0.5, 0, -1, 1.7, -1.3, -0.2]
override_data_model = [0.5, -0.1, -1.2, 1.8, -1, -0.2]
self.assertSequenceEqual(seq1=data_model, seq2=data.tolist())
self.assertSequenceEqual(seq1=override_data_model, seq2=override_data.tolist())
# =========================== Test QFDataArray type ====================================================
def test_qf_dataarray_dtype_for_nan_volume(self):
    """ The tested ticker does not have any volume data within the given period. In this case it has to be
    checked if the dtype of QFDataArray would be correctly casted to float64 or to object. """
    start_date = str_to_date("2010-01-14")
    end_date = str_to_date("2010-01-19")
    requested_fields = [PriceField.Close, PriceField.Volume]
    data_array = self.bbg_provider.get_price([BloombergTicker("FMIM10 Index")], requested_fields,
                                             start_date, end_date, Frequency.DAILY)
    self.assertEqual(data_array.dtype, np.float64)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
<gh_stars>0
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.boot.nacos.config.util.editor;
import java.beans.PropertyEditorSupport;
/**
* @author <a href="mailto:<EMAIL>">liaochuntao</a>
* @since 0.1.3
*/
/**
 * Property editor for plain strings. {@link #setAsText(String)} stores the
 * text verbatim; {@link #getJavaInitializationString()} renders the current
 * value as a Java string literal, escaping control characters, quotes,
 * backslashes, and any character outside the printable ASCII range.
 */
public class NacosStringEditor extends PropertyEditorSupport {

    public NacosStringEditor() {
    }

    @Override
    public String getJavaInitializationString() {
        Object value = this.getValue();
        if (value == null) {
            return "null";
        }
        String text = value.toString();
        // Reserve room for the surrounding quotes up front.
        StringBuilder literal = new StringBuilder(text.length() + 2);
        literal.append('"');
        for (int i = 0; i < text.length(); i++) {
            char ch = text.charAt(i);
            switch (ch) {
                case '\b':
                    literal.append("\\b");
                    break;
                case '\t':
                    literal.append("\\t");
                    break;
                case '\n':
                    literal.append("\\n");
                    break;
                case '\f':
                    literal.append("\\f");
                    break;
                case '\r':
                    literal.append("\\r");
                    break;
                case '"':
                    literal.append("\\\"");
                    break;
                case '\\':
                    literal.append("\\\\");
                    break;
                default:
                    if (ch >= ' ' && ch <= '~') {
                        // Printable ASCII passes through unchanged.
                        literal.append(ch);
                    } else {
                        // Everything else becomes a zero-padded \ uXXXX escape.
                        String hex = Integer.toHexString(ch);
                        literal.append("\\u");
                        for (int pad = hex.length(); pad < 4; pad++) {
                            literal.append('0');
                        }
                        literal.append(hex);
                    }
            }
        }
        literal.append('"');
        return literal.toString();
    }

    @Override
    public void setAsText(String text) {
        this.setValue(text);
    }
}
|
#include <iostream>
using namespace std;
int main(){
    // Read a one-letter continent code ('a'..'f') and print the matching
    // continent name; any other input produces no output.
    char cod;
    cin >> cod;
    if (cod >= 'a' && cod <= 'f') {
        static const char* continents[] = {
            "Africa", "America", "Antartida", "Asia", "Europa", "Oceania"
        };
        cout << continents[cod - 'a'] << endl;
    }
    return 0;
}
package pack;
/**
 * Empty placeholder type — presumably a subtype fixture for tests or
 * classpath experiments; TODO confirm intended use at call sites.
 */
public class SubType1 {
}
|
#!/bin/bash

# Set up the "dreaminvest" scratch org: create it, push the source, assign
# the permission set, load sample data, and open the org.
# Usage: ./script [duration-in-days]   (duration defaults to 7)
DURATION=7
if [ "$#" -eq 1 ]; then
  DURATION=$1
fi

# Quote the expansion so an unexpected value cannot be word-split or globbed.
sfdx force:org:create -a dreaminvest -s -f config/project-scratch-def.json -d "$DURATION"
sfdx force:source:push
sfdx force:user:permset:assign -n dreaminvest
sfdx force:data:bulk:upsert -s Sector__c -f ./data/sectors.csv -w 1 -i Sector_Id__c
sfdx force:data:bulk:upsert -s Fund__c -f ./data/funds.csv -w 1 -i Id
sfdx force:org:open -p /lightning/page/home
echo "Org is set up"
|
# replace_files.py
import sys
import subprocess
def replace_line(file_path, line_number, new_content):
    """Overwrite a single line of *file_path* (1-based *line_number*) with
    *new_content*; prints an error and leaves the file untouched when the
    line number is out of range."""
    with open(file_path, 'r') as handle:
        lines = handle.readlines()
    if not (0 < line_number <= len(lines)):
        print(f"Error: Line number {line_number} is out of range.")
        return
    # Convert the 1-based line number to a 0-based index and re-terminate.
    lines[line_number - 1] = new_content + '\n'
    with open(file_path, 'w') as handle:
        handle.writelines(lines)
def start_backend_server():
    """Launch the FastAPI backend (module ``backend``, app ``app``) with
    uvicorn in auto-reload mode on 0.0.0.0:5000; blocks until it exits."""
    command = ['uvicorn', '--reload', '--host=0.0.0.0', '--port=5000', 'backend:app']
    subprocess.run(command)
if __name__ == "__main__":
    # Expect exactly two CLI arguments: the target file and the line number.
    if len(sys.argv) != 3:
        print("Usage: python3 replace_files.py <file_path> <line_number>")
    else:
        target_path = sys.argv[1]
        target_line = int(sys.argv[2])
        replace_line(target_path, target_line, "New line content")  # Replace with actual new content
        start_backend_server()
GREEN=$(tput setaf 2)
NORMAL=$(tput sgr0)

# Use a constant printf format string and pass the colour codes as arguments,
# so tput's output can never be misinterpreted as printf format directives.
printf '%sInstalling requirements%s\n\n' "$GREEN" "$NORMAL"
pip install -r src/requirements.txt

printf '%sInitializing Airflow database%s\n\n' "$GREEN" "$NORMAL"
airflow initdb

printf '%sDownloading dataset%s\n\n' "$GREEN" "$NORMAL"
wget https://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip -P ./data
unzip data/household_power_consumption.zip -d data

printf '%sPreprocessing dataset%s\n\n' "$GREEN" "$NORMAL"
python src/preprocessing/data_preprocessing.py \
    --data_path data/household_power_consumption.txt \
    --samples_offset D \
    --history_size 15 \
    --tfrecord_path data/dataset.tfrecord

printf '%sDeleting raw dataset files%s\n\n' "$GREEN" "$NORMAL"
rm data/household_power_consumption.zip
rm data/household_power_consumption.txt

printf '%sPreparing Airflow DAG %s\n\n' "$GREEN" "$NORMAL"
mkdir -p ~/airflow/dags
mkdir -p ~/airflow/data
cp src/pipeline.py ~/airflow/dags/
cp src/transform.py ~/airflow/dags/
cp src/trainer.py ~/airflow/dags/
cp data/dataset.tfrecord ~/airflow/data/
|
package main
import (
"fileTransferring/shared"
"fmt"
"github.com/mholt/archiver"
"github.com/pkg/errors"
"io/ioutil"
"math"
"math/rand"
"net"
"os"
"path/filepath"
"strings"
"time"
)
// Transfer-wide bookkeeping shared by the send path.
var tempZipName string     // name of the temporary zip archive built for this transfer
var fileSize int64         // size of the zipped payload in bytes
var totalBytesSent int64   // payload bytes sent so far (drives the progress line)
var totalPacketsSent int   // data packets sent so far
var totalPacketsToSend int // total data packets needed for the whole payload
var packetsLost int        // replies that timed out (shown as "Packets Lost")

// MaxWindowSize caps the sliding-window size.
const MaxWindowSize = shared.MaxWindowSize

// Command-line switches: IPv6 mode, sliding-window mode, simulated packet drops.
var ipv6, sw, dp = shared.GetCMDArgs(os.Args, true)
// main drives the client side of the transfer: it prompts for the server
// address and a file path, zips the target, announces it with a WRQ packet,
// streams the data, and reports the measured throughput.
func main() {
	// Seed math/rand so simulated packet drops differ between runs.
	rand.Seed(time.Now().UTC().UnixNano())

	var serverAddress string
	fmt.Print("Server address: ")
	_, _ = fmt.Scanf("%s", &serverAddress)

	// Dial over IPv6 or IPv4 depending on the command-line flags.
	var conn *net.UDPConn
	var connError error
	if ipv6 {
		remoteAddr, err := net.ResolveUDPAddr("udp6", serverAddress+shared.PORT)
		shared.ErrorValidation(err)
		conn, connError = net.DialUDP("udp6", nil, remoteAddr)
		shared.ErrorValidation(connError)
	} else {
		remoteAddr, err := net.ResolveUDPAddr("udp4", serverAddress+shared.PORT)
		shared.ErrorValidation(err)
		conn, connError = net.DialUDP("udp4", nil, remoteAddr)
		shared.ErrorValidation(connError)
	}

	var filePath string
	fmt.Print("Enter full file path: ")
	_, _ = fmt.Scanf("%s", &filePath)

	// Zip the requested path into a temporary archive and read it into memory.
	fmt.Println("Buffering file...")
	zipError := zipFiles(filePath)
	shared.ErrorValidation(zipError)
	fileBytes, err := ioutil.ReadFile(tempZipName)
	shared.ErrorValidation(err)
	fmt.Println("File Buffered!")

	// Stat the archive to size the transfer and the progress display.
	file, fileError := os.Open(tempZipName)
	shared.ErrorValidation(fileError)
	defer file.Close()
	fi, err := file.Stat()
	shared.ErrorValidation(err)
	fileSize = fi.Size()
	totalPacketsToSend = determineAmountOfPacketsToSend(fileSize)

	// Announce the transfer under the original base name with a .zip suffix.
	sendWRQPacket(conn, strings.Split(filepath.Base(filePath), ".")[0]+".zip")

	var startTime = time.Now().UnixNano() / 1e6 // get it in milliseconds
	sendFile(conn, fileBytes)
	var endTime = time.Now().UnixNano() / 1e6 // get it in milliseconds

	fmt.Printf("Throughput: %f megabits/sec", (float64(fileSize)/float64(endTime-startTime))/125) // from bytes/millsecond to megabits/sec
}
// sendFile transmits the zipped payload to the server, either via the
// sliding-window protocol or sequentially in 512-byte chunks, and removes
// the temporary zip once the payload has been handed off.
func sendFile(conn *net.UDPConn, fileBytes []byte) {
	if sw {
		err := slidingWindowSend(conn, fileBytes)
		if err != nil {
			shared.ErrorValidation(err)
			return
		}
		err = os.Remove(tempZipName)
		shared.ErrorValidation(err)
	} else {
		var currentPacket int
		var bytesToSend = fileBytes
		for {
			if len(bytesToSend) >= 512 {
				// Full-sized data packet; continue with the remainder.
				sendDataPacket(conn, bytesToSend[:512], &currentPacket)
				bytesToSend = bytesToSend[512:]
			} else {
				// Final short packet ends the transfer; clean up the temp zip.
				sendDataPacket(conn, bytesToSend, &currentPacket)
				err := os.Remove(tempZipName)
				shared.ErrorValidation(err)
				break
			}
		}
	}
}
// slidingWindowSend splits data into 512-byte packets and streams them with a
// dynamically sized window: the window grows by one on every acknowledged
// advance (capped at MaxWindowSize) and halves when the expected ACK does not
// arrive, after which everything in the window is resent.
func slidingWindowSend(conn *net.UDPConn, data []byte) error {
	_ = conn.SetReadDeadline(time.Time{}) // no timeout on the client side

	// Pre-build every data packet so the send loop only indexes the slice.
	var bytesToSend = data
	var dataPackets []shared.DataPacket
	var currentPacketNumber int
	for {
		if len(bytesToSend) >= 512 {
			dataPacket := shared.CreateDataPacket(createBlockNumber(&currentPacketNumber), bytesToSend[:512])
			dataPackets = append(dataPackets, *dataPacket)
			bytesToSend = bytesToSend[512:]
		} else {
			dataPacket := shared.CreateDataPacket(createBlockNumber(&currentPacketNumber), bytesToSend[:])
			dataPackets = append(dataPackets, *dataPacket)
			break
		}
	}

	var windowSize = 1
	var windowStart = 0                      // window
	var windowEnd = windowStart + windowSize // window
	var currentPacketsInFlight = 0
	for {
		// The window advances when the packet at its head is acknowledged.
		var blockNumberToReceiveToAdvanceWindow []byte
		if windowStart < len(dataPackets) {
			blockNumberToReceiveToAdvanceWindow = dataPackets[windowStart].BlockNumber
		} else {
			blockNumberToReceiveToAdvanceWindow = dataPackets[len(dataPackets)-1].BlockNumber
		}

		// Fill the window with as many unsent packets as it allows.
		for i := windowStart; i < windowEnd; i++ {
			if currentPacketsInFlight != windowSize {
				if !shouldDropPacket() {
					if (windowStart + currentPacketsInFlight) < len(dataPackets) {
						var dataPacketToSend = dataPackets[windowStart+currentPacketsInFlight]
						_, _ = conn.Write(dataPacketToSend.ByteArray())
						currentPacketsInFlight++
					}
				} else {
					// Simulated drop: still counted in flight so the ACK is missed.
					currentPacketsInFlight++
					fmt.Println("Dropped packet...")
				}
			} else {
				break
			}
		}

		endOfTransfer, sendWasSuccessful, _, err := receiveSlidingWindowPacket(conn, blockNumberToReceiveToAdvanceWindow)
		shared.ErrorValidation(err)
		if endOfTransfer {
			fmt.Println("File transferred fully")
			return nil
		}
		if sendWasSuccessful {
			windowStart += 1 // first move where the window starts
			currentPacketsInFlight--
			windowSize++
			if windowSize > MaxWindowSize {
				windowSize = MaxWindowSize
			}
			fmt.Printf("Window size increased to...%d\n", windowSize)
		} else {
			// Missed ACK: shrink the window and resend from windowStart.
			windowSize /= 2
			if windowSize <= 0 {
				windowSize = 1
			}
			fmt.Printf("Window size decreased to...%d\n", windowSize)
			currentPacketsInFlight = 0
		}
		windowEnd = windowStart + windowSize
	}
}
// readSlidingWindowPacket interprets a packet received in sliding-window
// mode. It reports whether the transfer is complete (opcode 7), whether the
// expected block was acknowledged, and — for a mismatched ACK — the block
// number that was actually received.
func readSlidingWindowPacket(data []byte, blockNumber []byte) (endOfTransfer bool, sendSuccessful bool, blockNumberReceived []byte, err error) {
	opcode := data[1]
	switch opcode {
	case 4: // ack packet
		ack, _ := shared.ReadACKPacket(data)
		if shared.BlockNumberChecker(ack.BlockNumber, blockNumber) {
			return false, true, nil, nil
		}
		return false, false, ack.BlockNumber, nil
	case 5: // error packet from the server aborts the transfer
		e, _ := shared.ReadErrorPacket(data)
		// fmt.Errorf replaces errors.New(fmt.Sprintf(...)): same message, idiomatic construction.
		return false, false, nil, fmt.Errorf("Error code: %d\nError Message: %s", e.ErrorCode[1], e.ErrorMessage)
	case 7: // end-of-transfer packet
		return true, false, nil, nil
	default:
		return false, false, nil, fmt.Errorf("Client can only read Opcodes of 4,5, and 7 in Sliding Window Mode...not: %d", opcode)
	}
}
// receiveSlidingWindowPacket blocks for the next server packet and delegates
// interpretation to readSlidingWindowPacket.
func receiveSlidingWindowPacket(conn *net.UDPConn, blockNumber []byte) (bool, bool, []byte, error) {
	receivedData := make([]byte, 516) // 512 data bytes + 4 header bytes
	bytesReceived, _, err := conn.ReadFromUDP(receivedData)
	shared.ErrorValidation(err)
	return readSlidingWindowPacket(receivedData[:bytesReceived], blockNumber)
}
// sendWRQPacket creates and sends a WRQ packet announcing fileName. In
// sliding-window mode it attaches the "sendingMode" option so the server can
// confirm the mode via an OACK reply.
func sendWRQPacket(conn *net.UDPConn, fileName string) {
	var wPacket *shared.RRQWRQPacket
	if sw {
		options := map[string]string{
			"sendingMode": "slidingWindow",
		}
		wPacket = shared.CreateRRQWRQPacket(false, fileName, options)
	} else {
		wPacket = shared.CreateRRQWRQPacket(false, fileName, nil)
	}
	// Block number {0, 0} is expected in the acknowledgement of a WRQ.
	send(conn, wPacket.ByteArray(), []byte{0, 0})
}
// sendDataPacket creates and sends one data packet (sequential mode),
// updating the byte/packet counters and redrawing the progress line.
func sendDataPacket(conn *net.UDPConn, data []byte, currentPacket *int) {
	dataPacket := shared.CreateDataPacket(createBlockNumber(currentPacket), data)
	send(conn, dataPacket.ByteArray(), dataPacket.BlockNumber)
	totalBytesSent += int64(len(dataPacket.Data))
	totalPacketsSent++
	displayProgress()
}
// readSequentialPacket interprets a packet received in sequential mode: an
// ACK is matched against the expected block number, an OACK switches the
// client to the sending mode the server selected, and an error packet aborts
// the transfer.
func readSequentialPacket(data []byte, blockNumber []byte) error {
	opcode := data[1]
	switch opcode {
	case 4: // ack packet
		ack, _ := shared.ReadACKPacket(data)
		if shared.BlockNumberChecker(ack.BlockNumber, blockNumber) {
			return nil
		}
		return errors.New("Block numbers do not match...")
	case 5:
		e, _ := shared.ReadErrorPacket(data)
		// fmt.Errorf replaces errors.New(fmt.Sprintf(...)): same message, idiomatic construction.
		return fmt.Errorf("Error code: %d\nError Message: %s", e.ErrorCode[1], e.ErrorMessage)
	case 6:
		oack, _ := shared.ReadOACKPacket(data)
		if oack.Options["sendingMode"] == "slidingWindow" { // just simulating if there were other options, to set the client to what the server wants...there is only one option for this specific project
			sw = true
		} else {
			sw = false
		}
		return nil
	default:
		return fmt.Errorf("Client can only read Opcodes of 4, 5, and 6...not: %d", opcode)
	}
}
// createBlockNumber advances *currentPacketNumber and encodes the new value
// as a two-byte big-endian block number (each byte wrapping at 256).
func createBlockNumber(currentPacketNumber *int) []byte {
	*currentPacketNumber++
	// Plain integer arithmetic: the old math.Floor(float64(n / 256)) round-trip
	// was redundant, since n / 256 is already an integer division.
	return []byte{byte(*currentPacketNumber / 256), byte(*currentPacketNumber % 256)}
}
// send writes a packet and waits for the matching reply, retrying up to 10
// times; a timed-out read counts as a lost packet and is retried after
// 100ms. Exits the process (via ErrorValidation) when retries are exhausted
// or the reply is invalid.
func send(conn *net.UDPConn, data []byte, blockNumber []byte) {
	for i := 0; i < 10; i++ {
		// shouldDropPacket simulates loss by skipping the write entirely.
		if !shouldDropPacket() {
			_, _ = conn.Write(data)
		}
		receivedData, err := receiveSequentialPacket(conn)
		if err == nil {
			err := readSequentialPacket(receivedData, blockNumber)
			shared.ErrorValidation(err)
			return
		} else {
			packetsLost++
			displayProgress()
			time.Sleep(time.Millisecond * 100)
		}
	}
	shared.ErrorValidation(errors.New("Total retries exhausted...exiting"))
}
// receiveSequentialPacket reads the server's reply with a 500ms deadline so
// a lost ACK surfaces as a timeout error instead of blocking forever.
func receiveSequentialPacket(conn *net.UDPConn) ([]byte, error) {
	_ = conn.SetReadDeadline(time.Now().Add(time.Millisecond * 500))
	receivedData := make([]byte, 516) // 512 data bytes + 4 header bytes
	bytesReceived, _, timedOut := conn.ReadFromUDP(receivedData)
	return receivedData[:bytesReceived], timedOut
}
// displayProgress redraws the in-place progress line showing the percentage
// of bytes sent, packets lost, and the packet counts.
func displayProgress() {
	percent := int(math.Ceil(float64(totalBytesSent) / float64(fileSize) * 100))
	// Leading \r rewinds to the start of the line so the bar updates in place.
	fmt.Printf("\rProgress: (%d%% | Packets Lost: %d | %d/%d packets sent) ", percent, packetsLost, totalPacketsSent, totalPacketsToSend)
}
// determineAmountOfPacketsToSend returns the number of 512-byte data packets
// needed to transfer fileSize bytes completely.
func determineAmountOfPacketsToSend(fileSize int64) int {
	// Integer ceiling division avoids the float64 round-trip, which loses
	// precision for sizes above 2^53 bytes.
	return int((fileSize + 511) / 512)
}
// shouldDropPacket reports whether the current packet should be simulated as
// lost: when drop mode (dp) is enabled, roughly 1% of packets are dropped.
func shouldDropPacket() bool {
	// Short-circuit keeps the random draw conditional on dp, as before.
	return dp && rand.Float64() <= .01
}
// zipFiles walks path recursively, collects every regular file, and archives
// them into a freshly named temporary zip (stored in tempZipName).
func zipFiles(path string) error {
	generateTempZipName()
	var filesToZip []string
	err := filepath.Walk(path,
		func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			// NOTE(review): os.Stat follows symlinks while Walk's info does
			// not, so symlinks that point at regular files get included here —
			// confirm that is intended before simplifying to info.Mode().
			fi, err := os.Stat(path)
			if err != nil {
				return err
			}
			if fi.Mode().IsRegular() {
				filesToZip = append(filesToZip, path)
			}
			return nil
		})
	if err != nil {
		return err
	}
	return archiver.Archive(filesToZip, tempZipName)
}
// generateTempZipName sets tempZipName to a random 10-uppercase-letter name
// with a ".zip" suffix.
func generateTempZipName() {
	letters := make([]byte, 10)
	for i := range letters {
		// rand.Intn(26) yields 0..25 and so covers the full range 'A'..'Z'.
		// The previous Intn(25) could never produce 'Z', contradicting the
		// original "A=65 and Z=65+25" comment.
		letters[i] = byte('A' + rand.Intn(26))
	}
	tempZipName = string(letters) + ".zip"
}
|
import Sequelize from 'sequelize';
/**
 * Defines the Articles model: a UUID-identified article with a required
 * title, optional image reference, and required body text.
 *
 * @param {object} sequelize - Sequelize instance to register the model on.
 * @returns the Articles model.
 */
export default (sequelize) => {
  const Articles = sequelize.define('Articles', {
    articleId: {
      // NOTE(review): no `primaryKey: true` here even though this looks like
      // the intended primary key — verify against the migration.
      type: Sequelize.UUID,
      allowNull: false,
      defaultValue: Sequelize.UUIDV4
    },
    title: {
      type: Sequelize.STRING,
      allowNull: false
    },
    image: {
      // Optional image reference for the article.
      type: Sequelize.STRING,
      allowNull: true
    },
    article: {
      // The article body text.
      type: Sequelize.TEXT,
      allowNull: false
    }
  }, {});
  Articles.associate = function(models) {
    // An article has many comments, joined on articleId.
    // NOTE(review): the alias "Articles" on this hasMany(Comments) association
    // looks like it should name the comments side — confirm before relying on it.
    Articles.hasMany(models.Comments, {
      foreignKey: 'articleId',
      as: "Articles"
    })
  };
  return Articles;
};
#!/bin/bash
#
# Pre-receive policy hook. For every pushed commit it checks that:
#  - the message references an AC issue: task#<issue>[-<issue>]: <description> (or "merge")
#  - the author email matches <surname>-<ii>@gbksoft.com
#  - the author name matches [<name>[<surname>]|<surname>-<ii>]
REGEX_TASK='(task#[0-9]+(\-[0-9]+)?: [^ ].*|merge)'
REGEX_EMAIL='@gbksoft\.com$'
REGEX_NAME='[a-z]+?([a-z]+?)?$'

ERROR_MSG_TASK="[POLICY:ISSUE] The commit doesn't reference a AC issue"
ERROR_MSG_EMAIL="[POLICY:EMAIL] The commit doesn't contains valid email"
ERROR_MSG_NAME="[POLICY:NAME] The commit doesn't contains valid author name"

HAS_ERROR=false

while read OLDREV NEWREV REFNAME ; do
    # An all-zero old rev means the ref is being created: nothing to compare.
    if [ 0 -ne $(expr "$OLDREV" : "0*$") ]; then
        exit 0
    fi

    for COMMIT in $(git rev-list "$OLDREV".."$NEWREV"); do
        COMMIT="${COMMIT:0:9}"

        # Expansions are quoted throughout so names and messages containing
        # spaces or shell metacharacters are passed through intact.
        NAME=$(git log -1 --pretty=format:%an "$COMMIT")
        if ! echo "$NAME" | grep -iqE "$REGEX_NAME"; then
            echo "$COMMIT | $ERROR_MSG_NAME:" >&2
            echo "$NAME:" >&2
            echo "" >&2
            HAS_ERROR=true
        fi

        EMAIL=$(git log -1 --pretty=format:%ae "$COMMIT")
        if ! echo "$EMAIL" | grep -iqE "$REGEX_EMAIL"; then
            echo "$COMMIT | $ERROR_MSG_EMAIL:" >&2
            echo "$EMAIL:" >&2
            echo "" >&2
            HAS_ERROR=true
        fi

        # Strip the commit headers; everything after the first blank line is the message.
        MESSAGE=$(git cat-file commit "$COMMIT" | sed '1,/^$/d')
        if ! echo "$MESSAGE" | grep -iqE "$REGEX_TASK"; then
            echo "$COMMIT | $ERROR_MSG_TASK:" >&2
            echo "$MESSAGE:" >&2
            echo "" >&2
            HAS_ERROR=true
        fi
    done
done

if $HAS_ERROR ; then
    echo "ERRORS!!!" >&2
    exit 1
fi

exit 0
<filename>src/FaceDistance.py<gh_stars>0
# -*- coding: utf-8 -*-
import os
from scipy import misc
import tensorflow as tf
import numpy as np
import copy
import sys
import facenet # https://github.com/davidsandberg/facenet.git
def getDatasetDistance(result_path, dataset_path, model_path):
    """Compute intra-identity embedding distance statistics for a dataset.

    Each subdirectory of ``dataset_path`` is treated as one identity's image
    folder; directories with fewer than two images are skipped. Results are
    written to ``result_path`` as CSV rows: name, average, standard, count.

    Args:
        result_path: path of the CSV report to (over)write.
        dataset_path: directory whose subdirectories each hold one identity.
        model_path: path of the facenet model to load.
    """
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the model once and reuse the session for every directory.
            facenet.load_model(model_path)

            # (Re)create the report with its header row.
            with open(result_path, "w") as report:
                report.write("name, average, standard, count\n")

            filelist = os.listdir(dataset_path)
            count = 0
            for dirName in filelist:
                count += 1
                print("calculating distance of dir %s, current %d of total %d\n" % (dirName, count, len(filelist)))
                subDir = os.path.join(dataset_path, dirName)
                subFileList = os.listdir(subDir)
                if len(subFileList) < 2:
                    # Need at least two images to form a pairwise distance.
                    continue
                try:
                    avg, std = distance(sess, subDir, 160)
                    with open(result_path, "a") as report:
                        report.write("%s, %f, %f, %d\n" % (dirName, avg, std, len(os.listdir(subDir))))
                except Exception:
                    # Best-effort per directory, but no longer a bare except:
                    # the old form also swallowed KeyboardInterrupt/SystemExit.
                    print(sys.exc_info()[0])
def distance(sess, image_folder, img_size=None):
    """Compute pairwise facenet embedding distances for a folder of images.

    Args:
        sess: TensorFlow session with the facenet model already loaded.
        image_folder: directory containing the images of one identity.
        img_size: optional square edge length to resize every image to.

    Returns:
        (average, std): mean and standard deviation of the Euclidean distances
        over unordered image pairs. ``average`` is -1.0 when fewer than two
        images are present (no pairs exist).
    """
    print(image_folder)
    image_files = []
    for file in os.listdir(image_folder):
        image_files.append(os.path.join(image_folder, file))
    print("There is %d files in %s." % (len(image_files), image_folder))

    tmp_image_paths = copy.copy(image_files)
    img_list = []
    for image in tmp_image_paths:
        img = misc.imread(os.path.expanduser(image), mode='RGB')
        if img_size:
            img = misc.imresize(img, (img_size, img_size))
        img_list.append(img)
    images = np.stack(img_list)

    # Get input and output tensors
    images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
    embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
    phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

    # Run forward pass to calculate embeddings
    feed_dict = {images_placeholder: images, phase_train_placeholder: False}
    emb = sess.run(embeddings, feed_dict=feed_dict)

    nrof_images = len(image_files)
    print('Images:')
    for i in range(nrof_images):
        print('%1d: %s' % (i, image_files[i]))
    print('')

    # Print the distance matrix and collect each unordered pair (j > i) once.
    print('Distance matrix')
    for i in range(nrof_images):
        print('%1d,' % i, end='')
    print('')
    array = np.array([])
    for i in range(nrof_images):
        for j in range(nrof_images):
            dist = np.sqrt(np.sum(np.square(np.subtract(emb[i, :], emb[j, :]))))
            print('%1.4f,' % dist, end='')
            if j > i:
                array = np.append(array, dist)
        print('')

    # Bug fix: the previous running update `average = (average + dist) / 2`
    # weighted later pairs exponentially more and was not a mean at all.
    # Use the true arithmetic mean; keep -1 as the "no pairs" sentinel.
    average = float(np.mean(array)) if array.size else -1.0
    print('Average distance is %f' % average)
    print('Standard Deviation is %f' % np.std(array))
    return average, np.std(array)
<gh_stars>1-10
// confine_level.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "main_canost.h"
// #define TEST_CONFINE_LEVEL 1
#ifdef TEST_CONFINE_LEVEL
extern FILE *dummyfile;
#endif
extern int iatm;
extern struct atom *a1st;
extern int new_atm;
extern struct level *top;
extern struct level *down;
extern int lowest;
extern int boundary;
extern int *confined_bond;
extern int atoms_in_confine;
/* confine_level: restrict the working region to the levels above `boundary`.
 * Resets each atom's confined bond count to its full bond count, accumulates
 * the number of atoms in the confined region, and — when a boundary level is
 * specified — severs bonds that cross the boundary and points `down` at the
 * level just below it. Returns EXIT_SUCCESS. */
int confine_level( void ){
    int i,j;
    struct bond *bp; /* pointer to the bond being examined */
    struct level *lp;
    int *comps;

    /* Start every atom with its full bond count. */
    for( i=1 ; i < iatm+new_atm ; i++ ){
        confined_bond[i] = a1st[i].nbnd;
    }
    atoms_in_confine = 0;

    if( boundary != lowest+1 ){ /* a level for CANOST code generation was specified */
        i = 0;
        for( lp = top; lp->next != NULL ; lp=lp->next ){
            atoms_in_confine += lp->member_Num;
            if( i == boundary ){
                comps = lp->member;
                for( j=0; j < lp->member_Num ; j++ ){
                    if( comps[j] < iatm ){
                        for( bp = a1st[ comps[j] ].adj_list ; bp != NULL ; bp = bp->next ){
                            if( a1st[ bp->alias ].layer >= a1st[ comps[j] ].layer+1 ){
                                /* Bond crosses the boundary: drop it from the
                                 * confined count and push the neighbour two
                                 * layers below the boundary atom. */
                                --confined_bond[ comps[j] ];
                                a1st[ bp->alias ].layer = a1st[ comps[j] ].layer+2;
                            }
                        }
                    }
                }
                down = lp->next; /* set `down` to the level one below the specified level */
                break;
            }
            i++;
        }
        atoms_in_confine++;
    }else{
        /* No boundary given: the whole system is in the confined region. */
        atoms_in_confine = iatm+new_atm;
    }
#ifdef TEST_CONFINE_LEVEL
    /* Debug dump of every level's members, code numbers and layers. */
    dummyfile=fopen("dummy_confine_level.dat","w");
    fprintf(dummyfile,"atoms_in_confine %d\n",atoms_in_confine);
    for( lp=top ; lp->next != NULL ; lp=lp->next ){
        fprintf(dummyfile,"%d \n\n",lp->member_Num );
        comps=lp->member;
        for( i=0 ; i < lp->member_Num ; i++ ){
            fprintf(dummyfile,"%4d %4d %4d\n",comps[i],a1st[ comps[i] ].codeNum,a1st[ comps[i] ].layer );
        }
        fprintf(dummyfile,"\n\n");
    }
    fclose(dummyfile);
#endif
    return EXIT_SUCCESS;
}
|
<filename>app/models/admin_user.rb<gh_stars>1-10
# The model representing single admin user
# The model representing single admin user.
#
# Authentication is delegated to Devise: database-backed password login,
# password recovery, "remember me" cookies, sign-in tracking, and the
# standard email/password validations.
class AdminUser < ApplicationRecord
  devise :database_authenticatable,
         :recoverable,
         :rememberable,
         :trackable,
         :validatable
end
|
# Service feedback (a kind of AnonymousContact) exposed through the API
# under its own type discriminator.
class AggregatedServiceFeedback < AnonymousContact
  # Type discriminator included in the serialized payload.
  def type
    "aggregated-service-feedback"
  end

  # Serializes only the whitelisted attributes plus the computed url,
  # merging any caller-supplied serialization options on top.
  def as_json(options = {})
    attributes_to_serialise = %i[
      type
      path
      id
      created_at
      slug
      service_satisfaction_rating
      details
    ]
    super({
      only: attributes_to_serialise,
      methods: :url,
    }.merge(options))
  end
end
|
import torch
from torch2trt import torch2trt
def optimize_and_save_model(net, device, model_name):
    """Convert a PyTorch model to TensorRT and persist its state dict.

    Args:
        net: the PyTorch model to optimize.
        device: device to run the conversion on (e.g. a CUDA device).
        model_name: output file name without the ".pth" extension.

    Returns:
        The path of the saved ".pth" file.
    """
    model = net.to(device)
    # torch2trt traces the model with a sample input of the expected shape.
    sample_input = torch.ones((1, 3, 64, 64)).to(device)
    trt_model = torch2trt(model, [sample_input])
    output_path = model_name + ".pth"
    torch.save(trt_model.state_dict(), output_path)
    return output_path
package io.opensphere.core.animation.impl;
import java.text.ParseException;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
import io.opensphere.core.animation.AnimationPlan.EndBehavior;
import io.opensphere.core.model.time.TimeSpan;
import io.opensphere.core.model.time.TimeSpanFormatterTest;
import io.opensphere.core.units.duration.Days;
import io.opensphere.core.units.duration.Duration;
import io.opensphere.core.util.collections.New;
/** Tests {@link AnimationPlanFactory}. */
public class AnimationPlanFactoryTest
{
    /**
     * Tests {@link AnimationPlanFactory#createDefaultAnimationPlan(TimeSpan, Duration, java.util.Collection)}.
     *
     * Verifies that a loop span is divided into one frame per duration, and
     * that frames overlapping a skipped span are omitted from the sequence.
     *
     * @exception ParseException if one of the times cannot be parsed.
     */
    @Test
    public void testCreateDefaultAnimationPlan() throws ParseException
    {
        TimeSpan loopSpan;
        Duration frameDuration;
        List<TimeSpan> skippedSpans = New.list();
        List<TimeSpan> sequence = New.list();

        // Simple day test: three days yield three one-day frames.
        loopSpan = TimeSpanFormatterTest.span("2015-04-20 00:00:00", "2015-04-23 00:00:00");
        frameDuration = Days.ONE;
        skippedSpans.clear();
        sequence.clear();
        sequence.add(TimeSpanFormatterTest.span("2015-04-20", "2015-04-21"));
        sequence.add(TimeSpanFormatterTest.span("2015-04-21", "2015-04-22"));
        sequence.add(TimeSpanFormatterTest.span("2015-04-22", "2015-04-23"));
        Assert.assertEquals(new DefaultAnimationPlan(sequence, EndBehavior.WRAP),
                new AnimationPlanFactory().createDefaultAnimationPlan(loopSpan, frameDuration, skippedSpans));

        // Simple day test with skipped interval: the frame for 04-21, which
        // overlaps the skipped 06:00-09:00 span, is excluded.
        loopSpan = TimeSpanFormatterTest.span("2015-04-20 00:00:00", "2015-04-23 00:00:00");
        frameDuration = Days.ONE;
        skippedSpans.clear();
        skippedSpans.add(TimeSpanFormatterTest.span("2015-04-21 06:00:00", "2015-04-21 09:00:00"));
        sequence.clear();
        sequence.add(TimeSpanFormatterTest.span("2015-04-20", "2015-04-21"));
        sequence.add(TimeSpanFormatterTest.span("2015-04-22", "2015-04-23"));
        Assert.assertEquals(new DefaultAnimationPlan(sequence, EndBehavior.WRAP),
                new AnimationPlanFactory().createDefaultAnimationPlan(loopSpan, frameDuration, skippedSpans));
    }
}
|
<gh_stars>1-10
// SPDX-License-Identifier: Apache-2.0
// YAPION
// Copyright (C) 2019,2020 yoyosource
package yapion.hierarchy.types;
import yapion.annotations.deserialize.YAPIONLoad;
import yapion.annotations.serialize.YAPIONSave;
import yapion.hierarchy.typegroups.YAPIONAnyClosure;
import yapion.hierarchy.typegroups.YAPIONAnyType;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.util.Objects;
import static yapion.utils.ReferenceIDUtils.calc;
@YAPIONSave(context = "*")
@YAPIONLoad(context = "*")
public final class YAPIONVariable extends YAPIONAnyClosure {
private final String name;
private final YAPIONAnyType value;
private static final String PATTERN = "[({\\[<)}\\]>]";
private static final String REPLACEMENT = "\\\\$0";
public YAPIONVariable(String name, YAPIONAnyType value) {
this.name = name;
this.value = value;
}
public String getName() {
return name;
}
public YAPIONAnyType getValue() {
return value;
}
public long referenceValue() {
return ((long) name.length() ^ calc(name) ^ value.referenceValue()) & 0x7FFFFFFFFFFFFFFFL;
}
@Override
public String toString() {
return toYAPIONString();
}
@Override
public String toYAPIONString() {
if (name.startsWith(" ")) {
return "\\" + name.replaceAll(PATTERN, REPLACEMENT) + value.toYAPIONString();
}
return name.replaceAll(PATTERN, REPLACEMENT) + value.toYAPIONString();
}
@Override
public String toYAPIONStringPrettified() {
if (name.startsWith(" ")) {
return "\\" + name.replaceAll(PATTERN, REPLACEMENT) + value.toYAPIONStringPrettified();
}
return name.replaceAll(PATTERN, REPLACEMENT) + value.toYAPIONStringPrettified();
}
public String toJSONString() {
return "\"" + name + "\":" + value.toJSONString();
}
@Override
public String toLossyJSONString() {
return "\"" + name + "\":" + value.toLossyJSONString();
}
@Override
public void toOutputStream(OutputStream outputStream) throws IOException {
if (name.startsWith(" ")) outputStream.write("\\".getBytes(StandardCharsets.UTF_8));
outputStream.write(name.replaceAll(PATTERN, REPLACEMENT).getBytes(StandardCharsets.UTF_8));
value.toOutputStream(outputStream);
}
@Override
public void toOutputStreamPrettified(OutputStream outputStream) throws IOException {
if (name.startsWith(" ")) outputStream.write("\\".getBytes(StandardCharsets.UTF_8));
outputStream.write(name.replaceAll(PATTERN, REPLACEMENT).getBytes(StandardCharsets.UTF_8));
value.toOutputStreamPrettified(outputStream);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof YAPIONVariable)) return false;
YAPIONVariable variable = (YAPIONVariable) o;
return name.equals(variable.name) && value.equals(variable.value);
}
@Override
public int hashCode() {
return Objects.hash(name, value);
}
} |
# This file was generated on 2021-01-02T20:21:17+00:00 from the rspec-dev repo.
# DO NOT modify it by hand as your changes will get lost the next time it is generated.
# Taken from:
# https://github.com/travis-ci/travis-build/blob/e9314616e182a23e6a280199cd9070bfc7cae548/lib/travis/build/script/templates/header.sh#L34-L53
# travis_retry <command...>: run a command, retrying up to three total
# attempts with a one-second pause between failures. Returns the status of
# the last attempt.
travis_retry() {
  local result=0
  local count=1
  while [ $count -le 3 ]; do
    [ $result -ne 0 ] && {
      echo -e "\n\033[33;1mThe command \"$@\" failed. Retrying, $count of 3.\033[0m\n" >&2
    }
    "$@"
    result=$?
    [ $result -eq 0 ] && break
    count=$(($count + 1))
    sleep 1
  done

  # After three failed attempts the loop exits with count=4, so the original
  # "-eq 3" guard never fired and the final failure message was never shown.
  [ $count -gt 3 ] && {
    echo "\n\033[33;1mThe command \"$@\" failed 3 times.\033[0m\n" >&2
  }

  return $result
}
# Taken from https://github.com/vcr/vcr/commit/fa96819c92b783ec0c794f788183e170e4f684b2
# and https://github.com/vcr/vcr/commit/040aaac5370c68cd13c847c076749cd547a6f9b1
# Prefer gdate (GNU coreutils on macOS) when available, else the system date.
nano_cmd="$(type -p gdate date | head -1)"
nano_format="+%s%N"
# BSD date on Darwin does not support %N; substitute literal zero nanoseconds.
[ "$(uname -s)" != "Darwin" ] || nano_format="${nano_format/%N/000000000}"
# Emit a travis_time:start marker with a fresh random 8-hex-digit timer id
# and record the start timestamp (nanoseconds where the date command allows).
travis_time_start() {
  travis_timer_id=$(printf %08x $(( RANDOM * RANDOM )))
  travis_start_time=$($nano_cmd -u "$nano_format")
  printf "travis_time:start:%s\r\e[0m" $travis_timer_id
}
# Emit the matching travis_time:end marker, reporting start/finish/duration
# based on the timestamps captured by travis_time_start.
travis_time_finish() {
  local travis_end_time=$($nano_cmd -u "$nano_format")
  local duration=$(($travis_end_time-$travis_start_time))
  printf "travis_time:end:%s:start=%s,finish=%s,duration=%s\r\e[0m" \
    $travis_timer_id $travis_start_time $travis_end_time $duration
}
# fold <name> <command...>: run a command inside a named, collapsible log
# section. On Travis it emits travis_fold start/end markers plus timing;
# elsewhere it prints plain Starting/Ending banners. On failure the exit
# status is stored in the global STATUS (and, on Travis, the end marker is
# suppressed so the failing section stays expanded). Returns the command's
# exit status.
fold() {
  local name="$1"
  local status=0
  shift 1

  if [ -n "$TRAVIS" ]; then
    printf "travis_fold:start:%s\r\e[0m" "$name"
    travis_time_start
  else
    echo "============= Starting $name ==============="
  fi

  "$@"
  status=$?

  [ -z "$TRAVIS" ] || travis_time_finish

  if [ "$status" -eq 0 ]; then
    if [ -n "$TRAVIS" ]; then
      printf "travis_fold:end:%s\r\e[0m" "$name"
    else
      echo "============= Ending $name ==============="
    fi
  else
    STATUS="$status"
  fi

  return $status
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.