code stringlengths 3 1.05M | repo_name stringlengths 4 116 | path stringlengths 4 991 | language stringclasses 9 values | license stringclasses 15 values | size int32 3 1.05M |
|---|---|---|---|---|---|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using Google.Cloud.EntityFrameworkCore.Spanner.Extensions;
using Google.Cloud.EntityFrameworkCore.Spanner.Extensions.Internal;
using Google.Cloud.EntityFrameworkCore.Spanner.Infrastructure;
using Google.Cloud.EntityFrameworkCore.Spanner.Tests.MigrationTests.Models;
using Grpc.Core;
using Microsoft.EntityFrameworkCore;
using Xunit;
namespace Google.Cloud.EntityFrameworkCore.Spanner.Tests.MigrationTests
{
internal class MockMigrationSampleDbContext : SpannerMigrationSampleDbContext
{
private readonly string _connectionString;
public MockMigrationSampleDbContext() : this("Data Source=projects/p1/instances/i1/databases/d1;")
{
}
public MockMigrationSampleDbContext(string connectionString)
{
_connectionString = connectionString;
}
protected override void OnConfiguring(DbContextOptionsBuilder optionsBuilder)
{
if (!optionsBuilder.IsConfigured)
{
optionsBuilder
#pragma warning disable EF1001
.UseSpanner(_connectionString, _ => SpannerModelValidationConnectionProvider.Instance.EnableDatabaseModelValidation(false), ChannelCredentials.Insecure)
#pragma warning restore EF1001
.UseMutations(MutationUsage.Never)
.UseLazyLoadingProxies();
}
}
}
    /// <summary>
    /// Verifies that <c>Database.GenerateCreateScript()</c> produces the expected
    /// Spanner DDL for the sample migration model, without touching a real database.
    /// </summary>
    public class GenerateCreateScriptTest
    {
        [Fact]
        public void Generate_Create_Script()
        {
            using var db = new MockMigrationSampleDbContext();
            var generatedScript = db.Database.GenerateCreateScript();
            // Expected DDL. Whitespace inside the verbatim string is significant:
            // the comparison below is byte-for-byte.
            var script = @"CREATE TABLE `Singers` (
    `SingerId` INT64 NOT NULL,
    `FirstName` STRING(200),
    `LastName` STRING(200) NOT NULL,
    `FullName` STRING(400) NOT NULL AS (COALESCE(FirstName || ' ', '') || LastName) STORED,
    `BirthDate` DATE,
    `Picture` BYTES(MAX)
)PRIMARY KEY (`SingerId`)
CREATE TABLE `TableWithAllColumnTypes` (
    `ColInt64` INT64 NOT NULL,
    `ColFloat64` FLOAT64,
    `ColNumeric` NUMERIC,
    `ColBool` BOOL,
    `ColString` STRING(100),
    `ColStringMax` STRING(MAX),
    `ColChar` STRING(1),
    `ColBytes` BYTES(100),
    `ColBytesMax` BYTES(MAX),
    `ColDate` DATE,
    `ColTimestamp` TIMESTAMP,
    `ColCommitTS` TIMESTAMP OPTIONS (allow_commit_timestamp=true) ,
    `ColInt64Array` ARRAY<INT64>,
    `ColFloat64Array` ARRAY<FLOAT64>,
    `ColNumericArray` ARRAY<NUMERIC>,
    `ColBoolArray` ARRAY<BOOL>,
    `ColStringArray` ARRAY<STRING(100)>,
    `ColStringMaxArray` ARRAY<STRING(MAX)>,
    `ColBytesArray` ARRAY<BYTES(100)>,
    `ColBytesMaxArray` ARRAY<BYTES(MAX)>,
    `ColDateArray` ARRAY<DATE>,
    `ColTimestampArray` ARRAY<TIMESTAMP>,
    `ColGuid` STRING(36),
    `ColComputed` STRING(MAX) AS (ARRAY_TO_STRING(ColStringArray, ',')) STORED
)PRIMARY KEY (`ColInt64`)
CREATE TABLE `Venues` (
    `Code` STRING(10) NOT NULL,
    `Name` STRING(100),
    `Active` BOOL NOT NULL,
    `Capacity` INT64,
    `Ratings` ARRAY<FLOAT64>
)PRIMARY KEY (`Code`)
CREATE TABLE `Albums` (
    `AlbumId` INT64 NOT NULL,
    `Title` STRING(100) NOT NULL,
    `ReleaseDate` DATE,
    `SingerId` INT64 NOT NULL,
    `MarketingBudget` INT64,
    CONSTRAINT `FK_Albums_Singers` FOREIGN KEY (`SingerId`) REFERENCES `Singers` (`SingerId`),
)PRIMARY KEY (`AlbumId`)
CREATE TABLE `Concerts` (
    `VenueCode` STRING(10) NOT NULL,
    `StartTime` TIMESTAMP NOT NULL,
    `SingerId` INT64 NOT NULL,
    `Title` STRING(200),
    CONSTRAINT `FK_Concerts_Singers` FOREIGN KEY (`SingerId`) REFERENCES `Singers` (`SingerId`),
    CONSTRAINT `FK_Concerts_Venues` FOREIGN KEY (`VenueCode`) REFERENCES `Venues` (`Code`),
)PRIMARY KEY (`VenueCode`, `StartTime`, `SingerId`)
CREATE TABLE `Tracks` (
    `AlbumId` INT64 NOT NULL,
    `TrackId` INT64 NOT NULL,
    `Title` STRING(200) NOT NULL,
    `Duration` NUMERIC,
    `LyricsLanguages` ARRAY<STRING(2)>,
    `Lyrics` ARRAY<STRING(MAX)>,
    CONSTRAINT `Chk_Languages_Lyrics_Length_Equal` CHECK (ARRAY_LENGTH(LyricsLanguages) = ARRAY_LENGTH(Lyrics)),
)PRIMARY KEY (`AlbumId`, `TrackId`),
INTERLEAVE IN PARENT `Albums` ON DELETE NO ACTION
CREATE TABLE `Performances` (
    `VenueCode` STRING(10) NOT NULL,
    `SingerId` INT64 NOT NULL,
    `StartTime` TIMESTAMP NOT NULL,
    `ConcertStartTime` TIMESTAMP NOT NULL,
    `AlbumId` INT64 NOT NULL,
    `TrackId` INT64 NOT NULL,
    `Rating` FLOAT64,
    CONSTRAINT `FK_Performances_Singers` FOREIGN KEY (`SingerId`) REFERENCES `Singers` (`SingerId`),
    CONSTRAINT `FK_Performances_Tracks` FOREIGN KEY (`AlbumId`, `TrackId`) REFERENCES `Tracks` (`AlbumId`, `TrackId`),
    CONSTRAINT `FK_Performances_Concerts` FOREIGN KEY (`VenueCode`, `ConcertStartTime`, `SingerId`) REFERENCES `Concerts` (`VenueCode`, `StartTime`, `SingerId`),
)PRIMARY KEY (`VenueCode`, `SingerId`, `StartTime`)
CREATE INDEX `AlbumsByAlbumTitle2` ON `Albums` (`Title`) STORING (`MarketingBudget`, `ReleaseDate`)
CREATE INDEX `Idx_Singers_FullName` ON `Singers` (`FullName`)
CREATE NULL_FILTERED INDEX `IDX_TableWithAllColumnTypes_ColDate_ColCommitTS` ON `TableWithAllColumnTypes` (`ColDate`, `ColCommitTS`)
CREATE UNIQUE INDEX `Idx_Tracks_AlbumId_Title` ON `Tracks` (`TrackId`, `Title`)
";
            Assert.Equal(script, generatedScript);
        }
    }
}
| googleapis/dotnet-spanner-entity-framework | Google.Cloud.EntityFrameworkCore.Spanner.Tests/MigrationTests/GenerateCreateScriptTest.cs | C# | apache-2.0 | 5,850 |
/**
* <copyright>
*
* Copyright (c) 2010 SAP AG.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Reiner Hille-Doering (SAP AG) - initial API and implementation and/or initial documentation
*
* </copyright>
*/
package org.eclipse.bpmn2.provider;
import java.util.Collection;
import java.util.List;
import org.eclipse.bpmn2.Bpmn2Package;
import org.eclipse.bpmn2.FormValue;
import org.eclipse.emf.common.notify.AdapterFactory;
import org.eclipse.emf.common.notify.Notification;
import org.eclipse.emf.edit.provider.ComposeableAdapterFactory;
import org.eclipse.emf.edit.provider.IEditingDomainItemProvider;
import org.eclipse.emf.edit.provider.IItemLabelProvider;
import org.eclipse.emf.edit.provider.IItemPropertyDescriptor;
import org.eclipse.emf.edit.provider.IItemPropertySource;
import org.eclipse.emf.edit.provider.IStructuredItemContentProvider;
import org.eclipse.emf.edit.provider.ITreeItemContentProvider;
import org.eclipse.emf.edit.provider.ItemPropertyDescriptor;
import org.eclipse.emf.edit.provider.ViewerNotification;
/**
* This is the item provider adapter for a {@link org.eclipse.bpmn2.FormValue} object.
* <!-- begin-user-doc -->
* <!-- end-user-doc -->
* @generated
*/
// EMF-generated edit provider: methods tagged @generated are overwritten when
// the model is regenerated; hand edits only survive if retagged "@generated NOT".
public class FormValueItemProvider extends BaseElementItemProvider implements
        IEditingDomainItemProvider, IStructuredItemContentProvider,
        ITreeItemContentProvider, IItemLabelProvider, IItemPropertySource {
    /**
     * This constructs an instance from a factory and a notifier.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public FormValueItemProvider(AdapterFactory adapterFactory) {
        super(adapterFactory);
    }
    /**
     * This returns the property descriptors for the adapted class.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public List<IItemPropertyDescriptor> getPropertyDescriptors(Object object) {
        // Descriptors are built once and cached in itemPropertyDescriptors.
        if (itemPropertyDescriptors == null) {
            super.getPropertyDescriptors(object);
            addValueIdPropertyDescriptor(object);
            addValueNamePropertyDescriptor(object);
        }
        return itemPropertyDescriptors;
    }
    /**
     * This adds a property descriptor for the Value Id feature.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    protected void addValueIdPropertyDescriptor(Object object) {
        itemPropertyDescriptors.add(createItemPropertyDescriptor(
                ((ComposeableAdapterFactory) adapterFactory)
                        .getRootAdapterFactory(),
                getResourceLocator(),
                getString("_UI_FormValue_valueId_feature"),
                getString("_UI_PropertyDescriptor_description",
                        "_UI_FormValue_valueId_feature", "_UI_FormValue_type"),
                Bpmn2Package.Literals.FORM_VALUE__VALUE_ID, true, false, false,
                ItemPropertyDescriptor.GENERIC_VALUE_IMAGE, null, null));
    }
    /**
     * This adds a property descriptor for the Value Name feature.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    protected void addValueNamePropertyDescriptor(Object object) {
        itemPropertyDescriptors
                .add(createItemPropertyDescriptor(
                        ((ComposeableAdapterFactory) adapterFactory)
                                .getRootAdapterFactory(),
                        getResourceLocator(),
                        getString("_UI_FormValue_valueName_feature"),
                        getString("_UI_PropertyDescriptor_description",
                                "_UI_FormValue_valueName_feature",
                                "_UI_FormValue_type"),
                        Bpmn2Package.Literals.FORM_VALUE__VALUE_NAME, true,
                        false, false,
                        ItemPropertyDescriptor.GENERIC_VALUE_IMAGE, null, null));
    }
    /**
     * This returns FormValue.gif.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public Object getImage(Object object) {
        return overlayImage(object,
                getResourceLocator().getImage("full/obj16/FormValue"));
    }
    /**
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    protected boolean shouldComposeCreationImage() {
        return true;
    }
    /**
     * This returns the label text for the adapted class.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public String getText(Object object) {
        // Label is the element id, falling back to the bare type name.
        String label = ((FormValue) object).getId();
        return label == null || label.length() == 0 ? getString("_UI_FormValue_type")
                : getString("_UI_FormValue_type") + " " + label;
    }
    /**
     * This handles model notifications by calling {@link #updateChildren} to update any cached
     * children and by creating a viewer notification, which it passes to {@link #fireNotifyChanged}.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public void notifyChanged(Notification notification) {
        updateChildren(notification);
        switch (notification.getFeatureID(FormValue.class)) {
        case Bpmn2Package.FORM_VALUE__VALUE_ID:
        case Bpmn2Package.FORM_VALUE__VALUE_NAME:
            // Both features affect only the label: refresh label, not content
            // (ViewerNotification args: contentRefresh=false, labelUpdate=true).
            fireNotifyChanged(new ViewerNotification(notification,
                    notification.getNotifier(), false, true));
            return;
        }
        super.notifyChanged(notification);
    }
    /**
     * This adds {@link org.eclipse.emf.edit.command.CommandParameter}s describing the children
     * that can be created under this object.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    protected void collectNewChildDescriptors(
            Collection<Object> newChildDescriptors, Object object) {
        super.collectNewChildDescriptors(newChildDescriptors, object);
    }
}
| adbrucker/SecureBPMN | designer/src/org.activiti.designer.model.edit/src/org/eclipse/bpmn2/provider/FormValueItemProvider.java | Java | apache-2.0 | 5,555 |
///done, not debug
///done, not debug
// Micro QR symbol constant tables. All tables are indexed by version (1..4);
// slot 0 is undefined so that version N can be addressed directly as [N].
// Matrix size (modules per side) per version.
__g_qrmCmatsize__ = [
    undefined,
    11, 13, 15, 17
];
///
// Data codewords available per error-correction level and version.
// NOTE(review): a 0 entry appears to mean the (level, version) combination
// is not supported by Micro QR -- confirm against the specification.
__g_qrmCdatacodewords__ = {
    L:[
        undefined,
        3, 5, 11, 16
    ],
    M:[
        undefined,
        0, 4, 9, 14
    ],
    Q:[
        undefined,
        0, 0, 0, 10
    ]
};
///
// Placeholder: total codewords per level/version (not filled in yet).
__g_qrmCtotalcodewords__ = {
    L:[
        undefined,
    ],
    M:[
        undefined,
    ],
    Q:[
        undefined,
    ]
};
///
// Placeholder: error-correction codewords per level/version (not filled in yet).
__g_qrmCeccodewords__ = {
    L:[
        undefined,
    ],
    M:[
        undefined,
    ],
    Q:[
        undefined,
    ]
};
///notyet
// Placeholder: maximum character counts per encoding mode (all zeros so far).
__g_qrmCdatalen__ = {
    // number of characters [num,alnum,8bit,kanji]
    L:[
        undefined,
        [ 0, 0, 0, 0],// 1
        [ 0, 0, 0, 0],// 2
        [ 0, 0, 0, 0],// 3
        [ 0, 0, 0, 0] // 4
    ],
    M:[
        undefined,
        [ 0, 0, 0, 0],// 1
        [ 0, 0, 0, 0],// 2
        [ 0, 0, 0, 0],// 3
        [ 0, 0, 0, 0] // 4
    ],
    Q:[
        undefined,
        [ 0, 0, 0, 0],// 1
        [ 0, 0, 0, 0],// 2
        [ 0, 0, 0, 0],// 3
        [ 0, 0, 0, 0] // 4
    ]
};
///
// Error-correction block layout per version and level.
__g_qrmCsegments__ = [
    //[[repeat,totalCodewords,dataCodewords,correctableCodewords],...]
    undefined,
    { L:[[ 1, 5, 3, 0]] },//1
    { L:[[ 1, 10, 5, 1]],
      M:[[ 1, 10, 4, 2]] },//2
    { L:[[ 1, 17, 11, 2]],
      M:[[ 1, 17, 9, 4]] },//3
    { L:[[ 1, 24, 16, 3]],
      M:[[ 1, 24, 14, 5]],
      Q:[[ 1, 24, 10, 7]] } //4
];
///
// Format-information type numbers per (level, version); undefined entries
// mark combinations that do not exist in Micro QR.
__g_qrmCtypenumbers__ = {
    L:[
        undefined,
        0, 1, 3, 5
    ],
    M:[
        undefined,
        undefined, 2, 4, 6
    ],
    Q:[
        undefined,
        undefined, undefined, undefined, 7
    ]
};
///
// Symbol descriptor for one Micro QR (version, error-correction level) pair,
// built from the constant tables above.
// NOTE(review): __extends and __constructSuper are project helpers defined
// elsewhere; this assumes the usual (base, constructor, classBody) shape.
QrMCSymbolInfo = __extends(Object,
    // constructor
    function(version, eclevel) {
        __constructSuper(this);
        // defaults: version 1, level "M"
        if (version == undefined) { version = 1; }
        if (eclevel == undefined) { eclevel = "M"; }
        this.version = version;
        // NOTE(review): the numeric ranking below maps L->1 and M->0, which
        // looks inconsistent with the "L<M<Q" comment -- confirm intent.
        this.eclevel = {L:1,M:0,Q:3}[eclevel]; // L<M<Q
        this.matrixSize = __g_qrmCmatsize__ [version];
        this.dataCodewords = __g_qrmCdatacodewords__[eclevel][version];
        // NOTE(review): segments is indexed [version][eclevel] and may be
        // undefined for unsupported (version, level) combinations.
        this.segments = __g_qrmCsegments__ [version][eclevel];
        this.typeNumber = __g_qrmCtypenumbers__ [eclevel][version];
    },
    // methods
    function(__this__) {
        // Highest Micro QR version supported.
        __this__.MAXVER = 4;
    });
| qnq777/matrixcode.js-legacy | public_html/js/qrcode/qrmcdata.js | JavaScript | apache-2.0 | 2,139 |
<?php
/*
* Copyright 2014 Stefan Lorenz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
namespace stlrnz\json\converter\exception;
/**
* ConverterException
*
* @author Stefan Lorenz
* @license Apache License 2.0
* @license http://www.apache.org/licenses/LICENSE-2.0
*
* @package stlrnz\json\converter\exception
*/
class ConverterException extends \Exception
{
} | stlrnz/PHPJsonConverter | lib/stlrnz/json/converter/exception/ConverterException.php | PHP | apache-2.0 | 886 |
/*
* Licensed to the Ted Dunning under one or more contributor license
* agreements. See the NOTICE file that may be
* distributed with this work for additional information
* regarding copyright ownership. Ted Dunning licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.mapr.anomaly;
import com.google.common.base.Preconditions;

import java.io.BufferedReader;
import java.io.IOException;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Represents an event from a log.
*/
/**
 * One event parsed from a web access log line of the form
 * {@code [timestamp] /op?user=hex-uid a.b.c.d}.
 * Instances are immutable and ordered by (uid, time, ip, op) so that the
 * natural ordering is consistent with {@link #equals(Object)}.
 */
public class Event implements Comparable<Event> {
    // SimpleDateFormat is NOT thread-safe; all parsing synchronizes on df.
    private static final DateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSXXX");
    // groups: 1=timestamp, 2=op, 3=hex uid, 4..7=dotted IPv4 octets
    private static final Pattern format = Pattern.compile("\\[(.*)] /(.+)[?&]user=(.*) (.*)\\.(.*)\\.(.*)\\.(.*)");
    private final int uid;
    private final long time;
    private final int ip;
    private final String op;

    private Event(int uid, long time, int ip, String op) {
        this.op = Objects.requireNonNull(op);
        this.uid = uid;
        this.time = time;
        this.ip = ip;
    }

    /**
     * Reads the next event from {@code in}. When the next line does not match
     * the log format (or a field fails to parse), the reader is reset to the
     * start of that line and {@code null} is returned; {@code null} is also
     * returned at end of stream.
     *
     * @param in reader positioned at a line start; must support mark/reset.
     *           Lines are assumed shorter than the 1000-char mark limit.
     * @return the parsed event, or null
     * @throws IOException if reading fails
     */
    public static Event read(BufferedReader in) throws IOException {
        in.mark(1000);
        String line = in.readLine();
        if (line == null) {
            return null;
        }
        try {
            Matcher m = format.matcher(line);
            if (m.matches()) {
                int i = 1;
                Date d;
                // Guard the shared, mutable SimpleDateFormat against
                // concurrent callers of this static method.
                synchronized (df) {
                    d = df.parse(m.group(i++));
                }
                String op = m.group(i++);
                int uid = Integer.parseInt(m.group(i++), 16); // uid is logged in hex
                int ip = Integer.parseInt(m.group(i++)) << 24;
                ip += Integer.parseInt(m.group(i++)) << 16;
                ip += Integer.parseInt(m.group(i++)) << 8;
                ip += Integer.parseInt(m.group(i));
                return new Event(uid, d.getTime(), ip, op);
            } else {
                in.reset();
                return null;
            }
        } catch (ParseException | NumberFormatException e) {
            // Malformed field: rewind so the caller can re-read the line.
            in.reset();
            return null;
        }
    }

    /** @return the client IPv4 address packed into an int (a.b.c.d big-endian). */
    public int getIp() {
        return ip;
    }

    /** @return event time as epoch milliseconds. */
    public long getTime() {
        return time;
    }

    /** @return the numeric user id (parsed from its hex form in the log). */
    @SuppressWarnings("WeakerAccess")
    public int getUid() {
        return uid;
    }

    /** @return the request path / operation name. */
    public String getOp() {
        return op;
    }

    @Override
    public int compareTo(Event o) {
        int r = Integer.compare(uid, o.uid);
        if (r != 0) {
            return r;
        }
        r = Long.compare(time, o.time);
        if (r != 0) {
            return r;
        }
        r = Integer.compare(ip, o.ip);
        if (r != 0) {
            return r;
        }
        // Include op so compareTo()==0 implies equals() (Comparable contract
        // recommendation); previously op was ignored here but used in equals.
        return op.compareTo(o.op);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (!(o instanceof Event)) return false;
        Event event = (Event) o;
        if (ip != event.ip) return false;
        if (time != event.time) return false;
        if (uid != event.uid) return false;
        return op.equals(event.op);
    }

    @Override
    public int hashCode() {
        int result = uid;
        result = 31 * result + (int) (time ^ (time >>> 32));
        result = 31 * result + ip;
        result = 31 * result + op.hashCode();
        return result;
    }

    /** Unused descriptive exception kept for API compatibility. */
    static class EventFormatException extends Throwable {
        @SuppressWarnings("unused")
        public EventFormatException(String line) {
            super(String.format("Invalid event format found: \"%s\"", line));
        }
    }
}
| tdunning/log-synth | src/main/java/com/mapr/anomaly/Event.java | Java | apache-2.0 | 4,105 |
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## This is a samples controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
## - call exposes all registered services (none by default)
#########################################################################
from gluon.contrib.user_agent_parser import mobilize
import os,sys,types
import string,operator
from collections import OrderedDict
import numpy as np
import pandas as pd
import matplotlib as mpl
import bokeh
#from bokeh.plotting import *
# Local paths: user home and the app's saved prediction results folder.
home = os.path.expanduser("~")
datapath = os.path.join(request.folder,'static/results')
from applications.epitopemap.modules.mhcpredict import base, sequtils, tepitope
# Prediction methods exposed in the UI ('threading'/'iedbmhc2' currently disabled).
methods = ['tepitope','netmhciipan','iedbmhc1','bcell']#,'threading'] #'iedbmhc2'
iedbmethods = ['IEDB_recommended','consensus','ann','smm','arb','netmhcpan']
bcellmethods = ['Chou-Fasman', 'Emini', 'Karplus-Schulz',
                'Kolaskar-Tongaonkar', 'Parker', 'Bepipred']
# Track colour and matplotlib colormap name used per prediction method.
colors = {'tepitope':'green','netmhciipan':'orange',
          'iedbmhc1':'blue','iedbmhc2':'pink','threading':'purple'}
colormaps={'tepitope':'Greens','netmhciipan':'Oranges','iedbmhc2':'Pinks',
           'threading':'Purples','iedbmhc1':'Blues'}
def index():
    """
    example action using the internationalization operator T and flash
    rendered by views/default/index.html or views/generic.html
    """
    # Serve the mobile view variant to mobile user agents.
    if request.user_agent().is_mobile:
        response.view.replace('.html','.mobile.html')
    form = quicksearch()
    return dict(message=T('Menu'),searchform=form)
def register():
    """Render the standard web2py auth registration form."""
    return dict(form=auth.register())
def user():
    """
    exposes:
    http://..../[app]/default/user/login
    http://..../[app]/default/user/logout
    http://..../[app]/default/user/register
    http://..../[app]/default/user/profile
    http://..../[app]/default/user/retrieve_password
    http://..../[app]/default/user/change_password
    use @auth.requires_login()
    @auth.requires_membership('group name')
    @auth.requires_permission('read','table name',record_id)
    to decorate functions that need access control
    """
    # New accounts require admin approval; email the admin on registration.
    auth.settings.registration_requires_approval = True
    adminmail = 'damien.farrell@ucd.ie'
    auth.settings.register_onaccept = lambda form: mail.send(to=adminmail,
        subject='New user registered for %s application' % (request.application),
        message="new user email is %s" % (form.vars.email))
    return dict(form=auth())
def download():
    """
    allows downloading of uploaded files
    http://..../[app]/default/download/[filename]
    """
    return response.download(request,db)
def call():
    """
    exposes services. for example:
    http://..../[app]/default/call/jsonrpc
    decorate with @services.jsonrpc the functions to expose
    supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
    """
    return service()
@auth.requires_membership("editor_group")
def list_users():
    """Admin view: table of all users, each row with an edit link."""
    btn = lambda row: A("Edit", _href=URL('manage_user', args=row.auth_user.id))
    db.auth_user.edit = Field.Virtual(btn)
    rows = db(db.auth_user).select()
    headers = ["ID", "Name", "Last Name", "Email","registration", "Edit"]
    fields = ['id', 'first_name', 'last_name', "email", "registration_key", "edit"]
    table = TABLE(THEAD(TR(*[B(header) for header in headers])),
                  TBODY(*[TR(*[TD(row[field]) for field in fields]) \
                        for row in rows]))
    table["_class"] = "table table-striped table-bordered table-condensed"
    return dict(table=table)
@auth.requires_membership("editor_group")
def manage_user():
    """Admin view: edit one user record plus an ajax membership panel."""
    user_id = request.args(0) or redirect(URL('list_users'))
    form = SQLFORM(db.auth_user, user_id).process()
    membership_panel = LOAD(request.controller,
                            'manage_membership.html',
                            args=[user_id],
                            ajax=True)
    return dict(form=form,membership_panel=membership_panel)
@auth.requires_membership("editor_group")
def manage_membership():
    """Admin grid of auth_membership rows for one user (add/remove groups)."""
    user_id = request.args(0) or redirect(URL('list_users'))
    db.auth_membership.user_id.default = int(user_id)
    db.auth_membership.user_id.writable = False
    form = SQLFORM.grid(db.auth_membership.user_id == user_id,
                        args=[user_id],
                        searchable=False,
                        deletable=False,
                        details=False,
                        selectable=False,
                        csv=False,
                        user_signature=True) # change to True in production
    return form
@auth.requires_signature()
def data():
    """Generic CRUD endpoint; only reachable via signed URLs."""
    return dict(form=crud())
def mpld3Plot(fig, objects=None):
    """Return an mpld3 HTML rendering of a matplotlib figure.

    fig: matplotlib Figure to render.
    objects: optional sequence of (bar, label) pairs used to attach a
        hover tooltip plugin to the figure before rendering.
    """
    import mpld3
    if objects is not None and len(objects) > 0:
        # Plugins must be connected *before* fig_to_html() is called;
        # previously the HTML was generated first, so the tooltip never
        # appeared in the output.
        bars, labels = zip(*objects)
        # NOTE(review): MyToolTip and plugins are not defined in this file --
        # presumably provided elsewhere (e.g. mpld3.plugins); confirm.
        tooltip = MyToolTip(bars, labels)
        plugins.connect(fig, tooltip)
    return mpld3.fig_to_html(fig)
def mplPlot(fig):
    """Convert a matplotlib figure to a bokeh plot."""
    from bokeh import mpl
    plot = mpl.to_bokeh(fig)
    return plot
def embedPlot_old(plot):
    """Embed plot method for older versions of bokeh.

    Writes the plot's autoload javascript to static/temp/<id>.js and
    returns (js, tag) where tag is the <script> snippet for the page.
    """
    from bokeh.resources import Resources
    from bokeh.embed import autoload_static
    fp = os.path.join(request.folder,'static/temp/')
    fp = os.path.join(fp, plot._id+".js")
    res = Resources("relative")
    res.js_files = ["../static/js/bokeh.min.js"]
    res.css_files = ["../static/css/bokeh.min.css"]
    # Path is relative to the page that loads the embed tag.
    jspath = os.path.join('../static/temp/', plot._id+".js")
    js,tag = autoload_static(plot, res, jspath)
    with open(fp, "w") as f:
        f.write(js)
    # (removed a leftover bare debug `print` that wrote a blank line)
    return js,tag
def embedPlot(plot):
    """Embed plot method for new version of bokeh (tested on 0.11).

    Returns (script, div): the bokeh script component and its target div.
    Also registers bokeh's js/css with the response so they are included.
    """
    from bokeh.embed import components
    script, div = components(plot)
    #inject the required bokeh js and css files
    response.files.append(URL('static','css/bokeh.min.css'))
    response.files.append(URL('static','js/bokeh.min.js'))
    response.include_files()
    return script, div
def plotRegions(plot, regions=None):
    """Plot regions of interest"""
    h=27
    y=.5+h/2.0
    w=20
    colors = {'negative':'#FF3333', 'positive':'#0099FF'}
    # Hard-coded region positions for specific antigens.
    # NOTE(review): only `reg = rv3584` below is plotted; the `regions`
    # parameter and the other rv* tables are currently unused -- confirm intent.
    rv0655 = {'negative':[66,77,171,198,251], 'positive':[231]}
    rv3676 = {'negative':[197], 'positive':[42,117,204]}
    rv0757 = {'negative':[73,175], 'positive':[125,210]}
    rv3584 = {'negative':[72], 'positive':[43,49]}
    rv3390 = {'positive':[178,185]}
    reg = rv3584
    for r in reg:
        x = reg[r]
        # centre each marker rectangle on its region start
        x = [i+w/2 for i in x]
        plot.rect(x,y, width=w, height=h,color=colors[r],
                  line_color='black',alpha=0.4,legend=r)
    plot.legend.label_text_font_size = '15pt'
    return
def plotAnnotations(plot,annotation):
    """Overlay SeqDepot annotations on a tracks plot.

    annotation: dict keyed by tool name ('signalp', 'tmhmm', 'pfam27');
    each entry is drawn as rectangles near the bottom of the plot.
    """
    h=1.8
    y=.4+h/2.0
    if 'signalp' in annotation:
        # signal peptide positions: one narrow marker per site
        x = annotation['signalp'].values()
        plot.rect(x,y, width=.5, height=h,color='purple',line_color='red',alpha=0.7,legend='signalp')
    if 'tmhmm' in annotation:
        # transmembrane helices as (start, end) spans
        vals = annotation['tmhmm']
        x=[i[0]+(i[1]-i[0])/2.0 for i in vals]
        w=[i[1]-i[0] for i in vals]
        plot.rect(x,y, width=w, height=h,color='blue',line_color='blue',alpha=0.6,legend='tmhmm')
    if 'pfam27' in annotation:
        # pfam domains as (name, start, end) tuples, drawn with their label
        vals = annotation['pfam27']
        text = [i[0] for i in vals]
        x=[i[1]+(i[2]-i[1])/2.0 for i in vals]
        w=[i[2]-i[1] for i in vals]
        # (removed leftover debug `print x,w,y`)
        plot.rect(x,y, width=w, height=h,color='white',line_color='black',alpha=0.6)
        plot.text(x,y, text=text, text_font_size='9pt', angle=0, text_alpha=.8,
                  text_baseline='middle',text_align='center')
    return
def plotBCell(plot,pred,height):
    """Line plot of b cell predictions - no allele stuff"""
    x = pred.data.Position
    #print pred.data[:20]
    #source = ColumnDataSource(data=dict(x=x,y=y))
    y=pred.data.Score
    h=height
    # shift scores to be non-negative, then rescale to the track height
    y = y+abs(min(y))
    y = y*(h/max(y))+3
    plot.line(x, y, line_color="red", line_width=2, alpha=0.6,legend='bcell')
    return
def plotTracks(preds,tag,n=3,title=None,width=820,height=None,
               seqdepot=None,bcell=None,exp=None):
    """Plot epitopes as parallel tracks.

    preds: dict of predictor-name -> predictor object (with .data, .scorekey).
    tag: locus tag, used as plot title when title is None.
    n: minimum number of alleles for a binder to count as promiscuous.
    seqdepot/bcell/exp: optional extra annotation layers.
    Returns (script, div) bokeh embed components.
    """
    from bokeh.models import Range1d,HoverTool,FactorRange,Grid,GridPlot,ColumnDataSource
    from bokeh.plotting import Figure
    # one track row per (predictor, allele) pair, plus padding
    alls=1
    if title == None:
        title=tag
    for m in preds:
        alls += len(preds[m].data.groupby('allele'))
    if height==None:
        height = 130+10*alls
    yrange = Range1d(start=0, end=alls+3)
    plot = Figure(title=title,title_text_font_size="11pt",plot_width=width,
                  plot_height=height, y_range=yrange,
                  y_axis_label='allele',
                  tools="xpan, xwheel_zoom, resize, hover, reset, save",
                  background_fill="#FAFAFA",
                  toolbar_location="below")
    h=3
    if bcell != None:
        plotBCell(plot, bcell, alls)
    if seqdepot != None:
        plotAnnotations(plot,seqdepot)
    if exp is not None:
        plotExp(plot, exp)
    #plotRegions(plot)
    #lists for hover data
    #we plot all rects at once
    x=[];y=[];allele=[];widths=[];clrs=[];peptide=[]
    predictor=[];position=[];score=[];leg=[]
    l=80
    for m in preds:
        pred = preds[m]
        cmap = mpl.cm.get_cmap(colormaps[m])
        df = pred.data
        sckey = pred.scorekey
        pb = pred.getPromiscuousBinders(data=df,n=n)
        if len(pb) == 0:
            continue
        l = pred.getLength()
        grps = df.groupby('allele')
        alleles = grps.groups.keys()
        # NOTE(review): duplicate of the len(pb)==0 check above; dead code.
        if len(pb)==0:
            continue
        c=colors[m]
        leg.append(m)
        for a,g in grps:
            b = pred.getBinders(data=g)
            b = b[b.pos.isin(pb.pos)] #only promiscuous
            b.sort('pos',inplace=True)
            scores = b[sckey].values
            score.extend(scores)
            pos = b['pos'].values
            position.extend(pos)
            x.extend(pos+(l/2.0)) #offset as coords are rect centers
            widths.extend([l for i in scores])
            clrs.extend([c for i in scores])
            y.extend([h+0.5 for i in scores])
            # NOTE(review): `alls` is rebound here from the track count to a
            # temporary list of allele labels -- confusing but harmless.
            alls = [a for i in scores]
            allele.extend(alls)
            peptide.extend(list(b.peptide.values))
            predictor.extend([m for i in scores])
            h+=1
    source = ColumnDataSource(data=dict(x=x,y=y,allele=allele,peptide=peptide,
                              predictor=predictor,position=position,score=score))
    plot.rect(x,y, width=widths, height=0.8,
              #x_range=Range1d(start=1, end=seqlen+l),
              color=clrs,line_color='gray',alpha=0.7,source=source)
    # hover tool shows the per-rect columns from the data source
    hover = plot.select(dict(type=HoverTool))
    hover.tooltips = OrderedDict([
        ("allele", "@allele"),
        ("position", "@position"),
        ("peptide", "@peptide"),
        ("score", "@score"),
        ("predictor", "@predictor"),
    ])
    # NOTE(review): relies on `pred` from the last loop iteration.
    seqlen = pred.data.pos.max()+l
    plot.set(x_range=Range1d(start=0, end=seqlen+1))
    plot.xaxis.major_label_text_font_size = "8pt"
    plot.xaxis.major_label_text_font_style = "bold"
    plot.ygrid.grid_line_color = None
    plot.yaxis.major_label_text_font_size = '0pt'
    plot.xaxis.major_label_orientation = np.pi/4
    #js,html = embedPlot(plot)
    script, div = embedPlot(plot)
    return script, div
    #return plot, html
def plotEmpty(width=850):
    """Return embed HTML for an empty placeholder plot.

    NOTE(review): `figure` and `rect` are not imported at module level (the
    `bokeh.plotting` star import above is commented out), so this relies on
    them being available some other way -- confirm before relying on it.
    """
    from bokeh.models import Range1d
    plot = figure(title='',plot_width=width, plot_height=10,
                  y_range=Range1d(start=1, end=100),
                  tools="xpan, xwheel_zoom, resize, hover, reset",
                  background_fill="white")
    x=range(100); y=2
    rect(x,y, width=1, height=0.8,color='white')
    js,html = embedPlot(plot)
    # (removed leftover debug `print plot`)
    return html
def plots():
    """Use as component to plot predictions for given request"""
    # debug logging to the server console; consider a proper logger
    print 'plot request'
    print request.vars
    label = request.vars.label
    #if we have no data
    if label == 'dummy':
        figure = plotEmpty()
        return dict(figure=figure)
    g = request.vars.genome
    tag = request.vars.tag
    gene = request.vars.gene
    title=None
    if gene != None:
        t = getTagbyGene(g,gene) #override tag with gene name if provided
        if t != None:
            tag = t
            title = tag+' / '+gene
    # request parameters with defaults
    if request.vars.width == None:
        width = 820
    else:
        width = int(request.vars.width)
    if request.vars.height != None:
        height = int(request.vars.height)
    else:
        height = None
    if request.vars.n == None:
        n=3
    else:
        n = int(request.vars.n)
    if request.vars.perccutoff != None:
        perccutoff=float(request.vars.perccutoff)
    else:
        perccutoff=0.96
    preds,bcell,cutoffs = getPredictions(label,g,tag,perccutoff)
    # NOTE(review): if getPredictions ever returns preds=None the len() call
    # below raises before the None check; the operand order looks reversed.
    if len(preds)==0 or preds==None:
        return dict(error=True)
    sd=None
    if request.vars.annotation == 'on':
        feat, fastafmt, previous, next = getFeature(g,tag)
        seq = feat['translation']
        sd = getSeqDepot(seq)['t']
    script, div = plotTracks(preds,tag,n=n,title=title,
                             width=width,height=height,seqdepot=sd,bcell=bcell)
    return dict(script=script,div=div,preds=preds,error=False)
def scoredistplots(preds):
    """Score distribution plots.

    Builds one histogram figure per predictor and returns the embedded
    HTML for a grid of those plots.
    """
    from bokeh.models import Range1d,GridPlot
    from bokeh.plotting import Figure
    plots=[]
    for p in preds:
        pred=preds[p]
        key=pred.scorekey
        data = pred.data[key]
        hist, edges = np.histogram(data, density=True, bins=30)
        # NOTE: `p` is rebound here from the dict key to the Figure object
        p = Figure(title=p,plot_height=250,tools='')
        p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
               fill_color="#036564", line_color="#033649")
        p.xgrid.grid_line_color = None
        p.ygrid.grid_line_color = None
        plots.append(p)
    plot = GridPlot(children=[plots],title='test')
    js,html = embedPlot(plot)
    return html
def scoreCorrelations(preds):
    """Scatter-matrix figures of per-allele score correlations.

    preds: dict of predictor-name -> predictor object.
    Returns one matplotlib figure per predictor.
    """
    # pyplot is not imported at module level (only `matplotlib as mpl`),
    # so without this local import `plt` raised a NameError at runtime.
    import matplotlib.pyplot as plt
    figs = []
    for p in preds:
        pred = preds[p]
        df = pred.data
        # peptides x alleles matrix of scores for this predictor
        x = df.pivot_table(index='peptide', columns='allele', values=pred.scorekey)
        f = plt.figure()
        ax = f.add_subplot(111)
        pd.scatter_matrix(x, alpha=0.2, figsize=(12,12), diagonal='hist', ax=ax)
        figs.append(f)
    return figs
def results():
    """Component to show predictions for all peptides for each predictor """
    label = request.vars.label
    g = request.vars.genome
    tag = request.vars.tag
    preds,bcell,cutoffs = getPredictions(label,g,tag)
    summary = summaryhtml(preds)
    # reshape each predictor's data for display
    data = {}
    for p in preds:
        data[p] = preds[p].reshape()
    data = dict(data)
    return dict(data=data)
def binders():
    """Component for top binder tables"""
    label = request.vars.label
    g = request.vars.genome
    tag = request.vars.tag
    n = int(request.vars.n)
    preds,bcell,cutoffs = getPredictions(label,g,tag)
    summary = summaryhtml(preds)
    b = base.getBinders(preds,n=n)
    kys = b.keys()
    # binders predicted by both MHC-II methods, merged on peptide position
    if 'tepitope' in kys and 'netmhciipan' in kys:
        shared = pd.merge(b['tepitope'],b['netmhciipan'],
                 on=['peptide','name','pos','core'],
                 copy=False).sort('pos')
    else:
        shared=''
    return dict(b=b,summary=summary,shared=shared,n=n)
def showSequence(seq,preds):
    """Get html display of binders on sequences"""
    colors = {'tepitope':'#70E2AA','netmhciipan':'orange',
              'iedbmhc1':'#9DCEFF','iedbmhc2':'pink','threading':'#BCA9F5'}
    l=9 #need to get this from predictors
    seqs=[]
    tabledata=[]
    #idx = ''.join([seq[i] if i%10!=0 else '|' for i in range(len(seq))])
    tabledata.append((TR(TH('allele'),TH('sequence'))))
    for p in preds:
        b = preds[p].getBinders()
        clr = colors[p]
        #pb = preds[p].getPromsicuousBinders(n=n)
        #b = b[b.pos.isin(pb.pos)]
        grps = b.groupby('allele')
        for a,g in grps:
            # expand each binder start position into the l residues it covers
            pos=[]
            for i in g.pos: pos.extend(np.arange(i,i+l))
            seqhtml=[]
            for i in range(len(seq)):
                if i in pos:
                    # residue covered by a binder: highlight in method colour
                    seqhtml.append(SPAN(seq[i],_style="background-color:%s" %clr))
                else:
                    seqhtml.append(SPAN(seq[i],_style="color: gray"))
            tabledata.append((TR(TH(a),TD(*seqhtml))))
    table = TABLE(*tabledata,_class="seqtable")
    return table
def sequence():
"""Component to highlight epitopes on sequence"""
label = request.vars.label
g = request.vars.genome
tag = request.vars.tag
n = int(request.vars.n)
feat, fastafmt, previous, next = getFeature(g,tag)
if feat==None:
return dict(table=None)
seq = feat.qualifiers['translation'][0]
preds,bcell,c = getPredictions(label,g,tag)
table = showSequence(seq,preds)
return dict(table=table)
def feature():
"""Component showing gene annotation"""
g = request.vars.genome
tag = request.vars.tag
items = getFeature(g,tag)
if items != None:
feat, fastafmt, previous, next = items
return dict(fastafmt=fastafmt,feat=feat,
previous=previous,next=next)
return dict()
def iedb():
    """Remote IEDB tools predictions for the selected protein.

    Fetches the protein's translation and submits it to the IEDB web service;
    the resulting dataframe is rendered as an html table.
    """
    g = request.vars.genome
    tag = request.vars.tag
    feature, fastafmt, previous, next = getFeature(g,tag)
    seq = feature.qualifiers['translation'][0]
    df = base.getIEDBRequest(seq)   #network call to the IEDB service
    result = XML(df.to_html(classes='mytable'))
    return dict(result=result)
def seqdepot(result):
    """Format SeqDepot annotation results as tabbed html tables plus an image.

    NOTE(review): `tag`, `aseqid` and `seq` are not defined in this function
    or visibly at module level - as written the image/return lines would raise
    NameError; presumably they were meant to be parameters. Confirm before use.
    """
    #print result
    kys = result['t'].keys()   #one table per annotation tool
    tables = {}
    for k in kys:
        fieldnames = [TH(i) for i in sd.toolFields(k)]
        rows = [TR(i) for i in result['t'][k]]
        rows.insert(0,TR(*fieldnames))   #header row first
        tables[k] = TABLE(*rows,_class="tinytable")
    #save a rendered image of the annotation into static/temp
    fp = os.path.join(request.folder,'static/temp/')
    filename = os.path.join(fp,tag+'.png')
    sd.saveImage(aseqid, filename, {'format':'png'})
    imgurl = IMG(_src=URL(r=request,c='static',f='temp/%s' %os.path.basename(filename)))
    #assemble jquery-style tabbed content: link list + one DIV per tool
    links = [LI(A(k,_href="#%s" %k)) for k in tables]
    tablinks = UL(*links,_class="small-tab-links")
    divs=[DIV(tables[k],_id=k,_class="tab") for k in tables]
    content = DIV(*divs,_class="tab-content")
    tabbedcontent = DIV(tablinks, content,_class="tabs")
    return dict(result=result,seq=seq,imgurl=imgurl,tables=tables,
                tabbedcontent=tabbedcontent)
def protein():
    """Display protein info page from a fixed URL:
    /protein/<label>/<genome>/<tag>.

    Returns the template variables for the protein view; raises HTTP 404 when
    the locus tag is not present in the selected genome.
    """
    label = request.args[0]
    g = request.args[1]
    tag = request.args[2]
    n = 3   #default min-allele count used by the embedded components
    if g == 'other':
        #sequence-only submission: no genome annotation available
        items = (None, None, '', '')
    else:
        items = getFeature(g, tag)
    if items is None:
        #fix: original had an unreachable `return` after this raise
        raise HTTP(404, "No such feature %s available in genome %s" %(tag,g))
    feature, fastafmt, previous, next = items
    result = dict(label=label,tag=tag,genome=g,n=n,
                  previous=previous,next=next)
    return result
@auth.requires_login()
def sequences():
    """Allow user to add fasta sequences instead of a genome.

    Shows an upload form plus an SQLFORM grid of previously uploaded
    sequence records; editing is restricted to the editor group.
    """
    uploadform = FORM(
        TABLE(TR(TD(LABEL('Identifier:',_for='name')),
            TD(INPUT(_name='name',_type='string',_required=True))),
        TR(TD(LABEL('Fasta file:')),TD(INPUT(_name='fastafile',_type='file'))),
        TR(TD(LABEL('Description:',_for='description')),
            TD(INPUT(_name='description',_type='string',_required=False,
            _style="width:400px;"))),
        TR(TD(),TD(INPUT(_name='submit',_type='submit',_value='Submit'))),
        _class="smalltable"), _id="myform")
    #store the uploaded file and its metadata in db.sequences
    if uploadform.accepts(request.vars,formname='upload_form'):
        fname = request.vars.fastafile.filename
        uploadform.vars.filename = fname
        id = db.sequences.insert(name=uploadform.vars.name,
                        description=uploadform.vars.description,
                        file=uploadform.vars.fastafile,
                        filename=uploadform.vars.filename)
    db.sequences.id.readable=False
    query=((db.sequences.id>0))   #all rows
    default_sort_order=[db.sequences.id]
    #per-row link to browse the fasta contents
    links=[lambda row: A('browse',_href=URL('fastaview', args=row.name))]
    grid = SQLFORM.grid(query=query, orderby=default_sort_order,
            create=False, deletable=True, maxtextlength=64, paginate=35,
            details=True, csv=False, ondelete=myondelete,
            editable=auth.has_membership('editor_group'),links=links)
    return dict(grid=grid,form=uploadform)
@auth.requires_login()
def genomes():
    """Display available genomes and allow upload of a new annotation file."""
    formats = ['genbank']   #currently the only accepted upload format
    uploadform = FORM(
        TABLE(TR(TD(LABEL('Identifier:',_for='name')),
            TD(INPUT(_name='name',_type='string',_required=True))),
        TR(TD(LABEL('Format:',_for='format')),
            TD(SELECT(formats,_name='format',_type='string',_required=True))),
        TR(TD(LABEL('file to upload')),TD(INPUT(_name='gfile',_type='file'))),
        TR(TD(),TD(INPUT(_name='submit',_type='submit',_value='Submit'))),
        _class="smalltable"), _id="myform")
    #store the uploaded genome file and metadata in db.genomes
    if uploadform.accepts(request.vars,formname='upload_form'):
        fname = request.vars.gfile.filename
        uploadform.vars.filename = fname
        id = db.genomes.insert(name=uploadform.vars.name,
                        file=uploadform.vars.gfile,
                        filename=uploadform.vars.filename,
                        format=uploadform.vars.format)
    db.genomes.id.readable=False
    query=((db.genomes.id>0))   #all rows
    default_sort_order=[db.genomes.id]
    #per-row link to a summary of the genome
    links=[lambda row: A('browse',_href=URL('genomeview', args=row.name))]
    grid = SQLFORM.grid(query=query, orderby=default_sort_order,
            create=False, deletable=True, maxtextlength=350, paginate=35,
            details=True, csv=False, ondelete=myondelete,
            editable=auth.has_membership('editor_group'),links=links)
    return dict(grid=grid,form=uploadform)
def genomeview():
    """Summary page for one genome: annotation table of its CDS features."""
    g = request.args[0]
    if len(request.args) == 1:
        gfile = getGenome(g)
        data = sequtils.genbank2Dataframe(gfile)
        summary = sequtils.genbankSummary(data)
        data = data[data.type=='CDS']   #coding sequences only
        data = data.drop(['type','pseudo'],1)   #drop columns (positional axis=1, legacy pandas)
        #data=data.set_index('locus_tag')
        return dict(genome=g,data=data,summary=summary)
    else:
        return dict()
def fastaview():
    """Table view of the contents of a previously uploaded fasta file."""
    name = request.args[0]
    if len(request.args) != 1:
        return dict()
    ffile, desc = getFasta(name)
    #widen column display so long sequences are not truncated
    pd.set_option('max_colwidth', 800)
    frame = sequtils.fasta2Dataframe(ffile)
    return dict(fastafile=name, data=frame, desc=desc)
@auth.requires_login()
def presets():
    """Preset allele lists: upload form plus grid of stored presets (csv)."""
    uploadform = FORM(
        TABLE(TR(TD(LABEL('Name:',_for='name')),
            TD(INPUT(_name='name',_type='string',_required=True))),
        TR(TD(LABEL('CSV file:')),TD(INPUT(_name='csvfile',_type='file'))),
        TR(TD(LABEL('Description:',_for='description')),
            TD(INPUT(_name='description',_type='string',_required=False,
            _style="width:400px;"))),
        TR(TD(),TD(INPUT(_name='submit',_type='submit',_value='Submit'))),
        _class="smalltable"), _id="myform")
    #store the uploaded allele list in db.allelepresets
    if uploadform.accepts(request.vars,formname='upload_form'):
        fname = request.vars.csvfile.filename
        uploadform.vars.filename = fname
        id = db.allelepresets.insert(name=uploadform.vars.name,
                        description=uploadform.vars.description,
                        file=uploadform.vars.csvfile,
                        filename=uploadform.vars.filename)
    db.allelepresets.id.readable=False
    query=((db.allelepresets.id>0))   #all rows
    default_sort_order=[db.allelepresets.id]
    #links=[lambda row: A('browse',_href=URL('fastaview', args=row.name))]
    grid = SQLFORM.grid(query=query, orderby=default_sort_order,
            create=False, deletable=True, maxtextlength=64, paginate=35,
            details=True, csv=False, ondelete=myondelete,
            editable=auth.has_membership('editor_group'))#,links=links)
    return dict(grid=grid,form=uploadform)
@auth.requires_login()
def predictions():
    """Parse results folder to show the actual data existing on file system
    might not sync with the results ids.

    Walks datapath, where leaf directories follow <predid>/<genome>/<method>,
    and shows both that summary and the db.predictions grid.
    """
    vals=[]
    for root, subdirs, files in os.walk(datapath):
        if not subdirs:
            #leaf directory: decompose the path into its three components
            p1,method = os.path.split(root)
            p2,genome = os.path.split(p1)
            predid = os.path.basename(p2)
            #print method,genome,predid
            vals.append((predid, genome, method, len(files)))
    df = pd.DataFrame(vals,columns=['identifier','genome','method','sequences'])
    #df = df.set_index('pred. id')
    db.predictions.id.readable=False
    query=((db.predictions.id>0))   #all rows
    default_sort_order=[db.predictions.id]
    grid = SQLFORM.grid(query=query, orderby=default_sort_order,
            create=True, maxtextlength=350, #deletable=True,
            paginate=20,details=True, csv=False, #ondelete=myondelete,
            deletable=auth.has_membership('editor_group'),
            editable=auth.has_membership('editor_group'))
    return dict(results=df, grid=grid)
def myondelete(table, id):
    """Deletion callback for the SQLFORM grids.

    NOTE(review): the actual delete is commented out below, so this currently
    only shows a confirmation form and flashes a message - confirm intent.
    """
    form = FORM.confirm('Are you sure?')
    print form
    if form.accepted:
        response.flash = "I don't like your submission"
        print table, id
    #db(db.predictions.id==id).delete()
    return form
def summaryhtml(predictors):
    """Small summary table: one row per predictor showing its name, cutoff
    and number of promiscuous binders (found in at least 2 alleles)."""
    header = TR(TH('name'), TH('cutoff'), TH('binders'))
    body = []
    for key in predictors:
        pred = predictors[key]
        binders = pred.getPromiscuousBinders(n=2)
        body.append(TR(pred.name, pred.cutoff, len(binders)))
    return TABLE(header, *body, _class='tinytable')
def download():
    """Concatenate all prediction data for one label/genome/tag and return
    it as csv text for download."""
    import StringIO
    label = request.args[0]
    genome = request.args[1]
    tag = request.args[2]
    preds, bcell, c = getPredictions(label, genome, tag)
    frames = [preds[k].data for k in preds]
    merged = pd.concat(frames)
    buf = StringIO.StringIO()
    merged.to_csv(buf, float_format='%.2f')
    return dict(csvdata=buf.getvalue())
def clusterResults():
    """Load precomputed cluster / top-binder csv files from the data folder.

    Returns {filename: DataFrame}. Fix: the original reassigned the loop
    variable to the full path before using it as the dict key, so results
    were keyed by absolute path instead of the plain file name.
    """
    results = {}
    files = ['topclusters_MTB-H37Rv.csv','topsinglebinders.csv']
    for f in files:
        path = os.path.join(datapath, f)
        r = pd.read_csv(path, index_col=0)
        r.reset_index(inplace=True, drop=True)
        r.sort('name', inplace=True)   #legacy pandas (<0.17) sort API
        results[f] = r
    return dict(results=results)
def quicksearch():
    """Non DB search just using paths - small form with prediction id,
    genome, locus tag and gene fields (hidden vars size the popup)."""
    form = SQLFORM.factory(
        Field('label',requires=IS_IN_DB(db, 'predictions.identifier',zero=None,
            multiple=False),default=1,label='id'),
        Field('genome',requires=IS_IN_DB(db, 'genomes.name', zero=None,
            multiple=False),default=1,label='genome'),
        Field('tag', 'string', label='locus tag',default='',length=10),
        Field('gene', 'string', label='gene',default='',length=10),
        hidden=dict(width=550,height=250,n=2),
        formstyle="table3cols",_id='myform')
    #narrow the free-text inputs
    form.element('input[name=tag]')['_style'] = 'width:210px;'
    form.element('input[name=gene]')['_style'] = 'width:210px;'
    return form
def selectionForm():
    """Quick view form: selects prediction id, genome, tag/gene and display
    options (min alleles, cutoff, annotation toggle)."""
    form = SQLFORM.factory(
        Field('label',requires=IS_IN_DB(db, 'predictions.identifier',zero=None,
            multiple=False),default=1,label='id'),
        Field('genome',requires=IS_IN_DB(db, 'genomes.name', zero=None,
            multiple=False),default=1,label='genome'),
        Field('tag', 'string', label='locus tag',default=''),
        Field('gene', 'string', label='gene',default=''),
        Field('n', 'string', label='min alleles',default=3),
        Field('globalcutoff', 'boolean', label='global cutoff',default=True),
        Field('perccutoff', 'string', label='perc. cutoff',default=.96),
        Field('annotation', 'boolean', label='annotation',default=False),
        submit_button="Update",
        formstyle='table3cols',_id='myform',_class='myform')
    #always offer 'other' for sequence-only (no genome) submissions
    form.element('select[name=genome]').insert(0,'other')
    form.element('input[name=n]')['_style'] = 'width:50px;'
    form.element('input[name=perccutoff]')['_style'] = 'width:50px;'
    #form.element('input[name=scorecutoff]')['_style'] = 'width:50px;'
    form.element('input[name=tag]')['_style'] = 'width:130px;'
    form.element('input[name=gene]')['_style'] = 'width:130px;'
    return form
@auth.requires_login()
def quickview():
    """Main quick-view page: selection form plus search form."""
    defaultid = 'results_bovine'
    return dict(label=defaultid,
                form=selectionForm(),
                searchform=findForm())
def show():
    """Quickview all results in one - faster.

    Gathers predictions, plots, summary tables and the highlighted sequence
    for one protein and returns everything the quickview template needs.
    """
    print request.vars
    label = request.vars.label
    g = request.vars.genome
    tag = request.vars.tag
    n = int(request.vars.n)
    cutoff = float(request.vars.perccutoff)
    gene = request.vars.gene
    title=None
    #a gene name, if given, overrides the locus tag
    if gene != '':
        t = getTagbyGene(g,gene)
        if t != None:
            tag = t
            title = tag+' / '+gene
    #fall back to default cutoff/width when absent from the request
    if request.vars.perccutoff == None:
        cutoff = 0.96
    else:
        cutoff = float(request.vars.perccutoff)
    if request.vars.width == None:
        width = 820
    else:
        width = int(request.vars.width)
    annot = request.vars.annotation
    #NOTE(review): `figure` is assigned but never used below - leftover?
    if label == 'dummy':
        figure = plotEmpty()
    preds,bcell,cutoffs = getPredictions(label,g,tag,cutoff)
    if len(preds) == 0:
        redirect(URL('error'))
    if g == 'other':
        #no genome stuff
        feat = None; fastafmt=''; previous=''; next=''
        seq = '' #get the fasta seq
        sd=None
    else:
        feat = None; fastafmt=None
        feat, fastafmt, previous, next = getFeature(g,tag)
        seq = feat['translation']
        sd=None
    if request.vars.annotation == 'on':
        sd = getSeqDepot(seq)['t']
    script, div = plotTracks(preds,tag,n=n,title=title,width=width,seqdepot=sd,bcell=bcell)
    #distplots = scoredistplots(preds)
    summary = summaryhtml(preds)
    #get all results into tables
    data = {}
    for p in preds:
        data[p] = preds[p].reshape()
    data = dict(data)
    #top binders shared between the two MHC-II methods, when both present
    b = base.getBinders(preds,n=n)
    kys = b.keys()
    if 'tepitope' in kys and 'netmhciipan' in kys:
        shared = pd.merge(b['tepitope'],b['netmhciipan'],
                    on=['peptide','name','pos','core'],
                    copy=False).sort('pos')
    else:
        shared=''
    seqtable = showSequence(seq,preds)
    #info
    path = os.path.join(datapath, label)
    found = [(m,preds[m].getLength()) for m in preds]
    info = TABLE(*found,_class='tinytable')
    return dict(script=script,div=div,feat=feat,fastafmt=fastafmt,data=data,
        b=b,summary=summary,shared=shared,n=n,seqtable=seqtable,cutoffs=cutoffs,
        genome=g,tag=tag,label=label,info=info,path=path)
def error():
    """Generic error page; the view needs no template variables."""
    return {}
def formerror():
    """Display a form-validation error message passed via request vars."""
    return dict(msg=request.vars.msg)
@auth.requires_login()
def genomeanalysis():
    """Genome wide analysis of epitope predictions - parameter form page."""
    defaultid = 'results_test'
    #populate the selects from current prediction ids and genome names
    predids = [p.identifier for p in db().select(db.predictions.ALL)]
    opts1 = [OPTION(i,value=i) for i in predids]
    genomes = [p.name for p in db().select(db.genomes.ALL)]
    opts2 = [OPTION(i,value=i) for i in genomes]
    form = FORM(TABLE(
            TR(TD(LABEL('id:',_for='genome')),
            TD(SELECT(*opts1,_name='label',
                value=defaultid, _style="width:150px;"))),
            TR(TD(LABEL('genome:',_for='genome')),
            TD(SELECT(*opts2,_name='genome',value='',_style="width:150px;"))),
            TR(TD(LABEL('method:',_for='method')),
            TD(SELECT(*methods,_name='method',value='tepitope',_style="width:150px;"))),
            TR(TD(LABEL('min alleles:',_for='n')),
            TD(INPUT(_name='n',_type='text',value=3,_style="width:50px;"))),
            TR(TD(LABEL('perc cutoff:',_for='perccutoff')),
            TD(INPUT(_name='perccutoff',_type='text',value='0.96',_style="width:50px;"))),
            TR(TD(),TD(INPUT(_name='submit',_type='submit',_value='Analyse'))),
            _class="smalltable"), _id="myform")
    return dict(form=form)
@auth.requires_login()
def analysegenome():
    """Analyse genome predictions for one method; shows top binders and a
    csv download link."""
    pd.set_option('max_colwidth', 800)
    gname = request.vars.genome
    label = request.vars.label
    method = request.vars.method
    #fall back to defaults when the request omits n / cutoff
    if request.vars.n != None:
        n = int(request.vars.n)
    else:
        n = 3
    if request.vars.perccutoff != None:
        cutoff = float(request.vars.perccutoff)
    else:
        cutoff = 0.96
    b,res,top,cl,fig = genomeAnalysis(label, gname, method, n, cutoff)
    #plothtml = mpld3Plot(fig)
    plothtml=''
    link = A('download binder list',_href=URL('default','analysegenome.csv',extension='',vars=request.vars))
    summary = 'Found %s binders in >=%s alleles from %s proteins' %(len(b),n,len(res))
    return dict(genome=gname,method=method,cutoff=cutoff,res=res,top=top,cl=cl,
                summary=summary, link=link, plothtml=plothtml)
def zip_dataframes(data, filename):
    """Zip dataframes as csv - currently a stub that does nothing.

    NOTE(review): the intended implementation is left commented out below and,
    as written, would write every dataframe under the same archive member
    name; rework before enabling.
    """
    '''import cStringIO, zipfile
    stream = cStringIO.StringIO()
    zip_file = zipfile.ZipFile(stream, "w", zipfile.ZIP_DEFLATED, False)
    for df in data:
        zip_file.writestr(filename, df.to_csv(None, encoding='utf-8', index=False))'''
    return
def compare():
    """Correlate predictions from 2 methods - parameter form page."""
    form = SQLFORM.factory(
        Field('label',requires=IS_IN_DB(db, 'predictions.identifier',zero=None,
            multiple=False),default=1,label='id'),
        Field('genome',requires=IS_IN_DB(db, 'genomes.name', zero=None,
            multiple=False),default=1,label='genome'),
        Field('method1',requires=IS_IN_SET(methods,multiple=False,zero=None),label='method 1'),
        Field('method2',requires=IS_IN_SET(methods,multiple=False,zero=None),label='method 2'),
        Field('n', 'string', label='min alleles',default=3),
        hidden=dict(perccutoff=.98),
        formstyle="table3cols",_id='myform',_class='myform')
    form.element('input[name=n]')['_style'] = 'width:50px;'
    return dict(form=form)
def correlationanalysis():
    """Run the correlation between two prediction methods and plot it.

    Fix: the original set msg when `correlation` returned None but still
    called plotCorrelation(None), which would fail; now returns early.
    """
    if request.vars.method1 == request.vars.method2:
        return dict(res=None, msg='2 methods are the same!')
    res = correlation(**request.vars)
    if res is None:
        return dict(fig='', res=None, msg='no such predictions')
    fig = plotCorrelation(res)
    return dict(fig=fig, res=res, msg=None)
def plotCorrelation(res):
    """Scatter plot of per-protein binder percentages from two methods,
    with hover tooltips; returns embedded bokeh html."""
    from bokeh.models import HoverTool,ColumnDataSource
    from bokeh.plotting import Figure
    width=600
    height=600
    plot = Figure(title='',title_text_font_size="11pt",
            plot_width=width, plot_height=height,
            x_axis_label='method1',y_axis_label='method2',
            tools="pan, wheel_zoom, resize, hover, reset, save",
            background_fill="#FAFAFA")
    #res is expected to carry perc_x/perc_y columns and locus_tag labels
    x=res['perc_x']
    y=res['perc_y']
    source = ColumnDataSource(data=dict(x=x,y=y, protein=res.locus_tag))
    plot.circle(x,y, color='blue', line_color='gray',fill_alpha=0.5, size=10, source=source)
    hover = plot.select(dict(type=HoverTool))
    hover.tooltips = OrderedDict([
        ("binders1", "@x"),
        ("binders2", "@y"),
        ("protein", "@protein"),
    ])
    js,html = embedPlot(plot)
    return html
def conservationAnalysisForm(defaultid='test'):
    """Parameter form for the epitope conservation analysis (includes BLAST
    options such as an entrez query filter)."""
    defaultg = 'MTB-H37Rv'
    #populate selects from current prediction ids and genomes; 'other' allows
    #sequence-only submissions
    predids = [p.identifier for p in db().select(db.predictions.ALL)]
    opts1 = [OPTION(i,value=i) for i in predids]
    genomes = [p.name for p in db().select(db.genomes.ALL)]
    genomes.insert(0,'other')
    opts2 = [OPTION(i,value=i) for i in genomes]
    form = FORM(TABLE(
            TR(TD(LABEL('id:',_for='genome')),
            TD(SELECT(*opts1,_name='label',
                value=defaultid, _style="width:150px;"))),
            TR(TD(LABEL('genome:',_for='genome')),
            TD(SELECT(*opts2,_name='genome',value=defaultg,_style="width:150px;"))),
            TR(TD(LABEL('locus tag:',_for='tag')),
            TD(INPUT(_name='tag',_type='text',value="Rv0001",_style="width:150px;"))),
            TR(TD(LABEL('method:',_for='method')),
            TD(SELECT(*methods,_name='method',value='tepitope',_style="width:150px;"))),
            TR(TD(LABEL('min alleles:',_for='n')),
            TD(INPUT(_name='n',_type='text',value=3,_style="width:50px;"))),
            TR(TD(LABEL('min identity:',_for='identity')),
            TD(INPUT(_name='identity',value=70,_style="width:50px;"))),
            TR(TD(),TD('BLAST options')),
            TR(TD(LABEL('entrez query:',_for='entrezquery')),
            TD(TEXTAREA(_name='entrezquery',value='',_style="height:100px;width:150px;"))),
            TR(TD(),TD(INPUT(_name='submit',_type='submit',_value='Submit'))),
            _class="smalltable"), _id="myform", hidden=dict(width=850))
    return form
@auth.requires_login()
def conservation():
    """Analysis of epitope conservation - renders the parameter form.

    NOTE(review): the commented block below sketches queueing the analysis on
    the scheduler instead of running it inline; not active.
    """
    form = conservationAnalysisForm()
    '''if form.process().accepted:
        session.flash = 'form accepted'
        pvars = {'seq':seq,'hitlist_size':400,'equery':equery}
        task = scheduler.queue_task('doTask', #pvars=request.vars,
            immediate=True, timeout=300)
        print task.id
        status = scheduler.task_status(task.id, output=True)
        result = status.result
        print status'''
    return dict(form=form)
@auth.requires_login()
def conservationanalysis():
    """Analysis of epitope conservation.

    conservationAnalysis returns 1 (no predictions), 2 (no BLAST hits at the
    requested identity) or a (res, alnrows, summary, fig) tuple.
    """
    pd.set_option('max_colwidth', 3000)
    label = request.vars.label
    gname = request.vars.genome
    method = request.vars.method
    n=int(request.vars.n)
    tag = request.vars.tag
    identity = int(request.vars.identity)
    equery = request.vars.entrezquery
    retval = conservationAnalysis(**request.vars)
    msg=''
    if retval == 1:
        msg = 'No predictions found for %s with method %s with n=%s.' %(tag,method,n)
        return dict(res=None,msg=msg)
    elif retval == 2:
        msg = 'No BLAST results at >%s%% sequence identity.' %identity
        return dict(res=None,msg=msg)
    else:
        res, alnrows, summary, fig = retval
    #post-process alignments for display and add links to BLAST records
    alnrows = analysis.getAlignedBlastResults(alnrows)
    alnrows = analysis.setBlastLink(alnrows)
    plothtml = mpld3Plot(fig)
    #permalink reproducing this analysis with the same parameters
    url = A('direct link to these results', _href=URL('default','conservationanalysis.load',
        vars={'label':label,'genome':gname,'tag':tag,'method':method,'n':n,
        'identity':identity,'equery':equery},extension=''))
    return dict(res=res,alnrows=alnrows,summary=summary,plothtml=plothtml,
                msg=msg,permlink=url)
def submissionForm():
    """Form for job submission.

    Two side-by-side tables: job parameters (label, genome/fasta, methods,
    peptide length) and allele selections (MHC-I, MHC-II DRB/DQP or a stored
    preset). The submitting user is carried in a hidden field.
    """
    applySettings() #so that paths to predictors work
    #existing labels, genomes, fasta sets and allele presets from the db
    predids = [p.identifier for p in db().select(db.predictions.ALL)]
    opts1 = [OPTION(i,value=i) for i in predids]
    genomes = [p.name for p in db().select(db.genomes.ALL)]
    genomes.insert(0,'')
    opts2 = [OPTION(i,value=i) for i in genomes]
    seqs = [p.name for p in db().select(db.sequences.ALL)]
    seqs.insert(0,'')
    opts3 = [OPTION(i,value=i) for i in seqs]
    #allele lists supported by each predictor
    p1 = base.getPredictor('iedbmhc1')
    mhc1alleles = p1.getMHCIList()
    p2 = base.getPredictor('netmhciipan')
    mhc2alleles = p2.getAlleleList()
    drballeles = base.getDRBList(mhc2alleles)
    dqpalleles = base.getDQPList(mhc2alleles)
    tepitopealleles = tepitope.getAlleles()
    #get all possible alleles for both MHCII methods
    drballeles = sorted(list(set(drballeles+tepitopealleles)))
    lengths = [9,11,13,15]
    #presets = presetalleles.keys()
    presets = [p.name for p in db().select(db.allelepresets.ALL)]
    presets.insert(0,'')
    user = session.auth.user['first_name']
    form = FORM(DIV(
            TABLE(
            TR(TD(LABEL('current labels:',_for='genome')),
            TD(SELECT(*opts1,_name='label',
                value='', _style="width:200px;"))),
            TR(TD(LABEL('OR new label:',_for='genome')),
            TD(INPUT(_name='newlabel',_type='text',value="",_style="width:200px;"))),
            TR(TD(LABEL('genome:',_for='genome')),
            TD(SELECT(*opts2,_name='genome',value='',_style="width:200px;"))),
            TR(TD(LABEL('locus tags:',_for='names')),
            TD(INPUT(_name='names',_type='text',value="",_style="width:200px;"))),
            TR(TD(LABEL('fasta seqs:',_for='fasta')),
            TD(SELECT(*opts3,_name='fasta',value='',_style="width:200px;"))),
            TR(TD(LABEL('methods:',_for='methods')),
            TD(SELECT(*methods,_name='methods',value='tepitope',_size=4,_style="width:200px;",
                _multiple=True))),
            TR(TD(LABEL('mhc1 method:',_for='iedbmethod')),
            TD(SELECT(*iedbmethods,_name='iedbmethod',value='IEDB_recommended',_size=1,
                _style="width:200px;"))),
            TR(TD(LABEL('bcell method:',_for='bcellmethod')),
            TD(SELECT(*bcellmethods,_name='bcellmethod',value='Bepipred',_size=1,
                _style="width:200px;"))),
            TR(TD(LABEL('length:',_for='length')),
            TD(SELECT(*lengths,_name='length',value=11,_size=1,_style="width:70px;"))),
            TR(TD(),TD(INPUT(_name='submit',_type='submit',_value='Submit Job'))),
            _class="smalltable"),_style='float: left'),
            DIV(TABLE(
            TR(TD(LABEL('MHC-I alleles:',_for='alleles')),
            TD(SELECT(*mhc1alleles,_name='mhc1alleles',value='HLA-A*01:01-10',_size=6,_style="width:200px;",
                _multiple=True))),
            TR(TD(LABEL('MHC-II DRB:',_for='alleles')),
            TD(SELECT(*drballeles,_name='drballeles',value='HLA-DRB1*0101',_size=8,_style="width:200px;",
                _multiple=True))),
            TR(TD(LABEL('MHC-II DQ/P:',_for='alleles')),
            TD(SELECT(*dqpalleles,_name='dqpalleles',value='',_size=6,_style="width:200px;",
                _multiple=True))),
            TR(TD(LABEL('OR Use Preset:',_for='preset')),
            TD(SELECT(*presets,_name='preset',value="",_style="width:200px;"))),
            _class="smalltable"),_style='float: left'),
            _id="myform", hidden=dict(user=user))
    return form
@auth.requires_login()
def submit():
    """Process job for submission and queue the prediction task."""
    form = submissionForm()
    if form.process().accepted:
        #either a genome or a fasta set must be chosen
        if form.vars.genome == '' and form.vars.fasta == '':
            msg = 'provide a genome OR a sequence'
            redirect(URL('formerror',vars={'msg':msg}))
        session.flash = 'form accepted'
        #3-day timeout to allow genome-wide runs to finish
        task = scheduler.queue_task('runPredictors', pvars=request.vars,
                immediate=True, timeout=259200)
        redirect(URL('jobsubmitted', vars={'id':task.id}))
    elif form.errors:
        response.flash = 'form has errors'
    return dict(form=form)
@auth.requires_login()
def jobsubmitted():
    """Status page for a queued scheduler task, looked up by id."""
    taskid = int(request.vars['id'])
    return dict(taskid=taskid,
                status=scheduler.task_status(taskid, output=True))
def findForm():
    """Find form: search one genome by gene name and/or description."""
    result={}
    form = SQLFORM.factory(
        Field('genome',requires=IS_IN_DB(db, 'genomes.name', zero=None,
            multiple=False),default=1,label='genome'),
        Field('gene', 'string', label='gene',default=''),
        Field('description', 'string', label='description',default=''),
        submit_button="Search",
        _id='findform',_class='myform')
    form.element('input[name=gene]')['_style'] = 'height:30px;'
    form.element('input[name=description]')['_style'] = 'height:30px;'
    return form
def search():
    """Search page - thin wrapper around the find form."""
    return dict(form=findForm())
def find():
    """Show search results for a gene/description query over one genome."""
    msg = T(" ")
    results=pd.DataFrame()
    pd.set_option('display.max_colwidth', -1)   #don't truncate descriptions
    gene = request.vars.gene
    desc = request.vars.description
    genome = request.vars.genome
    results = doSearch(genome, gene, desc)
    msg = 'found %s proteins' %len(results)
    #lst = list(results.index)
    link = A('download results',_href=URL('default','find.csv',extension='',vars=request.vars))
    return dict(msg=msg,link=link,results=results)
def iedbForm():
    """Search form for external epitope data sources (via pepdata)."""
    dbs = ['iedb','hpv','imma2','hiv_frahm','tcga','tantigen']
    types = ['mhc','tcell']
    form = SQLFORM.factory(
        Field('database', requires=IS_IN_SET(dbs,multiple=False,zero=None),label='database'),
        Field('type', requires=IS_IN_SET(types,multiple=False,zero=None),label='type'),
        Field('mhc_class', requires=IS_IN_SET([1,2],multiple=False,zero=None), label='mhc_class',default=2),
        Field('epitope', 'string', label='epitope'),
        submit_button="Search",
        _id='iedbform',_class='iedbform')
    return form
def datasourcesearch():
    """Search-IEDB page - renders the data source form."""
    return dict(form=iedbForm())
def datasource():
    """Use pepdata to fetch and display external epitope datasets.

    Fixes: the form (iedbForm) offers 'imma2' in lowercase but this compared
    against 'IMMA2', making that branch unreachable; also guard against an
    unknown database name leaving `df` undefined.
    """
    db = request.vars.database
    epitope = request.vars.epitope
    from pepdata import iedb, hpv, imma2, hiv_frahm, tcga, tantigen
    if db == 'iedb':
        df = iedb.mhc.load_dataframe(mhc_class=2,human=False)
        df.columns = df.columns.get_level_values(1)
        df = df[df.columns[5:18]]
        #df = iedb.tcell.load_dataframe()
        #if epitope != '':
        #    df = df[df['Description'].str.contains(epitope)]
    elif db == 'hpv':
        df = hpv.load_mhc()
        #df = hpv.load_tcell()
    elif db == 'imma2':   #was 'IMMA2', which never matched the form value
        df, non = imma2.load_classes()
    elif db == 'hiv_frahm':
        df = hiv_frahm.load_dataframe()
    elif db == 'tcga':
        df = tcga.load_dataframe(cancer_type='paad')
        df = df[:50]
    elif db == 'tantigen':
        df = tantigen.load_mhc()
        #df = tantigen.load_tcell()
    else:
        df = pd.DataFrame()   #unknown source: show an empty table
    if len(df) > 5000:
        df = df[:5000]   #cap rows rendered in the view
    return dict(results=df)
@auth.requires_login()
def test():
    """Developer test page: plot predictions for CFP-10 (Rv3874) together
    with experimental SFC data read from a local csv file."""
    l='human' #'results_emida'
    g='MTB-H37Rv'
    tag='Rv3874'
    feat, fastafmt, previous, next = getFeature(g,tag)
    seq = feat['translation']   #NOTE(review): unused below - confirm
    preds,bcell,c = getPredictions(l,g,tag)
    exp = pd.read_csv(os.path.join(home, 'epitopedata/cfp10_regions.csv'))
    exp = exp[exp.mean_sfc>0.0]   #keep positive responses only
    plot,figure = plotTracks(preds,tag,n=3,title='test',exp=exp)
    return dict(figure=figure,exp=exp)
def plotExp(plot, data):
    """Overlay experimental response values (mean_sfc vs pos) on a bokeh
    plot as translucent bars; mutates `plot` in place."""
    w = 15
    h = 40
    #centre each bar on its position window
    xs = [i + w/2.0 for i in data.pos]
    #shift values to be non-negative, then scale into a 0..h band (+3 offset)
    ys = data.mean_sfc
    ys = ys + abs(min(ys))
    ys = ys*(h/max(ys)) + 3
    plot.rect(x=xs, y=1, width=w, height=ys, color="blue", alpha=0.3)
    return
def bokehtest():
    """Bokeh test page: three stacked random scatter plots with hover."""
    from bokeh.models import Range1d, HoverTool, GridPlot, ColumnDataSource
    from bokeh.plotting import Figure
    #from bokeh.layouts import gridplot
    N = 100
    x = np.random.random(size=N) * 100
    y = np.random.random(size=N) * 100
    radii = np.random.random(size=N) * 3
    #hex colors derived from the point coordinates
    colors = ["#%02x%02x%02x" % (r, g, 150) for r, g in zip(np.floor(50+2*x), np.floor(30+2*y))]
    source = ColumnDataSource(data=dict(x=x,y=y,radius=radii))
    def makeplot():
        #one scatter panel; all three panels share the same data source
        p = Figure(plot_width=800, plot_height=200,tools="hover,pan",title=None)
        p.scatter(x, y, radius=radii,
            fill_color=colors, fill_alpha=0.6,
            line_color='gray', source=source)
        hover = p.select(dict(type=HoverTool))
        hover.tooltips = OrderedDict([
            ("radius", "@radius")])
        p.xgrid.grid_line_color = None
        p.ygrid.grid_line_color = None
        return p
    p1 = makeplot()
    p2 = makeplot()
    p3 = makeplot()
    p = GridPlot(children=[[p1],[p2],[p3]])
    #js,html = embedPlot(p)
    script, div = embedPlot(p)
    return dict(div=div,script=script)
@auth.requires_login()
def admin():
    """Settings page: edit the [base] section of the app config file."""
    parser,conffile = getConfig()
    options = dict(parser.items('base'))
    form = SQLFORM.dictform(options)
    if form.process().accepted:
        #write every submitted value back to the config, then reload
        for i in dict(parser.items('base')):
            print i
            parser.set('base', i, form.vars[i])
        parser.write(open(conffile,'w'))
        response.flash='Saved'
        redirect(URL('default','admin'))
    return dict(form=form)
def about():
    """About page; passes a fixed title message to the view."""
    #fp = os.path.join(request.folder,'static/docs','about.txt')
    message = 'About this page'
    return {'msg': message}
def citation():
    """Citation page; the view needs no template variables."""
    return {}
def help():
    """Help page; msg is an (empty) translatable string."""
    return dict(msg=T(''))
| dmnfarrell/epitopemap | controllers/default.py | Python | apache-2.0 | 49,804 |
package apimanagement
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"net/http"
)
// LoggersClient provides operations on Logger entities of an Azure API
// Management service deployment. (AutoRest-generated boilerplate comment
// reworded; behavior unchanged.)
type LoggersClient struct {
	// ManagementClient carries the base URI, subscription id and API version
	// shared by all apimanagement clients.
	ManagementClient
}
// NewLoggersClient creates an instance of the LoggersClient client using the
// package default base URI.
func NewLoggersClient(subscriptionID string) LoggersClient {
	return NewLoggersClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewLoggersClientWithBaseURI creates an instance of the LoggersClient client
// against a caller-supplied base URI (e.g. for sovereign clouds).
func NewLoggersClientWithBaseURI(baseURI string, subscriptionID string) LoggersClient {
	return LoggersClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CreateOrUpdate creates or Updates a logger.
//
// resourceGroupName is the name of the resource group. serviceName is the name
// of the API Management service. loggerid is identifier of the logger.
// parameters is create parameters.
func (client LoggersClient) CreateOrUpdate(resourceGroupName string, serviceName string, loggerid string, parameters LoggerCreateParameters) (result autorest.Response, err error) {
	// Client-side validation mirrors the service constraints so bad input
	// fails fast without a network round trip.
	if err := validation.Validate([]validation.Validation{
		{TargetValue: serviceName,
			Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
				{Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil},
				{Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}},
		{TargetValue: loggerid,
			Constraints: []validation.Constraint{{Target: "loggerid", Name: validation.MaxLength, Rule: 256, Chain: nil},
				{Target: "loggerid", Name: validation.Pattern, Rule: `^[^*#&+:<>?]+$`, Chain: nil}}},
		{TargetValue: parameters,
			Constraints: []validation.Constraint{{Target: "parameters.Type", Name: validation.Null, Rule: true, Chain: nil},
				{Target: "parameters.Credentials", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
		return result, validation.NewErrorWithValidationError(err, "apimanagement.LoggersClient", "CreateOrUpdate")
	}

	// Standard autorest pipeline: Prepare -> Send -> Respond; each failure is
	// wrapped with client/method context for diagnostics.
	req, err := client.CreateOrUpdatePreparer(resourceGroupName, serviceName, loggerid, parameters)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "CreateOrUpdate", nil, "Failure preparing request")
	}

	resp, err := client.CreateOrUpdateSender(req)
	if err != nil {
		result.Response = resp
		return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "CreateOrUpdate", resp, "Failure sending request")
	}

	result, err = client.CreateOrUpdateResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "CreateOrUpdate", resp, "Failure responding to request")
	}

	return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request: a PUT with the
// logger parameters as the JSON body against the loggers resource path.
func (client LoggersClient) CreateOrUpdatePreparer(resourceGroupName string, serviceName string, loggerid string, parameters LoggerCreateParameters) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"loggerid":          autorest.Encode("path", loggerid),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"serviceName":       autorest.Encode("path", serviceName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	queryParameters := map[string]interface{}{
		"api-version": client.APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsJSON(),
		autorest.AsPut(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/loggers/{loggerid}", pathParameters),
		autorest.WithJSON(parameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare(&http.Request{})
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error. Delegates to the client's
// configured sender chain (retry/inspection decorators).
func (client LoggersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req)
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body. 200/201/204 are the accepted status codes;
// anything else becomes an error.
func (client LoggersClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusNoContent),
		autorest.ByClosing())
	result.Response = resp
	return
}
// Delete deletes the specified logger.
//
// resourceGroupName is the name of the resource group. serviceName is the name
// of the API Management service. loggerid is identifier of the logger. ifMatch
// is the entity state (Etag) version of the logger to delete. A value of "*"
// can be used for If-Match to unconditionally apply the operation.
func (client LoggersClient) Delete(resourceGroupName string, serviceName string, loggerid string, ifMatch string) (result autorest.Response, err error) {
// Validate serviceName client-side (length 1-50, letters/digits with inner
// hyphens) before spending a network round trip.
if err := validation.Validate([]validation.Validation{
{TargetValue: serviceName,
Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}}}); err != nil {
return result, validation.NewErrorWithValidationError(err, "apimanagement.LoggersClient", "Delete")
}
// Prepare the DELETE request; a failure here never reached the wire.
req, err := client.DeletePreparer(resourceGroupName, serviceName, loggerid, ifMatch)
if err != nil {
return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "Delete", nil, "Failure preparing request")
}
// Send; on transport failure the (possibly nil) response is still surfaced
// to the caller through result.Response.
resp, err := client.DeleteSender(req)
if err != nil {
result.Response = resp
return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "Delete", resp, "Failure sending request")
}
// Interpret the response; the naked return yields the named result and the
// (possibly context-wrapped) err.
result, err = client.DeleteResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "Delete", resp, "Failure responding to request")
}
return
}
// DeletePreparer prepares the Delete request.
func (client LoggersClient) DeletePreparer(resourceGroupName string, serviceName string, loggerid string, ifMatch string) (*http.Request, error) {
	// URL path segments identifying the logger resource.
	pathParams := map[string]interface{}{
		"loggerid":          autorest.Encode("path", loggerid),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"serviceName":       autorest.Encode("path", serviceName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}
	// The query string carries only the API version.
	queryParams := map[string]interface{}{
		"api-version": client.APIVersion,
	}
	// DELETE carries the optimistic-concurrency Etag in an If-Match header.
	decorators := []autorest.PrepareDecorator{
		autorest.AsDelete(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/loggers/{loggerid}", pathParams),
		autorest.WithQueryParameters(queryParams),
		autorest.WithHeader("If-Match", autorest.String(ifMatch)),
	}
	return autorest.CreatePreparer(decorators...).Prepare(&http.Request{})
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client LoggersClient) DeleteSender(req *http.Request) (*http.Response, error) {
	// Delegate to the client's configured sender pipeline.
	resp, err := autorest.SendWithSender(client, req)
	return resp, err
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client LoggersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
	// Record the raw response up front so callers can inspect it even when
	// status validation below fails.
	result.Response = resp
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
		autorest.ByClosing())
	return result, err
}
// Get gets the details of the logger specified by its identifier.
//
// resourceGroupName is the name of the resource group. serviceName is the name
// of the API Management service. loggerid is identifier of the logger.
func (client LoggersClient) Get(resourceGroupName string, serviceName string, loggerid string) (result LoggerResponse, err error) {
// Validate serviceName client-side (length 1-50, letters/digits with inner
// hyphens) before spending a network round trip.
if err := validation.Validate([]validation.Validation{
{TargetValue: serviceName,
Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}}}); err != nil {
return result, validation.NewErrorWithValidationError(err, "apimanagement.LoggersClient", "Get")
}
// Prepare the GET request; a failure here never reached the wire.
req, err := client.GetPreparer(resourceGroupName, serviceName, loggerid)
if err != nil {
return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "Get", nil, "Failure preparing request")
}
// Send; on transport failure the (possibly nil) response is still surfaced
// to the caller through result.Response.
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "Get", resp, "Failure sending request")
}
// Decode the body into result; the naked return yields the named values.
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "Get", resp, "Failure responding to request")
}
return
}
// GetPreparer prepares the Get request.
func (client LoggersClient) GetPreparer(resourceGroupName string, serviceName string, loggerid string) (*http.Request, error) {
	// URL path segments identifying the logger resource.
	pathParams := map[string]interface{}{
		"loggerid":          autorest.Encode("path", loggerid),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"serviceName":       autorest.Encode("path", serviceName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}
	// The query string carries only the API version.
	queryParams := map[string]interface{}{
		"api-version": client.APIVersion,
	}
	decorators := []autorest.PrepareDecorator{
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/loggers/{loggerid}", pathParams),
		autorest.WithQueryParameters(queryParams),
	}
	return autorest.CreatePreparer(decorators...).Prepare(&http.Request{})
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client LoggersClient) GetSender(req *http.Request) (*http.Response, error) {
	// Delegate to the client's configured sender pipeline.
	resp, err := autorest.SendWithSender(client, req)
	return resp, err
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client LoggersClient) GetResponder(resp *http.Response) (result LoggerResponse, err error) {
// Validate the status, unmarshal the JSON body into result, then close the
// body. result.Response must be set AFTER Respond runs, since
// ByUnmarshallingJSON writes into &result.
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListByService lists a collection of loggers in the specified service
// instance.
//
// resourceGroupName is the name of the resource group. serviceName is the name
// of the API Management service. filter is | Field | Supported operators |
// Supported functions |
// |-------|------------------------|---------------------------------------------|
// | id | ge, le, eq, ne, gt, lt | substringof, contains, startswith,
// endswith |
// | type | eq |
// | top is number of records to return. skip is number of records to skip.
func (client LoggersClient) ListByService(resourceGroupName string, serviceName string, filter string, top *int32, skip *int32) (result LoggerCollection, err error) {
// Validate inputs client-side: serviceName (length 1-50, letters/digits with
// inner hyphens), top >= 1 when set, skip >= 0 when set.
if err := validation.Validate([]validation.Validation{
{TargetValue: serviceName,
Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}},
{TargetValue: top,
Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMinimum, Rule: 1, Chain: nil}}}}},
{TargetValue: skip,
Constraints: []validation.Constraint{{Target: "skip", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "skip", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil}}}}}}); err != nil {
return result, validation.NewErrorWithValidationError(err, "apimanagement.LoggersClient", "ListByService")
}
// Prepare the GET request; a failure here never reached the wire.
req, err := client.ListByServicePreparer(resourceGroupName, serviceName, filter, top, skip)
if err != nil {
return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "ListByService", nil, "Failure preparing request")
}
// Send; on transport failure the (possibly nil) response is still surfaced
// to the caller through result.Response.
resp, err := client.ListByServiceSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "ListByService", resp, "Failure sending request")
}
// Decode the first page; use ListByServiceNextResults to page further.
result, err = client.ListByServiceResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "ListByService", resp, "Failure responding to request")
}
return
}
// ListByServicePreparer prepares the ListByService request.
func (client LoggersClient) ListByServicePreparer(resourceGroupName string, serviceName string, filter string, top *int32, skip *int32) (*http.Request, error) {
	// URL path segments identifying the loggers collection.
	pathParams := map[string]interface{}{
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"serviceName":       autorest.Encode("path", serviceName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}
	// Start with the mandatory API version; OData paging/filter parameters
	// are appended only when the caller supplied them.
	queryParams := map[string]interface{}{
		"api-version": client.APIVersion,
	}
	if filter != "" {
		queryParams["$filter"] = autorest.Encode("query", filter)
	}
	if top != nil {
		queryParams["$top"] = autorest.Encode("query", *top)
	}
	if skip != nil {
		queryParams["$skip"] = autorest.Encode("query", *skip)
	}
	decorators := []autorest.PrepareDecorator{
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/loggers", pathParams),
		autorest.WithQueryParameters(queryParams),
	}
	return autorest.CreatePreparer(decorators...).Prepare(&http.Request{})
}
// ListByServiceSender sends the ListByService request. The method will close the
// http.Response Body if it receives an error.
func (client LoggersClient) ListByServiceSender(req *http.Request) (*http.Response, error) {
	// Delegate to the client's configured sender pipeline.
	resp, err := autorest.SendWithSender(client, req)
	return resp, err
}
// ListByServiceResponder handles the response to the ListByService request. The method always
// closes the http.Response Body.
func (client LoggersClient) ListByServiceResponder(resp *http.Response) (result LoggerCollection, err error) {
// Validate the status, unmarshal the JSON body into result, then close the
// body. result.Response must be set AFTER Respond runs, since
// ByUnmarshallingJSON writes into &result.
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListByServiceNextResults retrieves the next set of results, if any.
func (client LoggersClient) ListByServiceNextResults(lastResults LoggerCollection) (result LoggerCollection, err error) {
// Build the request for the next page from the previous page's next link.
req, err := lastResults.LoggerCollectionPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "ListByService", nil, "Failure preparing next results request")
}
// A nil request with no error means there is no next page: return the
// zero-valued collection and nil error.
if req == nil {
return
}
// Send; on transport failure the (possibly nil) response is still surfaced
// to the caller through result.Response.
resp, err := client.ListByServiceSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "ListByService", resp, "Failure sending next results request")
}
// Decode the page; the naked return yields the named result and err.
result, err = client.ListByServiceResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "ListByService", resp, "Failure responding to next results request")
}
return
}
// Update updates an existing logger.
//
// resourceGroupName is the name of the resource group. serviceName is the name
// of the API Management service. loggerid is identifier of the logger.
// parameters is update parameters. ifMatch is the entity state (Etag) version
// of the logger to update. A value of "*" can be used for If-Match to
// unconditionally apply the operation.
func (client LoggersClient) Update(resourceGroupName string, serviceName string, loggerid string, parameters LoggerUpdateParameters, ifMatch string) (result autorest.Response, err error) {
// Validate serviceName client-side (length 1-50, letters/digits with inner
// hyphens) before spending a network round trip.
if err := validation.Validate([]validation.Validation{
{TargetValue: serviceName,
Constraints: []validation.Constraint{{Target: "serviceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
{Target: "serviceName", Name: validation.MinLength, Rule: 1, Chain: nil},
{Target: "serviceName", Name: validation.Pattern, Rule: `^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$`, Chain: nil}}}}); err != nil {
return result, validation.NewErrorWithValidationError(err, "apimanagement.LoggersClient", "Update")
}
// Prepare the PATCH request; a failure here never reached the wire.
req, err := client.UpdatePreparer(resourceGroupName, serviceName, loggerid, parameters, ifMatch)
if err != nil {
return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "Update", nil, "Failure preparing request")
}
// Send; on transport failure the (possibly nil) response is still surfaced
// to the caller through result.Response.
resp, err := client.UpdateSender(req)
if err != nil {
result.Response = resp
return result, autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "Update", resp, "Failure sending request")
}
// Interpret the response; the naked return yields the named result and err.
result, err = client.UpdateResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "apimanagement.LoggersClient", "Update", resp, "Failure responding to request")
}
return
}
// UpdatePreparer prepares the Update request.
func (client LoggersClient) UpdatePreparer(resourceGroupName string, serviceName string, loggerid string, parameters LoggerUpdateParameters, ifMatch string) (*http.Request, error) {
	// URL path segments identifying the logger resource.
	pathParams := map[string]interface{}{
		"loggerid":          autorest.Encode("path", loggerid),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"serviceName":       autorest.Encode("path", serviceName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}
	// The query string carries only the API version.
	queryParams := map[string]interface{}{
		"api-version": client.APIVersion,
	}
	// PATCH with a JSON body; the optimistic-concurrency Etag travels in the
	// If-Match header.
	decorators := []autorest.PrepareDecorator{
		autorest.AsJSON(),
		autorest.AsPatch(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/loggers/{loggerid}", pathParams),
		autorest.WithJSON(parameters),
		autorest.WithQueryParameters(queryParams),
		autorest.WithHeader("If-Match", autorest.String(ifMatch)),
	}
	return autorest.CreatePreparer(decorators...).Prepare(&http.Request{})
}
// UpdateSender sends the Update request. The method will close the
// http.Response Body if it receives an error.
func (client LoggersClient) UpdateSender(req *http.Request) (*http.Response, error) {
	// Delegate to the client's configured sender pipeline.
	resp, err := autorest.SendWithSender(client, req)
	return resp, err
}
// UpdateResponder handles the response to the Update request. The method always
// closes the http.Response Body.
func (client LoggersClient) UpdateResponder(resp *http.Response) (result autorest.Response, err error) {
	// Record the raw response up front so callers can inspect it even when
	// status validation below fails.
	result.Response = resp
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
		autorest.ByClosing())
	return result, err
}
| stubey/azure-sdk-for-go | arm/apimanagement/loggers.go | GO | apache-2.0 | 20,774 |
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"sync"
"time"
"github.com/prometheus/common/log"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/util/httputil"
"github.com/prometheus/prometheus/util/strutil"
)
const (
// sourceServicePrefix prefixes per-service target-group source names.
sourceServicePrefix = "services"
// metaLabelPrefix is the meta prefix used for all meta labels
// produced by this discovery mechanism.
metaLabelPrefix = model.MetaLabelPrefix + "kubernetes_"
// serviceNamespaceLabel is the name for the label containing a target's service namespace.
serviceNamespaceLabel = metaLabelPrefix + "service_namespace"
// serviceNameLabel is the name for the label containing a target's service name.
serviceNameLabel = metaLabelPrefix + "service_name"
// nodeLabelPrefix is the prefix for the node labels.
nodeLabelPrefix = metaLabelPrefix + "node_label_"
// serviceLabelPrefix is the prefix for the service labels.
serviceLabelPrefix = metaLabelPrefix + "service_label_"
// serviceAnnotationPrefix is the prefix for the service annotations.
serviceAnnotationPrefix = metaLabelPrefix + "service_annotation_"
// nodesTargetGroupName is the name given to the target group for nodes.
nodesTargetGroupName = "nodes"
// apiServersTargetGroupName is the name given to the target group for API servers.
apiServersTargetGroupName = "apiServers"
// roleLabel is the name for the label containing a target's role.
roleLabel = metaLabelPrefix + "role"
// In-cluster service-account credential locations mounted by Kubernetes.
serviceAccountToken = "/var/run/secrets/kubernetes.io/serviceaccount/token"
serviceAccountCACert = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
// Kubernetes REST API endpoints (v1) used for listing and watching.
apiVersion = "v1"
apiPrefix = "/api/" + apiVersion
nodesURL = apiPrefix + "/nodes"
servicesURL = apiPrefix + "/services"
endpointsURL = apiPrefix + "/endpoints"
// serviceEndpointsURL is a format string taking namespace and endpoint name.
serviceEndpointsURL = apiPrefix + "/namespaces/%s/endpoints/%s"
)
// Discovery implements a TargetProvider for Kubernetes services.
type Discovery struct {
// client is the HTTP client used for all API-server requests.
client *http.Client
// Conf holds the Kubernetes SD configuration this discovery was built from.
Conf *config.KubernetesSDConfig
// apiServers is the rotating failover list of API servers; guarded by apiServersMu.
apiServers []config.URL
apiServersMu sync.RWMutex
// nodes maps node name -> node; guarded by nodesMu.
nodes map[string]*Node
// services maps namespace -> service name -> service; guarded by servicesMu.
services map[string]map[string]*Service
nodesMu sync.RWMutex
servicesMu sync.RWMutex
// runDone signals shutdown of the Run loop.
runDone chan struct{}
}
// Initialize sets up the discovery for usage.
func (kd *Discovery) Initialize() error {
	// Build the HTTP client (TLS / bearer-token aware) first so a bad
	// configuration fails fast before any state is touched.
	httpClient, err := newKubernetesHTTPClient(kd.Conf)
	if err != nil {
		return err
	}
	kd.client = httpClient
	kd.apiServers = kd.Conf.APIServers
	kd.runDone = make(chan struct{})
	return nil
}
// Sources implements the TargetProvider interface.
func (kd *Discovery) Sources() []string {
	// One source per configured API server.
	srcs := make([]string, 0, len(kd.apiServers))
	for _, apiServer := range kd.apiServers {
		srcs = append(srcs, apiServersTargetGroupName+":"+apiServer.Host)
	}
	// Plus one source per node currently known to the API server.
	nodes, _, err := kd.getNodes()
	if err != nil {
		// If we can't list nodes then we can't watch them. Assume this is a misconfiguration
		// & log & return empty.
		log.Errorf("Unable to initialize Kubernetes nodes: %s", err)
		return []string{}
	}
	srcs = append(srcs, kd.nodeSources(nodes)...)
	// Plus one source per service in every namespace.
	services, _, err := kd.getServices()
	if err != nil {
		// If we can't list services then we can't watch them. Assume this is a misconfiguration
		// & log & return empty.
		log.Errorf("Unable to initialize Kubernetes services: %s", err)
		return []string{}
	}
	return append(srcs, kd.serviceSources(services)...)
}
// nodeSources derives one source name per known node, using the nodes
// target-group name as prefix.
func (kd *Discovery) nodeSources(nodes map[string]*Node) []string {
	var names []string
	for nodeName := range nodes {
		names = append(names, nodesTargetGroupName+":"+nodeName)
	}
	return names
}
// serviceSources derives one source name per service across all namespaces.
func (kd *Discovery) serviceSources(services map[string]map[string]*Service) []string {
	var names []string
	for _, namespace := range services {
		for _, svc := range namespace {
			names = append(names, serviceSource(svc))
		}
	}
	return names
}
// Run implements the TargetProvider interface.
// It emits an initial API-server target group, starts node and service
// watchers, and forwards every resulting target-group update to ch until
// done is closed.
func (kd *Discovery) Run(ch chan<- config.TargetGroup, done <-chan struct{}) {
defer close(ch)
// Send the static API-server group first so scraping of the masters can
// begin immediately.
if tg := kd.updateAPIServersTargetGroup(); tg != nil {
select {
case ch <- *tg:
case <-done:
return
}
}
retryInterval := time.Duration(kd.Conf.RetryInterval)
// Buffered so the watch goroutines are not immediately blocked on this loop.
update := make(chan interface{}, 10)
go kd.watchNodes(update, done, retryInterval)
go kd.startServiceWatch(update, done, retryInterval)
var tg *config.TargetGroup
for {
select {
case <-done:
return
case event := <-update:
// Translate each watch event into a (possibly nil) target group.
switch obj := event.(type) {
case *nodeEvent:
kd.updateNode(obj.Node, obj.EventType)
tg = kd.updateNodesTargetGroup()
case *serviceEvent:
tg = kd.updateService(obj.Service, obj.EventType)
case *endpointsEvent:
tg = kd.updateServiceEndpoints(obj.Endpoints, obj.EventType)
}
}
// nil means the event produced no change worth publishing.
if tg == nil {
continue
}
select {
case ch <- *tg:
case <-done:
return
}
}
}
// queryAPIServerPath issues a GET for the given API path, failing over
// between the configured API servers as needed.
func (kd *Discovery) queryAPIServerPath(path string) (*http.Response, error) {
	req, reqErr := http.NewRequest("GET", path, nil)
	if reqErr != nil {
		return nil, reqErr
	}
	return kd.queryAPIServerReq(req)
}
// queryAPIServerReq sends req to the currently-preferred API server,
// rotating through the configured servers on failure until one succeeds or
// every server has been tried once.
func (kd *Discovery) queryAPIServerReq(req *http.Request) (*http.Response, error) {
	// Lock in case we need to rotate API servers to request.
	kd.apiServersMu.Lock()
	defer kd.apiServersMu.Unlock()
	var lastErr error
	for i := 0; i < len(kd.apiServers); i++ {
		// Clone the request AND its URL. Copying only the request struct
		// would alias the original *url.URL, so setting Host/Scheme below
		// would silently mutate the caller's request.
		cloneReq := *req
		cloneURL := *req.URL
		cloneReq.URL = &cloneURL
		cloneReq.URL.Host = kd.apiServers[0].Host
		cloneReq.URL.Scheme = kd.apiServers[0].Scheme
		res, err := kd.client.Do(&cloneReq)
		if err == nil {
			return res, nil
		}
		lastErr = err
		// Push the failing server to the back so the next attempt (and
		// subsequent calls) prefer a different one.
		kd.rotateAPIServers()
	}
	return nil, fmt.Errorf("Unable to query any API servers: %v", lastErr)
}
// rotateAPIServers moves the first API server to the back of the list.
// It is a no-op when zero or one server is configured. Callers must hold
// apiServersMu.
func (kd *Discovery) rotateAPIServers() {
	if len(kd.apiServers) < 2 {
		return
	}
	head := kd.apiServers[0]
	kd.apiServers = append(kd.apiServers[1:], head)
}
// updateAPIServersTargetGroup builds the static target group listing every
// configured API server, defaulting the port from the URL scheme when the
// host has none.
func (kd *Discovery) updateAPIServersTargetGroup() *config.TargetGroup {
	group := &config.TargetGroup{
		Source: apiServersTargetGroupName,
		Labels: model.LabelSet{
			roleLabel: model.LabelValue("apiserver"),
		},
	}
	for _, apiServer := range kd.apiServers {
		addr := apiServer.Host
		if _, _, err := net.SplitHostPort(addr); err != nil {
			// No explicit port: fall back to the scheme's default.
			switch apiServer.Scheme {
			case "http":
				addr = net.JoinHostPort(addr, "80")
			case "https":
				addr = net.JoinHostPort(addr, "443")
			}
		}
		group.Targets = append(group.Targets, model.LabelSet{
			model.AddressLabel: model.LabelValue(addr),
			model.SchemeLabel:  model.LabelValue(apiServer.Scheme),
		})
	}
	return group
}
// updateNodesTargetGroup rebuilds the single target group covering all known
// nodes. Each target's address is the node's first reported address plus the
// configured kubelet port, and the node's Kubernetes labels are copied in
// under nodeLabelPrefix.
func (kd *Discovery) updateNodesTargetGroup() *config.TargetGroup {
	kd.nodesMu.RLock()
	defer kd.nodesMu.RUnlock()
	tg := &config.TargetGroup{
		Source: nodesTargetGroupName,
		Labels: model.LabelSet{
			roleLabel: model.LabelValue("node"),
		},
	}
	// Now let's loop through the nodes & add them to the target group with appropriate labels.
	for nodeName, node := range kd.nodes {
		// A node that has not reported any address cannot be scraped; skip it
		// instead of panicking on Addresses[0].
		if len(node.Status.Addresses) == 0 {
			log.Errorf("Skipping node %q: no addresses reported", nodeName)
			continue
		}
		address := fmt.Sprintf("%s:%d", node.Status.Addresses[0].Address, kd.Conf.KubeletPort)
		t := model.LabelSet{
			model.AddressLabel:  model.LabelValue(address),
			model.InstanceLabel: model.LabelValue(nodeName),
		}
		for k, v := range node.ObjectMeta.Labels {
			labelName := strutil.SanitizeLabelName(nodeLabelPrefix + k)
			t[model.LabelName(labelName)] = model.LabelValue(v)
		}
		tg.Targets = append(tg.Targets, t)
	}
	return tg
}
// updateNode applies a single node watch event to the in-memory node map,
// adding/replacing on added/modified and removing on deleted.
func (kd *Discovery) updateNode(node *Node, eventType EventType) {
	kd.nodesMu.Lock()
	defer kd.nodesMu.Unlock()
	name := node.ObjectMeta.Name
	switch eventType {
	case deleted:
		// Deleted - drop the node from the map.
		delete(kd.nodes, name)
	case added, modified:
		// Added/Modified - store the latest version of the node.
		kd.nodes[name] = node
	}
}
// getNodes lists all nodes from the API server and returns them keyed by
// node name, together with the list's resource version (used to start a
// watch from the same point).
func (kd *Discovery) getNodes() (map[string]*Node, string, error) {
res, err := kd.queryAPIServerPath(nodesURL)
if err != nil {
// If we can't list nodes then we can't watch them. Assume this is a misconfiguration
// & return error.
return nil, "", fmt.Errorf("Unable to list Kubernetes nodes: %s", err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, "", fmt.Errorf("Unable to list Kubernetes nodes. Unexpected response: %d %s", res.StatusCode, res.Status)
}
var nodes NodeList
if err := json.NewDecoder(res.Body).Decode(&nodes); err != nil {
// NOTE(review): the decoder has already consumed part of the body, so
// this ReadAll only captures the remainder — the reported body may be
// incomplete.
body, _ := ioutil.ReadAll(res.Body)
return nil, "", fmt.Errorf("Unable to list Kubernetes nodes. Unexpected response body: %s", string(body))
}
// Index by name; take the address of the slice element (not the loop
// variable) so each map entry points at distinct storage.
nodeMap := map[string]*Node{}
for idx, node := range nodes.Items {
nodeMap[node.ObjectMeta.Name] = &nodes.Items[idx]
}
return nodeMap, nodes.ResourceVersion, nil
}
// getServices lists all services from the API server and returns them keyed
// by namespace then service name, together with the list's resource version
// (used to start a watch from the same point).
func (kd *Discovery) getServices() (map[string]map[string]*Service, string, error) {
res, err := kd.queryAPIServerPath(servicesURL)
if err != nil {
// If we can't list services then we can't watch them. Assume this is a misconfiguration
// & return error.
return nil, "", fmt.Errorf("Unable to list Kubernetes services: %s", err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, "", fmt.Errorf("Unable to list Kubernetes services. Unexpected response: %d %s", res.StatusCode, res.Status)
}
var services ServiceList
if err := json.NewDecoder(res.Body).Decode(&services); err != nil {
// NOTE(review): the decoder has already consumed part of the body, so
// this ReadAll only captures the remainder — the reported body may be
// incomplete.
body, _ := ioutil.ReadAll(res.Body)
return nil, "", fmt.Errorf("Unable to list Kubernetes services. Unexpected response body: %s", string(body))
}
// Index namespace -> name; take the address of the slice element (not the
// loop variable) so each map entry points at distinct storage.
serviceMap := map[string]map[string]*Service{}
for idx, service := range services.Items {
namespace, ok := serviceMap[service.ObjectMeta.Namespace]
if !ok {
namespace = map[string]*Service{}
serviceMap[service.ObjectMeta.Namespace] = namespace
}
namespace[service.ObjectMeta.Name] = &services.Items[idx]
}
return serviceMap, services.ResourceVersion, nil
}
// watchNodes watches nodes as they come & go. It first lists all nodes,
// replays them as "added" events, then opens a watch from the list's
// resource version and streams subsequent events. The whole body is retried
// via until() every retryInterval until done is closed.
func (kd *Discovery) watchNodes(events chan interface{}, done <-chan struct{}, retryInterval time.Duration) {
	until(func() {
		nodes, resourceVersion, err := kd.getNodes()
		if err != nil {
			log.Errorf("Cannot initialize nodes collection: %s", err)
			return
		}
		// Reset the known nodes.
		// NOTE(review): this write is not guarded by nodesMu, unlike
		// updateNode — confirm no concurrent reader at this point.
		kd.nodes = map[string]*Node{}
		for _, node := range nodes {
			events <- &nodeEvent{added, node}
		}
		req, err := http.NewRequest("GET", nodesURL, nil)
		if err != nil {
			log.Errorf("Cannot create nodes request: %s", err)
			return
		}
		values := req.URL.Query()
		values.Add("watch", "true")
		values.Add("resourceVersion", resourceVersion)
		req.URL.RawQuery = values.Encode()
		res, err := kd.queryAPIServerReq(req)
		if err != nil {
			log.Errorf("Failed to watch nodes: %s", err)
			return
		}
		defer res.Body.Close()
		if res.StatusCode != http.StatusOK {
			log.Errorf("Failed to watch nodes: %d", res.StatusCode)
			return
		}
		d := json.NewDecoder(res.Body)
		for {
			var event nodeEvent
			if err := d.Decode(&event); err != nil {
				log.Errorf("Watch nodes unexpectedly closed: %s", err)
				return
			}
			select {
			case events <- &event:
			case <-done:
				// Shutdown requested: stop consuming the watch stream.
				// (Previously this case fell through and kept decoding,
				// leaking the goroutine until the connection errored.)
				return
			}
		}
	}, retryInterval, done)
}
// startServiceWatch (re)initializes the service view and runs the service
// and endpoints watchers. It lists current services, emits deletions for
// services that vanished while disconnected, replays the rest as "added"
// events, then watches both services and endpoints until either watcher
// exits. The whole body is retried via until() every retryInterval until
// done is closed.
func (kd *Discovery) startServiceWatch(events chan<- interface{}, done <-chan struct{}, retryInterval time.Duration) {
until(func() {
// We use separate target groups for each discovered service so we'll need to clean up any if they've been deleted
// in Kubernetes while we couldn't connect - small chance of this, but worth dealing with.
existingServices := kd.services
// Reset the known services.
kd.services = map[string]map[string]*Service{}
services, resourceVersion, err := kd.getServices()
if err != nil {
log.Errorf("Cannot initialize services collection: %s", err)
return
}
// Now let's loop through the old services & see if they still exist in here
for oldNSName, oldNS := range existingServices {
if ns, ok := services[oldNSName]; !ok {
// Whole namespace gone: delete every service it contained.
for _, service := range existingServices[oldNSName] {
events <- &serviceEvent{deleted, service}
}
} else {
// Namespace still present: delete only services that disappeared.
for oldServiceName, oldService := range oldNS {
if _, ok := ns[oldServiceName]; !ok {
events <- &serviceEvent{deleted, oldService}
}
}
}
}
// Discard the existing services map for GC.
existingServices = nil
// Replay every currently-known service as an "added" event.
for _, ns := range services {
for _, service := range ns {
events <- &serviceEvent{added, service}
}
}
// Run both watchers from the same resource version; when either returns
// (error or done), Wait unblocks and until() schedules a fresh retry.
var wg sync.WaitGroup
wg.Add(2)
go func() {
kd.watchServices(resourceVersion, events, done)
wg.Done()
}()
go func() {
kd.watchServiceEndpoints(resourceVersion, events, done)
wg.Done()
}()
wg.Wait()
}, retryInterval, done)
}
// watchServices streams service watch events from the given resource
// version into the events channel until the stream breaks or done closes.
func (kd *Discovery) watchServices(resourceVersion string, events chan<- interface{}, done <-chan struct{}) {
	req, err := http.NewRequest("GET", servicesURL, nil)
	if err != nil {
		log.Errorf("Failed to create services request: %s", err)
		return
	}
	// Turn the list endpoint into a watch starting at resourceVersion.
	q := req.URL.Query()
	q.Add("watch", "true")
	q.Add("resourceVersion", resourceVersion)
	req.URL.RawQuery = q.Encode()
	res, err := kd.queryAPIServerReq(req)
	if err != nil {
		log.Errorf("Failed to watch services: %s", err)
		return
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		log.Errorf("Failed to watch services: %d", res.StatusCode)
		return
	}
	// The watch response is an unbounded stream of JSON event objects.
	dec := json.NewDecoder(res.Body)
	for {
		var event serviceEvent
		if err := dec.Decode(&event); err != nil {
			log.Errorf("Watch services unexpectedly closed: %s", err)
			return
		}
		select {
		case events <- &event:
		case <-done:
			return
		}
	}
}
// watchServiceEndpoints watches service endpoints as they come & go,
// streaming endpoints watch events from the given resource version into the
// events channel until the stream breaks or done closes.
func (kd *Discovery) watchServiceEndpoints(resourceVersion string, events chan<- interface{}, done <-chan struct{}) {
	req, err := http.NewRequest("GET", endpointsURL, nil)
	if err != nil {
		log.Errorf("Failed to create service endpoints request: %s", err)
		return
	}
	values := req.URL.Query()
	values.Add("watch", "true")
	values.Add("resourceVersion", resourceVersion)
	req.URL.RawQuery = values.Encode()
	res, err := kd.queryAPIServerReq(req)
	if err != nil {
		log.Errorf("Failed to watch service endpoints: %s", err)
		return
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		log.Errorf("Failed to watch service endpoints: %d", res.StatusCode)
		return
	}
	d := json.NewDecoder(res.Body)
	for {
		var event endpointsEvent
		if err := d.Decode(&event); err != nil {
			log.Errorf("Watch service endpoints unexpectedly closed: %s", err)
			return
		}
		select {
		case events <- &event:
		case <-done:
			// Shutdown requested: stop consuming the watch stream, matching
			// watchServices. (Previously this case fell through and kept
			// decoding, leaking the goroutine until the connection errored.)
			return
		}
	}
}
// updateService applies a service watch event to the service map and returns
// the resulting target-group update (nil when nothing should be published).
func (kd *Discovery) updateService(service *Service, eventType EventType) *config.TargetGroup {
	kd.servicesMu.Lock()
	defer kd.servicesMu.Unlock()
	if eventType == deleted {
		return kd.deleteService(service)
	}
	if eventType == added || eventType == modified {
		return kd.addService(service)
	}
	return nil
}
// deleteService removes a service from the map (pruning its namespace entry
// when empty) and returns an empty target group so downstream consumers drop
// the service's targets. Callers must hold servicesMu.
func (kd *Discovery) deleteService(service *Service) *config.TargetGroup {
	ns, name := service.ObjectMeta.Namespace, service.ObjectMeta.Name
	tg := &config.TargetGroup{Source: serviceSource(service)}
	delete(kd.services[ns], name)
	if len(kd.services[ns]) == 0 {
		delete(kd.services, ns)
	}
	return tg
}
// addService records a service in the map, fetches its current endpoints
// from the API server, and returns the rebuilt target group (nil on any
// fetch/decode failure). Callers must hold servicesMu.
func (kd *Discovery) addService(service *Service) *config.TargetGroup {
	nsName := service.ObjectMeta.Namespace
	ns, exists := kd.services[nsName]
	if !exists {
		ns = map[string]*Service{}
		kd.services[nsName] = ns
	}
	ns[service.ObjectMeta.Name] = service
	// Fetch the service's endpoints so the target group includes them.
	endpointURL := fmt.Sprintf(serviceEndpointsURL, nsName, service.ObjectMeta.Name)
	res, err := kd.queryAPIServerPath(endpointURL)
	if err != nil {
		log.Errorf("Error getting service endpoints: %s", err)
		return nil
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		log.Errorf("Failed to get service endpoints: %d", res.StatusCode)
		return nil
	}
	var eps Endpoints
	if err := json.NewDecoder(res.Body).Decode(&eps); err != nil {
		log.Errorf("Error getting service endpoints: %s", err)
		return nil
	}
	return kd.updateServiceTargetGroup(service, &eps)
}
// updateServiceTargetGroup builds the target group for one service: group
// labels from the service's metadata, one target for the service's cluster
// DNS name (with the first TCP port, if any), and one target per endpoint
// address.
func (kd *Discovery) updateServiceTargetGroup(service *Service, eps *Endpoints) *config.TargetGroup {
	tg := &config.TargetGroup{
		Source: serviceSource(service),
		Labels: model.LabelSet{
			serviceNamespaceLabel: model.LabelValue(service.ObjectMeta.Namespace),
			serviceNameLabel:      model.LabelValue(service.ObjectMeta.Name),
		},
	}
	for k, v := range service.ObjectMeta.Labels {
		labelName := strutil.SanitizeLabelName(serviceLabelPrefix + k)
		tg.Labels[model.LabelName(labelName)] = model.LabelValue(v)
	}
	for k, v := range service.ObjectMeta.Annotations {
		labelName := strutil.SanitizeLabelName(serviceAnnotationPrefix + k)
		tg.Labels[model.LabelName(labelName)] = model.LabelValue(v)
	}
	serviceAddress := service.ObjectMeta.Name + "." + service.ObjectMeta.Namespace + ".svc"
	// Append the first TCP service port if one exists.
	for _, port := range service.Spec.Ports {
		if port.Protocol == ProtocolTCP {
			serviceAddress += fmt.Sprintf(":%d", port.Port)
			break
		}
	}
	t := model.LabelSet{
		model.AddressLabel: model.LabelValue(serviceAddress),
		roleLabel:          model.LabelValue("service"),
	}
	tg.Targets = append(tg.Targets, t)
	// Now let's loop through the endpoints & add them to the target group with appropriate labels.
	for _, ss := range eps.Subsets {
		// A subset without ports cannot yield a scrapeable address; skip it
		// instead of panicking on Ports[0].
		if len(ss.Ports) == 0 {
			continue
		}
		epPort := ss.Ports[0].Port
		for _, addr := range ss.Addresses {
			// JoinHostPort brackets IPv6 literals correctly. The previous
			// check compared the *textual* IP's string length against
			// net.IPv6len (16, the *binary* length), so most IPv6 addresses
			// were left unbracketed and produced malformed host:port strings.
			address := net.JoinHostPort(addr.IP, fmt.Sprintf("%d", epPort))
			t := model.LabelSet{
				model.AddressLabel: model.LabelValue(address),
				roleLabel:          model.LabelValue("endpoint"),
			}
			tg.Targets = append(tg.Targets, t)
		}
	}
	return tg
}
// updateServiceEndpoints refreshes the target group of the service that the
// given endpoints object belongs to. It returns nil when the service is not
// present in the cache.
func (kd *Discovery) updateServiceEndpoints(endpoints *Endpoints, eventType EventType) *config.TargetGroup {
	kd.servicesMu.Lock()
	defer kd.servicesMu.Unlock()
	ns := endpoints.ObjectMeta.Namespace
	name := endpoints.ObjectMeta.Name
	service, known := kd.services[ns][name]
	if !known {
		return nil
	}
	return kd.updateServiceTargetGroup(service, endpoints)
}
// newKubernetesHTTPClient builds an HTTP client for talking to the
// Kubernetes API server, layering TLS, dial-timeout, bearer-token and
// basic-auth round trippers according to the SD configuration. When running
// in-cluster it falls back to the mounted service-account token and CA cert.
func newKubernetesHTTPClient(conf *config.KubernetesSDConfig) (*http.Client, error) {
	bearerTokenFile := conf.BearerTokenFile
	caFile := conf.TLSConfig.CAFile
	if conf.InCluster {
		if len(bearerTokenFile) == 0 {
			bearerTokenFile = serviceAccountToken
		}
		if len(caFile) == 0 {
			// With recent versions, the CA certificate is mounted as a secret
			// but we need to handle older versions too. In this case, don't
			// set the CAFile & the configuration will have to use InsecureSkipVerify.
			if _, err := os.Stat(serviceAccountCACert); err == nil {
				caFile = serviceAccountCACert
			}
		}
	}
	tlsOpts := httputil.TLSOptions{
		InsecureSkipVerify: conf.TLSConfig.InsecureSkipVerify,
		CAFile:             caFile,
		CertFile:           conf.TLSConfig.CertFile,
		KeyFile:            conf.TLSConfig.KeyFile,
	}
	tlsConfig, err := httputil.NewTLSConfig(tlsOpts)
	if err != nil {
		return nil, err
	}
	// Base transport: custom dialer so connection attempts respect the
	// configured request timeout.
	var rt http.RoundTripper = &http.Transport{
		Dial: func(netw, addr string) (c net.Conn, err error) {
			c, err = net.DialTimeout(netw, addr, time.Duration(conf.RequestTimeout))
			return
		},
		TLSClientConfig: tlsConfig,
	}
	// If a bearer token is provided, create a round tripper that will set the
	// Authorization header correctly on each request.
	bearerToken := conf.BearerToken
	if len(bearerToken) == 0 && len(bearerTokenFile) > 0 {
		b, err := ioutil.ReadFile(bearerTokenFile)
		if err != nil {
			return nil, fmt.Errorf("unable to read bearer token file %s: %s", bearerTokenFile, err)
		}
		bearerToken = string(b)
	}
	if len(bearerToken) > 0 {
		rt = httputil.NewBearerAuthRoundTripper(bearerToken, rt)
	}
	// Basic auth, when configured, wraps whatever transport was built above.
	if conf.BasicAuth != nil {
		rt = httputil.NewBasicAuthRoundTripper(conf.BasicAuth.Username, conf.BasicAuth.Password, rt)
	}
	return &http.Client{
		Transport: rt,
	}, nil
}
// serviceSource derives the unique target-group source identifier for a
// service from its namespace and name.
func serviceSource(service *Service) string {
	meta := service.ObjectMeta
	return fmt.Sprintf("%s:%s/%s", sourceServicePrefix, meta.Namespace, meta.Name)
}
// Until loops until stop channel is closed, running f every period.
// f may not be invoked if stop channel is already closed.
func until(f func(), period time.Duration, stopCh <-chan struct{}) {
select {
case <-stopCh:
return
default:
f()
}
for {
select {
case <-stopCh:
return
case <-time.After(period):
f()
}
}
}
| wu8685/prometheus | retrieval/discovery/kubernetes/discovery.go | GO | apache-2.0 | 21,262 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.commons.imaging.common;
import java.util.ArrayList;
import java.util.List;
/**
 * Simple list-backed container for image metadata entries. Each entry is a
 * keyword/text pair; {@link #toString(String)} renders the entries one per
 * line, each indented by the given prefix plus a tab.
 */
public class ImageMetadata implements IImageMetadata {
    private static final String NEWLINE = System.getProperty("line.separator");
    private final List<IImageMetadataItem> items = new ArrayList<IImageMetadataItem>();

    /** Adds a keyword/text pair as a new metadata item. */
    public void add(final String keyword, final String text) {
        add(new Item(keyword, text));
    }

    /** Appends an existing metadata item. */
    public void add(final IImageMetadataItem item) {
        items.add(item);
    }

    /** Returns a defensive copy of the item list. */
    public List<? extends IImageMetadataItem> getItems() {
        return new ArrayList<IImageMetadataItem>(items);
    }

    @Override
    public String toString() {
        return toString(null);
    }

    /**
     * Renders all items separated by platform line breaks. A null prefix is
     * treated as the empty string; each item is rendered with prefix + "\t".
     */
    public String toString(final String prefix) {
        final String effectivePrefix = (prefix == null) ? "" : prefix;
        final StringBuilder buffer = new StringBuilder();
        boolean first = true;
        for (final IImageMetadataItem item : items) {
            if (!first) {
                buffer.append(NEWLINE);
            }
            buffer.append(item.toString(effectivePrefix + "\t"));
            first = false;
        }
        return buffer.toString();
    }

    /** A single immutable keyword/text metadata entry. */
    public static class Item implements IImageMetadataItem {
        private final String keyword;
        private final String text;

        public Item(final String keyword, final String text) {
            this.keyword = keyword;
            this.text = text;
        }

        public String getKeyword() {
            return keyword;
        }

        public String getText() {
            return text;
        }

        @Override
        public String toString() {
            return toString(null);
        }

        /** Renders "keyword: text", prefixed when a prefix is supplied. */
        public String toString(final String prefix) {
            final String line = keyword + ": " + text;
            return (prefix == null) ? line : prefix + line;
        }
    }
}
| windwardadmin/android-awt | src/main/java/org/apache/commons/imaging/common/ImageMetadata.java | Java | apache-2.0 | 2,989 |
<?php
// Logout: destroy the authenticated session, then redirect to the login page.
// The original only sent the redirect header — it neither cleared the session
// (so the user stayed logged in) nor stopped execution after the header.
session_start();
session_unset();
session_destroy();
header("Location: Login.php");
exit;
?>
package io.intercom.api;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
@SuppressWarnings("UnusedDeclaration")
@JsonIgnoreProperties(ignoreUnknown = true)
// Paged collection of Note resources returned by the Intercom API.
// The JSON payload carries the page contents under the "notes" key.
public class NoteCollection extends TypedDataCollection<Note> {

    public NoteCollection() {
    }

    // Fetches the next page of notes using the pagination links held by the
    // base class.
    @Override
    public NoteCollection nextPage() {
        return fetchNextPage(NoteCollection.class);
    }

    // Override exists only to bind the "notes" JSON property to the page
    // accessor; behavior is inherited unchanged.
    @SuppressWarnings("EmptyMethod")
    @JsonProperty("notes")
    @Override
    public List<Note> getPage() {
        return super.getPage();
    }
}
| intercom/intercom-java | intercom-java/src/main/java/io/intercom/api/NoteCollection.java | Java | apache-2.0 | 617 |
/*
* Copyright (C) 2015 The Gravitee team (http://gravitee.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {AfterViewInit, Component, ElementRef, Inject, Input, OnInit, ViewChild} from "@angular/core";
import { ActivatedRoute, Router } from "@angular/router";
import { SnackbarService } from "../../../../services/snackbar.service";
import { DialogService } from "../../../../services/dialog.service";
import { EmailService } from "../../../../services/email.service";
import { NgForm } from "@angular/forms";
import { MAT_DIALOG_DATA, MatDialog, MatDialogRef } from "@angular/material/dialog";
/** Data passed to the email info dialog. */
export interface DialogData {
  rawTemplate: string; // raw template identifier, e.g. "MFA_CHALLENGE"
  template: string;    // lower-cased, space-separated display variant
}
@Component({
  selector: 'app-email',
  templateUrl: './email.component.html',
  styleUrls: ['./email.component.scss']
})
/**
 * Editor for a single email template of a domain (or application).
 * Supports creating, updating, deleting and previewing the template
 * content in an embedded CodeMirror editor / iframe preview.
 */
export class EmailComponent implements OnInit, AfterViewInit {
  private domainId: string;
  private appId: string;
  private defaultEmailContent = `// Custom email...`;
  template: string;
  rawTemplate: string;
  email: any;
  emailName: string;
  // The (' ' + s).slice(1) pattern appears intended to force a fresh string
  // copy so originalEmailContent is independent of later edits — TODO confirm.
  emailContent: string = (' ' + this.defaultEmailContent).slice(1);
  originalEmailContent: string = (' ' + this.emailContent).slice(1);
  emailFound = false;
  formChanged = false;
  config: any = { lineNumbers: true, readOnly: true};
  @ViewChild('editor', { static: true }) editor: any;
  @ViewChild('preview', { static: true }) preview: ElementRef;
  @ViewChild('emailForm', { static: true }) public emailForm: NgForm;
  @Input('createMode') createMode: boolean;
  @Input('editMode') editMode: boolean;
  @Input('deleteMode') deleteMode: boolean;

  constructor(private router: Router,
              private route: ActivatedRoute,
              private emailService: EmailService,
              private snackbarService: SnackbarService,
              private dialogService: DialogService,
              public dialog: MatDialog) { }

  // Resolve route data: existing email (if any), template identifier, and a
  // human-readable name derived from the raw template.
  ngOnInit() {
    this.domainId = this.route.snapshot.data['domain']?.id;
    this.appId = this.route.snapshot.params['appId'];
    this.rawTemplate = this.route.snapshot.queryParams['template'];
    this.email = this.route.snapshot.data['email']
    if (this.email && this.email.content) {
      this.emailContent = this.email.content;
      this.originalEmailContent = (' ' + this.emailContent).slice(1);
      this.emailFound = true;
    } else {
      this.email = {};
      this.email.template = this.rawTemplate
      // Default expiry: 10 minutes for MFA challenges, 24h otherwise.
      this.email.expiresAfter = (this.email.template === 'MFA_CHALLENGE' ? 600 : 86400);
    }
    this.template = this.rawTemplate.toLowerCase().replace(/_/g, ' ');
    this.emailName = this.template.charAt(0).toUpperCase() + this.template.slice(1);
  }

  // Editor read-only state depends on email.enabled, which is only known
  // after ngOnInit; sync it once the view exists.
  ngAfterViewInit(): void {
    this.enableCodeMirror();
  }

  isEnabled() {
    return this.email && this.email.enabled;
  }

  enableEmail(event) {
    this.email.enabled = event.checked;
    this.enableCodeMirror();
    this.formChanged = true;
  }

  // Tab index 1 is the preview tab; re-render it on selection.
  onTabSelectedChanged(e) {
    if (e.index === 1) {
      this.refreshPreview();
    }
  }

  // Write the current template HTML into the preview iframe document.
  refreshPreview() {
    let doc = this.preview.nativeElement.contentDocument || this.preview.nativeElement.contentWindow;
    doc.open();
    doc.write(this.emailContent);
    doc.close();
  }

  onContentChanges(e) {
    if (e !== this.originalEmailContent) {
      this.formChanged = true;
    }
  }

  // Grow the iframe to fit its rendered content.
  resizeIframe() {
    this.preview.nativeElement.style.height = this.preview.nativeElement.contentWindow.document.body.scrollHeight + 'px';
  }

  canEdit(): boolean {
    return this.emailFound ? this.editMode : this.createMode;
  }

  // Create-or-update dispatch based on whether the email already exists.
  save() {
    if (!this.emailFound) {
      this.create();
    } else {
      this.update();
    }
  }

  create() {
    this.email['content'] = this.emailContent;
    this.emailService.create(this.domainId, this.appId, this.email).subscribe(data => {
      this.snackbarService.open('Email created');
      this.emailFound = true;
      this.email = data;
      this.formChanged = false;
      this.emailForm.reset(this.email);
    })
  }

  update() {
    this.email['content'] = this.emailContent;
    this.emailService.update(this.domainId, this.appId, this.email.id, this.email).subscribe(data => {
      this.snackbarService.open('Email updated');
      this.emailFound = true;
      this.email = data;
      this.formChanged = false;
      this.emailForm.reset(this.email);
    })
  }

  // Confirm, delete on the server, then reset local state back to the
  // "no custom email" defaults.
  delete(event) {
    event.preventDefault();
    this.dialogService
      .confirm('Delete email', 'Are you sure you want to delete this email ?')
      .subscribe(res => {
        if (res) {
          this.emailService.delete(this.domainId, this.appId, this.email.id).subscribe(response => {
            this.snackbarService.open('Email deleted');
            this.email = {};
            this.email.template = this.route.snapshot.queryParams['template'];
            this.email.expiresAfter = 86400;
            this.emailContent = (' ' + this.defaultEmailContent).slice(1);
            this.originalEmailContent = (' ' + this.emailContent).slice(1);
            this.emailFound = false;
            this.formChanged = false;
            this.enableCodeMirror();
          });
        }
      });
  }

  openDialog() {
    this.dialog.open(EmailInfoDialog, {
      data: {rawTemplate: this.rawTemplate, template: this.template}
    });
  }

  isFormInvalid() {
    return (this.emailForm.pristine || !this.emailForm.valid) && !this.formChanged;
  }

  // CodeMirror is read-only whenever the email template is disabled.
  private enableCodeMirror(): void {
    this.editor.instance.setOption('readOnly', !this.email.enabled);
  }
}
@Component({
  selector: 'email-info-dialog',
  templateUrl: './dialog/email-info.component.html',
})
/** Informational dialog showing details about the selected email template. */
export class EmailInfoDialog {
  constructor(public dialogRef: MatDialogRef<EmailInfoDialog>, @Inject(MAT_DIALOG_DATA) public data: DialogData) {}
}
| gravitee-io/graviteeio-access-management | gravitee-am-ui/src/app/domain/components/emails/email/email.component.ts | TypeScript | apache-2.0 | 6,286 |
/*
* $Header$
* $Revision: 1228 $
* $Date: 2006-11-08 08:00:22 -0800 (Wed, 08 Nov 2006) $
*
* ====================================================================
*
* Copyright 2000-2002 bob mcwhirter & James Strachan.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * Neither the name of the Jaxen Project nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ====================================================================
* This software consists of voluntary contributions made by many
* individuals on behalf of the Jaxen Project and was originally
* created by bob mcwhirter <bob@werken.com> and
* James Strachan <jstrachan@apache.org>. For more information on the
* Jaxen Project, please see <http://www.jaxen.org/>.
*
* $Id: UnionExpr.java 1228 2006-11-08 16:00:22Z elharo $
*/
package net.arnx.xmlic.internal.org.jaxen.expr;
/**
* Represents an XPath union expression. This is production 18 in the
* <a href="http://www.w3.org/TR/xpath#NT-UnionExpr">XPath 1.0 specification</a>:
*
* <table><tr valign="baseline">
* <td><a name="NT-UnionExpr"></a>[18] </td><td>UnionExpr</td><td> ::= </td><td><a href="http://www.w3.org/TR/xpath#NT-PathExpr">PathExpr</a></td><td></td>
* </tr><tr valign="baseline">
* <td></td><td></td><td></td><td>| <a href="http://www.w3.org/TR/xpath#NT-UnionExpr">UnionExpr</a> '|' <a href="http://www.w3.org/TR/xpath#NT-PathExpr">PathExpr</a>
* </tr></table>
*
*/
public interface UnionExpr extends BinaryExpr
{
    // Marker interface: a union expression adds no operations beyond
    // BinaryExpr; the '|' operator semantics live in the implementing class.
}
| hidekatsu-izuno/xmlic | src/main/java/net/arnx/xmlic/internal/org/jaxen/expr/UnionExpr.java | Java | apache-2.0 | 3,019 |
/*
Copyright 2019 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Text;
using ESRI.ArcGIS.Framework;
using Microsoft.Win32;
namespace RecentFilesCommandsCS
{
/// <summary>
/// Helper class to process recent file lists stored in the registry
/// </summary>
class RecentFilesRegistryHelper
{
const string RecentFileRegistryKeyPath = @"Software\ESRI\Desktop{0}\{1}\Recent File List";
public static string[] GetRecentFiles(IApplication app)
{
List<string> recentFilePaths = new List<string>();
//Read the registry to get the recent file list
string version = ESRI.ArcGIS.RuntimeManager.ActiveRuntime.Version;
string openKey = string.Format(RecentFileRegistryKeyPath, version, app.Name);
RegistryKey recentListKey = Registry.CurrentUser.OpenSubKey(openKey);
if (recentListKey != null)
{
string[] listNames = recentListKey.GetValueNames();
foreach (string name in listNames)
{
string fileName = recentListKey.GetValue(name, string.Empty).ToString();
if (!string.IsNullOrEmpty(fileName))
recentFilePaths.Add(fileName);
}
}
return recentFilePaths.ToArray();
}
}
}
| Esri/arcobjects-sdk-community-samples | Net/Framework/RecentFilesCommands/CSharp/RecentFilesRegistryHelper.cs | C# | apache-2.0 | 1,930 |
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Program stackdriver-reverse-proxy provides a Stackdriver reverse
// proxy that creates traces for the incoming requests, logs request
// details, and reports errors.
package main
import (
"flag"
"fmt"
"log"
"net/http"
"net/http/httputil"
"net/url"
"os"
"go.opencensus.io/exporter/stackdriver"
"go.opencensus.io/exporter/stackdriver/propagation"
"go.opencensus.io/plugin/ochttp"
"go.opencensus.io/stats/view"
"go.opencensus.io/trace"
)
// Command-line flag targets.
// NOTE(review): disableMonitoring and monitoringPeriod are declared but never
// registered as flags nor read anywhere in this file — presumably leftovers;
// confirm before removing.
var (
	projectID string
	listen string
	target string
	tlsCert string
	tlsKey string
	traceFrac float64
	disableMonitoring bool
	monitoringPeriod string
)
// usage is printed by the custom flag.Usage handler installed in main.
const usage = `stackdriver-reverse-proxy [opts...] -target=<host:port>
For example, to start at localhost:6996 to proxy requests to localhost:6060,
  $ stackdriver-reverse-proxy -target=http://localhost:6060
Options:
-http     hostname:port to start the proxy server, by default localhost:6996.
-target   hostname:port where the app server is running.
-project  Google Cloud Platform project ID if running outside of GCP.
Tracing options:
-trace-sampling  Tracing sampling fraction, between 0 and 1.0.
HTTPS options:
-tls-cert TLS cert file to start an HTTPS proxy.
-tls-key  TLS key file to start an HTTPS proxy.
`
// main registers flags, wires the Stackdriver exporter into OpenCensus,
// and serves a tracing reverse proxy in front of -target.
func main() {
	flag.Usage = func() {
		fmt.Println(usage)
	}
	flag.StringVar(&projectID, "project", "", "")
	flag.StringVar(&listen, "http", ":6996", "host:port proxy listens")
	flag.StringVar(&target, "target", "", "target server")
	flag.Float64Var(&traceFrac, "trace-sampling", 1, "sampling fraction for tracing")
	flag.StringVar(&tlsCert, "tls-cert", "", "TLS cert file to start an HTTPS proxy")
	flag.StringVar(&tlsKey, "tls-key", "", "TLS key file to start an HTTPS proxy")
	flag.Parse()

	if target == "" {
		usageExit()
	}

	exporter, err := stackdriver.NewExporter(stackdriver.Options{ProjectID: projectID})
	if err != nil {
		log.Fatal(err)
	}
	view.RegisterExporter(exporter)
	trace.RegisterExporter(exporter)
	view.Subscribe(ochttp.DefaultViews...)
	trace.SetDefaultSampler(trace.ProbabilitySampler(traceFrac))

	// Named backend (not "url") to avoid shadowing the net/url package.
	backend, err := url.Parse(target)
	if err != nil {
		log.Fatalf("Cannot URL parse -target: %v", err)
	}
	proxy := httputil.NewSingleHostReverseProxy(backend)
	proxy.Transport = &ochttp.Transport{
		Propagation: &propagation.HTTPFormat{},
	}

	if tlsCert != "" && tlsKey != "" {
		log.Fatal(http.ListenAndServeTLS(listen, tlsCert, tlsKey, proxy))
	} else {
		log.Fatal(http.ListenAndServe(listen, proxy))
	}
}
// usageExit prints the usage text and terminates with a non-zero status.
func usageExit() {
	flag.Usage()
	os.Exit(1)
}
| GoogleCloudPlatform/stackdriver-reverse-proxy | cmd/stackdriver-reverse-proxy/main.go | GO | apache-2.0 | 3,169 |
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.simplesystemsmanagement.model.transform;
import javax.annotation.Generated;
import com.amazonaws.SdkClientException;
import com.amazonaws.Request;
import com.amazonaws.http.HttpMethodName;
import com.amazonaws.services.simplesystemsmanagement.model.*;
import com.amazonaws.transform.Marshaller;
import com.amazonaws.protocol.*;
import com.amazonaws.protocol.Protocol;
import com.amazonaws.annotation.SdkInternalApi;
/**
* DescribeInventoryDeletionsRequest Marshaller
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
// Generated protocol marshaller: converts a DescribeInventoryDeletionsRequest
// into an AWS-JSON HTTP request for the SSM service. Do not hand-edit
// substantive logic — this class is regenerated by the SDK code generator.
public class DescribeInventoryDeletionsRequestProtocolMarshaller implements
        Marshaller<Request<DescribeInventoryDeletionsRequest>, DescribeInventoryDeletionsRequest> {

    // Static binding of this operation to the AWS JSON protocol: POST /,
    // operation identifier header, payload carried in the body.
    private static final OperationInfo SDK_OPERATION_BINDING = OperationInfo.builder().protocol(Protocol.AWS_JSON).requestUri("/")
            .httpMethodName(HttpMethodName.POST).hasExplicitPayloadMember(false).hasPayloadMembers(true)
            .operationIdentifier("AmazonSSM.DescribeInventoryDeletions").serviceName("AWSSimpleSystemsManagement").build();

    private final com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory;

    public DescribeInventoryDeletionsRequestProtocolMarshaller(com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory) {
        this.protocolFactory = protocolFactory;
    }

    // Marshals the request: null input is rejected, marshalling failures are
    // wrapped in SdkClientException per SDK convention.
    public Request<DescribeInventoryDeletionsRequest> marshall(DescribeInventoryDeletionsRequest describeInventoryDeletionsRequest) {
        if (describeInventoryDeletionsRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            final ProtocolRequestMarshaller<DescribeInventoryDeletionsRequest> protocolMarshaller = protocolFactory.createProtocolMarshaller(
                    SDK_OPERATION_BINDING, describeInventoryDeletionsRequest);
            protocolMarshaller.startMarshalling();
            DescribeInventoryDeletionsRequestMarshaller.getInstance().marshall(describeInventoryDeletionsRequest, protocolMarshaller);
            return protocolMarshaller.finishMarshalling();
        } catch (Exception e) {
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
| jentfoo/aws-sdk-java | aws-java-sdk-ssm/src/main/java/com/amazonaws/services/simplesystemsmanagement/model/transform/DescribeInventoryDeletionsRequestProtocolMarshaller.java | Java | apache-2.0 | 2,897 |
/**
* Copyright 2011-2021 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.runtime.flow;
import java.util.List;
/**
* A List interface for reusing instances of element objects.
<pre><code>
Iterator<Hoge> iter = ...;
ListBuffer<Hoge> hoges = ...;
...
hoges.begin();
while (iter.hasNext()) {
Hoge hoge = iter.next();
if (hoges.isExpandRequired()) {
hoges.expand(new Hoge());
}
hoges.advance().set(hoge);
}
hoges.end();
// use hoges as List
hoges.shrink();
</code></pre>
* @param <E> the element type
*/
public interface ListBuffer<E> extends List<E> {

    /**
     * Begins changing the list buffer.
     * Initially, the internal cursor is on the head of this buffer, and clients can move it to the next element
     * by invoking {@link #advance()}.
     * After changing the buffer, then clients must invoke {@link #end()} and the buffer can be used as the
     * unmodifiable list.
     * @see #advance()
     * @throws BufferException if failed to prepare buffer
     */
    void begin();

    /**
     * Ends changing the list buffer.
     * After this, clients should not change the buffer contents.
     * If clients want to change the buffer, must invoke {@link #begin()} once more.
     * @throws BufferException if failed to prepare buffer
     */
    void end();

    /**
     * Returns whether a new element object is required for the buffer or not.
     * If it is required, clients must use {@link #expand(Object)} to add a new object before invoking
     * {@link #advance()}.
     * This method must be invoked between {@link #begin()} and {@link #end()}.
     * @return {@code true} if a new element object is required, otherwise {@code false}
     * @see #expand(Object)
     * @throws BufferException if failed to prepare buffer
     */
    boolean isExpandRequired();

    /**
     * Adds a new element object onto the tail of this buffer.
     * This method must be invoked between {@link #begin()} and {@link #end()}.
     * @param value the object
     * @throws IndexOutOfBoundsException if expand is not required (optional operation)
     * @throws BufferException if failed to prepare buffer
     * @see #isExpandRequired()
     */
    void expand(E value);

    /**
     * Returns the next element object on the internal cursor, and then moves the cursor to the next element.
     * This method must be invoked between {@link #begin()} and {@link #end()}.
     * @return the next element object
     * @throws IndexOutOfBoundsException if the next element object is not prepared
     * @throws BufferException if failed to prepare buffer
     * @see #isExpandRequired()
     * @see #expand(Object)
     */
    E advance();

    /**
     * Shrinks this buffer, releasing excess element objects kept for reuse.
     */
    void shrink();
}
| asakusafw/asakusafw | core-project/asakusa-runtime/src/main/java/com/asakusafw/runtime/flow/ListBuffer.java | Java | apache-2.0 | 3,339 |
/**
* Copyright (c) 2012-2013, Michael Yang 杨福海 (www.yangfuhai.com).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.tsz.afinal.bitmap.core;
import net.tsz.afinal.utils.Utils;
import android.graphics.Bitmap;
public class BaseMemoryCacheImpl implements IMemoryCache {
private final LruMemoryCache<String, Bitmap> mMemoryCache;
public BaseMemoryCacheImpl(int size) {
mMemoryCache = new LruMemoryCache<String, Bitmap>(size) {
protected int sizeOf(String key, Bitmap bitmap) {
return Utils.getBitmapSize(bitmap);
}
};
}
public void put(String key, Bitmap bitmap) {
mMemoryCache.put(key, bitmap);
}
public Bitmap get(String key) {
return mMemoryCache.get(key);
}
public void evictAll() {
mMemoryCache.evictAll();
}
public void remove(String key) {
mMemoryCache.remove(key);
}
}
| luoqii/osgi_android_demos_from_network | apkplug/apkplugDemo/bundle/BundleDemoApkplugService/src/net/tsz/afinal/bitmap/core/BaseMemoryCacheImpl.java | Java | apache-2.0 | 1,471 |
using UnityEngine;
using System.Collections;
/// <summary>
/// Level-7 monkey controller: runs A* over the level grid and walks the
/// character along the found path, switching animator transitions per
/// movement direction. (Comments translated from the original Chinese.)
/// </summary>
public class monkey7 : MonoBehaviour {

	public float speed = 100f;// hero movement speed
	public Vector2 current_position;
	Animator animator;
	public Grid grid = new Grid(14,10);
	public AStar astar;
	public float time = 0;
	public bool finish = false;// whether the walk has finished
	public bool walk = false;// whether walking has started
	private bool findroad = false;// whether a path has been found
	private int hasdone; // number of path nodes already traversed
	public background7 bg;// reference to the map held by the background script

	// Use this for initialization
	void Start () {
		GameObject backgd = GameObject.Find ("background"); // fetch the background7 component that owns the level map
		bg = (background7)backgd.GetComponent (typeof(background7));
	}

	// Initialize the pathfinding grid from the level map.
	public void InitGame()
	{
		hasdone = 1;
		astar = new AStar();
		animator = GetComponent<Animator> ();
		for (int i =0; i< 14; i++) {// mark each cell walkable/blocked from the level map (non-zero = blocked)
			for (int j =0; j< 10; j++) {
				if(bg.level7.map [i, j]!= 0)
					grid.SetWalkbale(i,j,false);
				else
					grid.SetWalkbale(i,j,true);
			}
		}
		grid.SetStartNode (10,6);
		grid.SetEndNode (7,4);
	}

	// Drives the walk: kicks off pathfinding when the start button is
	// pressed, then steps node-by-node along the A* path each physics tick.
	void FixedUpdate()
	{
		if(bg.stspt.click == true && walk == false) {// start button pressed
			InitGame();
			walk = true;
			if(astar.findPath(grid) == true)
			{
				findroad = true;
				//print(astar._path.Count);
			}
		}
		else if (walk == true && hasdone < astar._path.Count) {
			time += Time.deltaTime;
			Vector2 des;// destination
			// NOTE(review): 64 looks like the tile size in pixels and 32/50
			// the sprite offset within a tile — confirm against the level art.
			des.x = astar._path[hasdone].x*64 + 32;
			des.y = astar._path[hasdone].y*64 + 50;
			if(WalkTo (des) == true)
			{
				hasdone++;
			}
		}
		if (astar!=null && hasdone >= astar._path.Count)
		{
			finish = true;
			walk = false;
			bg.stspt.click = false;
			//Destroy(gameObject);
			transform.localScale = new Vector3(0,0,0);
		}
	}

	// When turning right: enable all animator transitions leading right and
	// disable those leaving the right-facing state.
	void TurnRight()
	{
		//animator.SetInteger ("etorr", 1);
		animator.SetInteger ("rbtorr", 1);
		animator.SetInteger ("rftorr", 1);
		animator.SetInteger ("rrtorb", 0);
		animator.SetInteger ("rrtorf", 0);
	}

	// Same pattern as TurnRight, for the left-facing state.
	void TurnLeft()
	{
		//animator.SetInteger ("etorl", 1);
		animator.SetInteger ("rbtorl", 1);
		animator.SetInteger ("rftorl", 1);
		animator.SetInteger ("rltorb", 0);
		animator.SetInteger ("rltorf", 0);
	}

	// Same pattern, for the back-facing (upward) state.
	void TurnBack()
	{
		//animator.SetInteger ("etorb", 1);
		animator.SetInteger ("rftorb", 1);
		animator.SetInteger ("rrtorb", 1);
		animator.SetInteger ("rltorb", 1);
		animator.SetInteger ("rbtorl", 0);
		animator.SetInteger ("rbtorr", 0);
		animator.SetInteger ("rbtorf", 0);
	}

	// Same pattern, for the front-facing (downward) state.
	void TurnFront()
	{
		//animator.SetInteger ("etorf", 1);
		animator.SetInteger ("rbtorf", 1);
		animator.SetInteger ("rrtorf", 1);
		animator.SetInteger ("rltorf", 1);
		animator.SetInteger ("rftorr", 0);
		animator.SetInteger ("rftorl", 0);
		animator.SetInteger ("rftorb", 0);
	}

	void GoRight(Vector2 CurPos)
	{
		TurnRight ();
		// NOTE(review): Lerp with t = Time.deltaTime makes movement
		// frame-rate dependent — confirm this is intentional.
		Vector2 target = Vector2.right*speed+ CurPos;
		transform.position = Vector2.Lerp( CurPos, target, Time.deltaTime );
	}

	void GoLeft(Vector2 CurPos)
	{
		TurnLeft ();
		Vector2 target = -Vector2.right*speed + CurPos;
		transform.position = Vector2.Lerp( CurPos, target, Time.deltaTime );
	}

	void GoBack(Vector2 CurPos)
	{
		TurnBack ();
		Vector2 target = Vector2.up*speed + CurPos;
		transform.position = Vector2.Lerp( CurPos, target, Time.deltaTime );
	}

	void GoFront(Vector2 CurPos)
	{
		TurnFront ();
		Vector2 target = -Vector2.up*speed + CurPos;
		transform.position = Vector2.Lerp( CurPos, target, Time.deltaTime );
	}

	// Move one step toward target, choosing the axis with the larger
	// remaining distance; returns true once within 5 units on both axes.
	bool WalkTo(Vector2 target)
	{
		current_position = transform.position;
		if (current_position.x < target.x && Mathf.Abs(current_position.x - target.x) >5 ) {
			GoRight (current_position);
			return false;
		}
		else if (current_position.x > target.x && Mathf.Abs(current_position.x - target.x) >5) {
			GoLeft (current_position);
			return false;
		}
		else if (current_position.y < target.y && Mathf.Abs(current_position.y - target.y) >5) {
			GoBack (current_position);
			return false;
		}
		else if (current_position.y > target.y && Mathf.Abs(current_position.y - target.y) >5) {
			GoFront(current_position);
			return false;
		}
		return true;
	}
}
| renmaoting/maze-storm | maze storm/Assets/script/level7/monkey7.cs | C# | apache-2.0 | 4,277 |
package org.develnext.jphp.swing.loader.support.propertyreaders;
import org.develnext.jphp.swing.loader.support.PropertyReader;
import org.develnext.jphp.swing.loader.support.Value;
import javax.swing.*;
import java.util.HashMap;
import java.util.Map;
/**
 * Property readers for {@link JToolBar}: maps attribute names
 * ("floatable", "vertical", "rollover") to the corresponding setters.
 */
public class JToolBarPropertyReaders extends PropertyReaders<JToolBar> {
    // Populated via a private factory instead of double-brace initialization,
    // which creates an anonymous inner class that retains a reference to the
    // enclosing instance (a well-known memory/serialization anti-pattern).
    protected final Map<String, PropertyReader<JToolBar>> register = buildRegister();

    private static Map<String, PropertyReader<JToolBar>> buildRegister() {
        Map<String, PropertyReader<JToolBar>> readers = new HashMap<String, PropertyReader<JToolBar>>();
        readers.put("floatable", FLOATABLE);
        readers.put("vertical", VERTICAL);
        readers.put("rollover", ROLLOVER);
        return readers;
    }

    @Override
    protected Map<String, PropertyReader<JToolBar>> getRegister() {
        return register;
    }

    @Override
    public Class<JToolBar> getRegisterClass() {
        return JToolBar.class;
    }

    /** Maps the "floatable" attribute to {@link JToolBar#setFloatable}. */
    public final static PropertyReader<JToolBar> FLOATABLE = new PropertyReader<JToolBar>() {
        @Override
        public void read(JToolBar component, Value value) {
            component.setFloatable(value.asBoolean());
        }
    };

    /** Maps the "vertical" attribute to the toolbar orientation. */
    public final static PropertyReader<JToolBar> VERTICAL = new PropertyReader<JToolBar>() {
        @Override
        public void read(JToolBar component, Value value) {
            component.setOrientation(value.asBoolean() ? SwingConstants.VERTICAL : SwingConstants.HORIZONTAL);
        }
    };

    /** Maps the "rollover" attribute to {@link JToolBar#setRollover}. */
    public final static PropertyReader<JToolBar> ROLLOVER = new PropertyReader<JToolBar>() {
        @Override
        public void read(JToolBar component, Value value) {
            component.setRollover(value.asBoolean());
        }
    };
}
| livingvirus/jphp | jphp-swing-ext/src/org/develnext/jphp/swing/loader/support/propertyreaders/JToolBarPropertyReaders.java | Java | apache-2.0 | 1,566 |
package android.nfc;
/*
* #%L
* Matos
* $Id:$
* $HeadURL:$
* %%
* Copyright (C) 2010 - 2014 Orange SA
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
/**
 * Profile stub of Android's NfcEvent: exposes only the {@code nfcAdapter}
 * field (always {@code null} here) and a package-private constructor that
 * does nothing.
 */
public final class NfcEvent
{
// Fields
public final NfcAdapter nfcAdapter = (NfcAdapter) null;
// Constructors
NfcEvent(NfcAdapter arg1){
}
}
| Orange-OpenSource/matos-profiles | matos-android/src/main/java/android/nfc/NfcEvent.java | Java | apache-2.0 | 842 |
define(function (require, exports) {
    var ko = require('knockout');

    // Register every cohort-definition viewer component with Knockout.
    // Each require() keeps a literal path so AMD build tools (r.js) can
    // still trace the dependencies statically.
    ko.components.register('cohort-expression-viewer', require('./components/CohortExpressionViewer'));
    ko.components.register('criteria-group-viewer', require('./components/CriteriaGroup'));
    ko.components.register('condition-occurrence-criteria-viewer', require('./components/ConditionOccurrence'));
    ko.components.register('condition-era-criteria-viewer', require('./components/ConditionEra'));
    ko.components.register('drug-exposure-criteria-viewer', require('./components/DrugExposure'));
    ko.components.register('drug-era-criteria-viewer', require('./components/DrugEra'));
    ko.components.register('dose-era-criteria-viewer', require('./components/DoseEra'));
    ko.components.register('procedure-occurrence-criteria-viewer', require('./components/ProcedureOccurrence'));
    ko.components.register('observation-criteria-viewer', require('./components/Observation'));
    ko.components.register('visit-occurrence-criteria-viewer', require('./components/VisitOccurrence'));
    ko.components.register('device-exposure-criteria-viewer', require('./components/DeviceExposure'));
    ko.components.register('measurement-criteria-viewer', require('./components/Measurement'));
    ko.components.register('observation-period-criteria-viewer', require('./components/ObservationPeriod'));
    ko.components.register('specimen-criteria-viewer', require('./components/Specimen'));
    ko.components.register('death-criteria-viewer', require('./components/Death'));
    ko.components.register('numeric-range-viewer', require('./components/NumericRange'));
    ko.components.register('date-range-viewer', require('./components/DateRange'));
    ko.components.register('window-input-viewer', require('./components/WindowInput'));
    ko.components.register('text-filter-viewer', require('./components/TextFilter'));
    ko.components.register('concept-list-viewer', require('./components/ConceptList'));
    ko.components.register('conceptset-reference', require('./components/ConceptSetReference'));
    ko.components.register('conceptset-viewer', require('./components/ConceptSetViewer'));
});
| OHDSI/Circe | js/modules/cohortdefinitionviewer/main.js | JavaScript | apache-2.0 | 2,914 |
/*
* Copyright 2010 John Hopper
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.jps.lioc.context;
/**
*
* @author zinic
*/
public interface ContextReferenceAliaser {

    /**
     * Creates a context sensitive reference alias for the registered element.
     *
     * @param referenceAlias the alias name under which the element will be
     *                       reachable in the context
     */
    void as(String referenceAlias);
}
| zinic/jliocc | src/main/java/net/jps/lioc/context/ContextReferenceAliaser.java | Java | apache-2.0 | 860 |
package com.sushe.service;
import com.sushe.entity.School;
import java.util.List;
/**
* Created by jiangbin on 15/4/23.
*/
/**
 * Lookup operations for {@link School} records.
 */
public interface SchoolService {

    /**
     * Returns all schools.
     *
     * @return every school record
     */
    List<School> selectAll();

    /**
     * Finds a school by its name.
     *
     * @param name the school name to match
     * @return the matching school (presumably {@code null} when no match —
     *         confirm with the implementing class)
     */
    School selectByName(String name);
}
| Guitjerry/schoolApp | src/com/sushe/service/SchoolService.java | Java | apache-2.0 | 296 |
/*
* Copyright (C) 2011 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.sop4j.base.google.common.util.concurrent;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import javax.annotation.Nullable;
/**
* A callback for accepting the results of a {@link java.util.concurrent.Future}
* computation asynchronously.
*
* <p>To attach to a {@link ListenableFuture} use {@link Futures#addCallback}.
*
* @author Anthony Zana
* @since 10.0
*/
public interface FutureCallback<V> {
  /**
   * Invoked with the result of the {@code Future} computation when it is
   * successful.
   *
   * @param result the value the future completed with (may be null)
   */
  void onSuccess(@Nullable V result);
  /**
   * Invoked when a {@code Future} computation fails or is canceled.
   *
   * <p>If the future's {@link Future#get() get} method throws an {@link
   * ExecutionException}, then the cause is passed to this method. Any other
   * thrown object is passed unaltered.
   *
   * @param t the cause of the failure
   */
  void onFailure(Throwable t);
}
| wspeirs/sop4j-base | src/main/java/com/sop4j/base/google/common/util/concurrent/FutureCallback.java | Java | apache-2.0 | 1,509 |
/**
* Copyright 2012 GroupDocs.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.groupdocs.sdk.common;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.text.ParseException;
import javax.ws.rs.core.MultivaluedMap;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.core.header.ContentDisposition;
/**
 * Couples a downloaded entity {@link InputStream} with file metadata
 * (name, size, content type) recovered from an HTTP response.
 */
public class FileStream {
    private InputStream inputStream;
    private String contentType;
    private String fileName;
    private long size = -1;

    /** Wraps a bare stream; no metadata is available. */
    public FileStream(InputStream inputStream) {
        this.inputStream = inputStream;
    }

    /**
     * Builds a stream from a Jersey response: content type from the entity
     * media type, file name and size from the Content-Disposition header,
     * falling back to the request URL's last path segment and the response
     * length respectively.
     */
    public FileStream(String requestUri, ClientResponse response) {
        this.inputStream = response.getEntityInputStream();
        if (response.getType() != null) {
            this.contentType = response.getType().toString();
        }
        MultivaluedMap<String, String> headers = response.getHeaders();
        try {
            // Content-Disposition syntax: http://www.ietf.org/rfc/rfc2183.txt
            ContentDisposition disposition = new ContentDisposition(
                    headers.getFirst("Content-Disposition"));
            this.fileName = disposition.getFileName() != null
                    ? disposition.getFileName()
                    : getFileNameFromUrl(requestUri);
            this.size = disposition.getSize() != 0
                    ? disposition.getSize()
                    : response.getLength();
        } catch (ParseException e) {
            // Header absent or malformed: keep defaults (null name, size -1).
        }
    }

    /** Last path segment of the URI, or {@code null} if the URI is invalid. */
    private String getFileNameFromUrl(String requestUri) {
        try {
            String path = new URL(requestUri).getPath();
            return path.substring(path.lastIndexOf('/') + 1);
        } catch (MalformedURLException e) {
            return null;
        }
    }

    public InputStream getInputStream() {
        return inputStream;
    }

    public void setInputStream(InputStream inputStream) {
        this.inputStream = inputStream;
    }

    public String getFileName() {
        return fileName;
    }

    public void setFileName(String fileName) {
        this.fileName = fileName;
    }

    public long getSize() {
        return size;
    }

    public void setSize(long size) {
        this.size = size;
    }

    public String getContentType() {
        return contentType;
    }

    public void setContentType(String contentType) {
        this.contentType = contentType;
    }
}
| liosha2007/temporary-groupdocs-java-sdk | src/main/java/com/groupdocs/sdk/common/FileStream.java | Java | apache-2.0 | 2,541 |
#
# Copyright (c) 2017 Juniper Networks, Inc. All rights reserved.
#
import argparse, os, ConfigParser, sys, re
from pysandesh.sandesh_base import *
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from sandesh_common.vns.constants import SERVICE_ALARM_GENERATOR, \
ServicesDefaultConfigurationFiles
class CfgParser(object):
    """Configuration parser for contrail-alarm-gen.

    Merges (in increasing precedence) built-in defaults, the ini-style
    conf file sections (DEFAULTS, REDIS, CONFIGDB, sandesh), and command
    line arguments, then exposes the result through accessor methods.
    Python 2 code (uses the ConfigParser module).
    """

    def __init__(self, argv):
        # argv: raw option string; falls back to this process's own argv.
        self._devices = []
        self._args = None
        self.__pat = None
        self._argv = argv or ' '.join(sys.argv[1:])

    def parse(self):
        '''
        command line example
contrail-alarm-gen  --log_level SYS_DEBUG
                    --logging_level DEBUG
                    --log_category test
                    --log_file <stdout>
                    --use_syslog
                    --syslog_facility LOG_USER
                    --worker_id 0
                    --partitions 5
                    --redis_password
                    --http_server_port 5995
                    --redis_server_port 6379
                    --redis_uve_list 127.0.0.1:6379
                    --alarmgen_list 127.0.0.1:0
                    --kafka_broker_list 127.0.0.1:9092
                    --zk_list 127.0.0.1:2181
                    --rabbitmq_server_list 127.0.0.1:5672
                    --conf_file /etc/contrail/contrail-alarm-gen.conf
        '''
        # Source any specified config/ini file
        # Turn off help, so we print all options in response to -h
        conf_parser = argparse.ArgumentParser(add_help=False)

        conf_parser.add_argument("-c", "--conf_file", action="append",
            help="Specify config file", metavar="FILE",
            default=ServicesDefaultConfigurationFiles.get(
                SERVICE_ALARM_GENERATOR, None))
        args, remaining_argv = conf_parser.parse_known_args(self._argv.split())

        # Built-in defaults; overridden by conf file then command line.
        defaults = {
            'host_ip' : '127.0.0.1',
            'collectors' : [],
            'kafka_broker_list' : ['127.0.0.1:9092'],
            'log_local' : False,
            'log_level' : SandeshLevel.SYS_DEBUG,
            'log_category' : '',
            'log_file' : Sandesh._DEFAULT_LOG_FILE,
            'use_syslog' : False,
            'syslog_facility' : Sandesh._DEFAULT_SYSLOG_FACILITY,
            'http_server_port' : 5995,
            'worker_id' : '0',
            'partitions' : 15,
            'zk_list' : None,
            'alarmgen_list' : ['127.0.0.1:0'],
            'cluster_id' :'',
        }
        defaults.update(SandeshConfig.get_default_options(['DEFAULTS']))
        # Defaults for the [REDIS] conf-file section.
        redis_opts = {
            'redis_server_port' : 6379,
            'redis_password' : None,
            'redis_uve_list' : ['127.0.0.1:6379'],
        }
        # Defaults for the [CONFIGDB] conf-file section (rabbitmq + cassandra).
        configdb_opts = {
            'rabbitmq_server_list': None,
            'rabbitmq_port': 5672,
            'rabbitmq_user': 'guest',
            'rabbitmq_password': 'guest',
            'rabbitmq_vhost': None,
            'rabbitmq_ha_mode': False,
            'rabbitmq_use_ssl': False,
            'kombu_ssl_version': '',
            'kombu_ssl_keyfile': '',
            'kombu_ssl_certfile': '',
            'kombu_ssl_ca_certs': '',
            'config_db_server_list': None,
            'config_db_username': None,
            'config_db_password': None
        }
        sandesh_opts = SandeshConfig.get_default_options()

        # Overlay values from the conf file(s), section by section.
        config = None
        if args.conf_file:
            config = ConfigParser.SafeConfigParser()
            config.optionxform = str
            config.read(args.conf_file)
            if 'DEFAULTS' in config.sections():
                defaults.update(dict(config.items('DEFAULTS')))
            if 'REDIS' in config.sections():
                redis_opts.update(dict(config.items('REDIS')))
            if 'CONFIGDB' in config.sections():
                configdb_opts.update(dict(config.items('CONFIGDB')))
            SandeshConfig.update_options(sandesh_opts, config)
        # Override with CLI options
        # Don't surpress add_help here so it will handle -h
        parser = argparse.ArgumentParser(
            # Inherit options from config_parser
            parents=[conf_parser],
            # print script description with -h/--help
            description=__doc__,
            # Don't mess with format of description
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        )
        defaults.update(redis_opts)
        defaults.update(configdb_opts)
        defaults.update(sandesh_opts)
        parser.set_defaults(**defaults)
        parser.add_argument("--host_ip",
            help="Host IP address")
        parser.add_argument("--collectors",
            help="List of Collector IP addresses in ip:port format",
            nargs="+")
        parser.add_argument(
            "--log_file",
            help="Filename for the logs to be written to")
        parser.add_argument("--log_local", action="store_true",
            help="Enable local logging of sandesh messages")
        parser.add_argument(
            "--log_category",
            help="Category filter for local logging of sandesh messages")
        parser.add_argument(
            "--log_level",
            help="Severity level for local logging of sandesh messages")
        parser.add_argument("--use_syslog",
            action="store_true",
            help="Use syslog for logging")
        parser.add_argument("--syslog_facility",
            help="Syslog facility to receive log lines")
        parser.add_argument("--http_server_port", type=int,
            help="introspect server port")
        parser.add_argument("--worker_id",
            help="Worker Id")
        parser.add_argument("--partitions", type=int,
            help="Number of partitions for hashing UVE keys")
        parser.add_argument("--redis_server_port",
            type=int,
            help="Redis server port")
        parser.add_argument("--redis_password",
            help="Redis server password")
        parser.add_argument("--kafka_broker_list",
            help="List of bootstrap kafka brokers in ip:port format",
            nargs="+")
        parser.add_argument("--zk_list",
            help="List of zookeepers in ip:port format",
            nargs="+")
        parser.add_argument("--rabbitmq_server_list", type=str,
            help="List of Rabbitmq server ip address separated by comma")
        parser.add_argument("--rabbitmq_port",
            help="Rabbitmq server port")
        parser.add_argument("--rabbitmq_user",
            help="Username for Rabbitmq")
        parser.add_argument("--rabbitmq_password",
            help="Password for Rabbitmq")
        parser.add_argument("--rabbitmq_vhost",
            help="vhost for Rabbitmq")
        parser.add_argument("--rabbitmq_ha_mode",
            action="store_true",
            help="True if the rabbitmq cluster is mirroring all queue")
        parser.add_argument("--config_db_server_list",
            help="List of cassandra servers in ip:port format",
            nargs='+')
        parser.add_argument("--config_db_username",
            help="Cassandra user name")
        parser.add_argument("--config_db_password",
            help="Cassandra password")
        parser.add_argument("--redis_uve_list",
            help="List of redis-uve in ip:port format. For internal use only",
            nargs="+")
        parser.add_argument("--alarmgen_list",
            help="List of alarmgens in ip:inst format. For internal use only",
            nargs="+")
        parser.add_argument("--cluster_id",
            help="Analytics Cluster Id")
        SandeshConfig.add_parser_arguments(parser)
        self._args = parser.parse_args(remaining_argv)
        # Values that came from the conf file are plain strings; split them
        # into lists so both sources end up with the same shape.
        if type(self._args.collectors) is str:
            self._args.collectors = self._args.collectors.split()
        if type(self._args.kafka_broker_list) is str:
            self._args.kafka_broker_list= self._args.kafka_broker_list.split()
        if type(self._args.zk_list) is str:
            self._args.zk_list= self._args.zk_list.split()
        if type(self._args.redis_uve_list) is str:
            self._args.redis_uve_list = self._args.redis_uve_list.split()
        if type(self._args.alarmgen_list) is str:
            self._args.alarmgen_list = self._args.alarmgen_list.split()
        if type(self._args.config_db_server_list) is str:
            self._args.config_db_server_list = \
                self._args.config_db_server_list.split()
        self._args.conf_file = args.conf_file

    def _pat(self):
        # Lazily-compiled splitter: comma (with optional spaces) or spaces.
        if self.__pat is None:
           self.__pat = re.compile(', *| +')
        return self.__pat

    def _mklist(self, s):
        # Split a string into a list using the shared splitter pattern.
        return self._pat().split(s)

    def redis_uve_list(self):
        """Return the redis-uve endpoints (ip:port strings)."""
        return self._args.redis_uve_list

    def alarmgen_list(self):
        """Return the alarmgen endpoints (ip:inst strings)."""
        return self._args.alarmgen_list

    def collectors(self):
        """Return the Collector endpoints (ip:port strings)."""
        return self._args.collectors

    def kafka_broker_list(self):
        """Return the kafka bootstrap brokers (ip:port strings)."""
        return self._args.kafka_broker_list

    def zk_list(self):
        """Return the zookeeper endpoints (ip:port strings)."""
        return self._args.zk_list;

    def log_local(self):
        """True when local sandesh logging is enabled."""
        return self._args.log_local

    def log_category(self):
        """Category filter for local sandesh logging."""
        return self._args.log_category

    def log_level(self):
        """Severity threshold for local sandesh logging."""
        return self._args.log_level

    def log_file(self):
        """Log file path."""
        return self._args.log_file

    def use_syslog(self):
        """True when logging should go to syslog."""
        return self._args.use_syslog

    def syslog_facility(self):
        """Syslog facility name."""
        return self._args.syslog_facility

    def http_port(self):
        """Introspect (HTTP) server port."""
        return self._args.http_server_port

    def worker_id(self):
        """Worker id of this alarmgen instance."""
        return self._args.worker_id

    def partitions(self):
        """Number of partitions used for hashing UVE keys."""
        return self._args.partitions

    def redis_password(self):
        """Redis server password (None when unauthenticated)."""
        return self._args.redis_password

    def redis_server_port(self):
        """Redis server port."""
        return self._args.redis_server_port

    def host_ip(self):
        """IP address of this host."""
        return self._args.host_ip

    def kafka_prefix(self):
        """Cluster id, used as the kafka topic prefix."""
        return self._args.cluster_id

    def rabbitmq_params(self):
        """Return rabbitmq connection settings as a dict."""
        return {'servers': self._args.rabbitmq_server_list,
                'port': self._args.rabbitmq_port,
                'user': self._args.rabbitmq_user,
                'password': self._args.rabbitmq_password,
                'vhost': self._args.rabbitmq_vhost,
                'ha_mode': self._args.rabbitmq_ha_mode,
                'use_ssl': self._args.rabbitmq_use_ssl,
                'ssl_version': self._args.kombu_ssl_version,
                'ssl_keyfile': self._args.kombu_ssl_keyfile,
                'ssl_certfile': self._args.kombu_ssl_certfile,
                'ssl_ca_certs': self._args.kombu_ssl_ca_certs}

    def cassandra_params(self):
        """Return cassandra (config db) connection settings as a dict."""
        return {'servers': self._args.config_db_server_list,
                'user': self._args.config_db_username,
                'password': self._args.config_db_password,
                'cluster_id': self._args.cluster_id}
    # end cassandra_params

    def sandesh_config(self):
        """Return the SandeshConfig built from the parsed arguments."""
        return SandeshConfig.from_parser_arguments(self._args)
| nischalsheth/contrail-controller | src/opserver/alarmgen_cfg.py | Python | apache-2.0 | 11,029 |
<?php
// This file is part of Moodle - http://moodle.org/
//
// Moodle is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Moodle is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Moodle. If not, see <http://www.gnu.org/licenses/>.
/**
* Strings for component 'book', language 'az', branch 'MOODLE_22_STABLE'
*
* @package book
* @copyright 1999 onwards Martin Dougiamas {@link http://moodle.com}
* @license http://www.gnu.org/copyleft/gpl.html GNU GPL v3 or later
*/
defined('MOODLE_INTERNAL') || die();

// Azerbaijani ('az') language strings for the Book module.
// Keys mirror the English language pack; values are user-facing and must
// stay in Azerbaijani.
$string['addafter'] = 'Yeni fəsil əlavə edin';
$string['book:edit'] = 'Kitabın fəsillərini redaktə edin';
$string['book:read'] = 'Kitabı oxuyun';
$string['book:viewhiddenchapters'] = 'Kitabın gizli fəsillərinə baxış keçirilsin';
$string['chapterscount'] = 'Fəsillərin sayı';
$string['chaptertitle'] = 'Fəsillərin adı';
$string['confchapterdelete'] = 'Siz bu fəsili pozmağa əminsiniz?';
$string['confchapterdeleteall'] = 'Siz bu fəsili və onun məzmununu silməkdə əminsiniz?';
$string['content'] = 'Məzmun';
$string['customtitles'] = 'Qeyri-standart başlıqlar';
$string['customtitles_help'] = 'Fəsillərin qeyd olunmuş adları yalnız başlıqlarda göstərilmişdir';
$string['editingchapter'] = 'Fəslin redaktəsi';
$string['errorchapter'] = 'Kitabın fəsiləsinin oxunması zamanı səhv baş vermişdir.';
$string['faq'] = 'FAQ kitabçası (tez-tez verilən suallar)';
$string['faq_help'] = '* Nə üçün yalnız iki səviyyə? *
Adətən iki səviyyə bütün kitablar üçün kifayətdir. Üç səviyyə sənədin strukturunun korlanmasına səbəb ola bilər. Kitab modulu kiçik həcmli çoxsəhifəli tədris vəsaitinin yaradılması üçün nəzərdə tutulmuşdur. Daha böyük həcmli sənədlər üçün PDF formatından istifadə etmək məsləhət görülür. PDF fayllarının yaradılmasının ən asan yolu virtual printerlərdən istifadə etməkdir (bax. <a href="http://sector7g.wurzel6.de/pdfcreator/index_en.htm" target="_blank">PDFCreator</a>, <a href="http://fineprint.com/products/pdffactory/index.html" target="_blank">PDFFactory</a>, <a href="http://www.adobe.com/products/acrobatstd/main.html" target="_blank">Adobe Acrobat</a>, və s.).
* Tələbə kitabı redaktə edə bilərmi? *
Kitabları yalnız müəllimlər yarada və redaktə edə bilər. Hələ ki, tələbələrə bu imkanı vermək planlaşdırılmayıb. Bunun əsas səbəbi Kitab modulunun mümkün dərəcədə sadə saxlanılmasıdır.
* Mən kitab daxilində axtarış edə bilərəmmi? *
Hal-hazırda yalnız bir üsul mövcuddur - "çap üçün" səhifəsindən, brauzerin imkanlarından istifadə edərək axtarış aparmaq olar. Qobal axtarış hələ ki, yalnız Moodle sisteminin forumlarında mümkündür. Kitab modulu da daxil olmaqla bütün resurslarda axtarış aparmaq imkanı da olsaydı pis olmazdı. Könnüllü vardırmı?
* Fəsilin adı bir Başlıq sətirinə yerləşmir *
Adı qısaltmağa çalışın və ya administratordan Başlıq sətirini genişləndirməyi xahiş edin.O yalnız modul konfiqurasiyası səhifəsindəki bütün kitablar üçün qlobal olaraq təyin olunur.';
$string['modulename'] = 'Kitab';
$string['modulename_help'] = 'Kitab sadə çoxsəhifəli tədris vəsaitidir.';
$string['modulenameplural'] = 'Kitablar';
$string['navexit'] = 'Kitabdan çıxış';
$string['navnext'] = 'Nəvbəti';
$string['navprev'] = 'Əvvəlki';
$string['numbering'] = 'Fəsillərin nömrələnməsi';
$string['numbering0'] = 'Yoxdur';
$string['numbering1'] = 'Nömrələr';
$string['numbering2'] = 'Markerlər';
$string['numbering3'] = 'Boşluq';
$string['numbering_help'] = '*Yoxdur - fəsillər və altbölmələrə nömrələmə və formatlama şamil olunmur. Sizin fəsilələrinizin adlarında artıq nömrələnmə vardırsa bu üsuldan istifadə edin. Məsələn, "1. Birinci fəsil", "1.a Birinci mövzu", ... .
*Nömrələr - fəsillər və altböllmələr rəqəmlərlə nömrələnir (1, 1.1, 2, 2,...).
*Boşluq - altbölmələr boşluqlarla əks olunur.';
$string['numberingoptions'] = 'Mümkün nömrələmə parametrləri';
$string['numberingoptions_help'] = 'Yeni kitablar yaradılarkən əlyetərli olmalı nömrələmə parametrlərini seçin.';
$string['pluginadministration'] = 'Kitab administrasiyası';
$string['pluginname'] = 'Kitab';
$string['subchapter'] = 'Altfəsilələr';
$string['toc'] = 'Mündəricat';
$string['top'] = 'yuxar';
| carnegiespeech/translations | az/book.php | PHP | apache-2.0 | 4,904 |
package com.coderli.shurnim.storage.plugin;
import java.io.File;
/**
* 插件资源<br>
* 定义了插件的描述信息以及对应的处理器信息(例如: 文件解析器。{@code PluginParser})
*
* @author OneCoder
* @date 2014年4月20日 下午8:53:17
* @website http://www.coderli.com
*/
/**
 * Plugin resource: a plugin's descriptive information together with the
 * handler used to process it (e.g. the configuration-file parser,
 * {@code PluginParser}).
 *
 * @author OneCoder
 * @date 2014-04-20 20:53:17
 * @website http://www.coderli.com
 */
public interface PluginResource {

	/**
	 * Returns the parser for this plugin's configuration file.
	 *
	 * @return the configuration-file parser
	 * @author OneCoder
	 * @date 2014-04-20 20:56:32
	 */
	FileParser getFileParser();

	/**
	 * Returns the actual configuration file resource.
	 *
	 * @return the configuration file
	 * @author OneCoder
	 * @date 2014-04-20 20:58:37
	 */
	File getConfigFile();
}
| lihongzheshuai/shurnim-storage | src/main/java/com/coderli/shurnim/storage/plugin/PluginResource.java | Java | apache-2.0 | 648 |
package migrations
import (
"github.com/fnproject/fn/api/datastore/sql/migratex"
)
// Migrations is the list of fn specific sql migrations to run
// (presumably appended to by the individual migration files in this
// package — confirm).
var Migrations []migratex.Migration
// vfunc wraps the given version number in a zero-argument getter,
// matching the version-function shape used by the migration helpers.
func vfunc(v int64) func() int64 {
	version := v
	return func() int64 {
		return version
	}
}
| fnproject/fn | api/datastore/sql/migrations/migs.go | GO | apache-2.0 | 256 |
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Diagnostics;
using System.Linq;
// using System.ServiceProcess;
using System.Text;
using System.Threading;
using System.Security.Cryptography.X509Certificates;
using System.ServiceModel;
using System.ServiceModel.Description;
using System.Xml;
using Microsoft.Win32;
using dp2Library;
using DigitalPlatform;
using DigitalPlatform.IO;
using DigitalPlatform.Text;
namespace dp2LibraryXE
{
public class LibraryHost : HostBase
{
// Default endpoint set for the mini server: wsHttp, named-pipe and REST
// URLs separated by ';'.
public static string default_miniserver_urls = "http://localhost:8001/dp2library/xe;net.pipe://localhost/dp2library/xe;rest.http://localhost/dp2library/xe/rest";
// Default single endpoint (named pipe only).
public static string default_single_url = "net.pipe://localhost/dp2library/xe";
// ServiceHost _host = null;
// All ServiceHost instances opened by Start(); closed via CloseHosts().
List<ServiceHost> m_hosts = new List<ServiceHost>();
// Semicolon-separated list of URLs this host listens on.
public string HostUrl = default_single_url; // "net.pipe://localhost/dp2library/xe";
// Worker-thread entry point: starts the WCF hosts, signals the started
// event, then spins until _running is cleared; finally closes the hosts
// and signals the closed event.
public override void ThreadMethod()
{
string strError = "";
_running = true;
int nRet = Start(this.DataDir, out strError);
if (nRet == -1)
{
// Startup failed: record the error and drop any partially built hosts.
this.ErrorInfo = strError;
// this._host = null;
this.m_hosts.Clear();
}
this._eventStarted.Set();
// Keep the thread alive until an external caller clears _running.
while (_running)
{
Thread.Sleep(100);
}
this.CloseHosts();
this._thread = null;
this._eventClosed.Set();
}
#if NO
void CloseHosts()
{
if (this._host != null)
{
HostInfo info = _host.Extensions.Find<HostInfo>();
if (info != null)
{
info.Dispose();
_host.Extensions.Remove(info);
}
_host.Close();
_host = null;
}
}
#endif
// Detach and dispose each host's HostInfo extension, close the host,
// then forget all hosts.
public override void CloseHosts()
{
    foreach (ServiceHost serviceHost in this.m_hosts)
    {
        HostInfo hostInfo = serviceHost.Extensions.Find<HostInfo>();
        if (hostInfo != null)
        {
            serviceHost.Extensions.Remove(hostInfo);
            hostInfo.Dispose();
        }
        serviceHost.Close();
    }
    this.m_hosts.Clear();
}
// Builds one ServiceHost for LibraryService and binds an endpoint for
// every URL in HostUrl (net.pipe, net.tcp, http/wsHttp, rest.http).
// Returns 0 on success, -1 on failure with strError describing the cause.
// Error messages are intentionally kept in the original (Chinese) wording.
int Start(string strDataDir,
out string strError)
{
strError = "";

CloseHosts();

List<string> urls = StringUtil.SplitList(this.HostUrl, ';');

ServiceHost host = new ServiceHost(typeof(LibraryService));
this.m_hosts.Add(host);

// Per-host extension carrying the data directory and runtime settings.
HostInfo info = new HostInfo();
info.DataDir = strDataDir;
host.Extensions.Add(info);

bool bHasWsHttp = false;
int i = 0;
foreach (string strTempUrl in urls)
{
string strUrl = strTempUrl.Trim();
if (string.IsNullOrEmpty(strUrl) == true)
continue;

// Bind the protocol endpoint for this URL.
Uri uri = null;
try
{
uri = new Uri(strUrl);
}
catch (Exception ex)
{
strError = "dp2Library OnStart() 警告:发现不正确的协议URL '" + strUrl + "' (异常信息: " + ex.Message + ")。该URL已被放弃绑定。";
return -1;
}

if (uri.Scheme.ToLower() == "net.pipe")
{
host.AddServiceEndpoint(typeof(ILibraryService),
CreateNamedpipeBinding0(),
strUrl);
}
else if (uri.Scheme.ToLower() == "net.tcp")
{
host.AddServiceEndpoint(typeof(ILibraryService),
CreateNetTcpBinding0(),
strUrl);
}
else if (uri.Scheme.ToLower() == "http")
{
ServiceEndpoint endpoint = host.AddServiceEndpoint(typeof(ILibraryService),
CreateWsHttpBinding1(),
strUrl);
bHasWsHttp = true;
}
else if (uri.Scheme.ToLower() == "rest.http")
{
// REST endpoint uses the separate ILibraryServiceREST contract.
ServiceEndpoint endpoint = host.AddServiceEndpoint(typeof(ILibraryServiceREST),
CreateWebHttpBinding1(),
strUrl.Substring(5)); // strip the leading "rest." prefix
if (endpoint.Behaviors.Find<WebHttpBehavior>() == null)
{
WebHttpBehavior behavior = new WebHttpBehavior();
behavior.DefaultBodyStyle = System.ServiceModel.Web.WebMessageBodyStyle.Wrapped;
behavior.DefaultOutgoingResponseFormat = System.ServiceModel.Web.WebMessageFormat.Json;
behavior.AutomaticFormatSelectionEnabled = true;
behavior.HelpEnabled = true;
endpoint.Behaviors.Add(behavior);
}
}
else
{
// Unsupported protocol scheme: abort startup.
strError = "dp2Library OnStart() 警告:发现不能支持的协议类型 '" + strUrl + "'";
return -1;
}

info.Protocol = uri.Scheme.ToLower();

// Attach metadata publishing to an http URL (originally only the
// first host had metadata capability).
if (// i == 0 //
uri.Scheme.ToLower() == "http"
&& host.Description.Behaviors.Find<ServiceMetadataBehavior>() == null)
{
string strMetadataUrl = strUrl; // "http://localhost:8001/dp2library/xe/";
if (strMetadataUrl[strMetadataUrl.Length - 1] != '/')
strMetadataUrl += "/";
strMetadataUrl += "metadata";

ServiceMetadataBehavior behavior = new ServiceMetadataBehavior();
behavior.HttpGetEnabled = true;
behavior.HttpGetUrl = new Uri(strMetadataUrl);
host.Description.Behaviors.Add(behavior);
this.MetadataUrl = strMetadataUrl;
}

i++;
}

// Install the service certificate only when a wsHttp binding exists
// (message security requires it).
if (bHasWsHttp == true)
{
try
{
string strCertSN = "";
X509Certificate2 cert = GetCertificate(strCertSN,
out strError);
if (cert == null)
{
strError = "dp2Library OnStart() 准备证书 时发生错误: " + strError;
return -1;
}
else
host.Credentials.ServiceCertificate.Certificate = cert;
}
catch (Exception ex)
{
strError = "dp2Library OnStart() 获取证书时发生错误: " + ExceptionUtil.GetExceptionMessage(ex);
return -1;
}
}

// Throttling: cap concurrent calls/instances/sessions.
if (host.Description.Behaviors.Find<ServiceThrottlingBehavior>() == null)
{
ServiceThrottlingBehavior behavior = new ServiceThrottlingBehavior();
behavior.MaxConcurrentCalls = 50;
behavior.MaxConcurrentInstances = 1000;
behavior.MaxConcurrentSessions = 1000;
host.Description.Behaviors.Add(behavior);
}

// IncludeExceptionDetailInFaults
ServiceDebugBehavior debug_behavior = host.Description.Behaviors.Find<ServiceDebugBehavior>();
if (debug_behavior == null)
{
host.Description.Behaviors.Add(new ServiceDebugBehavior() { IncludeExceptionDetailInFaults = true });
}
else
{
if (debug_behavior.IncludeExceptionDetailInFaults == false)
debug_behavior.IncludeExceptionDetailInFaults = true;
}

host.Opening += new EventHandler(host_Opening);
host.Closing += new EventHandler(m_host_Closing);

try
{
host.Open();
}
catch (Exception ex)
{
string strInstanceName = "";
strError = "dp2Library OnStart() host.Open() 时发生错误: instancename=[" + strInstanceName + "]:" + ExceptionUtil.GetExceptionMessage(ex);
return -1;
}
#if NO
strError = "test error";
return -1;
#endif
return 0;
}
// ServiceHost.Opening event handler; intentionally empty placeholder.
void host_Opening(object sender, EventArgs e)
{
}
// ServiceHost.Closing event handler: dispose and detach the HostInfo
// extension of every tracked host.
// NOTE(review): the order here is Dispose-then-Remove, while CloseHosts()
// removes before disposing — confirm whether the order matters.
void m_host_Closing(object sender, EventArgs e)
{
#if NO
if (this._host != null)
{
HostInfo info = _host.Extensions.Find<HostInfo>();
if (info != null)
{
info.Dispose();
_host.Extensions.Remove(info);
}
}
#endif
foreach (ServiceHost host in this.m_hosts)
{
HostInfo info = host.Extensions.Find<HostInfo>();
if (info != null)
{
info.Dispose();
host.Extensions.Remove(info);
}
}
}
// Propagate the test-mode flag to every host's HostInfo extension and,
// when the application has been constructed, to the app itself.
public void SetTestMode(bool bTestMode)
{
    foreach (ServiceHost serviceHost in this.m_hosts)
    {
        HostInfo hostInfo = serviceHost.Extensions.Find<HostInfo>();
        if (hostInfo == null)
            continue;
        hostInfo.TestMode = bTestMode;
        if (hostInfo.App != null)
            hostInfo.App.TestMode = bTestMode;
    }
}
// Propagate the client-count limit to every host's HostInfo extension
// and, when the application has been constructed, to the app itself.
public void SetMaxClients(int nMaxClients)
{
    foreach (ServiceHost serviceHost in this.m_hosts)
    {
        HostInfo hostInfo = serviceHost.Extensions.Find<HostInfo>();
        if (hostInfo == null)
            continue;
        hostInfo.MaxClients = nMaxClients;
        if (hostInfo.App != null)
            hostInfo.App.MaxClients = nMaxClients;
    }
}
// Propagate the license type to every host's HostInfo extension and,
// when the application has been constructed, to the app itself.
public void SetLicenseType(string strLicenseType)
{
    foreach (ServiceHost serviceHost in this.m_hosts)
    {
        HostInfo hostInfo = serviceHost.Extensions.Find<HostInfo>();
        if (hostInfo == null)
            continue;
        hostInfo.LicenseType = strLicenseType;
        if (hostInfo.App != null)
            hostInfo.App.LicenseType = strLicenseType;
    }
}
// Propagate the function setting to every host's HostInfo extension and,
// when the application has been constructed, to the app itself.
public void SetFunction(string strFunction)
{
    foreach (ServiceHost serviceHost in this.m_hosts)
    {
        HostInfo hostInfo = serviceHost.Extensions.Find<HostInfo>();
        if (hostInfo == null)
            continue;
        hostInfo.Function = strFunction;
        if (hostInfo.App != null)
            hostInfo.App.Function = strFunction;
    }
}
// Apply a uniform 20-minute timeout to all four timeout knobs of a binding.
static void SetTimeout(System.ServiceModel.Channels.Binding binding)
{
    TimeSpan twentyMinutes = new TimeSpan(0, 20, 0);
    binding.SendTimeout = twentyMinutes;
    binding.ReceiveTimeout = twentyMinutes; // also governs session lifetime
    binding.CloseTimeout = twentyMinutes;
    binding.OpenTimeout = twentyMinutes;
}
// np0: unsecured named-pipe binding with 1 MB message and reader-quota
// limits and 20-minute timeouts.
System.ServiceModel.Channels.Binding CreateNamedpipeBinding0()
{
    XmlDictionaryReaderQuotas quotas = new XmlDictionaryReaderQuotas
    {
        MaxArrayLength = 1024 * 1024,
        MaxStringContentLength = 1024 * 1024,
    };
    NetNamedPipeBinding binding = new NetNamedPipeBinding
    {
        Namespace = "http://dp2003.com/dp2library/",
        MaxReceivedMessageSize = 1024 * 1024,
        ReaderQuotas = quotas,
    };
    binding.Security.Mode = NetNamedPipeSecurityMode.None;
    SetTimeout(binding);
    return binding;
}
// nt0: net.tcp
/// <summary>
/// Builds the net.tcp binding (no transport security, 1 MB message and
/// reader-quota limits, 20-minute timeouts and reliable-session inactivity).
/// </summary>
System.ServiceModel.Channels.Binding CreateNetTcpBinding0()
{
    NetTcpBinding binding = new NetTcpBinding
    {
        Namespace = "http://dp2003.com/dp2library/",
        MaxReceivedMessageSize = 1024 * 1024,
    };
    binding.Security.Mode = SecurityMode.None;

    binding.ReaderQuotas = new XmlDictionaryReaderQuotas
    {
        MaxArrayLength = 1024 * 1024,
        MaxStringContentLength = 1024 * 1024,
    };

    SetTimeout(binding);
    binding.ReliableSession.InactivityTimeout = TimeSpan.FromMinutes(20);
    return binding;
}
// ws1: anonymous -- ClientCredentialType = None
/// <summary>
/// Builds the ws-http binding with message security (credential type chosen
/// by the USERNAME compile symbol), MTOM encoding, 1 MB limits and
/// 20-minute timeouts.
/// </summary>
System.ServiceModel.Channels.Binding CreateWsHttpBinding1()
{
    WSHttpBinding binding = new WSHttpBinding();
    binding.Namespace = "http://dp2003.com/dp2library/";
    binding.Security.Mode = SecurityMode.Message;
#if !USERNAME
    binding.Security.Message.ClientCredentialType = MessageCredentialType.None;
#else
    binding.Security.Message.ClientCredentialType = MessageCredentialType.UserName;
#endif
    binding.MaxReceivedMessageSize = 1024 * 1024;
    binding.MessageEncoding = WSMessageEncoding.Mtom;
    XmlDictionaryReaderQuotas quotas = new XmlDictionaryReaderQuotas();
    quotas.MaxArrayLength = 1024 * 1024;
    quotas.MaxStringContentLength = 1024 * 1024;
    binding.ReaderQuotas = quotas;
    SetTimeout(binding);
    // Fix: InactivityTimeout was previously assigned twice with the same
    // value; a single assignment suffices.
    binding.ReliableSession.InactivityTimeout = new TimeSpan(0, 20, 0);
    return binding;
}
/// <summary>
/// Builds the web-http (REST-style) binding with no security, 1 MB message
/// and reader-quota limits and 20-minute timeouts.
/// </summary>
System.ServiceModel.Channels.Binding CreateWebHttpBinding1()
{
    WebHttpBinding binding = new WebHttpBinding
    {
        Namespace = "http://dp2003.com/dp2library/",
        MaxReceivedMessageSize = 1024 * 1024,
    };
    binding.Security.Mode = WebHttpSecurityMode.None;

    binding.ReaderQuotas = new XmlDictionaryReaderQuotas
    {
        MaxArrayLength = 1024 * 1024,
        MaxStringContentLength = 1024 * 1024,
    };

    SetTimeout(binding);
    return binding;
}
/// <summary>
/// Searches the given certificate store for the first certificate matching
/// <paramref name="findType"/>/<paramref name="findValue"/>.
/// Returns null when nothing matches. The store is always closed.
/// </summary>
static X509Certificate2 FindCertificate(
    StoreLocation location, StoreName name,
    X509FindType findType, string findValue)
{
    X509Store store = new X509Store(name, location);
    try
    {
        store.Open(OpenFlags.ReadOnly);   // read-only access is enough for lookup

        X509Certificate2Collection matches = store.Certificates.Find(
            findType, findValue, false);  // false: also match invalid/expired certs

        return matches.Count == 0 ? null : matches[0];
    }
    finally
    {
        // always close the store
        store.Close();
    }
}
// Locates the X.509 certificate used by the secure bindings.
// When strCertSN is non-empty, the certificate is looked up by serial
// number in LocalMachine/Root; otherwise the bundled default certificate
// (digitalplatform.pfx, next to the executing assembly) is loaded.
// On failure, returns null and sets strError.
X509Certificate2 GetCertificate(
string strCertSN,
out string strError)
{
strError = "";
/*
string strCertSN = GetProductString(
"dp2Library",
"cert_sn");
* */
if (string.IsNullOrEmpty(strCertSN) == false)
{
X509Certificate2 cert = FindCertificate(
StoreLocation.LocalMachine,
StoreName.Root,
X509FindType.FindBySerialNumber,
strCertSN);
if (cert == null)
{
// NOTE(review): the (Chinese) message below mentions both LocalMachine and
// CurrentUser, but only LocalMachine/Root is actually searched — confirm
// the intended search scope.
strError = "序列号为 '" + strCertSN + "' 的证书在 StoreLocation.LocalMachine | StoreLocation.CurrentUser / StoreName.Root 中不存在。";
return null;
}
return cert;
}
// Default: fall back to the DigitalPlatform certificate shipped next to the executable.
string strCurrentDir = System.Reflection.Assembly.GetExecutingAssembly().Location; // Environment.CurrentDirectory;
strCurrentDir = PathUtil.PathPart(strCurrentDir);
string strCerFilename = PathUtil.MergePath(strCurrentDir, "digitalplatform.pfx");
return new X509Certificate2(strCerFilename, "setupdp2");
}
}
}
| renyh1013/dp2 | dp2LibraryXE/LibraryHost.cs | C# | apache-2.0 | 17,502 |
/*
* Copyright 2013 Netherlands eScience Center
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.esciencecenter.xenon.filesystems;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import nl.esciencecenter.xenon.InvalidCredentialException;
import nl.esciencecenter.xenon.InvalidLocationException;
import nl.esciencecenter.xenon.InvalidPropertyException;
import nl.esciencecenter.xenon.UnknownAdaptorException;
import nl.esciencecenter.xenon.UnknownPropertyException;
import nl.esciencecenter.xenon.UnsupportedOperationException;
import nl.esciencecenter.xenon.XenonException;
import nl.esciencecenter.xenon.adaptors.AdaptorLoader;
import nl.esciencecenter.xenon.adaptors.NotConnectedException;
import nl.esciencecenter.xenon.adaptors.XenonProperties;
import nl.esciencecenter.xenon.adaptors.filesystems.FileAdaptor;
import nl.esciencecenter.xenon.credentials.Credential;
import nl.esciencecenter.xenon.credentials.DefaultCredential;
import nl.esciencecenter.xenon.utils.DaemonThreadFactory;
/**
* FileSystem represent a (possibly remote) file system that can be used to access data.
*/
public abstract class FileSystem implements AutoCloseable {
/**
 * Resolves a file adaptor by name via the {@link AdaptorLoader}.
 *
 * @param adaptorName
 *            the name of the adaptor to resolve.
 * @return the adaptor implementation.
 * @throws UnknownAdaptorException
 *             if no file adaptor with the given name is registered.
 */
private static FileAdaptor getAdaptorByName(String adaptorName) throws UnknownAdaptorException {
    return AdaptorLoader.getFileAdaptor(adaptorName);
}
/**
 * Gives the names of the available file system adaptors.
 *
 * @return the list of adaptor names.
 */
public static String[] getAdaptorNames() {
    return AdaptorLoader.getFileAdaptorNames();
}
/**
 * Gives the description of the adaptor with the given name.
 *
 * @param adaptorName
 *            the type of file system to connect to (e.g. "sftp" or "webdav")
 * @return the description of the adaptor.
 * @throws UnknownAdaptorException
 *             If the adaptor name is absent from {@link #getAdaptorNames()}.
 */
public static FileSystemAdaptorDescription getAdaptorDescription(String adaptorName) throws UnknownAdaptorException {
    return getAdaptorByName(adaptorName);
}
/**
 * Gives the descriptions of all available file system adaptors.
 *
 * @return the list of adaptor descriptions.
 */
public static FileSystemAdaptorDescription[] getAdaptorDescriptions() {
    return AdaptorLoader.getFileAdaptorDescriptions();
}
/**
 * Immutable snapshot of the status of a single copy operation: identifier,
 * state ("RUNNING", "DONE" or "FAILED"), progress counters and, when the
 * copy failed, the exception that caused the failure.
 */
static class CopyStatusImplementation implements CopyStatus {

    private final String copyIdentifier;
    private final String state;
    private final XenonException exception;
    private final long bytesToCopy;
    private final long bytesCopied;

    public CopyStatusImplementation(String copyIdentifier, String state, long bytesToCopy, long bytesCopied, XenonException exception) {
        this.copyIdentifier = copyIdentifier;
        this.state = state;
        this.bytesToCopy = bytesToCopy;
        this.bytesCopied = bytesCopied;
        this.exception = exception;
    }

    @Override
    public String getCopyIdentifier() {
        return copyIdentifier;
    }

    @Override
    public String getState() {
        return state;
    }

    @Override
    public long bytesToCopy() {
        return bytesToCopy;
    }

    @Override
    public long bytesCopied() {
        return bytesCopied;
    }

    @Override
    public XenonException getException() {
        return exception;
    }

    @Override
    public boolean hasException() {
        return exception != null;
    }

    @Override
    public void maybeThrowException() throws XenonException {
        if (hasException()) {
            throw getException();
        }
    }

    @Override
    public boolean isRunning() {
        // "RUNNING".equals(...) is null-safe on state.
        return "RUNNING".equals(state);
    }

    @Override
    public boolean isDone() {
        // A copy is done when it either completed or failed.
        return "DONE".equals(state) || "FAILED".equals(state);
    }

    @Override
    public String toString() {
        return "CopyStatus [copyIdentifier=" + copyIdentifier + ", state=" + state + ", exception=" + exception + ", bytesToCopy=" + bytesToCopy
                + ", bytesCopied=" + bytesCopied + "]";
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == this) {
            return true;
        }
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        CopyStatusImplementation other = (CopyStatusImplementation) obj;
        return bytesToCopy == other.bytesToCopy
                && bytesCopied == other.bytesCopied
                && Objects.equals(copyIdentifier, other.copyIdentifier)
                && Objects.equals(state, other.state)
                && Objects.equals(exception, other.exception);
    }

    @Override
    public int hashCode() {
        return Objects.hash(copyIdentifier, state, exception, bytesToCopy, bytesCopied);
    }
}
/**
 * Create a new FileSystem using the <code>adaptor</code> that connects to a data store at <code>location</code> using the <code>credentials</code> to get
 * access. Use <code>properties</code> to (optionally) configure the FileSystem when it is created.
 *
 * Make sure to always close {@code FileSystem} instances by calling {@code close(FileSystem)} when you no longer need them, otherwise their associated
 * resources remain allocated.
 *
 * @see <a href="../../../../overview-summary.html#filesystems">Documentation on the supported adaptors and locations.</a>
 *
 * @param adaptor
 *            the type of file system to connect to (e.g. "sftp" or "webdav")
 * @param location
 *            the location of the FileSystem.
 * @param credential
 *            the Credentials to use to get access to the FileSystem.
 * @param properties
 *            optional properties to use when creating the FileSystem.
 *
 * @return the new FileSystem.
 *
 * @throws UnknownPropertyException
 *             If an unknown property was provided.
 * @throws InvalidPropertyException
 *             If a known property was provided with an invalid value.
 * @throws UnknownAdaptorException
 *             If the adaptor was invalid.
 * @throws InvalidLocationException
 *             If the location was invalid.
 * @throws InvalidCredentialException
 *             If the credential is invalid to access the location.
 *
 * @throws XenonException
 *             If the creation of the FileSystem failed.
 * @throws IllegalArgumentException
 *             If adaptor is null.
 */
public static FileSystem create(String adaptor, String location, Credential credential, Map<String, String> properties) throws XenonException {
    return getAdaptorByName(adaptor).createFileSystem(location, credential, properties);
}
/**
 * Create a new FileSystem using the <code>adaptor</code> that connects to a data store at <code>location</code> using the <code>credentials</code> to get
 * access. No additional properties are passed; this is equivalent to calling the four-argument overload with an empty property map.
 *
 * Make sure to always close {@code FileSystem} instances by calling {@code close(FileSystem)} when you no longer need them, otherwise their associated
 * resources remain allocated.
 *
 * @see <a href="../../../../overview-summary.html#filesystems">Documentation on the supported adaptors and locations.</a>
 *
 * @param adaptor
 *            the type of file system to connect to (e.g. "sftp" or "webdav")
 * @param location
 *            the location of the FileSystem.
 * @param credential
 *            the Credentials to use to get access to the FileSystem.
 *
 * @return the new FileSystem.
 *
 * @throws UnknownPropertyException
 *             If an unknown property was provided.
 * @throws InvalidPropertyException
 *             If a known property was provided with an invalid value.
 * @throws UnknownAdaptorException
 *             If the adaptor was invalid.
 * @throws InvalidLocationException
 *             If the location was invalid.
 * @throws InvalidCredentialException
 *             If the credential is invalid to access the location.
 * @throws XenonException
 *             If the creation of the FileSystem failed.
 * @throws IllegalArgumentException
 *             If adaptor is null.
 */
public static FileSystem create(String adaptor, String location, Credential credential) throws XenonException {
    return create(adaptor, location, credential, new HashMap<>(0));
}
/**
 * Create a new FileSystem using the <code>adaptor</code> that connects to a data store at <code>location</code> using the default credentials to get
 * access.
 *
 * Make sure to always close {@code FileSystem} instances by calling {@code close(FileSystem)} when you no longer need them, otherwise their associated
 * resources remain allocated.
 *
 * @see <a href="../../../../overview-summary.html#filesystems">Documentation on the supported adaptors and locations.</a>
 *
 * @param adaptor
 *            the type of file system to connect to (e.g. "sftp" or "webdav")
 * @param location
 *            the location of the FileSystem.
 *
 * @return the new FileSystem.
 *
 * @throws UnknownPropertyException
 *             If an unknown property was provided.
 * @throws InvalidPropertyException
 *             If a known property was provided with an invalid value.
 * @throws UnknownAdaptorException
 *             If the adaptor was invalid.
 * @throws InvalidLocationException
 *             If the location was invalid.
 * @throws InvalidCredentialException
 *             If the credential is invalid to access the location.
 *
 * @throws XenonException
 *             If the creation of the FileSystem failed.
 * @throws IllegalArgumentException
 *             If adaptor is null.
 */
public static FileSystem create(String adaptor, String location) throws XenonException {
    return create(adaptor, location, new DefaultCredential());
}
/**
 * Create a new FileSystem using the <code>adaptor</code> that connects to a data store at the default location using the default credentials to get access.
 *
 * Note that there are very few filesystem adaptors that support a default location. The local filesystem adaptor is the prime example.
 *
 * Make sure to always close {@code FileSystem} instances by calling {@code close(FileSystem)} when you no longer need them, otherwise their associated
 * resources remain allocated.
 *
 * @see <a href="overview-summary.html#filesystems">Documentation on the supported adaptors and locations.</a>
 *
 * @param adaptor
 *            the type of file system to connect to (e.g. "sftp" or "webdav")
 *
 * @return the new FileSystem.
 *
 * @throws UnknownPropertyException
 *             If an unknown property was provided.
 * @throws InvalidPropertyException
 *             If a known property was provided with an invalid value.
 * @throws UnknownAdaptorException
 *             If the adaptor was invalid.
 * @throws InvalidLocationException
 *             If the location was invalid.
 * @throws InvalidCredentialException
 *             If the credential is invalid to access the location.
 *
 * @throws XenonException
 *             If the creation of the FileSystem failed.
 * @throws IllegalArgumentException
 *             If adaptor is null.
 */
public static FileSystem create(String adaptor) throws XenonException {
    return create(adaptor, null);
}
/**
 * Thread-safe progress and cancellation bookkeeping shared between a running
 * copy task and the threads that query or cancel it. All accessors are
 * synchronized on the instance.
 */
class CopyCallback {

    private long bytesToCopy = 0;
    private long bytesCopied = 0;
    private boolean started = false;
    private boolean cancelled = false;

    /** Records the total number of bytes to copy; only the first call has effect. */
    synchronized void start(long bytesToCopy) {
        if (started) {
            return;
        }
        started = true;
        this.bytesToCopy = bytesToCopy;
    }

    /** Returns whether {@link #start(long)} has been called. */
    synchronized boolean isStarted() {
        return started;
    }

    /** Returns the number of bytes copied so far. */
    synchronized long getBytesCopied() {
        return bytesCopied;
    }

    /** Returns the total number of bytes that will be copied. */
    synchronized long getBytesToCopy() {
        return bytesToCopy;
    }

    /** Adds {@code bytes} to the running total of copied bytes. */
    synchronized void addBytesCopied(long bytes) {
        bytesCopied += bytes;
    }

    /** Requests cancellation of the copy; the copy loop polls {@link #isCancelled()}. */
    synchronized void cancel() {
        cancelled = true;
    }

    /** Returns whether cancellation has been requested. */
    synchronized boolean isCancelled() {
        return cancelled;
    }
}
// Pairs the Future of a submitted copy task with the callback used to
// observe its progress and request cancellation.
private class PendingCopy {
    // Future of the copy task running on the internal copy thread pool.
    Future<Void> future;
    // Progress/cancellation state shared with the running task.
    CopyCallback callback;

    public PendingCopy(Future<Void> future, CopyCallback callback) {
        super();
        this.future = future;
        this.callback = callback;
    }
}
// Unique identifier of this FileSystem instance (also used to name the copy thread).
private final String uniqueID;
// Name of the adaptor that created this FileSystem.
private final String adaptor;
// Location string this FileSystem was created with.
private final String location;
// Credential used to access the location.
private final Credential credential;
// Properties this FileSystem was configured with.
private final XenonProperties properties;
// Single-threaded pool on which asynchronous copy operations run.
private final ExecutorService pool;
// Current working directory; relative paths are resolved against it.
private Path workingDirectory;
// Counter used to generate unique copy identifiers.
private long nextCopyID = 0;
// Buffer size (in bytes) used when copying streams.
private int bufferSize;
// Copy operations that have been submitted and not yet completed/collected.
private final HashMap<String, PendingCopy> pendingCopies = new HashMap<>();
/**
 * Create a FileSystem with the given identity, connection information and
 * initial working directory. All arguments except {@code properties} are
 * validated; a single-threaded pool is created for asynchronous copies.
 *
 * @param uniqueID
 *            unique identifier for this instance (may not be null).
 * @param adaptor
 *            name of the creating adaptor (may not be null).
 * @param location
 *            location this FileSystem connects to (may not be null).
 * @param credential
 *            credential used to access the location (may not be null).
 * @param workDirectory
 *            initial working directory (may not be null).
 * @param bufferSize
 *            buffer size in bytes used for stream copies (must be positive).
 * @param properties
 *            properties this FileSystem was configured with.
 * @throws IllegalArgumentException
 *             if any validated argument is null or bufferSize is not positive.
 */
protected FileSystem(String uniqueID, String adaptor, String location, Credential credential, Path workDirectory, int bufferSize,
        XenonProperties properties) {

    if (uniqueID == null) {
        throw new IllegalArgumentException("Identifier may not be null!");
    }

    if (adaptor == null) {
        throw new IllegalArgumentException("Adaptor may not be null!");
    }

    if (location == null) {
        throw new IllegalArgumentException("Location may not be null!");
    }

    if (credential == null) {
        throw new IllegalArgumentException("Credential may not be null!");
    }

    if (workDirectory == null) {
        // Fix: message used the stale name "EntryPath"; align with the parameter.
        throw new IllegalArgumentException("WorkDirectory may not be null!");
    }

    if (bufferSize <= 0) {
        throw new IllegalArgumentException("Buffer size may not be 0 or smaller!");
    }

    this.uniqueID = uniqueID;
    this.adaptor = adaptor;
    this.location = location;
    this.credential = credential;
    this.workingDirectory = workDirectory;
    this.properties = properties;
    this.bufferSize = bufferSize;

    this.pool = Executors.newFixedThreadPool(1, new DaemonThreadFactory("CopyThread." + uniqueID));
}
/**
 * Returns the buffer size (in bytes) used for stream copies.
 *
 * @return the buffer size in bytes.
 */
protected int getBufferSize() {
    return bufferSize;
}
/**
 * Generates the next unique copy identifier, e.g. "COPY-sftp-0".
 *
 * @return a copy identifier unique within this FileSystem instance.
 */
private synchronized String getNextCopyID() {
    return "COPY-" + getAdaptorName() + "-" + nextCopyID++;
}
/**
 * Get the name of the adaptor that created this FileSystem.
 *
 * @return the name of the adaptor.
 */
public String getAdaptorName() {
    return adaptor;
}
/**
 * Get the location this FileSystem was created with.
 *
 * @return the location of the FileSystem.
 */
public String getLocation() {
    return location;
}
/**
 * Get the credential that this FileSystem is using.
 *
 * @return the credential this FileSystem is using.
 */
public Credential getCredential() {
    return credential;
}
/**
 * Get the properties used to create this FileSystem, as a plain map.
 *
 * @return the properties used to create this FileSystem.
 */
public Map<String, String> getProperties() {
    return properties.toMap();
}
/**
 * Get the current working directory of this file system.
 *
 * All relative paths provided to FileSystem methods are resolved against this current working directory.
 *
 * The current working directory is set when a FileSystem is created using the path specified in the location. If no path is specified in the location, an
 * adaptor specific default path is used, for example <code>"/home/username"</code>.
 *
 * @return the current working directory of this file system.
 */
public Path getWorkingDirectory() {
    return workingDirectory;
}
/**
 * Get the path separator used by this file system, as a one-character string.
 *
 * The separator is determined by the working directory set at creation time.
 *
 * @return the path separator used by this file system.
 */
public String getPathSeparator() {
    return String.valueOf(workingDirectory.getSeparator());
}
/**
 * Set the current working directory of this file system to <code>directory</code>.
 *
 * The provided <code>directory</code> must exist and be a directory. It may be absolute or relative; a relative path is resolved against the current
 * working directory.
 *
 * @param directory
 *            a path to which the current working directory must be set.
 * @throws NoSuchPathException
 *             if the <code>directory</code> does not exist
 * @throws InvalidPathException
 *             if <code>directory</code> is not a directory
 * @throws NotConnectedException
 *             if file system is closed.
 * @throws IllegalArgumentException
 *             if the argument is null.
 * @throws XenonException
 *             if an I/O error occurred
 */
public void setWorkingDirectory(Path directory) throws XenonException {
    Path resolved = toAbsolutePath(directory);
    // Only switch after the target is confirmed to be an existing directory.
    assertDirectoryExists(resolved);
    workingDirectory = resolved;
}
/**
 * Close this FileSystem and stop its internal copy thread pool. If the adaptor does not support closing this is a no-op.
 *
 * @throws XenonException
 *             If the FileSystem failed to close or if an I/O error occurred.
 */
public void close() throws XenonException {
    try {
        pool.shutdownNow();
    } catch (Exception e) {
        // Fix: chain the original failure as the cause instead of discarding it.
        throw new XenonException(getAdaptorName(), "Failed to cleanly shutdown copy thread pool", e);
    }
}
/**
 * Return if the connection to the FileSystem is open. An adaptor which does not support closing is always open.
 *
 * @return if the connection to the FileSystem is open.
 * @throws XenonException
 *             if the test failed or an I/O error occurred.
 */
public abstract boolean isOpen() throws XenonException;
/**
 * Rename an existing source path to a non-existing target path (optional operation).
 * <p>
 *
 * This method only implements a <em>rename</em> operation, not a <em>move</em> operation. Hence, this method will not copy files and should return (almost)
 * instantaneously.
 *
 * The parent of the target path (e.g. <code>target.getParent</code>) must exist.
 *
 * If the target is equal to the source this method has no effect.
 *
 * If the source is a link, the link itself will be renamed, not the path to which it refers.
 *
 * If the source is a directory, it will be renamed to the target. This implies that moving a directory between physical locations may fail.
 * </p>
 *
 * @param source
 *            the existing source path.
 * @param target
 *            the non existing target path.
 *
 * @throws UnsupportedOperationException
 *             If the adaptor does not support renaming.
 * @throws NoSuchPathException
 *             If the source file does not exist or the target parent directory does not exist.
 * @throws PathAlreadyExistsException
 *             If the target file already exists.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             If the move failed.
 * @throws IllegalArgumentException
 *             If one or both of the arguments are null.
 */
public abstract void rename(Path source, Path target) throws XenonException;
/**
 * Creates a new directory, failing if the directory already exists. All nonexistent parent directories are also created (like {@code mkdir -p}).
 *
 * @param dir
 *            the directory to create.
 *
 * @throws PathAlreadyExistsException
 *             If the directory already exists or if a parent directory could not be created because a file with the same name already exists.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             If an I/O error occurred.
 * @throws IllegalArgumentException
 *             If one or both of the arguments are null.
 */
public void createDirectories(Path dir) throws XenonException {
    Path target = toAbsolutePath(dir);
    Path ancestor = target.getParent();

    // First ensure the parent chain exists (recursively), then create the leaf.
    boolean parentMissing = ancestor != null && !exists(ancestor);

    if (parentMissing) {
        createDirectories(ancestor);
    }

    createDirectory(target);
}
/**
 * Creates a new directory, failing if the directory already exists.
 *
 * The parent directory of the new directory must already exist.
 *
 * @param dir
 *            the directory to create.
 *
 * @throws PathAlreadyExistsException
 *             If the directory already exists.
 * @throws NoSuchPathException
 *             If the parent directory does not exist.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             If an I/O error occurred.
 * @throws IllegalArgumentException
 *             If the argument is null.
 *
 */
public abstract void createDirectory(Path dir) throws XenonException;
/**
 * Creates a new empty file, failing if the file already exists.
 *
 * The parent directory of the file must already exist.
 *
 * @param file
 *            a path referring to the file to create.
 *
 * @throws PathAlreadyExistsException
 *             If the file already exists.
 * @throws NoSuchPathException
 *             If the parent directory does not exist.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             If an I/O error occurred.
 * @throws IllegalArgumentException
 *             If one or both of the arguments are null.
 */
public abstract void createFile(Path file) throws XenonException;
/**
 * Creates a new symbolic link, failing if the link already exists (optional operation).
 *
 * The target is taken as is. It may be an absolute, relative and/or non-normalized path, and may or may not exist.
 *
 * @param link
 *            the symbolic link to create.
 * @param target
 *            the target the symbolic link should refer to.
 *
 * @throws PathAlreadyExistsException
 *             If the link already exists.
 * @throws NoSuchPathException
 *             If the target or parent directory of link does not exist
 * @throws InvalidPathException
 *             If parent of link is not a directory
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             If an I/O error occurred.
 * @throws IllegalArgumentException
 *             If one or both of the arguments are null.
 */
public abstract void createSymbolicLink(Path link, Path target) throws XenonException;
/**
 * Deletes an existing path.
 *
 * If path is a symbolic link the symbolic link is removed and the symbolic link's target is not deleted.
 *
 * If the path is a directory and <code>recursive</code> is set to true, the contents of the directory will also be deleted. If <code>recursive</code> is
 * set to <code>false</code>, a directory will only be removed if it is empty.
 *
 * @param path
 *            the path to delete.
 * @param recursive
 *            if the delete must be done recursively
 * @throws DirectoryNotEmptyException
 *             if the directory was not empty (and the delete was not recursive).
 * @throws NoSuchPathException
 *             if the provided path does not exist.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If path is null.
 */
public void delete(Path path, boolean recursive) throws XenonException {
    Path target = toAbsolutePath(path);
    assertPathExists(target);

    // Plain files (and links) are removed directly.
    if (!getAttributes(target).isDirectory()) {
        deleteFile(target);
        return;
    }

    Iterable<PathAttributes> entries = list(target, false);

    if (recursive) {
        // Remove the directory contents first.
        for (PathAttributes entry : entries) {
            delete(entry.getPath(), true);
        }
    } else if (entries.iterator().hasNext()) {
        throw new DirectoryNotEmptyException(getAdaptorName(), "Directory not empty: " + target.toString());
    }

    deleteDirectory(target);
}
/**
 * Tests if a path exists.
 *
 * @param path
 *            the path to test.
 *
 * @return If the path exists.
 *
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If path is null.
 */
public abstract boolean exists(Path path) throws XenonException;
/**
 * List all entries in the directory <code>dir</code>.
 *
 * All entries in the directory are returned, but subdirectories will not be traversed by default. Set <code>recursive</code> to <code>true</code> to
 * include the listing of all subdirectories.
 *
 * Symbolic links are not followed.
 *
 * @param dir
 *            the target directory.
 * @param recursive
 *            should the list recursively traverse the subdirectories ?
 *
 * @return a {@link List} of {@link PathAttributes} that iterates over all entries in the directory <code>dir</code>.
 *
 * @throws NoSuchPathException
 *             If a directory does not exist.
 * @throws InvalidPathException
 *             If <code>dir</code> is not a directory.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If path is null.
 */
public Iterable<PathAttributes> list(Path dir, boolean recursive) throws XenonException {
    Path absolute = toAbsolutePath(dir);
    // Fix: assert on the resolved absolute path, not the (possibly relative)
    // caller-supplied one, so the existence check matches the path we list.
    assertDirectoryExists(absolute);
    ArrayList<PathAttributes> result = new ArrayList<>();
    list(absolute, result, recursive);
    return result;
}
/**
 * Open an existing file and return an {@link InputStream} to read from this file.
 *
 * @param file
 *            the file to read.
 *
 * @return the {@link InputStream} to read from the file.
 *
 * @throws NoSuchPathException
 *             If the file does not exist.
 * @throws InvalidPathException
 *             If the file is not a regular file.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If path is null.
 */
public abstract InputStream readFromFile(Path file) throws XenonException;
/**
 * Open a file and return an {@link OutputStream} to write to this file.
 * <p>
 *
 * The size of the file (once all data has been written) must be specified using the <code>size</code> parameter. This is required by some implementations
 * (typically blob-stores).
 *
 * </p>
 *
 * @param path
 *            the target file for the OutputStream.
 * @param size
 *            the size of the file once fully written.
 *
 * @return the {@link OutputStream} to write to the file.
 *
 * @throws PathAlreadyExistsException
 *             If the target existed.
 * @throws NoSuchPathException
 *             if a parent directory does not exist.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If path is null.
 */
public abstract OutputStream writeToFile(Path path, long size) throws XenonException;
/**
 * Open a file and return an {@link OutputStream} to write to this file. (optional operation)
 * <p>
 * If the file already exists it will be replaced and its data will be lost.
 *
 * The amount of data that will be written to the file is not specified in advance. This operation may not be supported by all implementations.
 *
 * </p>
 *
 * @param file
 *            the target file for the OutputStream.
 *
 * @return the {@link OutputStream} to write to the file.
 *
 *
 * @throws PathAlreadyExistsException
 *             If the target existed. (NOTE(review): this seems to contradict the "will be replaced" statement above — confirm which behavior adaptors
 *             actually implement.)
 * @throws NoSuchPathException
 *             if a parent directory does not exist.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If path is null.
 */
public abstract OutputStream writeToFile(Path file) throws XenonException;
/**
 * Open an existing file and return an {@link OutputStream} to append data to this file. (optional operation)
 * <p>
 * If the file does not exist, an exception will be thrown.
 *
 * This operation may not be supported by all implementations.
 *
 * </p>
 *
 * @param file
 *            the target file for the OutputStream.
 *
 * @return the {@link OutputStream} to write to the file.
 *
 * @throws PathAlreadyExistsException
 *             If the target existed. (NOTE(review): appending requires an existing file, so this @throws looks copy-pasted — confirm it can actually
 *             occur here.)
 * @throws NoSuchPathException
 *             if a parent directory does not exist.
 * @throws InvalidPathException
 *             if not a regular file
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If path is null.
 * @throws UnsupportedOperationException
 *             if the adaptor does not support appending
 */
public abstract OutputStream appendToFile(Path file) throws XenonException;
/**
 * Get the {@link PathAttributes} of an existing path.
 *
 * @param path
 *            the existing path.
 *
 * @return the PathAttributes of the path.
 *
 * @throws NoSuchPathException
 *             If the file does not exist.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If path is null.
 */
public abstract PathAttributes getAttributes(Path path) throws XenonException;
/**
 * Reads the target of a symbolic link (optional operation).
 *
 * @param link
 *            the link to read.
 *
 * @return a Path representing the target of the link.
 *
 * @throws NoSuchPathException
 *             If the link does not exist.
 * @throws InvalidPathException
 *             If the source is not a link.
 * @throws UnsupportedOperationException
 *             If this FileSystem does not support symbolic links.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If path is null.
 */
public abstract Path readSymbolicLink(Path link) throws XenonException;
/**
 * Sets the POSIX permissions of a path (optional operation).
 *
 * @param path
 *            the target path.
 * @param permissions
 *            the permissions to set.
 *
 * @throws NoSuchPathException
 *             If the target path does not exist.
 * @throws UnsupportedOperationException
 *             If this FileSystem does not support setting POSIX permissions. (NOTE(review): original javadoc said "symbolic links", which appears to be a
 *             copy-paste slip.)
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If path is null.
 */
public abstract void setPosixFilePermissions(Path path, Set<PosixFilePermission> permissions) throws XenonException;
/**
 * Convert the provided path into an absolute, normalized path.
 *
 * A relative path is first resolved against the working directory of this FileSystem; an absolute path is used as-is. The result is normalized in both
 * cases.
 *
 * @param path
 *            the path to convert
 * @throws IllegalArgumentException
 *             if path is null.
 * @return an absolute path
 */
protected Path toAbsolutePath(Path path) {
    assertNotNull(path);
    // Relative paths are anchored at the working directory before normalization.
    Path base = path.isAbsolute() ? path : workingDirectory.resolve(path);
    return base.normalize();
}
/**
 * Copy all data from <code>in</code> to <code>out</code>, moving at most <code>buffersize</code> bytes at a time.
 *
 * After every block that is written, <code>callback.addBytesCopied</code> is invoked with the number of bytes copied, and
 * <code>callback.isCancelled</code> is consulted to decide whether the copy should be aborted.
 *
 * @param in
 *            the stream to copy the data from.
 * @param out
 *            the stream to copy the data to.
 * @param buffersize
 *            the buffer size to use for copying.
 * @param callback
 *            the callback to report bytes copied to and check cancellation from.
 * @throws IOException
 *             if an I/O exception occurred.
 * @throws CopyCancelledException
 *             if the copy was cancelled by the user.
 */
protected void streamCopy(InputStream in, OutputStream out, int buffersize, CopyCallback callback) throws IOException, CopyCancelledException {

    byte[] buffer = new byte[buffersize];

    int bytesRead;
    while ((bytesRead = in.read(buffer)) > 0) {
        out.write(buffer, 0, bytesRead);
        callback.addBytesCopied(bytesRead);

        if (callback.isCancelled()) {
            throw new CopyCancelledException(getAdaptorName(), "Copy cancelled by user");
        }
    }

    // Flush the output to ensure all data is written when this method returns.
    out.flush();
}
/**
 * Copy a symbolic link to another file system (optional operation).
 *
 * This is a blocking copy operation. It only returns once the link has been copied or the copy has failed.
 *
 * This operation may be re-implemented by the various implementations of FileSystem.
 *
 * This default implementation is based on creating a new link on the destination filesystem. Note that the file the link is referring to is not copied.
 * Only the link itself is copied.
 *
 * @param source
 *            the link to copy.
 * @param destinationFS
 *            the destination {@link FileSystem} to copy to.
 * @param destination
 *            the destination link on the destination file system.
 * @param mode
 *            selects what should happen if the destination link already exists
 * @param callback
 *            a {@link CopyCallback} used to update the status of the copy, or cancel it while in progress. (Currently unused by this default
 *            implementation, since copying a link involves no data transfer.)
 *
 * @throws InvalidPathException
 *             if the provided source is not a link.
 * @throws NoSuchPathException
 *             if the source link does not exist or the destination parent directory does not exist.
 * @throws PathAlreadyExistsException
 *             if the destination link already exists.
 * @throws UnsupportedOperationException
 *             if the destination FileSystem does not support symbolic links.
 * @throws XenonException
 *             if the link could not be copied.
 */
protected void copySymbolicLink(Path source, FileSystem destinationFS, Path destination, CopyMode mode, CopyCallback callback) throws XenonException {

    PathAttributes attributes = getAttributes(source);

    if (!attributes.isSymbolicLink()) {
        // BUG FIX: the original message was copy-pasted from copyFile and claimed "not a regular file".
        throw new InvalidPathException(getAdaptorName(), "Source is not a symbolic link: " + source);
    }

    destinationFS.assertParentDirectoryExists(destination);

    if (destinationFS.exists(destination)) {
        switch (mode) {
        case CREATE:
            throw new PathAlreadyExistsException(getAdaptorName(), "Destination path already exists: " + destination);
        case IGNORE:
            return;
        case REPLACE:
            // continue and overwrite the existing link below
            break;
        }
    }

    // Read the target of the source link and recreate the link on the destination file system.
    Path target = readSymbolicLink(source);
    destinationFS.createSymbolicLink(destination, target);
}
/**
 * Copy a single file to another file system.
 *
 * This is a blocking copy operation. It only returns once the file has been copied or the copy has failed.
 *
 * This operation may be re-implemented by the various implementations of FileSystem. This default implementation is based on a simple stream based copy.
 *
 * @param source
 *            the file to copy.
 * @param destinationFS
 *            the destination {@link FileSystem} to copy to.
 * @param destination
 *            the destination file on the destination file system.
 * @param mode
 *            selects what should happen if the destination file already exists
 * @param callback
 *            a {@link CopyCallback} used to update the status of the copy, or cancel it while in progress.
 *
 * @throws InvalidPathException
 *             if the provided source is not a regular file.
 * @throws NoSuchPathException
 *             if the source file does not exist or the destination parent directory does not exist.
 * @throws PathAlreadyExistsException
 *             if the destination file already exists.
 * @throws CopyCancelledException
 *             if the copy was cancelled by the user.
 * @throws XenonException
 *             If the file could not be copied.
 */
protected void copyFile(Path source, FileSystem destinationFS, Path destination, CopyMode mode, CopyCallback callback) throws XenonException {

    PathAttributes attributes = getAttributes(source);

    if (!attributes.isRegular()) {
        throw new InvalidPathException(getAdaptorName(), "Source is not a regular file: " + source);
    }

    destinationFS.assertParentDirectoryExists(destination);

    if (destinationFS.exists(destination)) {
        switch (mode) {
        case CREATE:
            throw new PathAlreadyExistsException(getAdaptorName(), "Destination path already exists: " + destination);
        case IGNORE:
            return;
        case REPLACE:
            // Remove the existing destination so the stream copy below writes a fresh file.
            destinationFS.delete(destination, true);
            break;
        }
    }

    if (callback.isCancelled()) {
        throw new CopyCancelledException(getAdaptorName(), "Copy cancelled by user");
    }

    try (InputStream in = readFromFile(source); OutputStream out = destinationFS.writeToFile(destination, attributes.getSize())) {
        streamCopy(in, out, bufferSize, callback);
    } catch (XenonException e) {
        // BUG FIX: do not re-wrap XenonExceptions. In particular, streamCopy throws CopyCancelledException
        // (a XenonException) on user cancellation; the original code wrapped it into a generic
        // "Stream copy failed" exception, hiding the cancellation from callers.
        throw e;
    } catch (Exception e) {
        throw new XenonException(getAdaptorName(), "Stream copy failed", e);
    }
}
/**
 * Perform a (possibly) recursive copy from a path on this filesystem to a path on <code>destinationFS</code>.
 *
 * Regular files and symbolic links are copied directly; directories are only copied when <code>recursive</code> is set.
 *
 * @param source
 *            the source path on this FileSystem.
 * @param destinationFS
 *            the destination FileSystem.
 * @param destination
 *            the destination path.
 * @param mode
 *            the copy mode that determines how to react if the destination already exists.
 * @param recursive
 *            should the copy be performed recursively ?
 * @param callback
 *            a {@link CopyCallback} used to return status information on the copy.
 * @throws XenonException
 *             if an error occurred.
 */
protected void performCopy(Path source, FileSystem destinationFS, Path destination, CopyMode mode, boolean recursive, CopyCallback callback)
        throws XenonException {

    if (!exists(source)) {
        throw new NoSuchPathException(getAdaptorName(), "No such file " + source.toString());
    }

    PathAttributes attributes = getAttributes(source);

    // Files and links are delegated to the single-item copy helpers.
    if (attributes.isRegular()) {
        copyFile(source, destinationFS, destination, mode, callback);
        return;
    }

    if (attributes.isSymbolicLink()) {
        copySymbolicLink(source, destinationFS, destination, mode, callback);
        return;
    }

    if (!attributes.isDirectory()) {
        throw new InvalidPathException(getAdaptorName(), "Source path is not a file, link or directory: " + source);
    }

    if (!recursive) {
        throw new InvalidPathException(getAdaptorName(), "Source path is a directory: " + source);
    }

    // From here on we know the source is a directory. We should also check the destination type.
    if (destinationFS.exists(destination)) {
        switch (mode) {
        case CREATE:
            throw new PathAlreadyExistsException(getAdaptorName(), "Destination path already exists: " + destination);
        case IGNORE:
            return;
        case REPLACE:
            // continue
            break;
        }

        // REPLACE mode: if the existing destination is a file or link, replace it with an empty directory.
        attributes = destinationFS.getAttributes(destination);

        if (attributes.isRegular() || attributes.isSymbolicLink()) {
            destinationFS.delete(destination, false);
            destinationFS.createDirectory(destination);
        } else if (!attributes.isDirectory()) {
            // NOTE(review): this message prints the *source* path while describing the destination -- verify.
            throw new InvalidPathException(getAdaptorName(), "Existing destination is not a file, link or directory: " + source);
        }
    } else {
        destinationFS.createDirectory(destination);
    }

    // We are now sure the target directory exists.
    copyRecursive(source, destinationFS, destination, mode, callback);
}
// Recursively copy the contents of directory "source" (on this file system) into existing directory
// "destination" on destinationFS. Performs two passes over the (recursive) listing: the first pass creates
// the directory structure and sums the number of bytes to copy (reported to the callback via start()), the
// second pass copies the regular files.
private void copyRecursive(Path source, FileSystem destinationFS, Path destination, CopyMode mode, CopyCallback callback) throws XenonException {

    long bytesToCopy = 0;

    // NOTE(review): "listing" is iterated twice (once per pass). This assumes list() returns a re-iterable
    // collection rather than a one-shot iterator -- confirm against list()'s implementation.
    Iterable<PathAttributes> listing = list(source, true);

    // Pass 1: create directories and compute the total byte count.
    for (PathAttributes p : listing) {

        if (callback.isCancelled()) {
            throw new CopyCancelledException(getAdaptorName(), "Copy cancelled by user");
        }

        if (p.isDirectory() && !isDotDot(p.getPath())) {

            Path rel = source.relativize(p.getPath());
            Path dst = destination.resolve(rel);

            if (destinationFS.exists(dst)) {
                if (destinationFS.getAttributes(dst).isDirectory()) {
                    switch (mode) {
                    case CREATE:
                        throw new PathAlreadyExistsException(getAdaptorName(), "Directory already exists: " + dst);
                    case REPLACE:
                        break; // leave directory
                    case IGNORE:
                        // NOTE(review): this "return" aborts the ENTIRE recursive copy, not just this
                        // subdirectory -- possibly a bug; "continue" may have been intended. Verify.
                        return; // ignore subdir
                    }
                } else {
                    // Destination exists but is not a directory: remove it so files can be copied under it.
                    destinationFS.delete(dst, true);
                }
            } else {
                destinationFS.createDirectories(dst);
            }
        } else if (p.isRegular()) {
            bytesToCopy += p.getSize();
        }
    }

    callback.start(bytesToCopy);

    // Pass 2: copy the regular files into the (now existing) directory structure.
    for (PathAttributes p : listing) {

        if (callback.isCancelled()) {
            throw new CopyCancelledException(getAdaptorName(), "Copy cancelled by user");
        }

        if (p.isRegular()) {
            Path rel = source.relativize(p.getPath());
            Path dst = destination.resolve(rel);
            copyFile(p.getPath(), destinationFS, dst, mode, callback);
        }
    }
}
/**
 * Delete a file. Is only called on existing files.
 *
 * This operation must be implemented by the various implementations of FileSystem.
 *
 * @param file
 *            the file to remove
 * @throws InvalidPathException
 *             if the provided path is not a file.
 * @throws NoSuchPathException
 *             if the provided file does not exist.
 * @throws XenonException
 *             If the file could not be removed.
 */
protected abstract void deleteFile(Path file) throws XenonException;

/**
 * Delete an empty directory. Is only called on empty directories.
 *
 * This operation can only delete empty directories (analogous to <code>rmdir</code> in Linux).
 *
 * This operation must be implemented by the various implementations of FileSystem.
 *
 * @param path
 *            the directory to remove
 * @throws InvalidPathException
 *             if the provided path is not a directory.
 * @throws NoSuchPathException
 *             if the provided path does not exist.
 * @throws XenonException
 *             If the directory could not be removed.
 */
protected abstract void deleteDirectory(Path path) throws XenonException;

/**
 * Return the list of entries in a directory.
 *
 * This operation is non-recursive; any subdirectories in <code>dir</code> will be returned as part of the list, but they will not be listed themselves.
 *
 * This operation must be implemented by the various implementations of FileSystem.
 *
 * @param dir
 *            the directory to list
 * @return a {@link Iterable} that iterates over all entries in <code>dir</code>
 * @throws XenonException
 *             If the list could not be retrieved.
 */
protected abstract Iterable<PathAttributes> listDirectory(Path dir) throws XenonException;
/**
 * Returns an (optionally recursive) listing of the entries in a directory <code>dir</code>.
 *
 * This is a generic implementation which relies on <code>listDirectory</code> to provide listings of individual directories. Entries named "." or ".."
 * are excluded from the result.
 *
 * @param dir
 *            the directory to list.
 * @param list
 *            the list to which the directory entries will be added.
 * @param recursive
 *            if the listing should be done recursively.
 * @throws XenonException
 *             If the list could not be retrieved.
 */
protected void list(Path dir, ArrayList<PathAttributes> list, boolean recursive) throws XenonException {

    // NOTE(review): "tmp" is iterated twice below; this assumes listDirectory returns a re-iterable
    // collection rather than a one-shot iterator -- confirm against the adaptor implementations.
    Iterable<PathAttributes> tmp = listDirectory(dir);

    // First add all entries of this directory itself (excluding "." and "..").
    for (PathAttributes p : tmp) {
        if (!isDotDot(p.getPath())) {
            list.add(p);
        }
    }

    if (recursive) {
        for (PathAttributes current : tmp) {
            // traverse subdirs provided they are not "." or "..".
            if (current.isDirectory() && !isDotDot(current.getPath())) {
                list(dir.resolve(current.getPath().getFileNameAsString()), list, true);
            }
        }
    }
}
/**
 * Asynchronously Copy an existing source path to a target path on a different file system.
 *
 * If the source path is a file, it will be copied to the destination file on the target file system.
 *
 * If the source path is a directory, it will only be copied if <code>recursive</code> is set to <code>true</code>. Otherwise, an exception will be thrown.
 * When copying recursively, the directory and its content (both files and subdirectories with content), will be copied to <code>destination</code>.
 *
 * Relative paths are made absolute before copying: <code>source</code> is resolved against the working directory of this file system, and
 * <code>destination</code> against the working directory of <code>destinationFS</code>.
 *
 * Exceptions that occur during copying will not be thrown by this function, but instead are contained in a {@link CopyStatus} object which can be obtained
 * with {@link FileSystem#getStatus(String)}
 *
 * @param source
 *            the source path (on this filesystem) to copy from.
 * @param destinationFS
 *            the destination filesystem to copy to.
 * @param destination
 *            the destination path (on the destination filesystem) to copy to.
 * @param mode
 *            how to react if the destination already exists.
 * @param recursive
 *            if the copy should be recursive.
 *
 * @return a {@link String} that identifies this copy and be used to inspect its progress.
 *
 * @throws IllegalArgumentException
 *             If source, destinationFS, destination or mode is null.
 */
public synchronized String copy(final Path source, final FileSystem destinationFS, final Path destination, final CopyMode mode, final boolean recursive) {

    if (source == null) {
        throw new IllegalArgumentException("Source path is null");
    }

    if (destinationFS == null) {
        throw new IllegalArgumentException("Destination filesystem is null");
    }

    if (destination == null) {
        throw new IllegalArgumentException("Destination path is null");
    }

    if (mode == null) {
        throw new IllegalArgumentException("Copy mode is null!");
    }

    String copyID = getNextCopyID();

    final CopyCallback callback = new CopyCallback();

    Future<Void> future = pool.submit(() -> {
        if (Thread.currentThread().isInterrupted()) {
            throw new CopyCancelledException(getAdaptorName(), "Copy cancelled by user");
        }
        // BUG FIX: the destination path must be resolved against the *destination* file system's
        // working directory. The original resolved it with this.toAbsolutePath, i.e. against the
        // source file system's working directory.
        performCopy(toAbsolutePath(source), destinationFS, destinationFS.toAbsolutePath(destination), mode, recursive, callback);
        return null;
    });

    pendingCopies.put(copyID, new PendingCopy(future, callback));
    return copyID;
}
/**
 * Cancel a copy operation. Afterwards, the copy is forgotten and subsequent queries with this copy string will lead to {@link NoSuchCopyException}
 *
 * @param copyIdentifier
 *            the identifier of the copy operation which to cancel.
 *
 * @return a {@link CopyStatus} containing the status of the copy.
 *
 * @throws NoSuchCopyException
 *             If the copy is not known.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If the copyIdentifier is null.
 */
public synchronized CopyStatus cancel(String copyIdentifier) throws XenonException {

    if (copyIdentifier == null) {
        throw new IllegalArgumentException("Copy identifier may not be null");
    }

    PendingCopy copy = pendingCopies.remove(copyIdentifier);

    if (copy == null) {
        throw new NoSuchCopyException(getAdaptorName(), "Copy not found: " + copyIdentifier);
    }

    // Signal cancellation both via the callback (checked by the copy loop) and the future.
    copy.callback.cancel();
    copy.future.cancel(true);

    XenonException ex = null;
    String state = "DONE";

    try {
        copy.future.get();
    } catch (ExecutionException ee) {
        // Unwrap the cause, consistent with waitUntilDone: the ExecutionException wrapper itself
        // often has no useful message of its own.
        Throwable cause = ee.getCause();
        if (cause instanceof XenonException) {
            ex = (XenonException) cause;
        } else {
            ex = new XenonException(getAdaptorName(), cause.getMessage(), cause);
        }
        state = "FAILED";
    } catch (CancellationException ce) {
        ex = new CopyCancelledException(getAdaptorName(), "Copy cancelled by user");
        state = "FAILED";
    } catch (InterruptedException e) {
        ex = new CopyCancelledException(getAdaptorName(), "Copy interrupted by user");
        state = "FAILED";
        Thread.currentThread().interrupt();
    }

    return new CopyStatusImplementation(copyIdentifier, state, copy.callback.getBytesToCopy(), copy.callback.getBytesCopied(), ex);
}
/**
 * Wait until a copy operation is done or until a timeout expires.
 * <p>
 * This method will wait until a copy operation is done (either gracefully or by producing an error), or until the timeout expires, whichever comes first.
 * If the timeout expires, the copy operation will continue to run.
 * </p>
 * <p>
 * The timeout is in milliseconds and must be &gt;= 0. When timeout is 0, it will be ignored and this method will wait until the copy operation is done.
 * </p>
 * After this operation, the copy is forgotten and subsequent queries with this copy string will lead to {@link NoSuchCopyException}
 * <p>
 * A {@link CopyStatus} is returned that can be used to determine why the call returned.
 * </p>
 *
 * @param copyIdentifier
 *            the identifier of the copy operation to wait for.
 * @param timeout
 *            the maximum time to wait for the copy operation in milliseconds.
 *
 * @return a {@link CopyStatus} containing the status of the copy.
 *
 * @throws NoSuchCopyException
 *             If the copy handle is not known.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If the copyIdentifier is null or if the value of timeout is negative.
 */
public CopyStatus waitUntilDone(String copyIdentifier, long timeout) throws XenonException {

    if (copyIdentifier == null) {
        throw new IllegalArgumentException("Copy identifier may not be null");
    }

    // BUG FIX: enforce the documented contract that timeout must be >= 0.
    if (timeout < 0) {
        throw new IllegalArgumentException("Timeout may not be negative");
    }

    PendingCopy copy = pendingCopies.get(copyIdentifier);

    if (copy == null) {
        throw new NoSuchCopyException(getAdaptorName(), "Copy not found: " + copyIdentifier);
    }

    XenonException ex = null;
    String state = "DONE";

    try {
        if (timeout == 0) {
            // BUG FIX: a timeout of 0 is documented to mean "wait until the copy is done", but
            // Future.get(0, MILLISECONDS) times out immediately. Use the untimed get instead.
            copy.future.get();
        } else {
            copy.future.get(timeout, TimeUnit.MILLISECONDS);
        }
    } catch (TimeoutException e) {
        // Timeout expired before the copy finished; it keeps running in the background.
        state = "RUNNING";
    } catch (ExecutionException ee) {
        Throwable cause = ee.getCause();
        if (cause instanceof XenonException) {
            ex = (XenonException) cause;
        } else {
            ex = new XenonException(getAdaptorName(), cause.getMessage(), cause);
        }
        state = "FAILED";
    } catch (CancellationException ce) {
        ex = new CopyCancelledException(getAdaptorName(), "Copy cancelled by user");
        state = "FAILED";
    } catch (InterruptedException ie) {
        ex = new CopyCancelledException(getAdaptorName(), "Copy interrupted by user");
        state = "FAILED";
        Thread.currentThread().interrupt();
    }

    // Only forget the copy once it has actually finished (done, failed or cancelled).
    if (copy.future.isDone()) {
        pendingCopies.remove(copyIdentifier);
    }

    return new CopyStatusImplementation(copyIdentifier, state, copy.callback.getBytesToCopy(), copy.callback.getBytesCopied(), ex);
}
/**
 * Retrieve the status of an copy. After obtaining the status of a completed copy, the copy is forgotten and subsequent queries with this copy string will
 * lead to {@link NoSuchCopyException}.
 *
 * @param copyIdentifier
 *            the identifier of the copy for which to retrieve the status.
 *
 * @return a {@link CopyStatus} containing the status of the asynchronous copy.
 *
 * @throws NoSuchCopyException
 *             If the copy is not known.
 * @throws NotConnectedException
 *             If file system is closed.
 * @throws XenonException
 *             if an I/O error occurred.
 * @throws IllegalArgumentException
 *             If the copyIdentifier is null.
 */
public CopyStatus getStatus(String copyIdentifier) throws XenonException {

    if (copyIdentifier == null) {
        throw new IllegalArgumentException("Copy identifier may not be null");
    }

    PendingCopy copy = pendingCopies.get(copyIdentifier);

    if (copy == null) {
        throw new NoSuchCopyException(getAdaptorName(), "Copy not found: " + copyIdentifier);
    }

    XenonException ex = null;
    String state = "PENDING";

    if (copy.future.isDone()) {
        pendingCopies.remove(copyIdentifier);

        // We have either finished, crashed, or cancelled
        try {
            copy.future.get();
            state = "DONE";
        } catch (ExecutionException ee) {
            // Unwrap the cause, consistent with waitUntilDone: the ExecutionException wrapper itself
            // often has no useful message of its own.
            Throwable cause = ee.getCause();
            if (cause instanceof XenonException) {
                ex = (XenonException) cause;
            } else {
                ex = new XenonException(getAdaptorName(), cause.getMessage(), cause);
            }
            state = "FAILED";
        } catch (CancellationException ce) {
            ex = new CopyCancelledException(getAdaptorName(), "Copy cancelled by user");
            state = "FAILED";
        } catch (InterruptedException ie) {
            ex = new CopyCancelledException(getAdaptorName(), "Copy interrupted by user");
            state = "FAILED";
            Thread.currentThread().interrupt();
        }
    } else if (copy.callback.isStarted()) {
        state = "RUNNING";
    }

    return new CopyStatusImplementation(copyIdentifier, state, copy.callback.getBytesToCopy(), copy.callback.getBytesCopied(), ex);
}
// Throws IllegalArgumentException if the given path is null.
protected void assertNotNull(Path path) {
    if (path == null) {
        throw new IllegalArgumentException("Path is null");
    }
}

// Throws NoSuchPathException if the given path does not exist.
protected void assertPathExists(Path path) throws XenonException {
    assertNotNull(path);
    if (!exists(path)) {
        throw new NoSuchPathException(getAdaptorName(), "Path does not exist: " + path);
    }
}

// Throws PathAlreadyExistsException if the given path already exists.
protected void assertPathNotExists(Path path) throws XenonException {
    assertNotNull(path);
    if (exists(path)) {
        throw new PathAlreadyExistsException(getAdaptorName(), "Path already exists: " + path);
    }
}

// Throws InvalidPathException if the given path exists and is a directory. A non-existing path passes.
protected void assertPathIsNotDirectory(Path path) throws XenonException {
    assertNotNull(path);
    if (exists(path)) {
        PathAttributes a = getAttributes(path);
        if (a.isDirectory()) {
            throw new InvalidPathException(getAdaptorName(), "Was expecting a regular file, but got a directory: " + path.toString());
        }
    }
}

// Throws InvalidPathException if the given path is not a regular file.
protected void assertPathIsFile(Path path) throws XenonException {
    assertNotNull(path);
    if (!getAttributes(path).isRegular()) {
        throw new InvalidPathException(getAdaptorName(), "Path is not a file: " + path);
    }
}

// Throws InvalidPathException if the given path is not a directory (or produces no attributes).
protected void assertPathIsDirectory(Path path) throws XenonException {
    assertNotNull(path);
    PathAttributes a = getAttributes(path);
    if (a == null) {
        throw new InvalidPathException(getAdaptorName(), "Path failed to produce attributes: " + path);
    }
    if (!a.isDirectory()) {
        throw new InvalidPathException(getAdaptorName(), "Path is not a directory: " + path);
    }
}

// Asserts the given path exists and is a regular file.
protected void assertFileExists(Path file) throws XenonException {
    assertPathExists(file);
    assertPathIsFile(file);
}

// Asserts the given path exists and is a directory.
protected void assertDirectoryExists(Path dir) throws XenonException {
    assertPathExists(dir);
    assertPathIsDirectory(dir);
}

// Asserts the parent of the given path (if it has one) exists and is a directory.
protected void assertParentDirectoryExists(Path path) throws XenonException {
    assertNotNull(path);
    Path parent = path.getParent();
    if (parent != null) {
        assertDirectoryExists(parent);
    }
}

// Asserts the given path exists and is a symbolic link.
protected void assertFileIsSymbolicLink(Path link) throws XenonException {
    assertNotNull(link);
    assertPathExists(link);
    if (!getAttributes(link).isSymbolicLink()) {
        throw new InvalidPathException(getAdaptorName(), "Not a symbolic link: " + link);
    }
}

// Throws NotConnectedException if this file system has been closed.
protected void assertIsOpen() throws XenonException {
    if (!isOpen()) {
        throw new NotConnectedException(getAdaptorName(), "Connection is closed");
    }
}

// Expects two non-null, normalized absolute paths
protected boolean areSamePaths(Path source, Path target) {
    return source.equals(target);
}

// Returns true if the final path component is "." or "..".
protected boolean isDotDot(Path path) {
    assertNotNull(path);
    String filename = path.getFileNameAsString();
    return ".".equals(filename) || "..".equals(filename);
}
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }

    if (o == null || getClass() != o.getClass()) {
        return false;
    }

    // Two FileSystem instances are equal iff they share the same unique identifier.
    return Objects.equals(uniqueID, ((FileSystem) o).uniqueID);
}

@Override
public int hashCode() {
    // Must stay consistent with equals: hash only the unique identifier.
    return Objects.hash(uniqueID);
}
}
| NLeSC/Xenon | src/main/java/nl/esciencecenter/xenon/filesystems/FileSystem.java | Java | apache-2.0 | 63,280 |
# Chef cookbook metadata (launch_association): declares ownership, version and cookbook dependencies.
maintainer 'Wesleyan University'
maintainer_email 'software@wesleyan.edu'
license 'Apache 2.0'
description 'Manages file type associations'
long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
version '0.2.0'
depends 'common'
depends 'homebrew'
| wesleyan/chef-cookbooks | cookbooks/launch_association/metadata.rb | Ruby | apache-2.0 | 311 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using UnityEngine;

namespace Assets.NVR.Interfaces.Elements
{
    /// <summary>
    /// A visual element; extends <see cref="IUnit"/> with color and position control.
    /// </summary>
    public interface IElement : IUnit
    {
        /// <summary>
        /// Sets the element's color. The expected string format (name, hex, ...) is not visible
        /// here -- see implementations.
        /// </summary>
        void SetColor(string color);

        /// <summary>
        /// Sets the element's position along a single axis; which axis and units are
        /// implementation-defined.
        /// </summary>
        void SetPosition(float pos);
    }
}
| tsvetie/nativescript-cli | resources/vr/Assets/NVR/Interfaces/Elements/IElement.cs | C# | apache-2.0 | 278 |
package org.infinispan.protostream;

/**
 * An interface to be implemented by marshaller objects (of type {@link MessageMarshaller}) that are able to handle
 * unknown fields by storing them into an {@link UnknownFieldSet}.
 *
 * @author anistor@redhat.com
 * @since 3.0
 */
public interface UnknownFieldSetHandler<T> {

    /**
     * Extracts the {@link UnknownFieldSet} carried by the given message.
     */
    UnknownFieldSet getUnknownFieldSet(T message);

    /**
     * Attaches the given {@link UnknownFieldSet} to the given message.
     */
    void setUnknownFieldSet(T message, UnknownFieldSet unknownFieldSet);
}
| jmarkos/protostream | core/src/main/java/org/infinispan/protostream/UnknownFieldSetHandler.java | Java | apache-2.0 | 445 |
// Resolve-test fixture: a value type with only a user-defined subtraction operator.
public struct SomeValue
{
    public static SomeValue operator-(SomeValue someValue, SomeValue someValue2)
    {
        return new SomeValue();
    }
}
public class Program
{
    public static void Main()
    {
        // Exercises lifted-operator resolution: compound assignment of a non-nullable
        // operand to a nullable (SomeValue? -= SomeValue).
        SomeValue? nullable = new SomeValue();
        SomeValue someValue = new SomeValue();

        nullable -= someValue;
    }
} | consulo/consulo-csharp | csharp-impl/src/test/resources/resolve/other/Issue424.cs | C# | apache-2.0 | 306 |
package com.fasterxml.jackson.annotation;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Annotation that can be used to define one or more alternative names for
 * a property, accepted during deserialization as alternatives to the official
 * name. Alias information is also exposed during POJO introspection, but has
 * no effect during serialization, where the primary name is always used.
 *<p>
 * Examples:
 *<pre>
 *public class Info {
 *  &#64;JsonAlias({ "n", "Name" })
 *  public String name;
 *}
 *</pre>
 *
 * @since 2.9
 */
@Target({ElementType.ANNOTATION_TYPE, // for combo-annotations
    ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER// for properties (field, setter, ctor param)
})
@Retention(RetentionPolicy.RUNTIME)
@JacksonAnnotation
public @interface JsonAlias
{
    /**
     * One or more secondary names to accept as aliases to the official name.
     *
     * @return Zero or more aliases to associate with property annotated
     */
    public String[] value() default { };
}
| FasterXML/jackson-annotations | src/main/java/com/fasterxml/jackson/annotation/JsonAlias.java | Java | apache-2.0 | 1,130 |
package com.example.linxj.tool;
public interface RequestListener {

    // Base value for the EVENT_* codes below.
    public static final int EVENT_BASE = 0x100;

    /**
     * No network connection is available.
     */
    public static final int EVENT_NOT_NETWORD = EVENT_BASE + 1;

    /**
     * A network error occurred.
     */
    public static final int EVENT_NETWORD_EEEOR = EVENT_BASE + 2;

    /**
     * Fetching data from the network failed.
     */
    public static final int EVENT_GET_DATA_EEEOR = EVENT_BASE + 3;

    /**
     * Fetching data from the network succeeded.
     */
    public static final int EVENT_GET_DATA_SUCCESS = EVENT_BASE + 4;

    /**
     * Socket closed. NOTE(review): the original (Chinese) comment here was a copy-paste of the
     * "data fetched successfully" message; the constant name suggests a socket-close event --
     * verify against callers.
     */
    public static final int EVENT_CLOSE_SOCKET = EVENT_BASE + 5;

    // Invoked with one of the EVENT_* codes above and an event-specific payload.
    public void action(int actionCode, Object object);
}
package net.joelinn.riot.staticdata.dto;
/**
 * DTO holding rune type information from the Riot static data API.
 *
 * Joe Linn
 * 2/1/14
 */
public class RuneType {
    // Field names mirror the JSON keys returned by the API (hence the lowercase "isrune").
    // Whether this entry is a rune -- presumably as opposed to another static-data item; verify against API docs.
    public boolean isrune;
    // Rune tier, as reported by the API.
    public String tier;
    // Rune type name, as reported by the API.
    public String type;
}
| jlinn/riot-api-java | src/main/java/net/joelinn/riot/staticdata/dto/RuneType.java | Java | apache-2.0 | 173 |
package com.example.mohit.tpomnnit.tpo;
import android.app.Dialog;
import android.content.DialogInterface;
import android.content.Intent;
import android.content.SharedPreferences;
import android.database.sqlite.SQLiteDatabase;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.os.Bundle;
import android.preference.PreferenceManager;
import android.support.annotation.NonNull;
import android.support.design.widget.FloatingActionButton;
import android.support.v7.app.AlertDialog;
import android.view.View;
import android.support.design.widget.NavigationView;
import android.support.v4.view.GravityCompat;
import android.support.v4.widget.DrawerLayout;
import android.support.v7.app.ActionBarDrawerToggle;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.Toolbar;
import android.view.Menu;
import android.view.MenuItem;
import android.view.Window;
import android.widget.AdapterView;
import android.widget.ArrayAdapter;
import android.widget.Button;
import android.widget.EditText;
import android.widget.ImageView;
import android.widget.Spinner;
import android.widget.TextView;
import android.widget.Toast;
import com.example.mohit.tpomnnit.Landing;
import com.example.mohit.tpomnnit.R;
import com.example.mohit.tpomnnit.contactUs;
import com.example.mohit.tpomnnit.login_signup.TpoLogin;
import com.example.mohit.tpomnnit.student.StudentProfile;
import com.example.mohit.tpomnnit.student.profile.UserData;
import com.google.android.gms.tasks.OnFailureListener;
import com.google.android.gms.tasks.OnSuccessListener;
import com.google.firebase.database.DataSnapshot;
import com.google.firebase.database.DatabaseError;
import com.google.firebase.database.DatabaseReference;
import com.google.firebase.database.FirebaseDatabase;
import com.google.firebase.database.ValueEventListener;
import com.google.firebase.storage.FileDownloadTask;
import com.google.firebase.storage.FirebaseStorage;
import com.google.firebase.storage.StorageReference;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
public class TpoHome extends AppCompatActivity
implements NavigationView.OnNavigationItemSelectedListener {
    // Profile form fields. NOTE(review): "regno" is never used in the visible code -- confirm before removing.
    private EditText name,regnum,branch,course,regno;
    // Registration number passed in via the launch Intent, and a freshly pushed Firebase key.
    private String registrationnum,userId;
    private DatabaseReference mDatabase;
    // Firebase Storage references for the user's profile image.
    private StorageReference storage,imageref;
    private ImageView imageview,verified;
    // NOTE(review): the fields below (nameuser, spinners, selections) are unused in the visible code.
    String nameuser;
    Spinner spinnerbranch,spinnercourse;
    String branchselected,courseselected;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_tpo_home);
Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar);
setSupportActionBar(toolbar);
FloatingActionButton fab = (FloatingActionButton) findViewById(R.id.fab);
fab.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
Intent i = new Intent(TpoHome.this,AddCompany.class);
startActivity(i);
}
});
DrawerLayout drawer = (DrawerLayout) findViewById(R.id.drawer_layout);
ActionBarDrawerToggle toggle = new ActionBarDrawerToggle(
this, drawer, toolbar, R.string.navigation_drawer_open, R.string.navigation_drawer_close);
drawer.setDrawerListener(toggle);
toggle.syncState();
final NavigationView navigationView = (NavigationView) findViewById(R.id.nav_view);
navigationView.setNavigationItemSelectedListener(this);
mDatabase = FirebaseDatabase.getInstance().getReference("tpouserdata");
userId=mDatabase.push().getKey();
name=(EditText)findViewById(R.id.name);
regnum=(EditText)findViewById(R.id.regnum);
branch=(EditText)findViewById(R.id.branch);
course=(EditText)findViewById(R.id.course);
verified=(ImageView)findViewById(R.id.verified);
imageview = (ImageView)findViewById(R.id.imageView3);
registrationnum = getIntent().getStringExtra("reg");
// Log.e("reg",registrationnum);
storage = FirebaseStorage.getInstance().getReference("userimage/"+registrationnum+".jpg");
imageref = storage;
File localFile = null;
try {
localFile = File.createTempFile("images", "jpg");
} catch (IOException e) {
e.printStackTrace();
}
final File finalLocalFile = localFile;
imageref.getFile(localFile).addOnSuccessListener(new OnSuccessListener<FileDownloadTask.TaskSnapshot>() {
@Override
public void onSuccess(FileDownloadTask.TaskSnapshot taskSnapshot) {
// Local temp file has been created
Toast.makeText(getApplicationContext(),"File Download",Toast.LENGTH_LONG);
Bitmap bitmap = BitmapFactory.decodeFile(finalLocalFile.getAbsolutePath());
imageview.setImageBitmap(bitmap);
}
}).addOnFailureListener(new OnFailureListener() {
@Override
public void onFailure(@NonNull Exception exception) {
// Handle any errors
Toast.makeText(TpoHome.this,"Image not found",Toast.LENGTH_LONG).show();
}
});
ValueEventListener vel = new ValueEventListener() {
@Override
public void onDataChange(DataSnapshot dataSnapshot) {
UserData user= dataSnapshot.getValue(UserData.class);
for(DataSnapshot userDetails : dataSnapshot.getChildren()) {
if(registrationnum.equals(userDetails.child("regno").getValue().toString()))
{
name.setText(userDetails.child("name").getValue().toString());
View h1 = navigationView.getHeaderView(0);
TextView nav_user = h1.findViewById(R.id.name);
TextView nav_email = h1.findViewById(R.id.email);
nav_user.setText( "\t "+userDetails.child("name").getValue().toString());
nav_email.setText("\t "+userDetails.child("email").getValue().toString());
/*View h1 = navigationView.getHeaderView(0);
TextView nav_user = h1.findViewById(R.id.name);
TextView nav_email = h1.findViewById(R.id.email);
//Toast.makeText(StudentProfile.this,""+userDetails.child("name").getValue().toString(),Toast.LENGTH_LONG).show();
nav_user.setText( "\t "+userDetails.child("name").getValue().toString());
nav_email.setText("\t "+userDetails.child("email").getValue().toString());*/
course.setText(userDetails.child("course").getValue().toString());
branch.setText(userDetails.child("branch").getValue().toString());
regnum.setText(registrationnum);
/*int val=Integer.parseInt(userDetails.child("verified").getValue().toString());
if(val==1)
{
//Drawable d=R.drawable.tick;
verified.setImageResource(R.drawable.tick);
}*/
//Access all data
}
// Log.d("valueName:", userDetails.child("name").getValue().toString());
// Log.d("valueEmail:", userDetails.child("email").getValue().toString());
// Log.d("valueuserid:", userDetails.child("studentid").getValue().toString());
// Log.d("password:", userDetails.child("password").getValue().toString());
}
}
@Override
public void onCancelled(DatabaseError databaseError) {
}
};
mDatabase.addListenerForSingleValueEvent(vel);
}
@Override
public void onBackPressed()
{
    // Back from the TPO home screen navigates to the landing page.
    startActivity(new Intent(TpoHome.this, Landing.class));
}
/**
 * Inflates the toolbar menu for this activity.
 *
 * @param menu the options menu to populate
 * @return true so the menu is displayed
 */
@Override
public boolean onCreateOptionsMenu(Menu menu) {
    // Inflate the menu; this adds items to the action bar if it is present.
    getMenuInflater().inflate(R.menu.tpo_home, menu);
    return true;
}
/**
 * Handles toolbar action taps. Home/Up is handled automatically by the
 * framework when a parent activity is declared in AndroidManifest.xml.
 */
@Override
public boolean onOptionsItemSelected(MenuItem item) {
    final int selectedId = item.getItemId();
    //noinspection SimplifiableIfStatement
    if (selectedId == R.id.action_settings) {
        // Settings entry currently performs no action.
    }
    return super.onOptionsItemSelected(item);
}
/**
 * Routes navigation-drawer selections to the matching screen or dialog,
 * then collapses the drawer.
 *
 * @param item the drawer menu item that was tapped
 * @return true so the tapped item is shown as selected
 */
@SuppressWarnings("StatementWithEmptyBody")
@Override
public boolean onNavigationItemSelected(MenuItem item) {
    // Handle navigation view item clicks here.
    int id = item.getItemId();
    if (id == R.id.verifyuser) {
        // Open the user-verification screen; flag 0 presumably selects the
        // default verification mode — confirm against VerifyUser's onCreate.
        Intent i = new Intent(TpoHome.this,VerifyUser.class);
        i.putExtra("flag",0);
        startActivity(i);
        // Handle the camera action
    } else if (id == R.id.manage_student) {
        // Show the student-filter dialog; "find" forwards the chosen
        // course/branch/registration number to ManageStudents.
        final Dialog dialog = new Dialog(TpoHome.this);
        dialog.requestWindowFeature(Window.FEATURE_NO_TITLE);
        dialog.setCancelable(false);
        dialog.setContentView(R.layout.activity_student_filter);
        dialog.setCanceledOnTouchOutside(true);
        // NOTE(review): invoking onBackPressed() on a freshly built dialog
        // looks unintentional — confirm whether this call is needed.
        dialog.onBackPressed();
        spinnerbranch=(Spinner)dialog.findViewById(R.id.spinnerbranch);
        spinnercourse=(Spinner)dialog.findViewById(R.id.spinnercourse);
        regno=(EditText) dialog.findViewById(R.id.regno);
        Button find=(Button)dialog.findViewById(R.id.find);
        // Populate the two filter spinners (also install their listeners).
        branchspinner();
        coursespinner();
        find.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                dialog.dismiss();
                // flag 1 presumably tells ManageStudents to apply the filters
                // below — verify in ManageStudents.
                Intent intent=new Intent(TpoHome.this,ManageStudents.class);
                intent.putExtra("flag",1);
                intent.putExtra("course",courseselected);
                intent.putExtra("branch",branchselected);
                intent.putExtra("regno",regno.getText().toString().trim());
                startActivity(intent);
            }
        });
        dialog.show();
        //Intent i = new Intent(TpoHome.this,StudentFilter.class);
        //startActivity(i);
    } else if (id == R.id.update_company) {
        Intent intent=new Intent(TpoHome.this,UpdateCompany.class);
        startActivity(intent);
    } else if (id == R.id.nav_manage) {
        // No action wired for this entry yet.
    } else if (id == R.id.nav_share) {
        Intent i = new Intent(TpoHome.this,contactUs.class);
        startActivity(i);
    } else if (id == R.id.nav_send) {
        // No action wired for this entry yet.
    } else if (id == R.id.logout){
        // Confirm, then drop the locally persisted login table and return to
        // the TPO login screen.
        new AlertDialog.Builder(this)
                .setIcon(android.R.drawable.ic_dialog_alert)
                .setTitle("Logging Off")
                .setMessage("Are you sure you want to logout?")
                .setPositiveButton("Yes", new DialogInterface.OnClickListener()
                {
                    @Override
                    public void onClick(DialogInterface dialog, int which) {
                        SQLiteDatabase data = openOrCreateDatabase("login", MODE_PRIVATE, null);
                        data.execSQL("drop table if exists tpo");
                        Intent i = new Intent(TpoHome.this, TpoLogin.class);
                        startActivity(i);
                        finish();
                    }
                })
                .setNegativeButton("No", null)
                .show();
    }
    // Always collapse the drawer after handling the tap.
    DrawerLayout drawer = (DrawerLayout) findViewById(R.id.drawer_layout);
    drawer.closeDrawer(GravityCompat.START);
    return true;
}
/**
 * Populates the branch-filter spinner and tracks the current selection in
 * {@code branchselected}; the "ALL" option maps to an empty filter value.
 */
private void branchspinner()
{
    final List<String> branchOptions = new ArrayList<String>();
    for (String option : new String[] {
            "ALL", "CSE", "IT", "ECE", "EE", "ME",
            "PIE", "CHE", "BIO", "CIVIL", "MCA"}) {
        branchOptions.add(option);
    }
    final ArrayAdapter<String> branchAdapter = new ArrayAdapter<String>(
            this, android.R.layout.simple_spinner_item, branchOptions);
    // Drop-down rendered as a plain list.
    branchAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
    spinnerbranch.setAdapter(branchAdapter);
    spinnerbranch.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() {
        @Override
        public void onItemSelected(AdapterView<?> parent, View view,
                                   int position, long id) {
            branchselected = parent.getItemAtPosition(position).toString();
            if ("ALL".equals(branchselected)) {
                branchselected = "";
            }
        }
        @Override
        public void onNothingSelected(AdapterView<?> parent) {
            // Keep the previous selection when nothing is chosen.
        }
    });
}
/**
 * Populates the course-filter spinner and tracks the current selection in
 * {@code courseselected}; the "ALL" option maps to an empty filter value.
 */
private void coursespinner()
{
    final List<String> courseOptions = new ArrayList<String>();
    for (String option : new String[] {"ALL", "BTech", "MTech", "MCA", "PhD", "MBA"}) {
        courseOptions.add(option);
    }
    final ArrayAdapter<String> courseAdapter = new ArrayAdapter<String>(
            this, android.R.layout.simple_spinner_item, courseOptions);
    // Drop-down rendered as a plain list.
    courseAdapter.setDropDownViewResource(android.R.layout.simple_spinner_dropdown_item);
    spinnercourse.setAdapter(courseAdapter);
    spinnercourse.setOnItemSelectedListener(new AdapterView.OnItemSelectedListener() {
        @Override
        public void onItemSelected(AdapterView<?> parent, View view,
                                   int position, long id) {
            courseselected = parent.getItemAtPosition(position).toString();
            if ("ALL".equals(courseselected)) {
                courseselected = "";
            }
        }
        @Override
        public void onNothingSelected(AdapterView<?> parent) {
            // Keep the previous selection when nothing is chosen.
        }
    });
}
}
| mkfeuhrer/TPO-MNNIT | app/src/main/java/com/example/mohit/tpomnnit/tpo/TpoHome.java | Java | apache-2.0 | 15,187 |
namespace Husky
{
    /// <summary>
    /// Two-state toggle used for UI display; each member carries a localized
    /// label and a Bootstrap CSS class via <c>[Label]</c>.
    /// </summary>
    public enum OnOff
    {
        /// <summary>Disabled state (label "关闭", rendered with text-danger).</summary>
        [Label("关闭", CssClass = "text-danger")]
        Off,

        /// <summary>Enabled state (label "开启", rendered with text-success).</summary>
        [Label("开启", CssClass = "text-success")]
        On
    }
}
| cwx521/Husky | src/Husky.Helpers/Enums/OnOff.cs | C# | apache-2.0 | 154 |
package com.packt.sfjd.ch11;
import java.io.Serializable;
import scala.runtime.AbstractFunction2;
public class AbsFunc2 extends AbstractFunction2<Object, String, Object> implements Serializable{
@Override
public Object apply(Object arg0, String arg1) {
return true;
}
} | kumarsumit1/learning | src/main/java/com/packt/sfjd/ch11/AbsFunc2.java | Java | apache-2.0 | 283 |
'use strict';
import express from 'express';
import passport from 'passport';
import config from '../config/environment';
import {User} from '../sqldb';

// Passport Configuration
// Register each authentication strategy against the shared User model/config.
// These requires run for their side effects (strategy registration).
require('./local/passport').setup(User, config);
require('./facebook/passport').setup(User, config);
require('./google/passport').setup(User, config);
require('./twitter/passport').setup(User, config);

var router = express.Router();

// Mount one sub-router per auth provider.
router.use('/local', require('./local'));
router.use('/facebook', require('./facebook'));
router.use('/twitter', require('./twitter'));
router.use('/google', require('./google'));
// NOTE(review): no passport setup is registered for wechat above — confirm the
// wechat sub-router performs its own strategy setup.
router.use('/wechat', require('./wechat'));

export default router;
| jintou/jintou-backend | server/auth/index.js | JavaScript | apache-2.0 | 664 |
// Copyright 2006 Konrad Twardowski
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.makagiga.commons.swing;
import java.awt.BorderLayout;
import java.awt.ComponentOrientation;
import java.awt.Insets;
import java.awt.Window;
import javax.swing.JComponent;
import javax.swing.JWindow;
import org.makagiga.commons.MApplication;
import org.makagiga.commons.UI;
/**
 * An undecorated top-level window ({@link JWindow}) that can be pinned to a
 * screen corner/edge via {@link UI.HorizontalPosition} and
 * {@link UI.VerticalPosition}, and that implements {@link MBorderLayout}
 * convenience add-methods.
 *
 * @since 4.0 (org.makagiga.commons.swing package)
 */
public class MWindow extends JWindow implements MBorderLayout {

    // private

    // Whether screen insets (task bars, docks) are honored when positioning.
    private boolean respectScreenInsets = true;
    // Current pinned positions; may be null until showAtPosition/setters run.
    private UI.HorizontalPosition horizontalPosition;
    private UI.VerticalPosition verticalPosition;

    // public

    /** Creates an ownerless window. */
    public MWindow() {
        this(null);
    }

    /** Creates a window with the given owner (may be null). */
    public MWindow(final Window owner) {
        super(owner);
    }

    public UI.HorizontalPosition getHorizontalPosition() { return horizontalPosition; }

    /** Sets the horizontal pin side and moves the window accordingly. */
    public void setHorizontalPosition(final UI.HorizontalPosition value) {
        horizontalPosition = value;
        setLocation(getNewXPosition(), getLocation().y);
    }

    /**
     * @since 4.8
     */
    public boolean getRespectScreenInsets() { return respectScreenInsets; }

    /**
     * @since 4.8
     */
    public void setRespectScreenInsets(final boolean value) { respectScreenInsets = value; }

    public UI.VerticalPosition getVerticalPosition() { return verticalPosition; }

    /** Sets the vertical pin side and moves the window accordingly. */
    public void setVerticalPosition(final UI.VerticalPosition value) {
        verticalPosition = value;
        setLocation(getLocation().x, getNewYPosition());
    }

    @Override
    public void setVisible(final boolean value) {
        // Apply right-to-left orientation once, just before first show,
        // when the application forces RTL.
        if (value && MApplication.getForceRTL() && !isVisible())
            applyComponentOrientation(ComponentOrientation.RIGHT_TO_LEFT);
        super.setVisible(value);
    }

    /** Packs, moves to the given corner/edge, and shows the window. */
    public void showAtPosition(final UI.VerticalPosition v, final UI.HorizontalPosition h) {
        showAtPosition(v, h, true);
    }

    /**
     * Moves to the given corner/edge and shows the window, optionally packing
     * first.
     *
     * @since 4.8
     */
    public void showAtPosition(final UI.VerticalPosition v, final UI.HorizontalPosition h, final boolean pack) {
        horizontalPosition = h;
        verticalPosition = v;
        if (pack)
            pack();
        setLocation(getNewXPosition(), getNewYPosition());
        setVisible(true);
    }

    // MBorderLayout

    /**
     * @since 1.2
     */
    @Override
    public void addCenter(final JComponent component) {
        UI.addCenter(this, component);
    }

    /**
     * @since 1.2
     */
    @Override
    public void addEast(final JComponent component) {
        add(component, BorderLayout.LINE_END);
    }

    /**
     * @since 1.2
     */
    @Override
    public void addNorth(final JComponent component) {
        add(component, BorderLayout.PAGE_START);
    }

    /**
     * @since 1.2
     */
    @Override
    public void addSouth(final JComponent component) {
        add(component, BorderLayout.PAGE_END);
    }

    /**
     * @since 1.2
     */
    @Override
    public void addWest(final JComponent component) {
        add(component, BorderLayout.LINE_START);
    }

    // private

    // X coordinate for the current horizontal pin side, inset-aware.
    private int getNewXPosition() {
        Insets i = getScreenInsets();

        return
            (horizontalPosition == UI.HorizontalPosition.LEFT)
            ? i.left // left
            : UI.getScreenSize().width - getWidth() - i.right; // right
    }

    // Y coordinate for the current vertical pin side, inset-aware.
    private int getNewYPosition() {
        Insets i = getScreenInsets();

        return
            (verticalPosition == UI.VerticalPosition.BOTTOM)
            ? UI.getScreenSize().height - getHeight() - i.bottom // bottom
            : i.top; // top
    }

    // Zero insets when respectScreenInsets is disabled.
    private Insets getScreenInsets() {
        if (respectScreenInsets)
            return UI.getScreenInsets();

        return new Insets(0, 0, 0, 0);
    }
}
| stuffer2325/Makagiga | src/org/makagiga/commons/swing/MWindow.java | Java | apache-2.0 | 3,865 |
import { TestBed } from '@angular/core/testing';
import { FormsModule } from '@angular/forms';
import { AssistanceReviewComponent } from './review.component';
import { MspDataService } from '../../../../services/msp-data.service';
import { LocalStorageModule } from 'angular-2-local-storage';
import {RouterTestingModule} from '@angular/router/testing';
import {MspLogService} from '../../../../services/log.service';
import { ModalModule } from 'ngx-bootstrap/modal';
import {HttpClientModule} from '@angular/common/http';
import { MspCoreModule } from '../../../msp-core/msp-core.module';
// Smoke-test suite: configures the testing module with every import/provider
// the component's constructor and template require, then verifies creation.
describe('AssistanceReviewComponent', () => {
  beforeEach(() => {
    TestBed.configureTestingModule({
      declarations: [AssistanceReviewComponent],
      imports: [
        FormsModule,
        RouterTestingModule,
        HttpClientModule,
        // Session-scoped storage, namespaced to avoid key collisions.
        LocalStorageModule.withConfig({
          prefix: 'ca.bc.gov.msp',
          storageType: 'sessionStorage'
        }),
        ModalModule.forRoot(),
        MspCoreModule
      ],
      providers: [
        MspDataService,
        MspLogService
      ]
    });
  });

  it ('should work', () => {
    const fixture = TestBed.createComponent(AssistanceReviewComponent);
    expect(fixture.componentInstance instanceof AssistanceReviewComponent).toBe(true, 'should create AssistanceReviewComponent');
  });
});
| bcgov/MyGovBC-MSP | src/app/modules/assistance/pages/review/review.component.spec.ts | TypeScript | apache-2.0 | 1,356 |
export default class Shared {
  /**
   * Convert a "YYYY-MM-DDTHH:MM:SSZ"-style timestamp into a human-friendly
   * relative label ("just now", "5 minutes ago", "Yesterday", "2 weeks ago").
   * Invalid, future, or month-old dates fall back to "<day> <Mon>".
   *
   * @param {string} time timestamp; '-' and 'T'/'Z' are stripped so Date()
   *   parses it as local time (same normalization as before).
   * @returns {string} relative-time label or short date
   */
  getPrettyDate(time) {
    const date = new Date((time || '').replace(/-/g, '/').replace(/[TZ]/g, ' '));
    const diff = (((new Date()).getTime() - date.getTime()) / 1000);
    const day_diff = Math.floor(diff / 86400);

    // Bug fix: the original condition included `day_diff > 0`, which bailed
    // out for ANY past date older than today and made the "Yesterday" /
    // "N days ago" / "N weeks ago" branches below unreachable. Only fall back
    // to a short date for invalid, future, or month-or-older dates.
    if (isNaN(day_diff) || day_diff < 0 || day_diff >= 31) {
      return date.getDate() + ' ' + date.toDateString().split(' ')[1];
    }

    return (day_diff === 0 && ((diff < 60 && 'just now') || (diff < 120 && '1 minute ago') || (diff < 3600 && Math.floor( diff / 60 ) + ' minutes ago') || (diff < 7200 && '1 hour ago') || (diff < 86400 && Math.floor( diff / 3600 ) + ' hours ago')))
        || (day_diff === 1 && 'Yesterday') || (day_diff < 7 && day_diff + ' days ago') || (day_diff < 31 && Math.ceil( day_diff / 7 ) + ' weeks ago');
  }
}
package com.amazonaws.fps.model;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlType;
/**
* <p>Java class for anonymous complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType>
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element name="TransactionId" type="{http://www.w3.org/2001/XMLSchema}string"/>
* <element name="TransactionStatus" type="{http://fps.amazonaws.com/doc/2008-09-17/}TransactionStatus"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
* Generated by AWS Code Generator
* <p/>
* Tue Sep 29 03:25:23 PDT 2009
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "", propOrder = {
"transactionId",
"transactionStatus"
})
@XmlRootElement(name = "WriteOffDebtResult")
public class WriteOffDebtResult {
@XmlElement(name = "TransactionId", required = true)
protected String transactionId;
@XmlElement(name = "TransactionStatus", required = true)
protected TransactionStatus transactionStatus;
/**
* Default constructor
*
*/
public WriteOffDebtResult() {
super();
}
/**
* Value constructor
*
*/
public WriteOffDebtResult(final String transactionId, final TransactionStatus transactionStatus) {
this.transactionId = transactionId;
this.transactionStatus = transactionStatus;
}
/**
* Gets the value of the transactionId property.
*
* @return
* possible object is
* {@link String }
*
*/
public String getTransactionId() {
return transactionId;
}
/**
* Sets the value of the transactionId property.
*
* @param value
* allowed object is
* {@link String }
*
*/
public void setTransactionId(String value) {
this.transactionId = value;
}
public boolean isSetTransactionId() {
return (this.transactionId!= null);
}
/**
* Gets the value of the transactionStatus property.
*
* @return
* possible object is
* {@link TransactionStatus }
*
*/
public TransactionStatus getTransactionStatus() {
return transactionStatus;
}
/**
* Sets the value of the transactionStatus property.
*
* @param value
* allowed object is
* {@link TransactionStatus }
*
*/
public void setTransactionStatus(TransactionStatus value) {
this.transactionStatus = value;
}
public boolean isSetTransactionStatus() {
return (this.transactionStatus!= null);
}
/**
* Sets the value of the TransactionId property.
*
* @param value
* @return
* this instance
*/
public WriteOffDebtResult withTransactionId(String value) {
setTransactionId(value);
return this;
}
/**
* Sets the value of the TransactionStatus property.
*
* @param value
* @return
* this instance
*/
public WriteOffDebtResult withTransactionStatus(TransactionStatus value) {
setTransactionStatus(value);
return this;
}
/**
*
* XML fragment representation of this object
*
* @return XML fragment for this object. Name for outer
* tag expected to be set by calling method. This fragment
* returns inner properties representation only
*/
protected String toXMLFragment() {
StringBuffer xml = new StringBuffer();
if (isSetTransactionId()) {
xml.append("<TransactionId>");
xml.append(escapeXML(getTransactionId()));
xml.append("</TransactionId>");
}
if (isSetTransactionStatus()) {
xml.append("<TransactionStatus>");
xml.append(getTransactionStatus().value());
xml.append("</TransactionStatus>");
}
return xml.toString();
}
/**
*
* Escape XML special characters
*/
private String escapeXML(String string) {
StringBuffer sb = new StringBuffer();
int length = string.length();
for (int i = 0; i < length; ++i) {
char c = string.charAt(i);
switch (c) {
case '&':
sb.append("&");
break;
case '<':
sb.append("<");
break;
case '>':
sb.append(">");
break;
case '\'':
sb.append("'");
break;
case '"':
sb.append(""");
break;
default:
sb.append(c);
}
}
return sb.toString();
}
/**
*
* JSON fragment representation of this object
*
* @return JSON fragment for this object. Name for outer
* object expected to be set by calling method. This fragment
* returns inner properties representation only
*
*/
protected String toJSONFragment() {
StringBuffer json = new StringBuffer();
boolean first = true;
if (isSetTransactionId()) {
if (!first) json.append(", ");
json.append(quoteJSON("TransactionId"));
json.append(" : ");
json.append(quoteJSON(getTransactionId()));
first = false;
}
if (isSetTransactionStatus()) {
if (!first) json.append(", ");
json.append(quoteJSON("TransactionStatus"));
json.append(" : ");
json.append(quoteJSON(getTransactionStatus().value()));
first = false;
}
return json.toString();
}
/**
*
* Quote JSON string
*/
private String quoteJSON(String string) {
StringBuffer sb = new StringBuffer();
sb.append("\"");
int length = string.length();
for (int i = 0; i < length; ++i) {
char c = string.charAt(i);
switch (c) {
case '"':
sb.append("\\\"");
break;
case '\\':
sb.append("\\\\");
break;
case '/':
sb.append("\\/");
break;
case '\b':
sb.append("\\b");
break;
case '\f':
sb.append("\\f");
break;
case '\n':
sb.append("\\n");
break;
case '\r':
sb.append("\\r");
break;
case '\t':
sb.append("\\t");
break;
default:
if (c < ' ') {
sb.append("\\u" + String.format("%03x", Integer.valueOf(c)));
} else {
sb.append(c);
}
}
}
sb.append("\"");
return sb.toString();
}
}
| DomDerrien/amazon-fps-gaej | src/com/amazonaws/fps/model/WriteOffDebtResult.java | Java | apache-2.0 | 7,331 |
/*
Copyright 2010 Zhengmao HU (James)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package net.sf.jabb.util.db.impl;
import java.util.Properties;
import javax.sql.DataSource;
import net.sf.jabb.util.db.ConnectionUtility;
import net.sf.jabb.util.db.DataSourceProvider;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* 一个一个尝试。
* @author Zhengmao HU (James)
*
*/
/**
 * Data source provider that tries a list of configured candidate data
 * sources in order and returns the first one that resolves.
 *
 * @author Zhengmao HU (James)
 */
public class TryDataSourceProvider implements DataSourceProvider {
    private static final Log log = LogFactory.getLog(TryDataSourceProvider.class);

    @Override
    public DataSource createDataSource(String source, Properties configurationProperties, String config) {
        // Extra properties are not supported by this provider; delegate.
        log.warn("Properties argument ignored for: " + source);
        return createDataSource(source, config);
    }

    @Override
    public DataSource createDataSource(String source, String config) {
        // First candidate that resolves to an existing data source wins.
        final String[] candidates = config.split(ConnectionUtility.DELIMITORS);
        for (String candidate : candidates) {
            DataSource resolved = ConnectionUtility.getDataSource(candidate);
            if (resolved == null) {
                continue;
            }
            log.debug("Data source '" + candidate + "' will be used for data source '" + source + "'.");
            return resolved;
        }
        log.error("No usable data source found for '" + source + "'.");
        return null;
    }

    @Override
    public boolean destroyDataSource(DataSource dataSource) {
        // This provider owns nothing, so there is never anything to destroy.
        return false;
    }
}
| james-hu/jabb-core | src/main/java/net/sf/jabb/util/db/impl/TryDataSourceProvider.java | Java | apache-2.0 | 1,868 |
package org.jboss.resteasy.reactive.server.vertx.test.customproviders;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.ExceptionMapper;
import javax.ws.rs.ext.Provider;
@Provider
public class UniExceptionMapper implements ExceptionMapper<UniException> {
@Override
public Response toResponse(UniException exception) {
return Response.accepted(exception.getInput()).build();
}
}
| quarkusio/quarkus | independent-projects/resteasy-reactive/server/vertx/src/test/java/org/jboss/resteasy/reactive/server/vertx/test/customproviders/UniExceptionMapper.java | Java | apache-2.0 | 409 |
package utils
import (
"math/rand"
"time"
)
// letterBytes is the alphabet used by RandomString and LetterByIndex:
// 26 lowercase + 26 uppercase letters + 10 digits = 62 characters.
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

const (
	letterIdxBits = 6                    // 6 bits to represent a letter index
	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
)

// src is the package-wide randomness source, seeded once at init.
// NOTE(review): rand.Source is not safe for concurrent use — confirm that
// RandomString is only called from a single goroutine.
var src = rand.NewSource(time.Now().UnixNano())

// RandomString returns a random string of length n drawn from letterBytes.
// It takes 63 random bits at a time and slices off 6-bit letter indices,
// discarding indices >= len(letterBytes) so the distribution stays uniform.
func RandomString(n int) string {
	b := make([]byte, n)
	// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
		if remain == 0 {
			cache, remain = src.Int63(), letterIdxMax
		}
		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
			b[i] = letterBytes[idx]
			i--
		}
		cache >>= letterIdxBits
		remain--
	}
	return string(b)
}

// LetterByIndex returns the one-character string at idx, modulo the alphabet
// size. Fix: Go's % operator keeps the sign of the dividend, so a negative
// idx previously produced a negative slice index and panicked; the remainder
// is now normalized into [0, len(letterBytes)). Results for non-negative
// inputs are unchanged.
func LetterByIndex(idx int) string {
	idx %= len(letterBytes)
	if idx < 0 {
		idx += len(letterBytes)
	}
	return letterBytes[idx : idx+1]
}
| fiatjaf/summadb | utils/random.go | GO | apache-2.0 | 964 |
# Copyright 2011 OpenStack Foundation
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import time
import eventlet
import greenlet
from oslo.config import cfg
import six
from logcollector.openstack.common import excutils
from logcollector.openstack.common.gettextutils import _
from logcollector.openstack.common import importutils
from logcollector.openstack.common import jsonutils
from logcollector.openstack.common import log as logging
from logcollector.openstack.common.rpc import amqp as rpc_amqp
from logcollector.openstack.common.rpc import common as rpc_common
qpid_codec = importutils.try_import("qpid.codec010")
qpid_messaging = importutils.try_import("qpid.messaging")
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
LOG = logging.getLogger(__name__)
qpid_opts = [
cfg.StrOpt('qpid_hostname',
default='localhost',
help='Qpid broker hostname'),
cfg.IntOpt('qpid_port',
default=5672,
help='Qpid broker port'),
cfg.ListOpt('qpid_hosts',
default=['$qpid_hostname:$qpid_port'],
help='Qpid HA cluster host:port pairs'),
cfg.StrOpt('qpid_username',
default='',
help='Username for qpid connection'),
cfg.StrOpt('qpid_password',
default='',
help='Password for qpid connection',
secret=True),
cfg.StrOpt('qpid_sasl_mechanisms',
default='',
help='Space separated list of SASL mechanisms to use for auth'),
cfg.IntOpt('qpid_heartbeat',
default=60,
help='Seconds between connection keepalive heartbeats'),
cfg.StrOpt('qpid_protocol',
default='tcp',
help="Transport to use, either 'tcp' or 'ssl'"),
cfg.BoolOpt('qpid_tcp_nodelay',
default=True,
help='Disable Nagle algorithm'),
# NOTE(russellb) If any additional versions are added (beyond 1 and 2),
# this file could probably use some additional refactoring so that the
# differences between each version are split into different classes.
cfg.IntOpt('qpid_topology_version',
default=1,
help="The qpid topology version to use. Version 1 is what "
"was originally used by impl_qpid. Version 2 includes "
"some backwards-incompatible changes that allow broker "
"federation to work. Users should update to version 2 "
"when they are able to take everything down, as it "
"requires a clean break."),
]
cfg.CONF.register_opts(qpid_opts)
JSON_CONTENT_TYPE = 'application/json; charset=utf8'
def raise_invalid_topology_version(conf=None):
    """Log and raise for an unsupported qpid_topology_version.

    Bug fix: every call site in this module invokes this helper with no
    arguments (e.g. in ConsumerBase.__init__ and the publisher classes), but
    the original signature required ``conf`` — so an invalid topology version
    produced a ``TypeError`` instead of the intended error. ``conf`` is now
    optional and falls back to the global ``cfg.CONF``; passing it explicitly
    still works.

    :param conf: optional oslo.config object; defaults to ``cfg.CONF``
    :raises Exception: always, describing the invalid topology version
    """
    if conf is None:
        conf = cfg.CONF
    msg = (_("Invalid value for qpid_topology_version: %d") %
           conf.qpid_topology_version)
    LOG.error(msg)
    raise Exception(msg)
class ConsumerBase(object):
    """Consumer base class.

    Builds a Qpid address string for a node/link pair and manages the
    receiver that consumes from it. Subclasses choose the node/link naming
    for direct, topic, and fanout semantics.
    """

    def __init__(self, conf, session, callback, node_name, node_opts,
                 link_name, link_opts):
        """Declare a queue on an amqp session.

        'session' is the amqp session to use
        'callback' is the callback to call when messages are received
        'node_name' is the first part of the Qpid address string, before ';'
        'node_opts' will be applied to the "x-declare" section of "node"
                    in the address string.
        'link_name' goes into the "name" field of the "link" in the address
                    string
        'link_opts' will be applied to the "x-declare" section of "link"
                    in the address string.
        """
        self.callback = callback
        self.receiver = None
        self.session = None

        # The two topology versions build incompatible address strings;
        # version 2 allows broker federation (see qpid_topology_version help).
        if conf.qpid_topology_version == 1:
            addr_opts = {
                "create": "always",
                "node": {
                    "type": "topic",
                    "x-declare": {
                        "durable": True,
                        "auto-delete": True,
                    },
                },
                "link": {
                    "durable": True,
                    "x-declare": {
                        "durable": False,
                        "auto-delete": True,
                        "exclusive": False,
                    },
                },
            }
            addr_opts["node"]["x-declare"].update(node_opts)
        elif conf.qpid_topology_version == 2:
            addr_opts = {
                "link": {
                    "x-declare": {
                        "auto-delete": True,
                        "exclusive": False,
                    },
                },
            }
        else:
            # NOTE(review): invoked without arguments — confirm the helper's
            # signature permits zero-arg calls.
            raise_invalid_topology_version()

        addr_opts["link"]["x-declare"].update(link_opts)
        if link_name:
            addr_opts["link"]["name"] = link_name

        # Full Qpid address: "<node> ; <json options>".
        self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))

        self.connect(session)

    def connect(self, session):
        """Declare the receiver on connect."""

        self._declare_receiver(session)

    def reconnect(self, session):
        """Re-declare the receiver after a qpid reconnect."""
        self._declare_receiver(session)

    def _declare_receiver(self, session):
        # Capacity 1: prefetch a single message at a time.
        self.session = session
        self.receiver = session.receiver(self.address)
        self.receiver.capacity = 1

    def _unpack_json_msg(self, msg):
        """Load the JSON data in msg if msg.content_type indicates that it
           is necessary.  Put the loaded data back into msg.content and
           update msg.content_type appropriately.

        A Qpid Message containing a dict will have a content_type of
        'amqp/map', whereas one containing a string that needs to be converted
        back from JSON will have a content_type of JSON_CONTENT_TYPE.

        :param msg: a Qpid Message object
        :returns: None
        """
        if msg.content_type == JSON_CONTENT_TYPE:
            msg.content = jsonutils.loads(msg.content)
            msg.content_type = 'amqp/map'

    def consume(self):
        """Fetch the message and pass it to the callback object."""
        message = self.receiver.fetch()
        try:
            self._unpack_json_msg(message)
            msg = rpc_common.deserialize_msg(message.content)
            self.callback(msg)
        except Exception:
            # Log and skip malformed/failing messages rather than crash the
            # consumer loop.
            LOG.exception(_("Failed to process message... skipping it."))
        finally:
            # TODO(sandy): Need support for optional ack_on_error.
            # Ack even on failure so the message is not redelivered forever.
            self.session.acknowledge(message)

    def get_receiver(self):
        return self.receiver

    def get_node_name(self):
        # Node part of the address (text before the ';' options blob).
        return self.address.split(';')[0]
class DirectConsumer(ConsumerBase):
    """Queue/consumer class for 'direct'."""

    def __init__(self, conf, session, msg_id, callback):
        """Init a 'direct' queue.

        'session' is the amqp session to use
        'msg_id' is the msg_id to listen on
        'callback' is the callback to call when messages are received
        """
        link_opts = {
            "auto-delete": conf.amqp_auto_delete,
            "exclusive": True,
            "durable": conf.amqp_durable_queues,
        }

        # Node/link naming differs between the two supported topologies.
        topology = conf.qpid_topology_version
        if topology == 1:
            node_name = "%s/%s" % (msg_id, msg_id)
            node_opts = {"type": "direct"}
            link_name = msg_id
        elif topology == 2:
            node_name = "amq.direct/%s" % msg_id
            node_opts = {}
            link_name = None
        else:
            raise_invalid_topology_version()

        super(DirectConsumer, self).__init__(conf, session, callback,
                                             node_name, node_opts, link_name,
                                             link_opts)
class TopicConsumer(ConsumerBase):
    """Consumer class for 'topic'."""

    def __init__(self, conf, session, topic, callback, name=None,
                 exchange_name=None):
        """Init a 'topic' queue.

        :param session: the amqp session to use
        :param topic: is the topic to listen on
        :paramtype topic: str
        :param callback: the callback to call when messages are received
        :param name: optional queue name, defaults to topic
        :param exchange_name: optional exchange name; defaults to the
            configured control exchange
        """
        exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
        link_opts = {
            "auto-delete": conf.amqp_auto_delete,
            "durable": conf.amqp_durable_queues,
        }
        if conf.qpid_topology_version == 1:
            node_name = "%s/%s" % (exchange_name, topic)
        elif conf.qpid_topology_version == 2:
            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
        else:
            # NOTE(review): invoked without arguments — confirm the helper's
            # signature permits zero-arg calls.
            raise_invalid_topology_version()

        super(TopicConsumer, self).__init__(conf, session, callback, node_name,
                                            {}, name or topic, link_opts)
class FanoutConsumer(ConsumerBase):
    """Consumer class for 'fanout'."""

    def __init__(self, conf, session, topic, callback):
        """Init a 'fanout' queue.

        'session' is the amqp session to use
        'topic' is the topic to listen on
        'callback' is the callback to call when messages are received
        """
        self.conf = conf

        link_opts = {"exclusive": True}

        # Fanout node naming differs between the two supported topologies.
        version = conf.qpid_topology_version
        if version == 1:
            node_name = "%s_fanout" % topic
            node_opts = {"durable": False, "type": "fanout"}
        elif version == 2:
            node_name = "amq.topic/fanout/%s" % topic
            node_opts = {}
        else:
            raise_invalid_topology_version()

        super(FanoutConsumer, self).__init__(conf, session, callback,
                                             node_name, node_opts, None,
                                             link_opts)
class Publisher(object):
    """Base Publisher class.

    Owns a Qpid sender for a node address and works around Qpid's inability
    to serialize dicts containing very long strings by falling back to a
    JSON-encoded message body.
    """

    def __init__(self, conf, session, node_name, node_opts=None):
        """Init the Publisher class with the exchange_name, routing_key,
        and other options
        """
        self.sender = None
        self.session = session

        # Topology v1 embeds declare options in the address string; v2 uses
        # the bare node name (pre-existing amq.* exchanges).
        if conf.qpid_topology_version == 1:
            addr_opts = {
                "create": "always",
                "node": {
                    "type": "topic",
                    "x-declare": {
                        "durable": False,
                        # auto-delete isn't implemented for exchanges in qpid,
                        # but put in here anyway
                        "auto-delete": True,
                    },
                },
            }
            if node_opts:
                addr_opts["node"]["x-declare"].update(node_opts)

            self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
        elif conf.qpid_topology_version == 2:
            self.address = node_name
        else:
            # NOTE(review): invoked without arguments — confirm the helper's
            # signature permits zero-arg calls.
            raise_invalid_topology_version()

        self.reconnect(session)

    def reconnect(self, session):
        """Re-establish the Sender after a reconnection."""
        self.sender = session.sender(self.address)

    def _pack_json_msg(self, msg):
        """Qpid cannot serialize dicts containing strings longer than 65535
           characters.  This function dumps the message content to a JSON
           string, which Qpid is able to handle.

        :param msg: May be either a Qpid Message object or a bare dict.
        :returns: A Qpid Message with its content field JSON encoded.
        """
        try:
            msg.content = jsonutils.dumps(msg.content)
        except AttributeError:
            # Need to have a Qpid message so we can set the content_type.
            msg = qpid_messaging.Message(jsonutils.dumps(msg))
        msg.content_type = JSON_CONTENT_TYPE
        return msg

    def send(self, msg):
        """Send a message."""
        try:
            # Check if Qpid can encode the message
            check_msg = msg
            if not hasattr(check_msg, 'content_type'):
                check_msg = qpid_messaging.Message(msg)
            content_type = check_msg.content_type
            enc, dec = qpid_messaging.message.get_codec(content_type)
            enc(check_msg.content)
        except qpid_codec.CodecException:
            # This means the message couldn't be serialized as a dict.
            # Fall back to a JSON-encoded body (see _pack_json_msg).
            msg = self._pack_json_msg(msg)
        self.sender.send(msg)
class DirectPublisher(Publisher):
    """Publisher class for 'direct'."""
    def __init__(self, conf, session, msg_id):
        """Init a 'direct' publisher."""
        # The msg_id doubles as the routing address for direct replies.
        if conf.qpid_topology_version == 1:
            node_name = msg_id
            node_opts = {"type": "direct"}
        elif conf.qpid_topology_version == 2:
            node_name = "amq.direct/%s" % msg_id
            node_opts = {}
        else:
            raise_invalid_topology_version()
        super(DirectPublisher, self).__init__(conf, session, node_name,
                                              node_opts)
class TopicPublisher(Publisher):
    """Publisher class for 'topic'."""
    def __init__(self, conf, session, topic):
        """Init a 'topic' publisher.
        """
        # Topic messages are routed through the configured control exchange.
        exchange_name = rpc_amqp.get_control_exchange(conf)
        if conf.qpid_topology_version == 1:
            node_name = "%s/%s" % (exchange_name, topic)
        elif conf.qpid_topology_version == 2:
            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
        else:
            raise_invalid_topology_version()
        super(TopicPublisher, self).__init__(conf, session, node_name)
class FanoutPublisher(Publisher):
    """Publisher class for 'fanout'."""
    def __init__(self, conf, session, topic):
        """Init a 'fanout' publisher.
        """
        if conf.qpid_topology_version == 1:
            node_name = "%s_fanout" % topic
            node_opts = {"type": "fanout"}
        elif conf.qpid_topology_version == 2:
            node_name = "amq.topic/fanout/%s" % topic
            node_opts = {}
        else:
            raise_invalid_topology_version()
        super(FanoutPublisher, self).__init__(conf, session, node_name,
                                              node_opts)
class NotifyPublisher(Publisher):
    """Publisher class for notifications."""
    def __init__(self, conf, session, topic):
        """Init a 'topic' publisher.
        """
        exchange_name = rpc_amqp.get_control_exchange(conf)
        # Notifications use a durable exchange, unlike plain topic sends.
        node_opts = {"durable": True}
        if conf.qpid_topology_version == 1:
            node_name = "%s/%s" % (exchange_name, topic)
        elif conf.qpid_topology_version == 2:
            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
        else:
            raise_invalid_topology_version()
        super(NotifyPublisher, self).__init__(conf, session, node_name,
                                              node_opts)
class Connection(object):
    """Connection object."""
    pool = None
    def __init__(self, conf, server_params=None):
        # Fail early if the qpid.messaging library could not be imported.
        if not qpid_messaging:
            raise ImportError("Failed to import qpid.messaging")
        self.session = None
        self.consumers = {}
        self.consumer_thread = None
        self.proxy_callbacks = []
        self.conf = conf
        if server_params and 'hostname' in server_params:
            # NOTE(russellb) This enables support for cast_to_server.
            server_params['qpid_hosts'] = [
                '%s:%d' % (server_params['hostname'],
                           server_params.get('port', 5672))
            ]
        # Defaults come from configuration; explicit server_params override.
        params = {
            'qpid_hosts': self.conf.qpid_hosts,
            'username': self.conf.qpid_username,
            'password': self.conf.qpid_password,
        }
        params.update(server_params or {})
        self.brokers = params['qpid_hosts']
        self.username = params['username']
        self.password = params['password']
        # Build the connection object for the first broker, then open it
        # (with retry/backoff) via reconnect().
        self.connection_create(self.brokers[0])
        self.reconnect()
    def connection_create(self, broker):
        # Create the connection - this does not open the connection
        self.connection = qpid_messaging.Connection(broker)
        # Check if flags are set and if so set them for the connection
        # before we call open
        self.connection.username = self.username
        self.connection.password = self.password
        self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
        # Reconnection is done by self.reconnect()
        self.connection.reconnect = False
        self.connection.heartbeat = self.conf.qpid_heartbeat
        self.connection.transport = self.conf.qpid_protocol
        self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
    def _register_consumer(self, consumer):
        # Consumers are keyed by the string form of their qpid receiver so
        # they can be looked up again from session.next_receiver().
        self.consumers[str(consumer.get_receiver())] = consumer
    def _lookup_consumer(self, receiver):
        return self.consumers[str(receiver)]
    def reconnect(self):
        """Handles reconnecting and re-establishing sessions and queues."""
        attempt = 0
        delay = 1
        while True:
            # Close the session if necessary
            if self.connection.opened():
                try:
                    self.connection.close()
                except qpid_exceptions.ConnectionError:
                    pass
            # Round-robin over the configured brokers.
            broker = self.brokers[attempt % len(self.brokers)]
            attempt += 1
            try:
                self.connection_create(broker)
                self.connection.open()
            except qpid_exceptions.ConnectionError as e:
                msg_dict = dict(e=e, delay=delay)
                msg = _("Unable to connect to AMQP server: %(e)s. "
                        "Sleeping %(delay)s seconds") % msg_dict
                LOG.error(msg)
                time.sleep(delay)
                # Exponential backoff, capped at 60 seconds between attempts.
                delay = min(2 * delay, 60)
            else:
                LOG.info(_('Connected to AMQP server on %s'), broker)
                break
        self.session = self.connection.session()
        if self.consumers:
            # Rebuild every consumer on the fresh session and re-key it by
            # its newly created receiver.
            consumers = self.consumers
            self.consumers = {}
            for consumer in six.itervalues(consumers):
                consumer.reconnect(self.session)
                self._register_consumer(consumer)
            LOG.debug(_("Re-established AMQP queues"))
    def ensure(self, error_callback, method, *args, **kwargs):
        # Run method(), transparently reconnecting and retrying on
        # connection failures or receive timeouts.
        while True:
            try:
                return method(*args, **kwargs)
            except (qpid_exceptions.Empty,
                    qpid_exceptions.ConnectionError) as e:
                if error_callback:
                    error_callback(e)
                self.reconnect()
    def close(self):
        """Close/release this connection."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        try:
            self.connection.close()
        except Exception:
            # NOTE(dripton) Logging exceptions that happen during cleanup just
            # causes confusion; there's really nothing useful we can do with
            # them.
            pass
        self.connection = None
    def reset(self):
        """Reset a connection so it can be used again."""
        self.cancel_consumer_thread()
        self.wait_on_proxy_callbacks()
        self.session.close()
        self.session = self.connection.session()
        self.consumers = {}
    def declare_consumer(self, consumer_cls, topic, callback):
        """Create a Consumer using the class that was passed in and
        add it to our list of consumers
        """
        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
                        "%(err_str)s") % log_info)
        def _declare_consumer():
            consumer = consumer_cls(self.conf, self.session, topic, callback)
            self._register_consumer(consumer)
            return consumer
        return self.ensure(_connect_error, _declare_consumer)
    def iterconsume(self, limit=None, timeout=None):
        """Return an iterator that will consume from all queues/consumers."""
        def _error_callback(exc):
            if isinstance(exc, qpid_exceptions.Empty):
                LOG.debug(_('Timed out waiting for RPC response: %s') %
                          str(exc))
                raise rpc_common.Timeout()
            else:
                LOG.exception(_('Failed to consume message from queue: %s') %
                              str(exc))
        def _consume():
            nxt_receiver = self.session.next_receiver(timeout=timeout)
            try:
                self._lookup_consumer(nxt_receiver).consume()
            except Exception:
                LOG.exception(_("Error processing message. Skipping it."))
        for iteration in itertools.count(0):
            if limit and iteration >= limit:
                # NOTE(review): raising StopIteration inside a generator is
                # Python 2 idiom; under PEP 479 (Python 3.7+) this becomes a
                # RuntimeError and should be replaced with a bare "return".
                raise StopIteration
            yield self.ensure(_error_callback, _consume)
    def cancel_consumer_thread(self):
        """Cancel a consumer thread."""
        if self.consumer_thread is not None:
            self.consumer_thread.kill()
            try:
                self.consumer_thread.wait()
            except greenlet.GreenletExit:
                pass
            self.consumer_thread = None
    def wait_on_proxy_callbacks(self):
        """Wait for all proxy callback threads to exit."""
        for proxy_cb in self.proxy_callbacks:
            proxy_cb.wait()
    def publisher_send(self, cls, topic, msg):
        """Send to a publisher based on the publisher class."""
        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': str(exc)}
            LOG.exception(_("Failed to publish message to topic "
                            "'%(topic)s': %(err_str)s") % log_info)
        def _publisher_send():
            # A fresh publisher (and qpid sender) is created per send.
            publisher = cls(self.conf, self.session, topic)
            publisher.send(msg)
        return self.ensure(_connect_error, _publisher_send)
    def declare_direct_consumer(self, topic, callback):
        """Create a 'direct' queue.
        In nova's use, this is generally a msg_id queue used for
        responses for call/multicall
        """
        self.declare_consumer(DirectConsumer, topic, callback)
    def declare_topic_consumer(self, topic, callback=None, queue_name=None,
                               exchange_name=None):
        """Create a 'topic' consumer."""
        self.declare_consumer(functools.partial(TopicConsumer,
                                                name=queue_name,
                                                exchange_name=exchange_name,
                                                ),
                              topic, callback)
    def declare_fanout_consumer(self, topic, callback):
        """Create a 'fanout' consumer."""
        self.declare_consumer(FanoutConsumer, topic, callback)
    def direct_send(self, msg_id, msg):
        """Send a 'direct' message."""
        self.publisher_send(DirectPublisher, msg_id, msg)
    def topic_send(self, topic, msg, timeout=None):
        """Send a 'topic' message."""
        #
        # We want to create a message with attributes, e.g. a TTL. We
        # don't really need to keep 'msg' in its JSON format any longer
        # so let's create an actual qpid message here and get some
        # value-add on the go.
        #
        # WARNING: Request timeout happens to be in the same units as
        # qpid's TTL (seconds). If this changes in the future, then this
        # will need to be altered accordingly.
        #
        qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
        self.publisher_send(TopicPublisher, topic, qpid_message)
    def fanout_send(self, topic, msg):
        """Send a 'fanout' message."""
        self.publisher_send(FanoutPublisher, topic, msg)
    def notify_send(self, topic, msg, **kwargs):
        """Send a notify message on a topic."""
        self.publisher_send(NotifyPublisher, topic, msg)
    def consume(self, limit=None):
        """Consume from all queues/consumers."""
        it = self.iterconsume(limit=limit)
        while True:
            try:
                six.next(it)
            except StopIteration:
                return
    def consume_in_thread(self):
        """Consumer from all queues/consumers in a greenthread."""
        @excutils.forever_retry_uncaught_exceptions
        def _consumer_thread():
            try:
                self.consume()
            except greenlet.GreenletExit:
                return
        # Only one consumer greenthread per connection.
        if self.consumer_thread is None:
            self.consumer_thread = eventlet.spawn(_consumer_thread)
        return self.consumer_thread
    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)
        if fanout:
            consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
        else:
            consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb)
        self._register_consumer(consumer)
        return consumer
    def create_worker(self, topic, proxy, pool_name):
        """Create a worker that calls a method in a proxy object."""
        proxy_cb = rpc_amqp.ProxyCallback(
            self.conf, proxy,
            rpc_amqp.get_connection_pool(self.conf, Connection))
        self.proxy_callbacks.append(proxy_cb)
        consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
                                 name=pool_name)
        self._register_consumer(consumer)
        return consumer
    def join_consumer_pool(self, callback, pool_name, topic,
                           exchange_name=None, ack_on_error=True):
        """Register as a member of a group of consumers for a given topic from
        the specified exchange.
        Exactly one member of a given pool will receive each message.
        A message will be delivered to multiple pools, if more than
        one is created.
        """
        callback_wrapper = rpc_amqp.CallbackWrapper(
            conf=self.conf,
            callback=callback,
            connection_pool=rpc_amqp.get_connection_pool(self.conf,
                                                         Connection),
            wait_for_consumers=not ack_on_error
        )
        self.proxy_callbacks.append(callback_wrapper)
        consumer = TopicConsumer(conf=self.conf,
                                 session=self.session,
                                 topic=topic,
                                 callback=callback_wrapper,
                                 name=pool_name,
                                 exchange_name=exchange_name)
        self._register_consumer(consumer)
        return consumer
def create_connection(conf, new=True):
    """Create a connection, backed by the driver-wide connection pool."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.create_connection(conf, new, pool)
def multicall(conf, context, topic, msg, timeout=None):
    """Make a call that returns multiple times."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.multicall(conf, context, topic, msg, timeout, pool)
def call(conf, context, topic, msg, timeout=None):
    """Send a message on a topic and wait for a single response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.call(conf, context, topic, msg, timeout, pool)
def cast(conf, context, topic, msg):
    """Send a message on a topic without waiting for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.cast(conf, context, topic, msg, pool)
def fanout_cast(conf, context, topic, msg):
    """Send a message on a fanout exchange without waiting for a response."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.fanout_cast(conf, context, topic, msg, pool)
def cast_to_server(conf, context, server_params, topic, msg):
    """Send a message on a topic to a specific server."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.cast_to_server(conf, context, server_params, topic, msg,
                                   pool)
def fanout_cast_to_server(conf, context, server_params, topic, msg):
    """Send a message on a fanout exchange to a specific server."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.fanout_cast_to_server(conf, context, server_params, topic,
                                          msg, pool)
def notify(conf, context, topic, msg, envelope):
    """Send a notification event on a topic."""
    pool = rpc_amqp.get_connection_pool(conf, Connection)
    return rpc_amqp.notify(conf, context, topic, msg, pool, envelope)
def cleanup():
    """Release the resources held by the driver-wide connection pool."""
    pool = Connection.pool
    return rpc_amqp.cleanup(pool)
| redhat-cip/openstack-logcollector | openstack-logcollector/openstack/common/rpc/impl_qpid.py | Python | apache-2.0 | 29,688 |
/*
* TestTracker.cs
* SnowplowTrackerTests
*
* Copyright (c) 2015 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*
* Authors: Joshua Beemster
* Copyright: Copyright (c) 2015 Snowplow Analytics Ltd
* License: Apache License Version 2.0
*/
using System;
using System.Collections.Generic;
using NUnit.Framework;
using SnowplowTrackerTests.TestHelpers;
using SnowplowTracker;
using SnowplowTracker.Emitters;
using SnowplowTracker.Enums;
using SnowplowTracker.Events;
using SnowplowTracker.Payloads;
namespace SnowplowTrackerTests
{
    /// <summary>
    /// Unit tests covering Tracker construction, configuration setters and
    /// event dispatch through the emitter.
    /// </summary>
    [TestFixture()]
    public class TestTracker
    {
        /// <summary>
        /// A tracker built with only the mandatory arguments exposes the
        /// defaults: base64 encoding on, mobile platform, no subject/session.
        /// </summary>
        [Test()]
        public void TestTrackerInitMinimal()
        {
            Tracker t = new Tracker(new AsyncEmitter("acme.com", HttpProtocol.HTTP, HttpMethod.POST, 500, 52000L, 52000L), "aNamespace", "aAppId");
            Assert.NotNull(t);
            Assert.NotNull(t.GetEmitter());
            Assert.Null(t.GetSubject());
            Assert.Null(t.GetSession());
            Assert.AreEqual("aNamespace", t.GetTrackerNamespace());
            Assert.AreEqual("aAppId", t.GetAppId());
            Assert.AreEqual(true, t.GetBase64Encoded());
            Assert.AreEqual(DevicePlatforms.Mobile.Value, t.GetPlatform().Value);
        }
        /// <summary>
        /// Constructing a tracker with a null emitter throws with the
        /// expected message.
        /// </summary>
        [Test()]
        public void TestTrackerInitException()
        {
            Tracker t = null;
            try
            {
                t = new Tracker(null, "aNamespace", "aAppId");
            }
            catch (Exception e)
            {
                Assert.AreEqual("Emitter cannot be null.", e.Message);
            }
            Assert.Null(t);
        }
        /// <summary>
        /// Every setter updates the corresponding getter value.
        /// </summary>
        [Test()]
        public void TestTrackerSetterFunctions()
        {
            Subject s1 = new Subject();
            Session sess1 = new Session(null);
            IEmitter e1 = new AsyncEmitter("acme.com", HttpProtocol.HTTP, HttpMethod.POST, 500, 52000L, 52000L);
            Tracker t = new Tracker(e1, "aNamespace", "aAppId", s1, sess1);
            Assert.NotNull(t.GetEmitter());
            Assert.AreEqual("http://acme.com/com.snowplowanalytics.snowplow/tp2", t.GetEmitter().GetCollectorUri().ToString());
            Assert.NotNull(t.GetSubject());
            Assert.NotNull(t.GetSession());
            Assert.AreEqual("aNamespace", t.GetTrackerNamespace());
            Assert.AreEqual("aAppId", t.GetAppId());
            Assert.AreEqual(true, t.GetBase64Encoded());
            Assert.AreEqual(DevicePlatforms.Mobile.Value, t.GetPlatform().Value);
            IEmitter e2 = new AsyncEmitter("acme.com.au", HttpProtocol.HTTP, HttpMethod.POST, 500, 52000L, 52000L);
            t.SetEmitter(e2);
            Assert.AreEqual("http://acme.com.au/com.snowplowanalytics.snowplow/tp2", t.GetEmitter().GetCollectorUri().ToString());
            t.SetSession(null);
            Assert.Null(t.GetSession());
            t.SetSubject(null);
            Assert.Null(t.GetSubject());
            t.SetTrackerNamespace("newNamespace");
            Assert.AreEqual("newNamespace", t.GetTrackerNamespace());
            t.SetAppId("newAppId");
            Assert.AreEqual("newAppId", t.GetAppId());
            t.SetBase64Encoded(false);
            Assert.AreEqual(false, t.GetBase64Encoded());
            t.SetPlatform(DevicePlatforms.Desktop);
            Assert.AreEqual(DevicePlatforms.Desktop.Value, t.GetPlatform().Value);
        }
        /// <summary>
        /// Tracked page-view events reach the emitter with the tracker's
        /// namespace/app-id and the event's own fields in the payload.
        /// </summary>
        [Test()]
        public void TestTrackerSendEvent()
        {
            IEmitter e1 = new BaseEmitter();
            Tracker t = new Tracker(e1, "aNamespace", "aAppId");
            t.StartEventTracking();
            t.Track(new PageView().SetPageTitle("title").SetPageUrl("url").SetReferrer("ref").SetTimestamp(1234567890).SetEventId("event-id-custom").Build());
            t.Track(new PageView().SetPageTitle("title").SetPageUrl("url").SetReferrer("ref").SetTimestamp(1234567890).SetEventId("event-id-custom").Build());
            BaseEmitter te1 = (BaseEmitter)t.GetEmitter();
            Assert.AreEqual(2, te1.payloads.Count);
            foreach (TrackerPayload payload in te1.payloads)
            {
                Dictionary<string, object> dict = payload.GetDictionary();
                Assert.AreEqual(SnowplowTracker.Version.VERSION, dict[Constants.TRACKER_VERSION]);
                Assert.AreEqual("1234567890", dict[Constants.TIMESTAMP]);
                Assert.AreEqual("event-id-custom", dict[Constants.EID]);
                Assert.AreEqual("aNamespace", dict[Constants.NAMESPACE]);
                Assert.AreEqual("aAppId", dict[Constants.APP_ID]);
                Assert.AreEqual("mob", dict[Constants.PLATFORM]);
                Assert.AreEqual(Constants.EVENT_PAGE_VIEW, dict[Constants.EVENT]);
                Assert.AreEqual("title", dict[Constants.PAGE_TITLE]);
                Assert.AreEqual("url", dict[Constants.PAGE_URL]);
                Assert.AreEqual("ref", dict[Constants.PAGE_REFR]);
            }
        }
    }
}
| snowplow/snowplow-unity-tracker | SnowplowTracker.Tests/Assets/Tests/TestTracker.cs | C# | apache-2.0 | 5,574 |
package com.skyrocketgwt.core.client.layouts.layoutpanel.appearance;
import com.google.gwt.dom.client.Element;
/**
 * Appearance contract for a layout panel's splitter element.
 * Created by v on 2/22/2015.
 */
public interface SkySplitterAppearance {
    /**
     * Render the splitter's DOM element.
     *
     * @param splitterSize the splitter size (presumably in pixels —
     *                     confirm against implementations)
     * @return the rendered element
     */
    Element render(double splitterSize);
}
| SkyRocketGWT/skyrocketgwt | skyrocket-core/src/main/java/com/skyrocketgwt/core/client/layouts/layoutpanel/appearance/SkySplitterAppearance.java | Java | apache-2.0 | 237 |
package com.example.chengqi.mycoderepo.layouts;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import com.example.chengqi.mycoderepo.R;
/**
 * Screen that inflates the RelativeLayout example layout
 * ({@code R.layout.activity_relative_layout}); no further behavior.
 */
public class RelativeLayoutActivity extends AppCompatActivity {
    /** Set the activity's content view on creation. */
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_relative_layout);
    }
}
| firstbytegithub/MyCodeRepo | app/src/main/java/com/example/chengqi/mycoderepo/layouts/RelativeLayoutActivity.java | Java | apache-2.0 | 415 |
##
# Copyright (c) 2009-2014 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
##
"""
Test memcacheprops.
"""
import os
from txweb2.http import HTTPError
from txdav.xml.base import encodeXMLName
from twistedcaldav.memcacheprops import MemcachePropertyCollection
from twistedcaldav.test.util import InMemoryPropertyStore
from twistedcaldav.test.util import TestCase
class StubCollection(object):
    """Minimal stand-in for a collection resource holding named children."""
    def __init__(self, path, childNames):
        """Create one StubResource per name in childNames."""
        self.path = path
        self.fp = StubFP(path)
        self.children = dict(
            (name, StubResource(self, path, name)) for name in childNames)
    def listChildren(self):
        """Return an iterator over this collection's child names."""
        return self.children.iterkeys()
    def getChild(self, childName):
        """Look up a child resource by name (KeyError if absent)."""
        return self.children[childName]
    def propertyCollection(self):
        """Lazily create and cache the shared MemcachePropertyCollection."""
        try:
            return self._propertyCollection
        except AttributeError:
            self._propertyCollection = MemcachePropertyCollection(self)
            return self._propertyCollection
class StubResource(object):
    """Minimal stand-in for a child resource with memcache-backed properties."""
    def __init__(self, parent, path, name):
        self.parent = parent
        self.fp = StubFP(os.path.join(path, name))
    def deadProperties(self):
        """Lazily build and cache the property store for this child."""
        try:
            return self._dead_properties
        except AttributeError:
            collection = self.parent.propertyCollection()
            self._dead_properties = collection.propertyStoreForChild(
                self, InMemoryPropertyStore())
            return self._dead_properties
class StubFP(object):
    """Minimal stand-in for a FilePath: only tracks a path string."""
    def __init__(self, path):
        self.path = path
    def child(self, childName):
        """Return a simple object whose .path joins this path and childName."""
        joined = os.path.join(self.path, childName)
        class _Child(object):
            def __init__(self, path):
                self.path = path
        return _Child(joined)
    def basename(self):
        """Return the final component of the path."""
        return os.path.basename(self.path)
class StubProperty(object):
    """Minimal stand-in for a WebDAV property: namespace, name and value."""
    def __init__(self, ns, name, value=None):
        self.ns = ns
        self.name = name
        self.value = value
    def qname(self):
        """Return the (namespace, name) pair identifying this property."""
        return (self.ns, self.name)
    def __repr__(self):
        encoded = encodeXMLName(self.ns, self.name)
        return "%s = %s" % (encoded, self.value)
class MemcachePropertyCollectionTestCase(TestCase):
    """
    Test MemcacheProprtyCollection
    """
    def getColl(self):
        # A fresh stub collection per call, so each simulated "request"
        # re-reads property state from memcache.
        return StubCollection("calendars", ["a", "b", "c"])
    def test_setget(self):
        child1 = self.getColl().getChild("a")
        child1.deadProperties().set(StubProperty("ns1:", "prop1", value="val1"))
        child2 = self.getColl().getChild("a")
        self.assertEquals(child2.deadProperties().get(("ns1:", "prop1")).value,
                          "val1")
        child2.deadProperties().set(StubProperty("ns1:", "prop1", value="val2"))
        # force memcache to be consulted (once per collection per request)
        child1 = self.getColl().getChild("a")
        self.assertEquals(child1.deadProperties().get(("ns1:", "prop1")).value,
                          "val2")
    def test_merge(self):
        child1 = self.getColl().getChild("a")
        child2 = self.getColl().getChild("a")
        child1.deadProperties().set(StubProperty("ns1:", "prop1", value="val0"))
        child1.deadProperties().set(StubProperty("ns1:", "prop2", value="val0"))
        child1.deadProperties().set(StubProperty("ns1:", "prop3", value="val0"))
        self.assertEquals(child2.deadProperties().get(("ns1:", "prop1")).value,
                          "val0")
        self.assertEquals(child1.deadProperties().get(("ns1:", "prop2")).value,
                          "val0")
        self.assertEquals(child1.deadProperties().get(("ns1:", "prop3")).value,
                          "val0")
        # Interleaved writes from two views of the same child must merge.
        child2.deadProperties().set(StubProperty("ns1:", "prop1", value="val1"))
        child1.deadProperties().set(StubProperty("ns1:", "prop3", value="val3"))
        # force memcache to be consulted (once per collection per request)
        child2 = self.getColl().getChild("a")
        # verify properties
        self.assertEquals(child2.deadProperties().get(("ns1:", "prop1")).value,
                          "val1")
        self.assertEquals(child2.deadProperties().get(("ns1:", "prop2")).value,
                          "val0")
        self.assertEquals(child2.deadProperties().get(("ns1:", "prop3")).value,
                          "val3")
        self.assertEquals(child1.deadProperties().get(("ns1:", "prop1")).value,
                          "val1")
        self.assertEquals(child1.deadProperties().get(("ns1:", "prop2")).value,
                          "val0")
        self.assertEquals(child1.deadProperties().get(("ns1:", "prop3")).value,
                          "val3")
    def test_delete(self):
        child1 = self.getColl().getChild("a")
        child2 = self.getColl().getChild("a")
        child1.deadProperties().set(StubProperty("ns1:", "prop1", value="val0"))
        child1.deadProperties().set(StubProperty("ns1:", "prop2", value="val0"))
        child1.deadProperties().set(StubProperty("ns1:", "prop3", value="val0"))
        self.assertEquals(child2.deadProperties().get(("ns1:", "prop1")).value,
                          "val0")
        self.assertEquals(child1.deadProperties().get(("ns1:", "prop2")).value,
                          "val0")
        self.assertEquals(child1.deadProperties().get(("ns1:", "prop3")).value,
                          "val0")
        child2.deadProperties().set(StubProperty("ns1:", "prop1", value="val1"))
        # Deleting must win over the concurrent set above, and missing
        # properties must raise HTTPError on get().
        child1.deadProperties().delete(("ns1:", "prop1"))
        self.assertRaises(HTTPError, child1.deadProperties().get, ("ns1:", "prop1"))
        self.assertFalse(child1.deadProperties().contains(("ns1:", "prop1")))
        self.assertEquals(child1.deadProperties().get(("ns1:", "prop2")).value,
                          "val0")
        self.assertEquals(child1.deadProperties().get(("ns1:", "prop3")).value,
                          "val0")
        # force memcache to be consulted (once per collection per request)
        child2 = self.getColl().getChild("a")
        # verify properties
        self.assertFalse(child2.deadProperties().contains(("ns1:", "prop1")))
        self.assertEquals(child2.deadProperties().get(("ns1:", "prop2")).value,
                          "val0")
        self.assertEquals(child2.deadProperties().get(("ns1:", "prop3")).value,
                          "val0")
    def test_setget_uids(self):
        # Same as test_setget, but exercising per-user (uid) property views.
        for uid in (None, "123", "456"):
            child1 = self.getColl().getChild("a")
            child1.deadProperties().set(StubProperty("ns1:", "prop1", value="val1%s" % (uid if uid else "",)), uid=uid)
            child2 = self.getColl().getChild("a")
            self.assertEquals(child2.deadProperties().get(("ns1:", "prop1"), uid=uid).value,
                              "val1%s" % (uid if uid else "",))
            child2.deadProperties().set(StubProperty("ns1:", "prop1", value="val2%s" % (uid if uid else "",)), uid=uid)
            # force memcache to be consulted (once per collection per request)
            child1 = self.getColl().getChild("a")
            self.assertEquals(child1.deadProperties().get(("ns1:", "prop1"), uid=uid).value,
                              "val2%s" % (uid if uid else "",))
    def test_merge_uids(self):
        # Same as test_merge, but exercising per-user (uid) property views.
        for uid in (None, "123", "456"):
            child1 = self.getColl().getChild("a")
            child2 = self.getColl().getChild("a")
            child1.deadProperties().set(StubProperty("ns1:", "prop1", value="val0%s" % (uid if uid else "",)), uid=uid)
            child1.deadProperties().set(StubProperty("ns1:", "prop2", value="val0%s" % (uid if uid else "",)), uid=uid)
            child1.deadProperties().set(StubProperty("ns1:", "prop3", value="val0%s" % (uid if uid else "",)), uid=uid)
            self.assertEquals(child2.deadProperties().get(("ns1:", "prop1"), uid=uid).value,
                              "val0%s" % (uid if uid else "",))
            self.assertEquals(child1.deadProperties().get(("ns1:", "prop2"), uid=uid).value,
                              "val0%s" % (uid if uid else "",))
            self.assertEquals(child1.deadProperties().get(("ns1:", "prop3"), uid=uid).value,
                              "val0%s" % (uid if uid else "",))
            child2.deadProperties().set(StubProperty("ns1:", "prop1", value="val1%s" % (uid if uid else "",)), uid=uid)
            child1.deadProperties().set(StubProperty("ns1:", "prop3", value="val3%s" % (uid if uid else "",)), uid=uid)
            # force memcache to be consulted (once per collection per request)
            child2 = self.getColl().getChild("a")
            # verify properties
            self.assertEquals(child2.deadProperties().get(("ns1:", "prop1"), uid=uid).value,
                              "val1%s" % (uid if uid else "",))
            self.assertEquals(child2.deadProperties().get(("ns1:", "prop2"), uid=uid).value,
                              "val0%s" % (uid if uid else "",))
            self.assertEquals(child2.deadProperties().get(("ns1:", "prop3"), uid=uid).value,
                              "val3%s" % (uid if uid else "",))
            self.assertEquals(child1.deadProperties().get(("ns1:", "prop1"), uid=uid).value,
                              "val1%s" % (uid if uid else "",))
            self.assertEquals(child1.deadProperties().get(("ns1:", "prop2"), uid=uid).value,
                              "val0%s" % (uid if uid else "",))
            self.assertEquals(child1.deadProperties().get(("ns1:", "prop3"), uid=uid).value,
                              "val3%s" % (uid if uid else "",))
    def test_delete_uids(self):
        # Same as test_delete, but exercising per-user (uid) property views.
        for uid in (None, "123", "456"):
            child1 = self.getColl().getChild("a")
            child2 = self.getColl().getChild("a")
            child1.deadProperties().set(StubProperty("ns1:", "prop1", value="val0%s" % (uid if uid else "",)), uid=uid)
            child1.deadProperties().set(StubProperty("ns1:", "prop2", value="val0%s" % (uid if uid else "",)), uid=uid)
            child1.deadProperties().set(StubProperty("ns1:", "prop3", value="val0%s" % (uid if uid else "",)), uid=uid)
            self.assertEquals(child2.deadProperties().get(("ns1:", "prop1"), uid=uid).value,
                              "val0%s" % (uid if uid else "",))
            self.assertEquals(child1.deadProperties().get(("ns1:", "prop2"), uid=uid).value,
                              "val0%s" % (uid if uid else "",))
            self.assertEquals(child1.deadProperties().get(("ns1:", "prop3"), uid=uid).value,
                              "val0%s" % (uid if uid else "",))
            child2.deadProperties().set(StubProperty("ns1:", "prop1", value="val1%s" % (uid if uid else "",)), uid=uid)
            child1.deadProperties().delete(("ns1:", "prop1"), uid=uid)
            self.assertRaises(HTTPError, child1.deadProperties().get, ("ns1:", "prop1"), uid=uid)
            self.assertFalse(child1.deadProperties().contains(("ns1:", "prop1"), uid=uid))
            self.assertEquals(child1.deadProperties().get(("ns1:", "prop2"), uid=uid).value,
                              "val0%s" % (uid if uid else "",))
            self.assertEquals(child1.deadProperties().get(("ns1:", "prop3"), uid=uid).value,
                              "val0%s" % (uid if uid else "",))
            # force memcache to be consulted (once per collection per request)
            child2 = self.getColl().getChild("a")
            # verify properties
            self.assertFalse(child2.deadProperties().contains(("ns1:", "prop1"), uid=uid))
            self.assertEquals(child2.deadProperties().get(("ns1:", "prop2"), uid=uid).value,
                              "val0%s" % (uid if uid else "",))
            self.assertEquals(child2.deadProperties().get(("ns1:", "prop3"), uid=uid).value,
                              "val0%s" % (uid if uid else "",))
    def _stub_set_multi(self, values, time=None):
        # Stand-in for memcache set_multi: counts calls and records every
        # key/value chunk it is handed.
        self.callCount += 1
        for key, value in values.iteritems():
            self.results[key] = value
    def test_splitSetMulti(self):
        # 600 keys with the default chunk size must be written in 3 calls.
        self.callCount = 0
        self.results = {}
        mpc = MemcachePropertyCollection(None)
        values = {}
        for i in xrange(600):
            values["key%d" % (i,)] = "value%d" % (i,)
        mpc._split_set_multi(values, self._stub_set_multi)
        self.assertEquals(self.callCount, 3)
        self.assertEquals(self.results, values)
    def test_splitSetMultiWithChunksize(self):
        # 13 keys at chunksize 3 -> ceil(13/3) = 5 calls.
        self.callCount = 0
        self.results = {}
        mpc = MemcachePropertyCollection(None)
        values = {}
        for i in xrange(13):
            values["key%d" % (i,)] = "value%d" % (i,)
        mpc._split_set_multi(values, self._stub_set_multi, chunksize=3)
        self.assertEquals(self.callCount, 5)
        self.assertEquals(self.results, values)
    def _stub_gets_multi(self, keys):
        # Stand-in for memcache gets_multi: counts calls and serves the
        # requested keys from self.expected.
        self.callCount += 1
        result = {}
        for key in keys:
            result[key] = self.expected[key]
        return result
    def test_splitGetsMulti(self):
        # 600 keys with the default chunk size must be fetched in 3 calls.
        self.callCount = 0
        self.expected = {}
        keys = []
        for i in xrange(600):
            keys.append("key%d" % (i,))
            self.expected["key%d" % (i,)] = "value%d" % (i,)
        mpc = MemcachePropertyCollection(None)
        result = mpc._split_gets_multi(keys, self._stub_gets_multi)
        self.assertEquals(self.callCount, 3)
        self.assertEquals(self.expected, result)
    def test_splitGetsMultiWithChunksize(self):
        # 600 keys at chunksize 12 -> 50 calls.
        self.callCount = 0
        self.expected = {}
        keys = []
        for i in xrange(600):
            keys.append("key%d" % (i,))
            self.expected["key%d" % (i,)] = "value%d" % (i,)
        mpc = MemcachePropertyCollection(None)
        result = mpc._split_gets_multi(keys, self._stub_gets_multi, chunksize=12)
        self.assertEquals(self.callCount, 50)
        self.assertEquals(self.expected, result)
| trevor/calendarserver | twistedcaldav/test/test_memcacheprops.py | Python | apache-2.0 | 14,385 |
package ru.stqa.pft.sandbox;
/**
* Created by Yulia on 3/1/2017.
*/
/**
 * Demonstrates the difference between reference equality ({@code ==})
 * and value equality ({@code String.equals}) for strings.
 */
public class Equality {
    public static void main(String[] args) {
        String literal = "firefox";
        String copy = new String(literal);
        // Distinct objects, so reference comparison prints false.
        System.out.println(literal == copy);
        // Same character sequence, so value comparison prints true.
        System.out.println(literal.equals(copy));
    }
}
| yvasilevskaya/java_pft | sandbox/src/main/java/ru/stqa/pft/sandbox/Equality.java | Java | apache-2.0 | 279 |
# -*- coding: utf-8 -*-
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.address import AddressHelper
class Application(object):
    """Test fixture that wraps a Selenium WebDriver session together with
    the page helpers (session/group/address) used by the test suite."""

    def __init__(self, browser, base_url):
        # Map the requested browser name onto a concrete WebDriver.
        if browser == "firefox":
            self.wd = webdriver.Firefox()
        elif browser == 'chrome':
            self.wd = webdriver.Chrome()
        elif browser == "ie":
            self.wd = webdriver.Ie()
        else:
            raise ValueError(f"Unrecognized browser {browser}")
        self.wd.implicitly_wait(3)
        self.session = SessionHelper(self)
        self.group = GroupHelper(self)
        self.address = AddressHelper(self)
        self.base_url = base_url
        self.open_home_page()

    def open_home_page(self):
        """Navigate to the application home page unless it is already open."""
        wd = self.wd
        # Bug fix: find_element_by_name raises NoSuchElementException when the
        # element is absent, which made the original check blow up instead of
        # navigating.  find_elements_by_name returns a (possibly empty) list,
        # so truthiness works as intended here.
        if not (wd.current_url.endswith("/addressbook/")
                and wd.find_elements_by_name("searchstring")):
            wd.get(self.base_url)

    def is_valid(self):
        """Return True while the underlying browser session is still alive."""
        try:
            self.wd.current_url
            return True
        # Catch Exception rather than a bare except so KeyboardInterrupt /
        # SystemExit still propagate.
        except Exception:
            return False

    def destroy(self):
        # Shut the browser down and end the WebDriver session.
        self.wd.quit()
| vpalex999/python_training | fixture/application.py | Python | apache-2.0 | 1,165 |
jQuery(document).ready(function ($) {
    // The up- and down-vote handlers were byte-for-byte duplicates apart from
    // the route segment, so the AJAX call is factored into one helper.
    function sendVote(route, id) {
        $.ajax({
            url: WP_API_Settings.root + 'goodmorning-news/1.0/' + route + '/' + id,
            method: 'GET',
            beforeSend: function (xhr) {
                // The WP REST API requires the nonce header for authenticated calls.
                xhr.setRequestHeader('X-WP-Nonce', WP_API_Settings.nonce);
            }
        }).done(function (response) {
            console.log(response);
        });
    }

    $("a[data-upvote-id]").click(function () {
        sendVote('upvote', $(this).attr('data-upvote-id'));
    });

    $("a[data-downvote-id]").click(function () {
        sendVote('downvote', $(this).attr('data-downvote-id'));
    });
});
<?php include 'db.php';?>
<?php
// Handle a login form submission: look the user up by email and redirect on
// a matching password, otherwise bounce back to the login page.
if (isset($_POST["login-submit"]))
{
    // Security fix: the original concatenated $_POST['email'] straight into
    // the SQL string, which was trivially injectable.  Use a prepared
    // statement with a bound parameter instead.
    $stmt = $mysqli->prepare(
        "SELECT `email`, `passwd` FROM `register` WHERE email = ?"
    );
    $stmt->bind_param("s", $_POST['email']);
    if ($stmt->execute())
    {
        $res = $stmt->get_result();
        if ($res->num_rows > 0) {
            // output data of each row
            while ($row = $res->fetch_assoc()) {
                // NOTE(review): passwords are stored and compared in plain
                // text; migrate to password_hash()/password_verify().
                if ($row['email'] == $_POST['email'] && $row['passwd'] == $_POST['passwd'])
                {
                    echo '<script> window.location.href = "type.html";';
                    echo '</script>';
                }
                else
                {
                    echo "<script> alert('invalid username or password');";
                    echo 'window.location.href = "login.html";';
                    echo '</script>';
                }
            }
        }
    }
    $stmt->close();
}
?>
/**
*
*/
package edu.mycourses.adt.st;
/**
 * Skeleton symbol-table implementation: every operation is still an
 * unimplemented stub.
 *
 * @author Ibrahima Diarra
 * @param <Key>   type of the keys
 * @param <Value> type of the values associated with the keys
 */
public class BasicST<Key, Value> extends AbstractST<Key, Value> {

    // TODO(review): not implemented -- silently discards the pair.
    @Override
    public void put(Key key, Value value) {

    }

    // TODO(review): not implemented -- always reports a miss.
    @Override
    public Value get(Key key) {
        return null;
    }

    // TODO(review): not implemented -- always reports an empty table.
    @Override
    public int size() {
        return 0;
    }

    // TODO(review): not implemented -- returns null instead of an empty iterable.
    @Override
    public Iterable<Key> keys() {
        return null;
    }

}
| githubdiarra/playground-repo | playground/src/main/java/edu/mycourses/adt/st/BasicST.java | Java | apache-2.0 | 407 |
#!/usr/bin/python
from __future__ import absolute_import, print_function
import argparse
import csv
import os
import re
import sys
try:
from plistlib import load as load_plist # Python 3
from plistlib import dump as dump_plist
except ImportError:
from plistlib import readPlist as load_plist # Python 2
from plistlib import writePlist as dump_plist
def getOptionsString(optionList):
    """Render a list of 'Key=Value' strings as a comma-separated fragment.

    Each entry becomes "Key":"Value" and entries are joined with ', ':
    ['Duplex=None', 'Tray=1'] -> '"Duplex":"None", "Tray":"1"'.

    Fixes two defects in the original implementation:
    * the separator was decided by comparing each entry against
      optionList[-1] *by value*, so a duplicated option produced
      malformed output (missing/extra commas);
    * split('=')[1] dropped everything after a second '=' -- partition
      keeps the full value intact.
    """
    pairs = []
    for option in optionList:
        key, _, value = option.partition('=')
        pairs.append('"%s":"%s"' % (key, value))
    return ', '.join(pairs)
# ---- Command-line interface ------------------------------------------------
# Every flag maps onto one column of the optional CSV input file; when --csv
# is given, all other flags are ignored.
parser = argparse.ArgumentParser(description='Generate a Munki nopkg-style pkginfo for printer installation.')
parser.add_argument('--printername', help='Name of printer queue. May not contain spaces, tabs, # or /. Required.')
parser.add_argument('--driver', help='Name of driver file in /Library/Printers/PPDs/Contents/Resources/. Can be relative or full path. Required.')
parser.add_argument('--address', help='IP or DNS address of printer. If no protocol is specified, defaults to lpd://. Required.')
parser.add_argument('--location', help='Location name for printer. Optional. Defaults to printername.')
parser.add_argument('--displayname', help='Display name for printer (and Munki pkginfo). Optional. Defaults to printername.')
parser.add_argument('--desc', help='Description for Munki pkginfo only. Optional.')
parser.add_argument('--requires', help='Required packages in form of space-delimited \'CanonDriver1 CanonDriver2\'. Optional.')
parser.add_argument('--options', nargs='*', dest='options', help='Printer options in form of space-delimited \'Option1=Key Option2=Key Option3=Key\', etc. Optional.')
parser.add_argument('--version', help='Version number of Munki pkginfo. Optional. Defaults to 1.0.', default='1.0')
parser.add_argument('--icon', help='Specifies an existing icon in the Munki repo to display for the printer in Managed Software Center. Optional.')
parser.add_argument('--csv', help='Path to CSV file containing printer info. If CSV is provided, all other options are ignored.')
args = parser.parse_args()

# Load the pkginfo template that ships next to this script.
pwd = os.path.dirname(os.path.realpath(__file__))
f = open(os.path.join(pwd, 'AddPrinter-Template.plist'), 'rb')
templatePlist = load_plist(f)
f.close()

if args.csv:
    # A CSV was found, use that for all data.
    with open(args.csv, mode='r') as infile:
        reader = csv.reader(infile)
        next(reader, None)  # skip the header row
        for row in reader:
            newPlist = dict(templatePlist)
            # each row contains 10 elements:
            # Printer name, location, display name, address, driver, description, options, version, requires, icon
            # options in the form of "Option=Value Option2=Value Option3=Value"
            # requires in the form of "package1 package2" Note: the space seperator
            theOptionString = ''
            if row[6] != "":
                theOptionString = getOptionsString(row[6].split(" "))
            # First, change the plist keys in the pkginfo itself
            newPlist['display_name'] = row[2]
            newPlist['description'] = row[5]
            newPlist['name'] = "AddPrinter_" + str(row[0])  # set to printer name
            # Check for an icon
            if row[9] != "":
                newPlist['icon_name'] = row[9]
            # Check for a version number
            if row[7] != "":
                # Assume the user specified a version number
                version = row[7]
            else:
                # Use the default version of 1.0
                version = "1.0"
            newPlist['version'] = version
            # Check for a protocol listed in the address
            if '://' in row[3]:
                # Assume the user passed in a full address and protocol
                address = row[3]
            else:
                # Assume the user wants to use the default, lpd://
                address = 'lpd://' + row[3]
            # Append the driver path to the driver file specified in the csv
            driver = '/Library/Printers/PPDs/Contents/Resources/%s' % row[4]
            # base_driver is the driver name with .gz / .ppd suffixes stripped,
            # as used inside the installcheck script.
            base_driver = row[4]
            if row[4].endswith('.gz'):
                base_driver = row[4].replace('.gz', '')
            if base_driver.endswith('.ppd'):
                base_driver = base_driver.replace('.ppd', '')
            # Now change the variables in the installcheck_script
            newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("PRINTERNAME", row[0])
            newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("OPTIONS", theOptionString)
            newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("LOCATION", row[1].replace('"', ''))
            newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("DISPLAY_NAME", row[2].replace('"', ''))
            newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("ADDRESS", address)
            newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("DRIVER", base_driver)
            # Now change the variables in the postinstall_script
            newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("PRINTERNAME", row[0])
            newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("LOCATION", row[1].replace('"', ''))
            newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("DISPLAY_NAME", row[2].replace('"', ''))
            newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("ADDRESS", address)
            newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("DRIVER", driver)
            newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("OPTIONS", theOptionString)
            # Now change the one variable in the uninstall_script
            newPlist['uninstall_script'] = newPlist['uninstall_script'].replace("PRINTERNAME", row[0])
            # Add required packages if passed in the csv
            if row[8] != "":
                newPlist['requires'] = row[8].split(' ')
            # Write out the file
            newFileName = "AddPrinter-" + row[0] + "-" + version + ".pkginfo"
            f = open(newFileName, 'wb')
            dump_plist(newPlist, f)
            f.close()
else:
    # No CSV: build a single pkginfo from the individual CLI flags, which
    # must include --printername, --driver and --address.
    if not args.printername:
        print(os.path.basename(sys.argv[0]) + ': error: argument --printername is required', file=sys.stderr)
        parser.print_usage()
        sys.exit(1)
    if not args.driver:
        print(os.path.basename(sys.argv[0]) + ': error: argument --driver is required', file=sys.stderr)
        parser.print_usage()
        sys.exit(1)
    if not args.address:
        print(os.path.basename(sys.argv[0]) + ': error: argument --address is required', file=sys.stderr)
        parser.print_usage()
        sys.exit(1)
    if re.search(r"[\s#/]", args.printername):
        # printernames can't contain spaces, tabs, # or /. See lpadmin manpage for details.
        print("ERROR: Printernames can't contain spaces, tabs, # or /.", file=sys.stderr)
        sys.exit(1)
    if args.desc:
        description = args.desc
    else:
        description = ""
    if args.displayname:
        displayName = args.displayname
    else:
        displayName = str(args.printername)
    if args.location:
        location = args.location
    else:
        location = args.printername
    if args.version:
        version = str(args.version)
    else:
        version = "1.0"
    if args.requires:
        requires = args.requires
    else:
        requires = ""
    if args.icon:
        icon = args.icon
    else:
        icon = ""
    if args.options:
        optionsString = str(args.options[0]).split(' ')
        optionsString = getOptionsString(optionsString)
    else:
        optionsString = ''
    if args.driver.startswith('/Library'):
        # Assume the user passed in a full path rather than a relative filename
        driver = args.driver
    else:
        # Assume only a relative filename
        driver = os.path.join('/Library/Printers/PPDs/Contents/Resources', args.driver)
    if '://' in args.address:
        # Assume the user passed in a full address and protocol
        address = args.address
    else:
        # Assume the user wants to use the default, lpd://
        address = 'lpd://' + args.address
    newPlist = dict(templatePlist)
    # root pkginfo variable replacement
    newPlist['description'] = description
    newPlist['display_name'] = displayName
    newPlist['name'] = "AddPrinter_" + displayName.replace(" ", "")
    newPlist['version'] = version
    newPlist['icon_name'] = icon
    # installcheck_script variable replacement
    newPlist['installcheck_script'] = templatePlist['installcheck_script'].replace("PRINTERNAME", args.printername)
    newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("ADDRESS", address)
    newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("DISPLAY_NAME", displayName)
    newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("LOCATION", location.replace('"', ''))
    newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("DRIVER", os.path.splitext(os.path.basename(driver))[0].replace('"', ''))
    newPlist['installcheck_script'] = newPlist['installcheck_script'].replace("OPTIONS", optionsString)
    # postinstall_script variable replacement
    newPlist['postinstall_script'] = templatePlist['postinstall_script'].replace("PRINTERNAME", args.printername)
    newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("ADDRESS", address)
    newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("DISPLAY_NAME", displayName)
    newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("LOCATION", location.replace('"', ''))
    newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("DRIVER", driver.replace('"', ''))
    newPlist['postinstall_script'] = newPlist['postinstall_script'].replace("OPTIONS", optionsString)
    # uninstall_script variable replacement
    newPlist['uninstall_script'] = templatePlist['uninstall_script'].replace("PRINTERNAME", args.printername)
    # required packages
    if requires != "":
        newPlist['requires'] = [r.replace('\\', '') for r in re.split(r"(?<!\\)\s", requires)]
    newFileName = "AddPrinter-" + str(args.printername) + "-%s.pkginfo" % str(version)
    f = open(newFileName, 'wb')
    dump_plist(newPlist, f)
    f.close()
| nmcspadden/PrinterGenerator | print_generator.py | Python | apache-2.0 | 10,809 |
package jp.co.altxt2db.dto;
/**
 * DTO that holds execution-environment information.
 * (Original comment: 環境情報保持DTO)
 */
public class EnvironmentDto {

    /** Fully-qualified class path of the action class to execute.
     *  (Original comment: 実行用アクションクラスパス) */
    public String actionClass;

    /** Command-line arguments supplied at execution time.
     *  (Original comment: 実行時引数) */
    public String[] args;
}
| hisataka/altxt2db | src/main/java/jp/co/altxt2db/dto/EnvironmentDto.java | Java | apache-2.0 | 236 |
package org.gradle.test.performance.mediummonolithicjavaproject.p438;
import org.junit.Test;
import static org.junit.Assert.*;
public class Test8764 {
Production8764 objectUnderTest = new Production8764();
@Test
public void testProperty0() {
String value = "value";
objectUnderTest.setProperty0(value);
assertEquals(value, objectUnderTest.getProperty0());
}
@Test
public void testProperty1() {
String value = "value";
objectUnderTest.setProperty1(value);
assertEquals(value, objectUnderTest.getProperty1());
}
@Test
public void testProperty2() {
String value = "value";
objectUnderTest.setProperty2(value);
assertEquals(value, objectUnderTest.getProperty2());
}
@Test
public void testProperty3() {
String value = "value";
objectUnderTest.setProperty3(value);
assertEquals(value, objectUnderTest.getProperty3());
}
@Test
public void testProperty4() {
String value = "value";
objectUnderTest.setProperty4(value);
assertEquals(value, objectUnderTest.getProperty4());
}
@Test
public void testProperty5() {
String value = "value";
objectUnderTest.setProperty5(value);
assertEquals(value, objectUnderTest.getProperty5());
}
@Test
public void testProperty6() {
String value = "value";
objectUnderTest.setProperty6(value);
assertEquals(value, objectUnderTest.getProperty6());
}
@Test
public void testProperty7() {
String value = "value";
objectUnderTest.setProperty7(value);
assertEquals(value, objectUnderTest.getProperty7());
}
@Test
public void testProperty8() {
String value = "value";
objectUnderTest.setProperty8(value);
assertEquals(value, objectUnderTest.getProperty8());
}
@Test
public void testProperty9() {
String value = "value";
objectUnderTest.setProperty9(value);
assertEquals(value, objectUnderTest.getProperty9());
}
} | oehme/analysing-gradle-performance | my-app/src/test/java/org/gradle/test/performance/mediummonolithicjavaproject/p438/Test8764.java | Java | apache-2.0 | 2,111 |
#include <iostream>
#include <ctime>
#include <stdexcept>
// Fixed-capacity integer stack that logs its construction/destruction.
class Stos
{
private:
    static const int MAX = 100;  // capacity of the backing array
    int dane[MAX];               // element storage
    int n;                       // number of elements currently stored
    int id;                      // identifier used only for logging

public:
    Stos(int id){
        this->id = id;
        n = 0;
        std::cout << "[" << id << "] Pojawiam sie!" << std::endl;
    }
    ~Stos(){
        std::cout << "[" << id << "] Znikam!" << std::endl;
    }
    // Push an element; bug fix: the original wrote past the array when full.
    void push(int e) {
        if (n >= MAX)
            throw std::out_of_range("Stos::push: stack is full");
        dane[n++] = e;
    }
    // Pop the most recently pushed element; bug fix: the original indexed
    // dane[-1] (undefined behaviour) when called on an empty stack.
    int pop() {
        if (n == 0)
            throw std::out_of_range("Stos::pop: stack is empty");
        return dane[--n];
    }
    int empty() { return n == 0; }
    int size() { return n; }
    int getId() { return id; }
};
// Simple calendar-date value holder (no validation of the fields).
class Data{
private:
    int day;
    int month;
    int year;

public:
    // Default-construct with today's local date.
    Data(){
        time_t now = time(0);
        struct tm *local = localtime(&now);
        day = local->tm_mday;
        month = local->tm_mon + 1;      // tm_mon is 0-based
        year = local->tm_year + 1900;   // tm_year counts from 1900
    }

    // Construct from an explicit day/month/year triple.
    Data(int day, int month, int year){
        this->day = day;
        this->month = month;
        this->year = year;
    }

    // Overwrite all three fields at once.
    void set(int d, int m, int r){
        day = d;
        month = m;
        year = r;
    }

    // Write "d-m-y" to stdout (no trailing newline).
    void print(){
        std::cout << day << "-" << month << "-" << year;
    }
};
void ex7(){
std::cout <<"Startuje program" <<std::endl;
for (int i = 0; i < 12; i++){
Stos* stos = new Stos(i);
std::cout << "Stworzylem stos!" << std::endl;
stos->push(2); stos->push(5); stos->push(3);
while(!stos->empty()){
std::cout << stos->pop() << std::endl;
}
delete stos;
std::cout << "Usunalem obiekt!" << std::endl;
}
}
void ex10(){
for (int i = 1; i <= 3; i++){
Stos* stos = new Stos(i);
delete stos;
}
std::cout <<std::endl;
Stos* stacks[3];
for (int i = 1; i <= 3; i++){
stacks[i-1] = new Stos(i);
}
for (int i = 3; i >= 1; i--){
delete stacks[(i-1)] ;
}
std::cout <<std::endl;
for (int i = 1; i <= 3; i++){
stacks[i-1] = new Stos(i);
}
for (int i = 1; i <= 3; i++){
delete stacks[(i-1)] ;
}
}
void dataEx2(){
int day,month, year;
std::cout <<"Podaj swoja date urodzenia! " << std::endl;
std::cout <<"Podaj dzien! " << std::endl;
std::cin >> day;
std::cout <<"Podaj miesiac! " << std::endl;
std::cin >> month;
std::cout <<"Podaj rok! " << std::endl;
std::cin >> year;
Data* date = new Date(day, month, year);
delete date;
}
// Entry point: default-constructs a Data object holding today's date.
// Bug fix: main() must declare an int return type in standard C++.
int main()
{
    Data date;
    return 0;
}
| grzegorz2047/UAMRepo | POB/po-c1/po-c1.cpp | C++ | apache-2.0 | 2,750 |
// Copyright (C) 2019 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License")
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package resolve
import (
"context"
"testing"
"github.com/google/gapid/core/assert"
"github.com/google/gapid/core/log"
"github.com/google/gapid/core/os/device"
"github.com/google/gapid/core/os/device/bind"
"github.com/google/gapid/gapis/api"
"github.com/google/gapid/gapis/api/test"
"github.com/google/gapid/gapis/capture"
"github.com/google/gapid/gapis/database"
"github.com/google/gapid/gapis/service"
"github.com/google/gapid/gapis/service/path"
)
// createSingleCommandTrace builds an in-memory graphics capture containing
// exactly one CmdTypeMix command and returns its service path.  Failures are
// fatal via log.F.
func createSingleCommandTrace(ctx context.Context) *path.Capture {
	header := &capture.Header{ABI: device.WindowsX86_64}
	builder := test.CommandBuilder{}
	cmds := []api.Cmd{
		builder.CmdTypeMix(0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, true, test.Voidᵖ(0x12345678), 2),
	}
	c, err := capture.NewGraphicsCapture(ctx, "test", header, nil, cmds)
	if err != nil {
		log.F(ctx, true, "Couldn't create capture: %v", err)
	}
	capturePath, err := c.Path(ctx)
	if err != nil {
		log.F(ctx, true, "Couldn't get capture path: %v", err)
	}
	return capturePath
}
// createMultipleCommandTrace builds an in-memory graphics capture containing
// three CmdTypeMix commands and returns its service path.  Failures are
// fatal via log.F.
func createMultipleCommandTrace(ctx context.Context) *path.Capture {
	header := &capture.Header{ABI: device.WindowsX86_64}
	builder := test.CommandBuilder{}
	cmds := []api.Cmd{
		builder.CmdTypeMix(0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, true, test.Voidᵖ(0x12345678), 2),
		builder.CmdTypeMix(1, 15, 25, 35, 45, 55, 65, 75, 85, 95, 105, false, test.Voidᵖ(0x87654321), 3),
		builder.CmdTypeMix(2, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, true, test.Voidᵖ(0xdeadfeed), 3),
	}
	c, err := capture.NewGraphicsCapture(ctx, "test", header, nil, cmds)
	if err != nil {
		log.F(ctx, true, "Couldn't create capture: %v", err)
	}
	capturePath, err := c.Path(ctx)
	if err != nil {
		log.F(ctx, true, "Couldn't get capture path: %v", err)
	}
	return capturePath
}
// TestDeleteSingleCommandTrace deletes the only command of a one-command
// capture and verifies the resulting capture is empty.
func TestDeleteSingleCommandTrace(t *testing.T) {
	ctx := log.Testing(t)
	ctx = bind.PutRegistry(ctx, bind.NewRegistry())
	ctx = database.Put(ctx, database.NewInMemory(ctx))
	p := createSingleCommandTrace(ctx)
	ctx = capture.Put(ctx, p)

	newTracePath, err := Delete(ctx, p.Command(0).Path(), nil)
	assert.For(ctx, "Delete").ThatError(err).DeepEquals(nil)

	newCapture := newTracePath.GetCapture()
	newBoxedCommands, err := Get(ctx, newCapture.Commands().Path(), nil)
	// Bug fix: this error was previously unchecked; a failing Get would have
	// panicked on the type assertion below instead of failing the test.
	assert.For(ctx, "Get commands").ThatError(err).DeepEquals(nil)
	newCommands := newBoxedCommands.(*service.Commands).List
	assert.For(ctx, "Deleted Commands").That(len(newCommands)).DeepEquals(0)
}
// TestDeleteMultipleCommandFirstElement deletes the first command of a
// three-command capture and verifies the remaining commands are the original
// commands 1..n, in order.
func TestDeleteMultipleCommandFirstElement(t *testing.T) {
	ctx := log.Testing(t)
	ctx = bind.PutRegistry(ctx, bind.NewRegistry())
	ctx = database.Put(ctx, database.NewInMemory(ctx))
	p := createMultipleCommandTrace(ctx)
	ctx = capture.Put(ctx, p)

	commandPathsBoxed, err := Get(ctx, p.Commands().Path(), nil)
	// Bug fix: this error was previously discarded with '_'.
	assert.For(ctx, "Get command paths").ThatError(err).DeepEquals(nil)
	commandPaths := commandPathsBoxed.(*service.Commands).List

	var commands []*api.Command
	for i := 0; i < len(commandPaths); i++ {
		command, err := Get(ctx, commandPaths[i].Path(), nil)
		// Bug fix: this error was previously discarded with '_'.
		assert.For(ctx, "Get command").ThatError(err).DeepEquals(nil)
		commands = append(commands, command.(*api.Command))
	}

	newTracePath, err := Delete(ctx, commandPaths[0].Path(), nil)
	assert.For(ctx, "Delete").ThatError(err).DeepEquals(nil)

	newCapture := newTracePath.GetCapture()
	newBoxedCommands, err := Get(ctx, newCapture.Commands().Path(), nil)
	// Bug fix: previously unchecked before the type assertion below.
	assert.For(ctx, "Get new commands").ThatError(err).DeepEquals(nil)
	newCommands := newBoxedCommands.(*service.Commands).List

	assert.For(ctx, "Deleted Commands").That(len(newCommands)).DeepEquals(len(commandPaths) - 1)
	for i, test := range newCommands {
		boxedCommand, err := Get(ctx, test.Path(), nil)
		assert.For(ctx, "Get(%v) error", test).That(err).DeepEquals(nil)
		command := boxedCommand.(*api.Command)
		// Command i of the new capture should be original command i+1.
		assert.For(ctx, "Get(%v) value", test).That(command).DeepEquals(commands[i+1])
	}
}
// TestDeleteMultipleCommandLastElement deletes the last command of a
// three-command capture and verifies the remaining commands are the original
// commands 0..n-1, in order.
func TestDeleteMultipleCommandLastElement(t *testing.T) {
	ctx := log.Testing(t)
	ctx = bind.PutRegistry(ctx, bind.NewRegistry())
	ctx = database.Put(ctx, database.NewInMemory(ctx))
	p := createMultipleCommandTrace(ctx)
	ctx = capture.Put(ctx, p)

	commandPathsBoxed, err := Get(ctx, p.Commands().Path(), nil)
	// Bug fix: this error was previously discarded with '_'.
	assert.For(ctx, "Get command paths").ThatError(err).DeepEquals(nil)
	commandPaths := commandPathsBoxed.(*service.Commands).List

	var commands []*api.Command
	for i := 0; i < len(commandPaths); i++ {
		command, err := Get(ctx, commandPaths[i].Path(), nil)
		// Bug fix: this error was previously discarded with '_'.
		assert.For(ctx, "Get command").ThatError(err).DeepEquals(nil)
		commands = append(commands, command.(*api.Command))
	}

	newTracePath, err := Delete(ctx, commandPaths[len(commandPaths)-1].Path(), nil)
	assert.For(ctx, "Delete").ThatError(err).DeepEquals(nil)

	newCapture := newTracePath.GetCapture()
	newBoxedCommands, err := Get(ctx, newCapture.Commands().Path(), nil)
	// Bug fix: previously unchecked before the type assertion below.
	assert.For(ctx, "Get new commands").ThatError(err).DeepEquals(nil)
	newCommands := newBoxedCommands.(*service.Commands).List

	assert.For(ctx, "Deleted Commands").That(len(newCommands)).DeepEquals(len(commandPaths) - 1)
	for i, test := range newCommands {
		boxedCommand, err := Get(ctx, test.Path(), nil)
		assert.For(ctx, "Get(%v) error", test).That(err).DeepEquals(nil)
		command := boxedCommand.(*api.Command)
		assert.For(ctx, "Get(%v) value", test).That(command).DeepEquals(commands[i])
	}
}
| google/agi | gapis/resolve/delete_test.go | GO | apache-2.0 | 5,365 |
package org.onvif.ver10.schema;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAnyElement;
import javax.xml.bind.annotation.XmlType;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.cxf.xjc.runtime.JAXBToStringStyle;
import org.w3c.dom.Element;
/**
* <p>Java class for IOCapabilitiesExtension2 complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="IOCapabilitiesExtension2">
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <any processContents='lax' namespace='http://www.onvif.org/ver10/schema' maxOccurs="unbounded" minOccurs="0"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "IOCapabilitiesExtension2", propOrder = {
    "any"
})
public class IOCapabilitiesExtension2 {

    // Lax wildcard content from the ONVIF schema namespace; lazily
    // initialised by getAny().  NOTE: this class is JAXB-generated -- manual
    // edits will be lost on regeneration.
    @XmlAnyElement(lax = true)
    protected List<java.lang.Object> any;

    /**
     * Gets the value of the any property.
     * 
     * <p>
     * This accessor method returns a reference to the live list,
     * not a snapshot. Therefore any modification you make to the
     * returned list will be present inside the JAXB object.
     * This is why there is not a <CODE>set</CODE> method for the any property.
     * 
     * <p>
     * For example, to add a new item, do as follows:
     * <pre>
     *    getAny().add(newItem);
     * </pre>
     * 
     * 
     * <p>
     * Objects of the following type(s) are allowed in the list
     * {@link Element }
     * {@link java.lang.Object }
     * 
     * 
     */
    public List<java.lang.Object> getAny() {
        if (any == null) {
            any = new ArrayList<java.lang.Object>();
        }
        return this.any;
    }

    /**
     * Generates a String representation of the contents of this type.
     * This is an extension method, produced by the 'ts' xjc plugin
     * 
     */
    @Override
    public String toString() {
        return ToStringBuilder.reflectionToString(this, JAXBToStringStyle.DEFAULT_STYLE);
    }

}
| fpompermaier/onvif | onvif-ws-client/src/main/java/org/onvif/ver10/schema/IOCapabilitiesExtension2.java | Java | apache-2.0 | 2,379 |
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.inputmethod.compat;
import java.lang.reflect.Method;
import java.util.Arrays;
public class ArraysCompatUtils {

    /** Arrays.binarySearch(int[], int, int, int) -- only present on newer API levels. */
    private static final Method METHOD_Arrays_binarySearch = CompatUtils
            .getMethod(Arrays.class, "binarySearch", int[].class, int.class, int.class, int.class);

    /**
     * Binary-searches {@code array} between {@code startIndex} (inclusive) and
     * {@code endIndex} (exclusive) for {@code value}, delegating to the
     * platform implementation when available and falling back to a local
     * re-implementation otherwise.
     */
    public static int binarySearch(int[] array, int startIndex, int endIndex, int value) {
        if (METHOD_Arrays_binarySearch == null) {
            return compatBinarySearch(array, startIndex, endIndex, value);
        }
        final Object result = CompatUtils.invoke(null, 0, METHOD_Arrays_binarySearch,
                array, startIndex, endIndex, value);
        return (Integer) result;
    }

    /* package */ static int compatBinarySearch(int[] array, int startIndex, int endIndex,
            int value) {
        if (startIndex > endIndex) throw new IllegalArgumentException();
        if (startIndex < 0 || endIndex > array.length) throw new ArrayIndexOutOfBoundsException();
        // Search a copy of the requested slice, then translate the result back
        // into coordinates of the full array: a non-negative hit is offset by
        // startIndex; a miss is encoded as ~insertionPoint, so the offset has
        // to be applied inside the complement.
        final int[] slice = new int[endIndex - startIndex];
        System.arraycopy(array, startIndex, slice, 0, slice.length);
        final int found = Arrays.binarySearch(slice, value);
        return (found >= 0) ? (found + startIndex) : ~(~found + startIndex);
    }
}
| soeminnminn/LatinIME_ICS_ported | src/com/android/inputmethod/compat/ArraysCompatUtils.java | Java | apache-2.0 | 2,027 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/inspector2/model/GetFindingsReportStatusResult.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <aws/core/AmazonWebServiceResult.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/UnreferencedParam.h>
#include <utility>
using namespace Aws::Inspector2::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
using namespace Aws;
// Default constructor: enum-valued members start as NOT_SET until a service
// response is assigned.  (AWS-SDK generated code.)
GetFindingsReportStatusResult::GetFindingsReportStatusResult() : 
    m_errorCode(ReportingErrorCode::NOT_SET),
    m_status(ExternalReportStatus::NOT_SET)
{
}
// Deserialising constructor: initialises defaults, then parses the JSON
// payload by delegating to operator=.  (AWS-SDK generated code.)
GetFindingsReportStatusResult::GetFindingsReportStatusResult(const Aws::AmazonWebServiceResult<JsonValue>& result) : 
    m_errorCode(ReportingErrorCode::NOT_SET),
    m_status(ExternalReportStatus::NOT_SET)
{
  *this = result;
}
// Populates this result from a service JSON payload.  Every field is optional
// in the response, so only members present in the payload are overwritten.
// (AWS-SDK generated code.)
GetFindingsReportStatusResult& GetFindingsReportStatusResult::operator =(const Aws::AmazonWebServiceResult<JsonValue>& result)
{
  JsonView jsonValue = result.GetPayload().View();
  if(jsonValue.ValueExists("destination"))
  {
    m_destination = jsonValue.GetObject("destination");
  }

  if(jsonValue.ValueExists("errorCode"))
  {
    m_errorCode = ReportingErrorCodeMapper::GetReportingErrorCodeForName(jsonValue.GetString("errorCode"));
  }

  if(jsonValue.ValueExists("errorMessage"))
  {
    m_errorMessage = jsonValue.GetString("errorMessage");
  }

  if(jsonValue.ValueExists("filterCriteria"))
  {
    m_filterCriteria = jsonValue.GetObject("filterCriteria");
  }

  if(jsonValue.ValueExists("reportId"))
  {
    m_reportId = jsonValue.GetString("reportId");
  }

  if(jsonValue.ValueExists("status"))
  {
    m_status = ExternalReportStatusMapper::GetExternalReportStatusForName(jsonValue.GetString("status"));
  }

  return *this;
}
| aws/aws-sdk-cpp | aws-cpp-sdk-inspector2/source/model/GetFindingsReportStatusResult.cpp | C++ | apache-2.0 | 1,854 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import cloudfiles as swift_client
from django import http
from django import test as django_test
from django.conf import settings
from django.contrib.messages.storage import default_storage
from django.core.handlers import wsgi
from django.test.client import RequestFactory
from functools import wraps
from glanceclient.v1 import client as glance_client
from keystoneclient.v2_0 import client as keystone_client
from novaclient.v1_1 import client as nova_client
import quantumclient as quantum_client
import httplib2
import mox
from horizon import api
from horizon import context_processors
from horizon import middleware
from horizon import users
from horizon.tests.test_data.utils import load_test_data
from .time import time
from .time import today
from .time import utcnow
# Makes output of failing mox tests much easier to read.
wsgi.WSGIRequest.__repr__ = lambda self: "<class 'django.http.HttpRequest'>"
def create_stubs(stubs_to_create=None):
    """Decorator factory that stubs out methods via the test's mox instance.

    ``stubs_to_create`` maps a target object to a list/tuple of attribute
    names to stub.  Bug fix: the default is now ``None`` instead of a
    mutable ``{}`` shared across every call site.  The py2-only
    ``raise TypeError, (...)`` syntax is replaced with the call form,
    which is valid in both Python 2 and 3.

    Raises:
        TypeError: immediately if the argument is not a dict, or when the
            decorated test runs if any value is not a list/tuple.
    """
    if stubs_to_create is None:
        stubs_to_create = {}
    if not isinstance(stubs_to_create, dict):
        raise TypeError("create_stub must be passed a dict, but a %s was "
                        "given." % type(stubs_to_create).__name__)

    def inner_stub_out(fn):
        @wraps(fn)
        def instance_stub_out(self):
            for key in stubs_to_create:
                if not isinstance(stubs_to_create[key], (tuple, list)):
                    raise TypeError("The values of the create_stub "
                                    "dict must be lists or tuples, but is a %s."
                                    % type(stubs_to_create[key]).__name__)
                for value in stubs_to_create[key]:
                    self.mox.StubOutWithMock(key, value)
            return fn(self)
        return instance_stub_out
    return inner_stub_out
class RequestFactoryWithMessages(RequestFactory):
    """RequestFactory whose requests support the contrib.messages API."""

    def _attach_messages(self, request):
        # Give the request a fake session plus a message storage so views
        # that call messages.add_message() work against factory requests.
        request.session = []
        request._messages = default_storage(request)
        return request

    def get(self, *args, **kwargs):
        return self._attach_messages(
            super(RequestFactoryWithMessages, self).get(*args, **kwargs))

    def post(self, *args, **kwargs):
        return self._attach_messages(
            super(RequestFactoryWithMessages, self).post(*args, **kwargs))
class TestCase(django_test.TestCase):
    """
    Specialized base test case class for Horizon which gives access to
    numerous additional features:

    * A full suite of test data through various attached objects and
      managers (e.g. ``self.servers``, ``self.user``, etc.). See the
      docs for :class:`~horizon.tests.test_data.utils.TestData` for more
      information.
    * The ``mox`` mocking framework via ``self.mox``.
    * A set of request context data via ``self.context``.
    * A ``RequestFactory`` class which supports Django's ``contrib.messages``
      framework via ``self.factory``.
    * A ready-to-go request object via ``self.request``.
    * The ability to override specific time data controls for easier testing.
    * Several handy additional assertion methods.
    """
    def setUp(self):
        load_test_data(self)
        self.mox = mox.Mox()
        self.factory = RequestFactoryWithMessages()
        self.context = {'authorized_tenants': self.tenants.list()}

        def fake_conn_request(*args, **kwargs):
            raise Exception("An external URI request tried to escape through "
                            "an httplib2 client. Args: %s, kwargs: %s"
                            % (args, kwargs))

        # Patch httplib2 so any test that accidentally issues a real HTTP
        # request fails loudly instead of hitting the network. The original
        # is saved so tearDown can restore it.
        self._real_conn_request = httplib2.Http._conn_request
        httplib2.Http._conn_request = fake_conn_request

        # Replace the horizon context processor with one returning our
        # canned context; restored in tearDown.
        self._real_horizon_context_processor = context_processors.horizon
        context_processors.horizon = lambda request: self.context

        # setActiveUser (below) monkeypatches users.get_user_from_request;
        # keep the original around for tearDown.
        self._real_get_user_from_request = users.get_user_from_request
        tenants = self.context['authorized_tenants']
        self.setActiveUser(id=self.user.id,
                           token=self.token.id,
                           username=self.user.name,
                           tenant_id=self.tenant.id,
                           service_catalog=self.service_catalog,
                           authorized_tenants=tenants)
        # Build a real request object run through HorizonMiddleware so
        # tests have a fully-populated self.request.
        self.request = http.HttpRequest()
        self.request.session = self.client._session()
        self.request.session['token'] = self.token.id
        middleware.HorizonMiddleware().process_request(self.request)

    def tearDown(self):
        # Undo every monkeypatch applied in setUp, then verify mocks.
        self.mox.UnsetStubs()
        httplib2.Http._conn_request = self._real_conn_request
        context_processors.horizon = self._real_horizon_context_processor
        users.get_user_from_request = self._real_get_user_from_request
        self.mox.VerifyAll()

    def setActiveUser(self, id=None, token=None, username=None, tenant_id=None,
                      service_catalog=None, tenant_name=None, roles=None,
                      authorized_tenants=None):
        # Replace users.get_user_from_request so that any code asking for
        # the current user receives one constructed from these kwargs.
        users.get_user_from_request = lambda x: \
            users.User(id=id,
                       token=token,
                       user=username,
                       tenant_id=tenant_id,
                       service_catalog=service_catalog,
                       roles=roles,
                       authorized_tenants=authorized_tenants,
                       request=self.request)

    def override_times(self):
        """ Overrides the "current" time with immutable values. """
        now = datetime.datetime.utcnow()
        time.override_time = \
            datetime.time(now.hour, now.minute, now.second)
        today.override_time = datetime.date(now.year, now.month, now.day)
        utcnow.override_time = now
        return now

    def reset_times(self):
        """ Undoes the changes made by ``override_times``. """
        time.override_time = None
        today.override_time = None
        utcnow.override_time = None

    def assertRedirectsNoFollow(self, response, expected_url):
        """
        Asserts that the given response issued a 302 redirect without
        processing the view which is redirected to.
        """
        # Any 3xx status counts as "a redirect happened"...
        assert (response.status_code / 100 == 3), \
            "The response did not return a redirect."
        self.assertEqual(response._headers.get('location', None),
                         ('Location', settings.TESTSERVER + expected_url))
        # ...but the exact code must be 302.
        self.assertEqual(response.status_code, 302)

    def assertNoMessages(self, response=None):
        """
        Asserts that no messages have been attached by the ``contrib.messages``
        framework.
        """
        self.assertMessageCount(response, success=0, warn=0, info=0, error=0)

    def assertMessageCount(self, response=None, **kwargs):
        """
        Asserts that the specified number of messages have been attached
        for various message types. Usage would look like
        ``self.assertMessageCount(success=1)``.
        """
        temp_req = self.client.request(**{'wsgi.input': None})
        temp_req.COOKIES = self.client.cookies
        storage = default_storage(temp_req)
        messages = []

        if response is None:
            # To gain early access to the messages we have to decode the
            # cookie on the test client.
            if 'messages' in self.client.cookies:
                message_cookie = self.client.cookies['messages'].value
                messages = storage._decode(message_cookie)
        # Check for messages in the context
        elif hasattr(response, "context") and "messages" in response.context:
            messages = response.context["messages"]
        # Check for messages attached to the request on a TemplateResponse
        elif hasattr(response, "_request") and hasattr(response._request,
                                                       "_messages"):
            messages = response._request._messages._queued_messages

        # If we don't have messages and we don't expect messages, we're done.
        if not any(kwargs.values()) and not messages:
            return

        # If we expected messages and have none, that's a problem.
        if any(kwargs.values()) and not messages:
            error_msg = "Messages were expected, but none were set."
            assert 0 == sum(kwargs.values()), error_msg

        # Otherwise, make sure we got the expected messages.
        for msg_type, count in kwargs.items():
            # A message matches when its tag string contains the type name.
            msgs = [m.message for m in messages if msg_type in m.tags]
            assert len(msgs) == count, \
                "%s messages not as expected: %s" % (msg_type.title(),
                                                     ", ".join(msgs))

    def assertNoFormErrors(self, response, context_name="form"):
        """
        Asserts that the response either does not contain a form in it's
        context, or that if it does, that form has no errors.
        """
        context = getattr(response, "context", {})
        if not context or context_name not in context:
            return True
        errors = response.context[context_name]._errors
        assert len(errors) == 0, \
            "Unexpected errors were found on the form: %s" % errors

    def assertFormErrors(self, response, count=0, message=None,
                         context_name="form"):
        """
        Asserts that the response does contain a form in it's
        context, and that form has errors, if count were given,
        it must match the exact numbers of errors
        """
        context = getattr(response, "context", {})
        assert (context and context_name in context), \
            "The response did not contain a form."
        errors = response.context[context_name]._errors
        if count:
            assert len(errors) == count, \
                "%d errors were found on the form, %d expected" % \
                (len(errors), count)
            # When a specific message is given, it must appear somewhere in
            # the rendered error text.
            if message and message not in unicode(errors):
                self.fail("Expected message not found, instead found: %s"
                          % ["%s: %s" % (key, [e for e in field_errors]) for
                             (key, field_errors) in errors.items()])
        else:
            assert len(errors) > 0, "No errors were found on the form"
class BaseAdminViewTests(TestCase):
    """
    A ``TestCase`` subclass which sets an active user with the "admin" role
    for testing admin-only views and functionality.
    """
    def setActiveUser(self, *args, **kwargs):
        # Inject the admin role by default, but let callers override it
        # by passing their own ``roles`` kwarg.
        if "roles" not in kwargs:
            kwargs['roles'] = [self.roles.admin._info]
        super(BaseAdminViewTests, self).setActiveUser(*args, **kwargs)
class APITestCase(TestCase):
    """
    The ``APITestCase`` class is for use with tests which deal with the
    underlying clients rather than stubbing out the horizon.api.* methods.

    ``setUp`` swaps the glance/keystone/nova/quantum client factories for
    stub-returning callables; ``tearDown`` restores all of the originals.
    """
    def setUp(self):
        super(APITestCase, self).setUp()

        def fake_keystoneclient(request, username=None, password=None,
                                tenant_id=None, token_id=None, endpoint=None,
                                admin=False):
            """
            Wrapper function which returns the stub keystoneclient. Only
            necessary because the function takes too many arguments to
            conveniently be a lambda.
            """
            return self.stub_keystoneclient()

        # Store the original clients so tearDown can restore them.
        self._original_glanceclient = api.glance.glanceclient
        self._original_keystoneclient = api.keystone.keystoneclient
        self._original_novaclient = api.nova.novaclient
        self._original_quantumclient = api.quantum.quantumclient

        # Replace the clients with our stubs.
        api.glance.glanceclient = lambda request: self.stub_glanceclient()
        api.keystone.keystoneclient = fake_keystoneclient
        api.nova.novaclient = lambda request: self.stub_novaclient()
        api.quantum.quantumclient = lambda request: self.stub_quantumclient()

    def tearDown(self):
        super(APITestCase, self).tearDown()
        api.glance.glanceclient = self._original_glanceclient
        api.nova.novaclient = self._original_novaclient
        api.keystone.keystoneclient = self._original_keystoneclient
        # BUG FIX: the quantum client was stubbed in setUp but never
        # restored here, leaking the stub into every subsequent test case.
        api.quantum.quantumclient = self._original_quantumclient

    def stub_novaclient(self):
        # Lazily create (and memoize) a mocked nova client.
        if not hasattr(self, "novaclient"):
            self.mox.StubOutWithMock(nova_client, 'Client')
            self.novaclient = self.mox.CreateMock(nova_client.Client)
        return self.novaclient

    def stub_keystoneclient(self):
        # Lazily create (and memoize) a mocked keystone client.
        if not hasattr(self, "keystoneclient"):
            self.mox.StubOutWithMock(keystone_client, 'Client')
            self.keystoneclient = self.mox.CreateMock(keystone_client.Client)
        return self.keystoneclient

    def stub_glanceclient(self):
        # Lazily create (and memoize) a mocked glance client.
        if not hasattr(self, "glanceclient"):
            self.mox.StubOutWithMock(glance_client, 'Client')
            self.glanceclient = self.mox.CreateMock(glance_client.Client)
        return self.glanceclient

    def stub_swiftclient(self, expected_calls=1):
        # Lazily create a mocked swift Connection and pre-record
        # ``expected_calls`` constructor invocations on it.
        if not hasattr(self, "swiftclient"):
            self.mox.StubOutWithMock(swift_client, 'Connection')
            self.swiftclient = self.mox.CreateMock(swift_client.Connection)
            while expected_calls:
                swift_client.Connection(auth=mox.IgnoreArg())\
                            .AndReturn(self.swiftclient)
                expected_calls -= 1
        return self.swiftclient

    def stub_quantumclient(self):
        # Lazily create (and memoize) a mocked quantum client.
        if not hasattr(self, "quantumclient"):
            self.mox.StubOutWithMock(quantum_client, 'Client')
            self.quantumclient = self.mox.CreateMock(quantum_client.Client)
        return self.quantumclient
| asomya/test | horizon/test.py | Python | apache-2.0 | 14,512 |
package com.joyue.tech.gankio.mvp.history;
import com.joyue.tech.core.mvp.listener.OnLoadDataListListener;
import com.joyue.tech.gankio.api.GankApi;
import rx.Observer;
/**
 * MVP model for the "history" screen: delegates to {@link GankApi} and
 * forwards the async result (or failure) to the supplied listener.
 */
public class HistoryModel implements HistoryContract.Model {

    @Override
    public void history(OnLoadDataListListener listener) {
        GankApi.getInstance().history(new Observer<String[]>() {
            @Override
            public void onCompleted() {
                // Nothing to do; results are delivered via onNext.
            }

            @Override
            public void onError(Throwable e) {
                // Mark the page as failed to load.
                listener.onFailure(e);
            }

            @Override
            public void onNext(String[] data) {
                listener.onSuccess(data);
            }
        });
    }
}
} | skyofthinking/AndRapid | gankio/src/main/java/com/joyue/tech/gankio/mvp/history/HistoryModel.java | Java | apache-2.0 | 764 |
module Amigrind
  # An on-disk Amigrind repository: a directory containing an
  # `.amigrind_root` marker file plus `environments/` and `blueprints/`
  # subdirectories.
  class Repo
    include Amigrind::Core::Logging::Mixin

    # @return [String] absolute path of the repository root
    attr_reader :path

    # @param path [String] repository root; must be an existing directory
    #   containing a `.amigrind_root` marker file.
    def initialize(path)
      @path = File.expand_path path

      raise "'path' (#{path}) is not a directory." unless Dir.exist?(path)
      raise "'path' is not an Amigrind root (lacks .amigrind_root file)." \
        unless File.exist?(File.join(path, '.amigrind_root'))

      info_log "using Amigrind path: #{path}"
    end

    def environments_path
      File.join(path, 'environments')
    end

    def blueprints_path
      File.join(path, 'blueprints')
    end

    # TODO: Ruby DSL environments
    # Lists environment names (lowercased, without extension), sorted.
    # Names that exist in both YAML and Ruby form are skipped with a warning.
    def environment_names
      yaml_environments =
        Dir[File.join(environments_path, '*.yaml')] \
          .map { |f| File.basename(f, '.yaml').to_s.strip.downcase }
      rb_environments =
        [].map { |f| File.basename(f, '.rb').to_s.strip.downcase }

      duplicate_environments = yaml_environments & rb_environments
      duplicate_environments.each do |dup_env_name|
        warn_log "environment '#{dup_env_name}' found in both YAML and Ruby; skipping."
      end

      (yaml_environments + rb_environments - duplicate_environments).sort
    end

    # TODO: cache environments (but make configurable)
    # Loads, deep-freezes, and returns the environment named +name+.
    def environment(name)
      yaml_path = yaml_path_if_exists(name)
      rb_path = rb_path_if_exists(name)

      raise "found multiple env files for same env #{name}." if !yaml_path.nil? && !rb_path.nil?
      raise "TODO: implement Ruby environments." unless rb_path.nil?

      env = Environments::Environment.load_yaml_file(yaml_path) unless yaml_path.nil?

      raise "no env found for '#{name}'." if env.nil?

      IceNine.deep_freeze(env)
      env
    end

    def with_environment(environment_name, &block)
      block.call(environment(environment_name))
    end

    def blueprint_names
      Dir[File.join(blueprints_path, "*.rb")].map { |f| File.basename(f, ".rb") }
    end

    # TODO: cache blueprint/environment tuples (but make configurable)
    # Evaluates the named blueprint file against +env+ and returns the result.
    def evaluate_blueprint(blueprint_name, env)
      raise "'env' must be a String or an Environment." \
        unless env.is_a?(String) || env.is_a?(Environments::Environment)

      if env.is_a?(String)
        env = environment(env)
      end

      ev = Amigrind::Blueprints::Evaluator.new(File.join(blueprints_path,
                                                         "#{blueprint_name}.rb"),
                                               env)
      ev.blueprint
    end

    # TODO: refactor these client-y things.
    # Tags image +id+ of blueprint +blueprint_name+ as a member of +channel+.
    def add_to_channel(env, blueprint_name, id, channel)
      env = validate_channel_args!(env, blueprint_name, channel, id)

      credentials = Amigrind::Config.aws_credentials(env)
      amigrind_client = Amigrind::Core::Client.new(env.aws.region, credentials)
      ec2 = Aws::EC2::Client.new(region: env.aws.region, credentials: credentials)

      image = amigrind_client.get_image_by_id(name: blueprint_name, id: id)
      tag_key = Amigrind::Core::AMIGRIND_CHANNEL_TAG % { channel_name: channel }

      info_log "setting '#{tag_key}' on image #{image.id}..."
      ec2.create_tags(
        resources: [ image.id ],
        tags: [
          {
            key: tag_key,
            value: '1'
          }
        ]
      )
    end

    # Removes the channel tag for +channel+ from image +id+ of +blueprint_name+.
    def remove_from_channel(env, blueprint_name, id, channel)
      env = validate_channel_args!(env, blueprint_name, channel, id)

      credentials = Amigrind::Config.aws_credentials(env)
      amigrind_client = Amigrind::Core::Client.new(env.aws.region, credentials)
      ec2 = Aws::EC2::Client.new(region: env.aws.region, credentials: credentials)

      image = amigrind_client.get_image_by_id(name: blueprint_name, id: id)
      tag_key = Amigrind::Core::AMIGRIND_CHANNEL_TAG % { channel_name: channel }

      info_log "clearing '#{tag_key}' on image #{image.id}..."
      ec2.delete_tags(
        resources: [ image.id ],
        tags: [
          {
            key: tag_key,
            value: nil
          }
        ]
      )
    end

    # Returns the image currently on +channel+, optionally +steps_back+
    # releases before the newest.
    def get_image_by_channel(env, blueprint_name, channel, steps_back = 0)
      env = validate_channel_args!(env, blueprint_name, channel)

      credentials = Amigrind::Config.aws_credentials(env)
      amigrind_client = Amigrind::Core::Client.new(env.aws.region, credentials)

      amigrind_client.get_image_by_channel(name: blueprint_name, channel: channel, steps_back: steps_back)
    end

    class << self
      def init(path:)
        raise "TODO: implement"
      end

      # Yields a Repo rooted at +path+ (or $AMIGRIND_PATH, or the CWD),
      # chdir'd into the repo for the duration of the block.
      def with_repo(path: nil, &block)
        path = path || ENV['AMIGRIND_PATH'] || Dir.pwd
        repo = Repo.new(path)
        Dir.chdir path do
          block.call(repo)
        end
      end
    end

    private

    # Shared argument validation for the channel-manipulation methods above
    # (previously copy-pasted three times). Performs the type checks in the
    # original order, coerces a String env name into an Environment, and
    # verifies the channel exists (':latest' is always accepted).
    # Pass +id+ as :skip (the default) to omit the id type check.
    #
    # @return [Environments::Environment] the coerced environment
    def validate_channel_args!(env, blueprint_name, channel, id = :skip)
      raise "'env' must be a String or an Environment." \
        unless env.is_a?(String) || env.is_a?(Environments::Environment)
      raise "'blueprint_name' must be a String." unless blueprint_name.is_a?(String)
      raise "'id' must be a Fixnum." unless id == :skip || id.is_a?(Fixnum)
      raise "'channel' must be a String or Symbol." \
        unless channel.is_a?(String) || channel.is_a?(Symbol)

      env = environment(env) if env.is_a?(String)

      raise "channel '#{channel}' does not exist in environment '#{env.name}'." \
        unless env.channels.key?(channel.to_s) || channel.to_sym == :latest

      env
    end

    # Returns the single YAML-ish env file for +name+, nil when absent,
    # and raises when more than one variant exists.
    def yaml_path_if_exists(name)
      matches = [
        "#{environments_path}/#{name}.yml",
        "#{environments_path}/#{name}.yaml",
        "#{environments_path}/#{name}.yml.erb",
        "#{environments_path}/#{name}.yaml.erb"
      ].select { |f| File.exist?(f) }

      case matches.size
      when 0
        nil
      when 1
        matches.first
      else
        raise "found multiple env files for same env #{name}."
      end
    end

    def rb_path_if_exists(name)
      path = "#{environments_path}/#{name}.rb"
      File.exist?(path) ? path : nil
    end
  end
end
| eropple/amigrind | lib/amigrind/repo.rb | Ruby | apache-2.0 | 6,902 |
require_relative '../../test_helper'

class TestVersion < Minitest::Test
  # The gem must expose a non-nil VERSION constant.
  def test_version
    refute_nil SendWithUs::VERSION
  end
end
| sendwithus/sendwithus_ruby | test/lib/send_with_us/version_test.rb | Ruby | apache-2.0 | 156 |
package com.google.ratel.deps.jackson.databind.ser.std;
import java.io.IOException;
import java.lang.reflect.Type;
import com.google.ratel.deps.jackson.core.*;
import com.google.ratel.deps.jackson.databind.JavaType;
import com.google.ratel.deps.jackson.databind.JsonMappingException;
import com.google.ratel.deps.jackson.databind.JsonNode;
import com.google.ratel.deps.jackson.databind.SerializerProvider;
import com.google.ratel.deps.jackson.databind.annotation.JacksonStdImpl;
import com.google.ratel.deps.jackson.databind.jsonFormatVisitors.JsonFormatVisitorWrapper;
/**
* This is the special serializer for regular {@link java.lang.String}s.
*<p>
* Since this is one of "native" types, no type information is ever
* included on serialization (unlike for most scalar types as of 1.5)
*/
@JacksonStdImpl
public final class StringSerializer
    extends NonTypedScalarSerializerBase<String>
{
    public StringSerializer() {
        super(String.class);
    }

    /**
     * For Strings, both {@code null} and the empty String qualify as empty.
     */
    @Override
    public boolean isEmpty(String value) {
        return value == null || value.isEmpty();
    }

    @Override
    public void serialize(String value, JsonGenerator jgen, SerializerProvider provider)
        throws IOException, JsonGenerationException
    {
        jgen.writeString(value);
    }

    @Override
    public JsonNode getSchema(SerializerProvider provider, Type typeHint)
    {
        // Strings always map to the JSON "string" schema type.
        return createSchemaNode("string", true);
    }

    @Override
    public void acceptJsonFormatVisitor(JsonFormatVisitorWrapper visitor, JavaType typeHint)
        throws JsonMappingException
    {
        if (visitor != null) {
            visitor.expectStringFormat(typeHint);
        }
    }
}
| sabob/ratel | ratel/src/com/google/ratel/deps/jackson/databind/ser/std/StringSerializer.java | Java | apache-2.0 | 1,752 |
/**
* Copyright 2016 Sebastien Pelletier
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.github.pellse.decorator.collection;
public abstract class InitializedBoundedList<E> extends BoundedList<E>{
public InitializedBoundedList() {
super(3);
}
} | pellse/decorator | src/test/java/io/github/pellse/decorator/collection/InitializedBoundedList.java | Java | apache-2.0 | 772 |
import Ember from 'ember';
import HasIdMixin from '../mixins/has-id';
const { computed, Mixin, assert, defineProperty } = Ember;
/*
A mixin that enriches a component that is attached to a model property.
The property name by default is taken from the formComponent, computed unless explictly
defined in the `property` variable.
This mixin also binds a property named `errors` to the model's `model.errors.@propertyName` array
*/
export default Mixin.create(HasIdMixin, {
  property: undefined,

  // Resolves the bound property name: an explicitly supplied `property`
  // wins, otherwise fall back to the enclosing form component's property.
  propertyName: computed('property', 'formComponent.property', {
    get() {
      if (this.get('property')) {
        return this.get('property');
      } else if (this.get('formComponent.property')) {
        return this.get('formComponent.property');
      } else {
        // BUG FIX: Ember's assert signature is assert(description, test).
        // The arguments were swapped (`assert(false, 'msg')`), so the
        // truthy message string made the assertion always pass silently.
        return assert('Property could not be found.', false);
      }
    }
  }),

  init() {
    this._super(...arguments);
    // Alias `errors` to the model's error array for this property, e.g.
    // `model.errors.name` when propertyName is "name".
    defineProperty(this, 'errors', computed.alias(`model.errors.${this.get('propertyName')}`));
  }
});
| slannigan/computed_input_errors | addon/mixins/has-property.js | JavaScript | apache-2.0 | 1,005 |
using DotvvmAcademy.Meta.Syntax;
using System;
using System.Diagnostics;
using System.Linq;
using System.Reflection;
namespace DotvvmAcademy.Meta
{
/// <summary>
/// Converts reflection <see cref="MemberInfo"/> objects into <see cref="NameNode"/>
/// syntax trees, recursing through declaring types, generic arguments,
/// element types, and namespaces.
/// </summary>
internal class MetaMemberInfoVisitor : MemberInfoVisitor<NameNode>
{
    /// <summary>Fallback for member kinds without a dedicated overload.</summary>
    public override NameNode DefaultVisit(MemberInfo info)
    {
        throw new NotSupportedException($"MemberInfo of type \"{info.GetType()}\" is not supported.");
    }

    public override NameNode VisitConstructor(ConstructorInfo info)
    {
        return VisitMember(info);
    }

    public override NameNode VisitEvent(EventInfo info)
    {
        return VisitMember(info);
    }

    public override NameNode VisitField(FieldInfo info)
    {
        return VisitMember(info);
    }

    public override NameNode VisitMethod(MethodInfo info)
    {
        return VisitMember(info);
    }

    public override NameNode VisitProperty(PropertyInfo info)
    {
        return VisitMember(info);
    }

    /// <summary>
    /// Builds a name for a type, handling constructed generics, nested types,
    /// pointers, arrays, and finally plain (optionally namespaced) types.
    /// The order of the checks matters: a constructed generic is decomposed
    /// into its definition plus arguments before nesting is considered.
    /// </summary>
    public override NameNode VisitType(Type info)
    {
        if (info.IsConstructedGenericType)
        {
            var arguments = info.GetGenericArguments()
                .Select(a => Visit(a));
            return NameFactory.ConstructedType(Visit(info.GetGenericTypeDefinition()), arguments);
        }
        else if (info.IsNested)
        {
            return NameFactory.NestedType(Visit(info.DeclaringType), info.Name, info.GetGenericArguments().Length);
        }
        else if (info.IsPointer)
        {
            return NameFactory.PointerType(Visit(info.GetElementType()));
        }
        else if (info.IsArray)
        {
            return NameFactory.ArrayType(Visit(info.GetElementType()), info.GetArrayRank());
        }
        else
        {
            // Types without a namespace (e.g. generic parameters) become a
            // simple name; everything else is qualified by its namespace.
            if (info.Namespace == null)
            {
                return NameFactory.Simple(info.Name);
            }
            else
            {
                return NameFactory.Qualified(VisitNamespace(info.Namespace), NameFactory.Simple(info.Name));
            }
        }
    }

    /// <summary>Folds a dotted namespace string into a left-nested qualified name.</summary>
    private NameNode VisitNamespace(string @namespace)
    {
        var segments = @namespace.Split('.');
        NameNode result = NameFactory.Identifier(segments[0]);
        for (int i = 1; i < segments.Length; i++)
        {
            result = NameFactory.Qualified(result, segments[i]);
        }
        return result;
    }

    /// <summary>Non-type members are named as {declaring type}.{member name}.</summary>
    private NameNode VisitMember(MemberInfo info)
    {
        return NameFactory.Member(Visit(info.DeclaringType), info.Name);
    }
}
} | riganti/dotvvm-samples-academy | src/DotvvmAcademy.Meta/MetaMemberInfoVisitor.cs | C# | apache-2.0 | 2,764 |
using System;
using System.Collections;
using System.Collections.Generic;
using System.Collections.Specialized;
using System.ComponentModel;
using System.Linq;
using System.Management.Automation;
using System.Text;
using System.Threading;
namespace NetworkUtility
{
#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member
/// <summary>
/// An observable, thread-safe list of URI path segments with their separator
/// characters, maintaining both an escaped (<see cref="FullPath"/>) and an
/// encoded (<see cref="EncodedFullPath"/>) rendering of the whole path.
/// <para>
/// Invariant maintained by the mutators: when the path is rooted,
/// <c>_separators.Count == _segments.Count</c> (one leading separator per
/// segment); when not rooted, <c>_separators</c> holds one fewer entry than
/// <c>_segments</c> (separators sit between segments only).
/// </para>
/// </summary>
public class UriPathSegmentList : IList<string>, IList, INotifyPropertyChanged, INotifyCollectionChanged, IEquatable<UriPathSegmentList>, IComparable<UriPathSegmentList>, IComparable
{
    // Segment equality/lookup is case-insensitive.
    private static StringComparer _comparer = StringComparer.InvariantCultureIgnoreCase;
    private object _syncRoot = new object();
    // NOTE(review): _segments starts as null ("empty" state, see IsEmpty);
    // most members assume it has been assigned elsewhere — confirm against
    // the code that initializes it (not visible here).
    private List<string> _segments = null;
    private List<char> _separators = new List<char>();
    private int _count = 0;
    private string _fullPath = "";
    private string _encodedFullPath = "";

    public event PropertyChangedEventHandler PropertyChanged;
    public event NotifyCollectionChangedEventHandler CollectionChanged;

    /// <summary>The escaped rendering of the whole path.</summary>
    public string FullPath
    {
        get
        {
            Monitor.Enter(_syncRoot);
            try { return _fullPath; }
            finally { Monitor.Exit(_syncRoot); }
        }
    }

    /// <summary>The encoded rendering of the whole path.</summary>
    public string EncodedFullPath
    {
        get
        {
            Monitor.Enter(_syncRoot);
            try { return _encodedFullPath; }
            finally { Monitor.Exit(_syncRoot); }
        }
    }

    public bool IsEmpty
    {
        get
        {
            Monitor.Enter(_syncRoot);
            try { return _segments == null; }
            finally { Monitor.Exit(_syncRoot); }
        }
    }

    /// <summary>
    /// True when every segment has a leading separator (one separator per
    /// segment). Setting toggles the leading separator on or off.
    /// </summary>
    public bool IsPathRooted
    {
        get
        {
            Monitor.Enter(_syncRoot);
            try { return _segments != null && _separators.Count == _segments.Count; }
            finally { Monitor.Exit(_syncRoot); }
        }
        set
        {
            Monitor.Enter(_syncRoot);
            try
            {
                if (value)
                {
                    if (_separators == null)
                        _separators = new List<char>();
                    else if (_separators.Count < _segments.Count)
                        _separators.Insert(0, _separators.DefaultIfEmpty('/').First());
                }
                else if (_separators != null)
                {
                    if (_separators.Count == 0)
                        _separators = null;
                    else
                        _separators.RemoveAt(0);
                }
            }
            finally { Monitor.Exit(_syncRoot); }
        }
    }

    public string this[int index]
    {
        get
        {
            Monitor.Enter(_syncRoot);
            try { return _segments[index]; }
            finally { Monitor.Exit(_syncRoot); }
        }
        set
        {
            string oldValue;
            Monitor.Enter(_syncRoot);
            try
            {
                if (value == null)
                    throw new ArgumentNullException();
                if (_segments[index] == value)
                    return;
                oldValue = _segments[index];
                _segments[index] = value;
            }
            finally { Monitor.Exit(_syncRoot); }
            // Recompute the path, then raise the Replace notification.
            UpdateFullPath(() => RaiseCollectionChanged(new NotifyCollectionChangedEventArgs(NotifyCollectionChangedAction.Replace, value, oldValue, index)));
        }
    }

    object IList.this[int index]
    {
        get { return this[index]; }
        set
        {
            // Unwrap PowerShell PSObject wrappers before casting.
            object obj = value;
            if (obj != null && obj is PSObject)
                obj = (obj as PSObject).BaseObject;
            this[index] = (string)obj;
        }
    }

    public int Count
    {
        get
        {
            Monitor.Enter(_syncRoot);
            try { return _count; }
            finally { Monitor.Exit(_syncRoot); }
        }
    }

    bool ICollection<string>.IsReadOnly { get { return false; } }

    bool IList.IsReadOnly { get { return false; } }

    bool IList.IsFixedSize { get { return false; } }

    object ICollection.SyncRoot { get { return _syncRoot; } }

    bool ICollection.IsSynchronized { get { return true; } }

    /// <summary>
    /// Escapes a path segment, percent-encoding the structural characters
    /// (%, \, #, /, :, ?) and anything outside printable ASCII.
    /// </summary>
    public static string EscapePathSegment(string value)
    {
        if (String.IsNullOrEmpty(value))
            return "";
        StringBuilder sb = new StringBuilder();
        foreach (char c in value)
        {
            switch (c)
            {
                case '%':
                    sb.Append("%25");
                    break;
                case '\\':
                    sb.Append("%5C");
                    break;
                case '#':
                    sb.Append("%23");
                    break;
                case '/':
                    sb.Append("%2F");
                    break;
                case ':':
                    sb.Append("%3A");
                    break;
                case '?':
                    sb.Append("%3F");
                    break;
                default:
                    if (c < ' ' || c > 126)
                        sb.Append(Uri.HexEscape(c));
                    else
                        sb.Append(c);
                    break;
            }
        }
        return sb.ToString();
    }

    /// <summary>
    /// Encodes a path segment: like <see cref="EscapePathSegment"/> but also
    /// percent-encodes space, quote and the other characters unsafe in URIs.
    /// </summary>
    public static string EncodePathSegment(string value)
    {
        if (String.IsNullOrEmpty(value))
            return "";
        StringBuilder sb = new StringBuilder();
        foreach (char c in value)
        {
            switch (c)
            {
                case ' ':
                    sb.Append("%20");
                    break;
                case '"':
                    sb.Append("%22");
                    break;
                case '%':
                    sb.Append("%25");
                    break;
                case '<':
                    sb.Append("%3C");
                    break;
                case '>':
                    sb.Append("%3E");
                    break;
                case '\\':
                    sb.Append("%5C");
                    break;
                case '^':
                    sb.Append("%5E");
                    break;
                case '`':
                    sb.Append("%60");
                    break;
                case '{':
                    sb.Append("%7B");
                    break;
                case '|':
                    sb.Append("%7C");
                    break;
                case '}':
                    sb.Append("%7D");
                    break;
                case '#':
                    sb.Append("%23");
                    break;
                case '+':
                    sb.Append("%2B");
                    break;
                case '/':
                    sb.Append("%2F");
                    break;
                case ':':
                    sb.Append("%3A");
                    break;
                case '?':
                    sb.Append("%3F");
                    break;
                default:
                    if (c < ' ' || c > 126)
                        sb.Append(Uri.HexEscape(c));
                    else
                        sb.Append(c);
                    break;
            }
        }
        return sb.ToString();
    }

    // Runs an action, then resyncs the cached Count afterwards.
    private void EnsureCount(Action action)
    {
        try { action(); }
        finally { EnsureCount(); }
    }

    // Resyncs the cached _count with _segments.Count and raises
    // PropertyChanged("Count") outside the lock when it changed.
    private void EnsureCount()
    {
        Monitor.Enter(_syncRoot);
        try
        {
            if (_count == _segments.Count)
                return;
            _count = _segments.Count;
        }
        finally { Monitor.Exit(_syncRoot); }
        RaisePropertyChanged("Count");
    }

    // Runs an action, then rebuilds the cached path strings afterwards.
    private void UpdateFullPath(Action action)
    {
        try { action(); }
        finally { UpdateFullPath(); }
    }

    // Rebuilds _fullPath/_encodedFullPath from the segments and separators,
    // raising PropertyChanged (outside the lock) for whichever changed.
    private void UpdateFullPath()
    {
        string encodedFullPath, escapedFullPath;
        Monitor.Enter(_syncRoot);
        try
        {
            if (_segments.Count == 0)
            {
                encodedFullPath = "";
                escapedFullPath = "";
            }
            else
            {
                StringBuilder encoded = new StringBuilder();
                StringBuilder escaped = new StringBuilder();
                if (_segments.Count == _separators.Count)
                {
                    // Rooted: every segment is preceded by its separator.
                    for (int i = 0; i < _segments.Count; i++)
                    {
                        char c = _separators[i];
                        encoded.Append(c);
                        escaped.Append(c);
                        string s = _segments[i];
                        encoded.Append(EncodePathSegment(s));
                        escaped.Append(EscapePathSegment(s));
                    }
                }
                else
                {
                    // Relative: first segment has no separator; separator
                    // i-1 precedes segment i.
                    // BUG FIX: the separator was appended to `encoded`
                    // twice while `escaped` never received the first
                    // segment or any separator, producing a mangled
                    // FullPath for relative paths.
                    encoded.Append(EncodePathSegment(_segments[0]));
                    escaped.Append(EscapePathSegment(_segments[0]));
                    for (int i = 1; i < _segments.Count; i++)
                    {
                        char c = _separators[i - 1];
                        encoded.Append(c);
                        escaped.Append(c);
                        string s = _segments[i];
                        encoded.Append(EncodePathSegment(s));
                        escaped.Append(EscapePathSegment(s));
                    }
                }
                encodedFullPath = encoded.ToString();
                escapedFullPath = escaped.ToString();
            }
            // Null-out the locals that did not change so we only raise
            // PropertyChanged for actual changes below.
            if (_encodedFullPath == encodedFullPath)
                encodedFullPath = null;
            else
                _encodedFullPath = encodedFullPath;
            if (_fullPath == escapedFullPath)
                escapedFullPath = null;
            else
                _fullPath = escapedFullPath;
        }
        finally { Monitor.Exit(_syncRoot); }
        if (escapedFullPath != null)
            RaisePropertyChanged("FullPath");
        if (encodedFullPath != null)
            RaisePropertyChanged("EncodedFullPath");
    }

    /// <summary>Hook for subclasses; runs before the PropertyChanged event.</summary>
    protected virtual void OnPropertyChanged(PropertyChangedEventArgs args) { }

    protected void RaisePropertyChanged(string propertyName)
    {
        PropertyChangedEventArgs args = new PropertyChangedEventArgs(propertyName);
        try { OnPropertyChanged(args); }
        finally
        {
            PropertyChangedEventHandler propertyChanged = PropertyChanged;
            if (propertyChanged != null)
                propertyChanged(this, args);
        }
    }

    /// <summary>Hook for subclasses; runs before the CollectionChanged event.</summary>
    protected virtual void OnCollectionChanged(NotifyCollectionChangedEventArgs args) { }

    private void RaiseCollectionChanged(NotifyCollectionChangedEventArgs args)
    {
        try { OnCollectionChanged(args); }
        finally
        {
            NotifyCollectionChangedEventHandler collectionChanged = CollectionChanged;
            if (collectionChanged != null)
                collectionChanged(this, args);
        }
    }

    /// <summary>Appends a segment (reusing the last separator) and returns its index.</summary>
    public int Add(string item)
    {
        if (item == null)
            throw new ArgumentNullException("item");
        Monitor.Enter(_syncRoot);
        int index;
        try
        {
            index = _segments.Count;
            if (_separators == null)
                _separators = new List<char>();
            else
                _separators.Add(_separators.DefaultIfEmpty('/').Last());
            _segments.Add(item);
        }
        finally { Monitor.Exit(_syncRoot); }
        EnsureCount();
        UpdateFullPath();
        return index;
    }

    void ICollection<string>.Add(string item) { Add(item); }

    int IList.Add(object value)
    {
        object obj = value;
        if (obj != null && obj is PSObject)
            obj = (obj as PSObject).BaseObject;
        return Add((string)obj);
    }

    public void Clear()
    {
        Monitor.Enter(_syncRoot);
        try
        {
            if (_segments.Count == 0)
                return;
            if (_separators.Count == _segments.Count)
                _separators.Clear();
            else
                _separators = null;
            _segments.Clear();
        }
        finally { Monitor.Exit(_syncRoot); }
        EnsureCount();
        UpdateFullPath();
    }

    /// <summary>Case-insensitive membership test.</summary>
    public bool Contains(string item)
    {
        if (item == null)
            return false;
        Monitor.Enter(_syncRoot);
        try
        {
            if (_segments.Count == 0)
                return false;
            for (int i = 0; i < _segments.Count; i++)
            {
                if (_comparer.Equals(_segments[i], item))
                    return true;
            }
        }
        finally { Monitor.Exit(_syncRoot); }
        return false;
    }

    bool IList.Contains(object value)
    {
        object obj = value;
        if (obj != null && obj is PSObject)
            obj = (obj as PSObject).BaseObject;
        return Contains(obj as string);
    }

    public void CopyTo(string[] array, int arrayIndex)
    {
        Monitor.Enter(_syncRoot);
        try { _segments.CopyTo(array, arrayIndex); }
        finally { Monitor.Exit(_syncRoot); }
    }

    void ICollection.CopyTo(Array array, int index)
    {
        Monitor.Enter(_syncRoot);
        try { _segments.ToArray().CopyTo(array, index); }
        finally { Monitor.Exit(_syncRoot); }
    }

    public IEnumerator<string> GetEnumerator() { return _segments.GetEnumerator(); }

    /// <summary>
    /// Returns the separator preceding segment <paramref name="index"/>,
    /// or null for the first segment of a relative path.
    /// </summary>
    public char? GetSeparator(int index)
    {
        Monitor.Enter(_syncRoot);
        try
        {
            if (index < 0 || index >= _segments.Count)
                throw new ArgumentOutOfRangeException("index");
            if (_separators != null && _separators.Count == _segments.Count)
                return _separators[index];
            if (index == 0)
                return null;
            return _separators[index - 1];
        }
        finally { Monitor.Exit(_syncRoot); }
    }

    /// <summary>Exact-match lookup first, then case-insensitive fallback.</summary>
    public int IndexOf(string item)
    {
        if (item == null)
            return -1;
        Monitor.Enter(_syncRoot);
        try
        {
            if (_segments.Count == 0)
                return -1;
            int index = _segments.IndexOf(item);
            if (index < 0)
            {
                for (int i = 0; i < _segments.Count; i++)
                {
                    if (_comparer.Equals(_segments[i], item))
                        return i;
                }
            }
            return index;
        }
        finally { Monitor.Exit(_syncRoot); }
    }

    int IList.IndexOf(object value)
    {
        object obj = value;
        if (obj != null && obj is PSObject)
            obj = (obj as PSObject).BaseObject;
        return IndexOf(obj as string);
    }

    /// <summary>Inserts a segment, duplicating a neighboring separator for it.</summary>
    public void Insert(int index, string item)
    {
        if (item == null)
            throw new ArgumentNullException("item");
        Monitor.Enter(_syncRoot);
        try
        {
            if (index < 0 || index > _segments.Count)
                throw new ArgumentOutOfRangeException("index");
            if (index == _segments.Count)
            {
                // Append case: same as Add.
                _segments.Add(item);
                if (_separators == null)
                    _separators = new List<char>();
                else
                    _separators.Add(_separators.DefaultIfEmpty('/').Last());
            }
            else
            {
                if (_separators.Count == _segments.Count)
                    _separators.Insert(index, _separators[index]);
                else if (index == _separators.Count)
                    _separators.Add(_separators.DefaultIfEmpty('/').Last());
                else if (index == 0)
                    _separators.Insert(0, _separators.DefaultIfEmpty('/').First());
                else
                    _separators.Insert(index - 1, _separators[index - 1]);
                _segments.Insert(index, item);
            }
        }
        finally { Monitor.Exit(_syncRoot); }
        EnsureCount();
        UpdateFullPath();
    }

    void IList.Insert(int index, object value)
    {
        object obj = value;
        if (obj != null && obj is PSObject)
            obj = (obj as PSObject).BaseObject;
        Insert(index, (string)obj);
    }

    public bool Remove(string item)
    {
        if (item == null)
            return false;
        Monitor.Enter(_syncRoot);
        try
        {
            // Monitor is reentrant, so calling IndexOf under the lock is safe.
            int index = IndexOf(item);
            if (index < 0)
                return false;
            if (_segments.Count == _separators.Count)
                _separators.RemoveAt(index);
            else
                _separators.RemoveAt((index > 0) ? index - 1 : 0);
            _segments.RemoveAt(index);
        }
        finally { Monitor.Exit(_syncRoot); }
        EnsureCount();
        UpdateFullPath();
        return true;
    }

    void IList.Remove(object value)
    {
        object obj = value;
        if (obj != null && obj is PSObject)
            obj = (obj as PSObject).BaseObject;
        Remove(obj as string);
    }

    public void RemoveAt(int index)
    {
        Monitor.Enter(_syncRoot);
        try
        {
            if (index < 0 || index >= _segments.Count)
                throw new ArgumentOutOfRangeException("index");
            if (_segments.Count == _separators.Count)
                _separators.RemoveAt(index);
            else
                _separators.RemoveAt((index > 0) ? index - 1 : 0);
            _segments.RemoveAt(index);
        }
        finally { Monitor.Exit(_syncRoot); }
        EnsureCount();
        UpdateFullPath();
    }

    /// <summary>Sets the separator preceding segment <paramref name="index"/>.</summary>
    public void SetSeparator(int index, char separator)
    {
        if (!(separator == ':' || separator == '/' || separator == '\\'))
            throw new ArgumentException("Invalid separator character", "separator");
        Monitor.Enter(_syncRoot);
        try
        {
            if (index < 0 || index >= _segments.Count)
                throw new ArgumentOutOfRangeException("index");
            if (_separators.Count == _segments.Count)
                _separators[index] = separator;
            else if (index == 0)
                // Setting a separator on the first relative segment roots the path.
                _separators.Insert(0, separator);
            else
                _separators[index - 1] = separator;
        }
        finally { Monitor.Exit(_syncRoot); }
        UpdateFullPath();
    }

    IEnumerator IEnumerable.GetEnumerator() { return _segments.ToArray().GetEnumerator(); }

#warning Not implemented
    public int CompareTo(UriPathSegmentList other)
    {
        throw new NotImplementedException();
    }

    public int CompareTo(object obj) { return CompareTo(obj as UriPathSegmentList); }

    public bool Equals(UriPathSegmentList other)
    {
        throw new NotImplementedException();
    }

    public override bool Equals(object obj) { return Equals(obj as UriPathSegmentList); }

    // NOTE(review): ToString() currently throws NotImplementedException,
    // so GetHashCode() (and Equals) will throw until it is implemented.
    public override int GetHashCode() { return ToString().GetHashCode(); }

    public override string ToString()
    {
        Monitor.Enter(_syncRoot);
        try
        {
            throw new NotImplementedException();
        }
        finally { Monitor.Exit(_syncRoot); }
    }
}
#pragma warning restore CS1591 // Missing XML comment for publicly visible type or member
} | lerwine/PowerShell-Modules | src/NetworkUtility/UriPathSegmentList.cs | C# | apache-2.0 | 21,377 |
package botservice.schedule;
import botservice.model.system.UserLogEntity;
import botservice.model.system.UserLogEntity_;
import botservice.properties.BotServiceProperty;
import botservice.properties.BotServicePropertyConst;
import botservice.service.SystemService;
import botservice.service.common.BaseParam;
import botservice.serviceException.ServiceException;
import botservice.serviceException.ServiceExceptionObject;
import botservice.util.BotMsgDirectionType;
import botservice.util.BotMsgTransportStatus;
import javax.annotation.PostConstruct;
import javax.annotation.Resource;
import javax.ejb.*;
import javax.enterprise.event.Event;
import javax.inject.Inject;
import java.io.Serializable;
import java.util.List;
/**
 * Scheduled singleton bean that periodically retries delivery of messages to
 * bot queues (end users) that could not be delivered immediately for whatever
 * reason. (Translated from the original Russian javadoc.)
 */
@Singleton
@Startup
public class MsgToUserResender implements Serializable {

    @Resource
    private TimerService timerService;

    @Inject
    SystemService systemService;

    @Inject
    @ServiceException
    Event<ServiceExceptionObject> serviceExceptionEvent;

    // Retry interval in seconds, injected from configuration.
    @Inject
    @BotServiceProperty(name = BotServicePropertyConst.MSG_TO_USER_RESEND_TIMEAUT)
    private int msgToUserResendTimeOut;

    @PostConstruct
    public void init(){
        // Fire immediately (0L initial delay), then every msgToUserResendTimeOut
        // seconds. The TimerConfig payload is this bean (checked in the @Timeout
        // callback); 'false' makes the timer non-persistent.
        timerService.createIntervalTimer(0L, msgToUserResendTimeOut*1000,
                new TimerConfig(this, false));
    }

    @Timeout
    public void resendMsgToClntApp(Timer timer){
        // Only react to the timer created by this bean.
        if (timer.getInfo() instanceof MsgToUserResender){
            // Pick up every outbound (TO_USER) message still marked DEFERRED.
            List<UserLogEntity> userLogEntityList = systemService.getEntityListByCriteria(UserLogEntity.class,
                    new BaseParam(UserLogEntity_.directionType, BotMsgDirectionType.TO_USER),
                    new BaseParam(UserLogEntity_.transportStatus, BotMsgTransportStatus.DEFERRED));
            for(UserLogEntity userLogEntity: userLogEntityList){
                try {
                    systemService.sendMessageToBotQueue(userLogEntity.getMsgBody(), userLogEntity.getUserKeyEntity());
                    // Mark delivered only after the send succeeded.
                    userLogEntity.setTransportStatus(BotMsgTransportStatus.DELIVERED);
                    systemService.mergeEntity(userLogEntity);
                } catch (Exception e){
                    // Keep the message DEFERRED and report the failure; the next
                    // timer tick will retry it.
                    serviceExceptionEvent.fire(new ServiceExceptionObject(
                            "Ошибка при попытке повторной отправки сообщения в очередь бота: " +
                                    userLogEntity.getUserKeyEntity().getBotEntity().getName(), e));
                }
            }
        }
    }
}
| dev-comp/botservice | botservice-ejb/src/main/java/botservice/schedule/MsgToUserResender.java | Java | apache-2.0 | 2,890 |
package com.unit16.z.indexed;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.function.Function;
import java.util.function.Predicate;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.collect.UnmodifiableIterator;
/**
 * Small fluent DSL over {@link Indexed} sequences. Wraps either a List or an
 * Indexed and offers combinators: lazy map/head/tail/append and eager
 * filter/strict.
 */
public abstract class DSL<B>
        implements Indexed<B>, Iterable<B>
{
    /** Eagerly materialized variant backed by a java.util.List. */
    private static final class ListBacked<C>
            extends DSL<C>
    {
        private final List<C> _list;

        ListBacked(List<C> i)
        {
            _list = i;
        }

        // Copies a (possibly lazy) DSL into a fresh ArrayList; used by strict().
        ListBacked(DSL<C> i)
        {
            _list = new ArrayList<>(i.size());
            for (C c : i) { _list.add(c); }
        }

        @Override public C get(int i) { return _list.get(i); }
        @Override public int size() { return _list.size(); }
        @Override public Iterator<C> iterator() { return _list.iterator(); }
        @Override public String toString() { return getClass().getSimpleName() + ": " + _list.toString(); }
    }

    /** Lazy variant that delegates every call to an underlying Indexed. */
    private static final class IndexedBacked<C>
            extends DSL<C>
    {
        private final Indexed<C> _i;

        IndexedBacked(Indexed<C> i) {
            _i = i;
        }

        @Override
        public C get(int i) { return _i.get(i); }

        @Override
        public int size() { return _i.size(); }

        @Override
        public Iterator<C> iterator() {
            // Index-walking iterator: evaluates get(i) on demand.
            return new UnmodifiableIterator<C>() {
                private int _j = 0;

                @Override
                public boolean hasNext() { return _j < size(); }

                @Override
                public C next() { _j++; return get(_j - 1);}
            };
        }
    }

    /** Wraps a List without copying. */
    public static <C> DSL<C> from(List<C> c)
    {
        return new ListBacked<>(c);
    }

    /** Wraps an Indexed, reusing it directly when it already is a DSL. */
    public static <C> DSL<C> from(Indexed<C> c)
    {
        return (c instanceof DSL) ? (DSL<C>) c : new IndexedBacked<>(c);
    }

    /** Lazily applies f to every element. */
    public final <C> DSL<C> map(Function<? super B, ? extends C> f)
    {
        return new OnResultOf<>(this, f);
    }

    // Lazy view of the first 'max' elements.
    // NOTE(review): 'max' is not clamped to size() — presumably callers pass
    // max <= size(); confirm before relying on larger values.
    public final DSL<B> head(final int max)
    {
        final Indexed<B> w = this;
        return new IndexedBacked<>(new Indexed<B>(){
            @Override
            public B get(int i) { return w.get(i); }
            @Override
            public int size() { return max; }});
    }

    /** Lazy view that skips the first 'min' elements. */
    public final DSL<B> tail(final int min)
    {
        final Indexed<B> w = this;
        return new IndexedBacked<>(new Indexed<B>(){
            @Override
            public B get(int i) { return w.get(i + min); }
            @Override
            public int size() { return w.size() - min; }});
    }

    // Eager filter: materializes matches into a list-backed DSL, or returns
    // an Empty sequence when nothing matches.
    public final DSL<B> filter(Predicate<? super B> p)
    {
        if (size() > 0)
        {
            final Iterator<B> i = Iterators.filter(this.iterator(), x -> p.test(x));
            if (i.hasNext())
            {
                return new ListBacked<>(Lists.newArrayList(i));
            }
        }
        return new Empty<>();
    }

    /** Forces evaluation: copies this (possibly lazy) DSL into a list-backed one. */
    public final DSL<B> strict()
    {
        return new ListBacked<>(this);
    }

    /** Lazy concatenation of this sequence with 'snd'. */
    public final DSL<B> append(Indexed<B> snd)
    {
        return new Concat<>(this, snd);
    }

    /** Two sequences viewed as one; sizes are captured at construction time. */
    private static final class Concat<C>
            extends DSL<C>
    {
        private final DSL<C> fst_;
        private final DSL<C> snd_;
        private final int sf;
        private final int ss;

        Concat(Indexed<C> fst, Indexed<C> snd) {
            fst_ = from(fst);
            snd_ = from(snd);
            sf = fst_.size();
            ss = snd_.size();
        }

        @Override
        public C get(int i) {
            // Indices below sf come from the first sequence, the rest from the second.
            return i < sf ? fst_.get(i) : snd_.get(i - sf);
        }

        @Override
        public int size() { return sf + ss; }

        @Override
        public Iterator<C> iterator() {
            return Iterators.concat(fst_.iterator(), snd_.iterator()); }
    }
}
| vincentk/unit16-z | src/main/java/com/unit16/z/indexed/DSL.java | Java | apache-2.0 | 3,250 |
<h1>This is a taxonomy</h1>
<ul>
<li>id: {{ $taxonomy->id }}</li>
<li>slug: {{ $taxonomy->slug }}</li>
<li>hierarchical: {{ $taxonomy->hierarchical }}</li>
{{ dump($taxonomy->terms) }}
</ul> | Datahjelpen/PEAK | resources/views/item/taxonomy/content-main.blade.php | PHP | apache-2.0 | 206 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Linq.Expressions;
using Elasticsearch.Net;
using Newtonsoft.Json;
using Newtonsoft.Json.Linq;
namespace Nest
{
/// <summary>
/// Response for the get alias API: alias definitions grouped per index.
/// Deserialized by <see cref="GetAliasResponseConverter"/>.
/// </summary>
[JsonConverter(typeof(GetAliasResponseConverter))]
public interface IGetAliasResponse : IResponse
{
    /// <summary>Alias definitions keyed by index name.</summary>
    IReadOnlyDictionary<string, IReadOnlyList<AliasDefinition>> Indices { get; }

    /// <summary>
    /// An additional error message if an error occurs.
    /// </summary>
    /// <remarks>Applies to Elasticsearch 5.5.0+</remarks>
    string Error { get; }

    /// <summary>Status value parsed from the response body, when present.</summary>
    int? StatusCode { get; }
}
/// <summary>
/// Default implementation of <see cref="IGetAliasResponse"/>; populated by
/// <see cref="GetAliasResponseConverter"/>.
/// </summary>
public class GetAliasResponse : ResponseBase, IGetAliasResponse
{
    public IReadOnlyDictionary<string, IReadOnlyList<AliasDefinition>> Indices { get; internal set; } = EmptyReadOnly<string, IReadOnlyList<AliasDefinition>>.Dictionary;
    // A response with no indices at all is treated as invalid.
    public override bool IsValid => this.Indices.Count > 0;
    public string Error { get; internal set; }
    public int? StatusCode { get; internal set; }
}
/// <summary>
/// Read-only converter for get alias responses. The body is a map of index
/// name to { "aliases": { alias-name: definition } }, optionally preceded by
/// top-level "error" (string) and "status" (integer) properties which must be
/// stripped before the remainder is read as index data.
/// </summary>
internal class GetAliasResponseConverter : JsonConverter
{
    public override bool CanWrite => false;

    public override void WriteJson(JsonWriter writer, object value, JsonSerializer serializer)
    {
        throw new NotSupportedException();
    }

    public override object ReadJson(JsonReader reader, Type objectType, object existingValue, JsonSerializer serializer)
    {
        var j = JObject.Load(reader);

        // A 5.5.0+ server may include a top-level "error" string; remove it so
        // it is not misread as an index name below.
        string error = null;
        var errorProperty = j.Property("error");
        if (errorProperty?.Value?.Type == JTokenType.String)
        {
            error = errorProperty.Value.Value<string>();
            errorProperty.Remove();
        }

        // Same for an optional integer "status" property.
        int? statusCode = null;
        var statusProperty = j.Property("status");
        if (statusProperty?.Value?.Type == JTokenType.Integer)
        {
            statusCode = statusProperty.Value.Value<int>();
            statusProperty.Remove();
        }

        // Read the remaining properties as aliases.
        var dict = serializer.Deserialize<Dictionary<string, Dictionary<string, Dictionary<string, AliasDefinition>>>>(j.CreateReader());
        var indices = new Dictionary<string, IReadOnlyList<AliasDefinition>>();
        foreach (var kv in dict)
        {
            var aliases = new List<AliasDefinition>();
            // TryGetValue avoids the ContainsKey + indexer double lookup.
            if (kv.Value != null && kv.Value.TryGetValue("aliases", out var aliasDict) && aliasDict != null)
            {
                foreach (var kva in aliasDict)
                {
                    // The alias name is the JSON property key, not part of the payload.
                    var alias = kva.Value;
                    alias.Name = kva.Key;
                    aliases.Add(alias);
                }
            }
            indices.Add(kv.Key, aliases);
        }
        return new GetAliasResponse { Indices = indices, Error = error, StatusCode = statusCode };
    }

    public override bool CanConvert(Type objectType) => true;
}
}
| CSGOpenSource/elasticsearch-net | src/Nest/Indices/AliasManagement/GetAlias/GetAliasResponse.cs | C# | apache-2.0 | 2,661 |
/*
* #%L
* FlatPack Demonstration Client
* %%
* Copyright (C) 2012 Perka Inc.
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package com.getperka.flatpack.demo.client;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.math.BigDecimal;
import java.net.HttpURLConnection;
import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.UUID;
import javax.ws.rs.core.UriBuilder;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import com.getperka.flatpack.Configuration;
import com.getperka.flatpack.FlatPack;
import com.getperka.flatpack.FlatPackEntity;
import com.getperka.flatpack.client.StatusCodeException;
import com.getperka.flatpack.demo.server.DemoServer;
/**
 * Contains a variety of smoke tests to demonstrate various facets of the FlatPack stack.
 */
public class ClientSmokeTest {
    private static final int PORT = 8111;

    /**
     * Spin up an instance of the demo server.
     */
    @BeforeClass
    public static void beforeClass() {
        assertTrue(new DemoServer().start(PORT));
    }

    private ClientApi api;
    private Random random;

    /**
     * Creates an instance of the ClientApi.
     */
    @Before
    public void before() throws IOException {
        // Fixed seed keeps generated prices deterministic across test runs
        // (the exact values 360/948 are asserted in testSimpleGetAndPut).
        random = new Random(0);
        Configuration config = new Configuration()
                .addTypeSource(ClientTypeSource.get());
        api = new ClientApi(FlatPack.create(config));
        api.setServerBase(UriBuilder.fromUri("http://localhost").port(PORT).build());
        api.setVerbose(true);
        // Reset server-side state so each test starts from an empty store.
        assertEquals(204, api.resetPost().execute().getResponseCode());
    }

    /**
     * Demonstrates ConstraintViolation handling. ConstraintViolations are returned as a simple
     * {@code string:string} map in the FlatPackEntity's {@code error} block. The goal is to provide
     * enough context for user interfaces to present the error message in a useful way, without
     * assuming that the client is a Java app.
     */
    @Test
    public void testConstraintViolation() throws IOException {
        Product p = makeProduct();
        // Negative price violates the server-side constraint.
        p.setPrice(BigDecimal.valueOf(-1));
        FlatPackEntity<?> entity = null;
        try {
            api.productsPut(Collections.singletonList(p))
                    .queryParameter("isAdmin", "true")
                    .execute();
            fail("Should have seen StatusCodeException");
        } catch (StatusCodeException e) {
            // The 400 status code is returned by the service method.
            assertEquals(400, e.getStatusCode());
            /*
             * If the server returned a valid flatpack-encoded response, it can be retrieved from the
             * StatusCodeException. Otherwise, this method will return null.
             */
            entity = e.getEntity();
        }
        // Pull out the error messages
        Map<String, String> errors = entity.getExtraErrors();
        assertNotNull(errors);
        assertEquals(1, errors.size());
        Map.Entry<String, String> entry = errors.entrySet().iterator().next();
        assertEquals("price", entry.getKey());
        assertEquals("must be greater than or equal to 0", entry.getValue());
    }

    /**
     * Demonstrates how generated client API will present a non-FlatPack resource (as an
     * HttpUrlConnection).
     */
    @Test
    public void testNonFlatpackEndpoint() throws IOException {
        // The query parameters are added as a builder-style pattern
        HttpURLConnection conn = api.helloGet().withName("ClientSmokeTest").execute();
        assertEquals(200, conn.getResponseCode());
        BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream(),
                Charset.forName("UTF8")));
        assertEquals("Hello ClientSmokeTest!", reader.readLine());
    }

    /**
     * Demonstrates the use of roles to restrict property setters.
     */
    @Test
    public void testRolePropertyAccess() throws IOException {
        // Create a Product
        Product p = new Product();
        UUID uuid = p.getUuid();
        p.setName("Product");
        p.setNotes("Some notes");
        p.setPrice(BigDecimal.valueOf(42));
        api.productsPut(Collections.singletonList(p))
                .queryParameter("isAdmin", "true")
                .execute();
        // Try to update it with a non-admin request
        p = new Product();
        p.setUuid(uuid);
        p.setPrice(BigDecimal.valueOf(1));
        api.productsPut(Collections.singletonList(p)).execute();
        // Verify that nothing changed, as nobody
        List<Product> products = api.productsGet().execute().getValue();
        assertEquals(1, products.size());
        p = products.get(0);
        // Same UUID
        assertEquals(uuid, p.getUuid());
        // Unchanged price
        assertEquals(BigDecimal.valueOf(42), p.getPrice());
        // Can't see the notes
        assertNull(p.getNotes());
        // Now try the update again, as an admin
        p = new Product();
        p.setUuid(uuid);
        p.setPrice(BigDecimal.valueOf(99));
        api.productsPut(Collections.singletonList(p))
                .queryParameter("isAdmin", "true")
                .execute();
        // Verify the changes, as nobody
        products = api.productsGet().execute().getValue();
        assertEquals(1, products.size());
        p = products.get(0);
        // Same UUID
        assertEquals(uuid, p.getUuid());
        // Unchanged price
        assertEquals(BigDecimal.valueOf(99), p.getPrice());
    }

    /**
     * Demonstrates a couple of round-trips to the server.
     */
    @Test
    public void testSimpleGetAndPut() throws IOException {
        List<Product> products = api.productsGet().execute().getValue();
        assertEquals(0, products.size());
        // A server error would be reported as a StatusCodeException, a subclass of IOException
        api.productsPut(Arrays.asList(makeProduct(), makeProduct()))
                .queryParameter("isAdmin", "true")
                .execute();
        /*
         * The object returned from productsGet() is an endpoint-specific interface that may contain
         * additional fluid setters for declared query parameters. It also provides access to some
         * request internals, including the FlatPackEntity that will be sent as part of the request.
         * This allows callers to further customize outgoing requests, in the above case to add the
         * isAdmin query parameter that interacts with the DummyAuthenticator. The call to execute()
         * triggers payload serialization and execution of the HTTP request. This returns a
         * FlatPackEntity describing the response, and getValue() returns the primary value object
         * contained in the payload.
         */
        FlatPackEntity<List<Product>> entity = api.productsGet().execute();
        assertTrue(entity.getExtraErrors().toString(), entity.getExtraErrors().isEmpty());
        assertTrue(entity.getExtraWarnings().toString(), entity.getExtraWarnings().isEmpty());
        products = entity.getValue();
        assertEquals(2, products.size());
        // Prices follow from the fixed Random(0) seed set up in before().
        assertEquals(BigDecimal.valueOf(360), products.get(0).getPrice());
        assertEquals(BigDecimal.valueOf(948), products.get(1).getPrice());
        assertTrue(products.get(0).wasPersistent());
        assertTrue(products.get(1).wasPersistent());
        // Try to update one of the objects
        Product p = products.get(0);
        p.setPrice(BigDecimal.valueOf(99));
        assertEquals(Collections.singleton("price"), p.dirtyPropertyNames());
        api.productsPut(Collections.singletonList(p)).queryParameter("isAdmin", "true").execute();
        // Re-fetch and verify update
        products = api.productsGet().execute().getValue();
        assertEquals(99, products.get(0).getPrice().intValue());
        assertTrue(products.get(0).dirtyPropertyNames().isEmpty());
    }

    // Builds a product with a deterministic pseudo-random price (seeded Random).
    private Product makeProduct() {
        Product p = new Product();
        p.setName("ClientSmokeTest");
        p.setPrice(BigDecimal.valueOf(random.nextInt(1000)));
        return p;
    }
}
| perka/flatpack-java | demo-client/src/test/java/com/getperka/flatpack/demo/client/ClientSmokeTest.java | Java | apache-2.0 | 8,420 |
#!/usr/bin/env python
# Copyright 2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Utility functions for dealing with VMDKs and datastores
import os
import os.path
import glob
import re
import logging
import fnmatch
from pyVim import vmconfig
from pyVmomi import vim
import pyVim
from pyVim.invt import GetVmFolder, FindChild
from error_code import *
import threadutils
import vmdk_ops
import auth_data_const
import auth
import auth_api
import log_config
from error_code import *
# datastores should not change during 'vmdkops_admin' run,
# so using global to avoid multiple scans of /vmfs/volumes
datastores = None
# we assume files smaller that that to be descriptor files
MAX_DESCR_SIZE = 5000
# regexp for finding "snapshot" (aka delta disk) descriptor names
SNAP_NAME_REGEXP = r"^.*-[0-9]{6}$" # used for names without .vmdk suffix
SNAP_VMDK_REGEXP = r"^.*-[0-9]{6}\.vmdk$" # used for file names
# regexp for finding 'special' vmdk files (they are created by ESXi)
SPECIAL_FILES_REGEXP = r"\A.*-(delta|ctk|digest|flat)\.vmdk$"
# glob expression to match end of 'delta' (aka snapshots) file names.
SNAP_SUFFIX_GLOB = "-[0-9][0-9][0-9][0-9][0-9][0-9].vmdk"
# regexp for finding datastore path "[datastore] path/to/file.vmdk" from full vmdk path
DATASTORE_PATH_REGEXP = r"^/vmfs/volumes/([^/]+)/(.*\.vmdk)$"
# lsof command
LSOF_CMD = "/bin/vmkvsitools lsof"
# Number of times and sleep time to retry on IOError EBUSY
VMDK_RETRY_COUNT = 5
VMDK_RETRY_SLEEP = 1
# root for all the volumes
VOLUME_ROOT = "/vmfs/volumes/"
# For managing resource locks.
lockManager = threadutils.LockManager()
def init_datastoreCache(force=False):
    """
    Initializes the datastore cache with the list of datastores accessible
    from local ESX host. force=True will force it to ignore current cache
    and force init
    """
    with lockManager.get_lock("init_datastoreCache"):
        global datastores
        logging.debug("init_datastoreCache: %s", datastores)
        # Fast path: cache already populated and no forced refresh requested.
        if datastores and not force:
            return

        si = vmdk_ops.get_si()
        # We are connected to ESX so childEntity[0] is current DC/Host
        ds_objects = si.content.rootFolder.childEntity[0].datastoreFolder.childEntity
        tmp_ds = []
        for datastore in ds_objects:
            # Skip datastores where the dockvols directory cannot be created.
            dockvols_path, err = vmdk_ops.get_vol_path(datastore.info.name)
            if err:
                logging.error(" datastore %s is being ignored as the dockvol path can't be created on it", datastore.info.name)
                continue
            tmp_ds.append((datastore.info.name,
                           datastore.info.url,
                           dockvols_path))
        # Swap in the fully built list so readers never see a partial cache.
        datastores = tmp_ds
def validate_datastore(datastore):
    """
    Checks if the datastore is part of datastoreCache.
    If not it will update the datastore cache and check if datastore
    is a part of the updated cache.
    """
    init_datastoreCache()
    if datastore in [ds[0] for ds in datastores]:
        return True
    # Not found - the cache may be stale; rebuild it and look once more.
    init_datastoreCache(force=True)
    return datastore in [ds[0] for ds in datastores]
def get_datastores():
    """
    Returns a list of (name, url, dockvol_path), with an element per datastore
    where:
    'name' is datastore name (e.g. 'vsanDatastore') ,
    'url' is datastore URL (e.g. '/vmfs/volumes/vsan:572904f8c031435f-3513e0db551fcc82')
    'dockvol_path' is a full path to 'dockvols' folder on datastore
    """
    # Lazily populate the module-level cache; cheap no-op once filled.
    init_datastoreCache()
    return datastores
def get_volumes(tenant_re):
    """ Return dicts of docker volumes, their datastore and their paths

    Each dict carries 'path', 'filename', 'datastore' and - when tenant_re is
    given - 'tenant' (or ORPHAN_TENANT for volumes whose tenant was removed).
    """
    # Assume we have two tenants "tenant1" and "tenant2"
    # volumes for "tenant1" are in /vmfs/volumes/datastore1/dockervol/tenant1
    # volumes for "tenant2" are in /vmfs/volumes/datastore1/dockervol/tenant2
    # volumes does not belongs to any tenants are under /vmfs/volumes/dockervol
    # tenant_re = None : only return volumes which do not belong to a tenant
    # tenant_re = "tenant1" : only return volumes which belongs to tenant1
    # tenant_re = "tenant*" : return volumes which belong to tenant1 or tenant2
    # tenant_re = "*" : return all volumes under /vmfs/volumes/datastore1/dockervol
    logging.debug("get_volumes: tenant_pattern(%s)", tenant_re)
    volumes = []
    for (datastore, url, path) in get_datastores():
        logging.debug("get_volumes: %s %s %s", datastore, url, path)
        if not tenant_re:
            # Tenant-less volumes live directly in the dockvols directory.
            for file_name in list_vmdks(path):
                # path : docker_vol path
                volumes.append({'path': path,
                                'filename': file_name,
                                'datastore': datastore})
        else:
            for root, dirs, files in os.walk(path):
                # walkthough all files under docker_vol path
                # root is the current directory which is traversing
                # root = /vmfs/volumes/datastore1/dockervol/tenant1_uuid
                # path = /vmfs/volumes/datastore1/dockervol
                # sub_dir get the string "/tenant1_uuid"
                # sub_dir_name is "tenant1_uuid"
                # call get_tenant_name with "tenant1_uuid" to find corresponding
                # tenant_name which will be used to match
                # pattern specified by tenant_re
                logging.debug("get_volumes: path=%s root=%s", path, root)
                sub_dir = root.replace(path, "")
                sub_dir_name = sub_dir[1:]
                # sub_dir_name is the tenant uuid
                error_info, tenant_name = auth_api.get_tenant_name(sub_dir_name)
                if not error_info:
                    logging.debug("get_volumes: path=%s root=%s sub_dir_name=%s tenant_name=%s",
                                  path, root, sub_dir_name, tenant_name)
                    # fnmatch gives shell-style matching for patterns like "tenant*".
                    if fnmatch.fnmatch(tenant_name, tenant_re):
                        for file_name in list_vmdks(root):
                            volumes.append({'path': root,
                                            'filename': file_name,
                                            'datastore': datastore,
                                            'tenant': tenant_name})
                else:
                    # cannot find this tenant, this tenant was removed
                    # mark those volumes created by "orphan" tenant
                    logging.debug("get_volumes: cannot find tenant_name for tenant_uuid=%s", sub_dir_name)
                    logging.debug("get_volumes: path=%s root=%s sub_dir_name=%s",
                                  path, root, sub_dir_name)
                    # return orphan volumes only in case when volumes from any tenants are asked
                    if tenant_re == "*":
                        for file_name in list_vmdks(root):
                            volumes.append({'path': root,
                                            'filename': file_name,
                                            'datastore': datastore,
                                            'tenant' : auth_data_const.ORPHAN_TENANT})
    logging.debug("volumes %s", volumes)
    return volumes
def get_vmdk_path(path, vol_name):
    """
    If the volume-related VMDK exists, returns full path to the latest
    VMDK disk in the disk chain, be it volume-NNNNNN.vmdk or volume.vmdk.
    If the disk does not exists, returns full path to the disk for create().
    """
    # Get a delta disk list, and if it's empty - return the full path for volume
    # VMDK base file.
    # Note: we rely on NEVER allowing '-NNNNNN' in end of a volume name and on
    # the fact that ESXi always creates deltadisks as <name>-NNNNNN.vmdk (N is a
    # digit, and there are exactly 6 digits there) for delta disks
    #
    # see vmdk_ops.py:parse_vol_name() which enforces the volume name rules.
    delta_disks = glob.glob("{0}/{1}{2}".format(path, vol_name, SNAP_SUFFIX_GLOB))
    if not delta_disks:
        return os.path.join(path, "{0}.vmdk".format(vol_name))
    # The newest delta disk (by creation time) is the live head of the chain.
    # max() with a key is clearer and cheaper than building and sorting a
    # (name, ctime) tuple list just to take its first element.
    latest = max(delta_disks, key=lambda vmdk: os.stat(vmdk).st_ctime)
    logging.debug("The latest delta disk is %s. All delta disks: %s", latest, delta_disks)
    return latest
def get_datastore_path(vmdk_path):
    """Returns a string datastore path "[datastore] path/to/file.vmdk"
    from a full vmdk path.
    """
    # DATASTORE_PATH_REGEXP captures (datastore name, path relative to it).
    datastore, relative_path = re.search(DATASTORE_PATH_REGEXP, vmdk_path).groups()
    return "[{0}] {1}".format(datastore, relative_path)
def get_datastore_from_vmdk_path(vmdk_path):
    """Returns a string representing the datastore from a full vmdk path.
    """
    # Only the first capture group (the datastore name) is needed here.
    return re.search(DATASTORE_PATH_REGEXP, vmdk_path).group(1)
def get_volname_from_vmdk_path(vmdk_path):
    """Returns the volume name from a full vmdk path.
    """
    # Second capture group is the datastore-relative path; the volume name is
    # its last component without the .vmdk extension.
    relative_path = re.search(DATASTORE_PATH_REGEXP, vmdk_path).group(2)
    return strip_vmdk_extension(relative_path.split("/")[-1])
def list_vmdks(path, volname="", show_snapshots=False):
    """ Return a list of VMDKs in a given path. Filters out non-descriptor
    files and delta disks.
    Params:
    path - where the VMDKs are looked for
    volname - if passed, only files related to this VMDKs will be returned. Useful when
    doing volume snapshot inspect
    show_snapshots - if set to True, all VMDKs (including delta files) will be returned
    """
    # dockvols may not exists on a datastore - this is normal.
    if not os.path.exists(path):
        return []
    logging.debug("list_vmdks: dockvol existed on datastore")
    # Keep only descriptor files (small *.vmdk, no -flat/-delta/... suffix).
    vmdks = [f for f in os.listdir(path) if vmdk_is_a_descriptor(path, f)]
    if volname:
        # Prefix match: covers the base disk and its snapshot chain.
        vmdks = [f for f in vmdks if f.startswith(volname)]
    if not show_snapshots:
        # Drop delta disks, which are named <vol>-NNNNNN.vmdk.
        expr = re.compile(SNAP_VMDK_REGEXP)
        vmdks = [f for f in vmdks if not expr.match(f)]
    logging.debug("vmdks %s", vmdks)
    return vmdks
def vmdk_is_a_descriptor(path, file_name):
    """
    Is the file a vmdk descriptor file? We assume any file that ends in .vmdk,
    does not have -delta or -flat or -digest or -ctk at the end of filename,
    and has a size less than MAX_DESCR_SIZE is a descriptor file.
    """
    name = file_name.lower()
    # Wrong extension -> definitely not a descriptor.
    if not name.endswith('.vmdk'):
        return False
    # ESXi-generated data files (-delta/-flat/-digest/-ctk) are not descriptors.
    if re.match(SPECIAL_FILES_REGEXP, name):
        return False
    # Cheap size heuristic: descriptors are tiny. This avoids reading the file
    # content and risking lock conflicts. A file that cannot be stat'ed is
    # assumed to be small enough.
    try:
        return os.stat(os.path.join(path, file_name)).st_size <= MAX_DESCR_SIZE
    except OSError:
        return True
def strip_vmdk_extension(filename):
    """Remove a trailing .vmdk file extension from a string.

    Only the extension at the very end of the name is stripped. The previous
    str.replace(".vmdk", "") removed EVERY occurrence of ".vmdk", mangling
    names that happen to contain the substring elsewhere.
    """
    if filename.endswith(".vmdk"):
        return filename[:-len(".vmdk")]
    return filename
def get_vm_uuid_by_name(vm_name):
    """ Returns vm_uuid for given vm_name, or None """
    # get_si() also ensures a live service-instance connection for the lookup.
    si = vmdk_ops.get_si()
    try:
        vm = FindChild(GetVmFolder(), vm_name)
        return vm.config.uuid
    except:
        # Best-effort lookup: any failure (VM not found, transient API error)
        # is deliberately reported as "no such VM".
        return None
def get_vm_name_by_uuid(vm_uuid):
    """ Returns vm_name for given vm_uuid, or None """
    # get_si() also ensures a live service-instance connection for the lookup.
    si = vmdk_ops.get_si()
    try:
        return vmdk_ops.vm_uuid2name(vm_uuid)
    except:
        # Best-effort lookup: any failure is deliberately reported as "no such VM".
        return None
def get_vm_config_path(vm_name):
    """Returns the absolute /vmfs/volumes/... path of the .vmx config file
    for the named VM, or None if the VM cannot be found.
    """
    si = vmdk_ops.get_si()
    try:
        vm = FindChild(GetVmFolder(), vm_name)
        config_path = vm.summary.config.vmPathName
    except:
        return None
    # config_path has the format "[datastore1] test_vm1/test_vm1/test_vm1.vmx".
    # Split on "] " (not on arbitrary whitespace) so datastore names that
    # contain spaces are handled correctly.
    datastore, path = config_path.split("] ", 1)
    datastore = datastore[1:]  # drop the leading "["
    datastore_path = os.path.join("/vmfs/volumes/", datastore)
    # datastore_path has the format like this /vmfs/volumes/datastore_name
    vm_config_path = os.path.join(datastore_path, path)
    return vm_config_path
def get_attached_volume_path(vm, volname, datastore):
    """
    Returns full path for docker volume "volname", residing on "datastore" and attached to "VM"
    Files a warning and returns None if the volume is not attached
    """
    # Find the attached disk with backing matching "[datastore] dockvols/[.*]/volname[-ddddddd]?.vmdk"
    # SInce we don't know the vmgroup (the path after dockvols), we'll just pick the first match (and yell if
    # there is more than one match)
    # Yes, it is super redundant - we will find VM, scan disks and find a matching one here, then return the path
    # and it will likely be used to do the same steps - find VM, scan the disks, etc.. It's a hack and it's a corner
    # case, so we'll live with this
    # Note that if VM is moved to a different vmgroup in flight, we may fail here and it's fine.
    # Note that if there is a volume with the same name in 2 different vmgroup folders and both are attached
    # and VM is moved between the groups we may end up returning the wrong volume but not possible, as the user
    # would need to change VMgroup in-flight and admin tool would block it when volumes are attached.
    if not datastore:
        # we rely on datastore always being a part of volume name passed to detach.
        # if this contract breaks, or we are called from somewhere else - bail out
        logging.error("get_attached_volume_path internal error - empty datastore")
        return None
    # look for '[datastore] dockvols/tenant/volume.vmdk' name
    # and account for delta disks (e.g. volume-000001.vmdk)
    prog = re.compile('\[%s\] %s/[^/]+/%s(-[0-9]{6})?\.vmdk$' %
                      (datastore, vmdk_ops.DOCK_VOLS_DIR, volname))
    # Keep only flat-file-backed virtual disks whose backing file matches.
    attached = [d for d in vm.config.hardware.device \
                if isinstance(d, vim.VirtualDisk) and \
                isinstance(d.backing, vim.VirtualDisk.FlatVer2BackingInfo) and \
                prog.match(d.backing.fileName)]
    if len(attached) == 0:
        logging.error("Can't find device attached to '%s' for volume '%s' on [%s].",
                      vm.config.name, volname, datastore)
        return None
    if len(attached) > 1:
        # First match wins (see comment above); just report the ambiguity.
        logging.warning("More than 1 device attached to '%s' for volume '%s' on [%s].",
                        vm.config.name, volname, datastore)
    path = find_dvs_volume(attached[0])
    logging.warning("Found path: %s", path)
    return path
def find_dvs_volume(dev):
    """
    If @param dev (type vim.vm.device) is a vDVS managed volume, return its
    vmdk path; otherwise return None.
    """
    # Not a virtual disk at all -> not ours. Return None (not False, as the
    # original did) so the function has a single consistent "no result" value;
    # callers only test truthiness, so this is backward compatible.
    if type(dev) != vim.vm.device.VirtualDisk:
        return None
    # Filename format is as follows:
    # "[<datastore name>] <parent-directory>/tenant/<vmdk-descriptor-name>"
    # Trim the datastore name and keep disk path.
    datastore_name, disk_path = dev.backing.fileName.rsplit("]", 1)
    logging.info("backing disk name is %s", disk_path)
    # name formatting to remove unwanted characters
    datastore_name = datastore_name[1:]
    disk_path = disk_path.lstrip()
    # find the dockvols dir on current datastore and resolve symlinks if any
    dvol_dir_path = os.path.realpath(os.path.join(VOLUME_ROOT,
                                                  datastore_name, vmdk_ops.DOCK_VOLS_DIR))
    dvol_dir = os.path.basename(dvol_dir_path)
    if disk_path.startswith(dvol_dir):
        # returning the vmdk path for vDVS volume
        return os.path.join(VOLUME_ROOT, datastore_name, disk_path)
    return None
def check_volumes_mounted(vm_list):
    """
    Return error_info if any vm in @param vm_list have docker volume mounted,
    or if any listed VM cannot be found; returns None when all VMs are clean.
    vm_list holds (vm_id, ...) tuples; only the first element is used.
    """
    for vm_id, _ in vm_list:
        vm = vmdk_ops.findVmByUuid(vm_id)
        if vm:
            for d in vm.config.hardware.device:
                # Any vDVS-managed disk attached to the VM blocks the operation.
                if find_dvs_volume(d):
                    error_info = generate_error_info(ErrorCode.VM_WITH_MOUNTED_VOLUMES,
                                                    vm.config.name)
                    return error_info
        else:
            error_info = generate_error_info(ErrorCode.VM_NOT_FOUND, vm_id)
            return error_info
    return None
def log_volume_lsof(vol_name):
    """Log volume open file descriptors.

    Runs ESXi's lsof equivalent and logs every descriptor whose path mentions
    the volume name; used for diagnosing "volume busy" situations.
    """
    rc, out = vmdk_ops.RunCommand(LSOF_CMD)
    if rc != 0:
        logging.error("Error running lsof for %s: %s", vol_name, out)
        return
    for line in out.splitlines():
        # Make sure we only match the lines pertaining to that volume files.
        if re.search(r".*/vmfs/volumes/.*{0}.*".format(vol_name), line):
            # lsof output columns: cartel id, name, descriptor type, fd, description.
            cartel, name, ftype, fd, desc = line.split()
            msg = "cartel={0}, name={1}, type={2}, fd={3}, desc={4}".format(
                cartel, name, ftype, fd, desc)
            logging.info("Volume open descriptor: %s", msg)
def get_datastore_objects():
    """ return all datastore objects """
    si = vmdk_ops.get_si()
    # On an ESX connection childEntity[0] is the current DC/host.
    return si.content.rootFolder.childEntity[0].datastore
def get_datastore_url(datastore_name):
    """Return the datastore url for the given datastore name, or None if the
    datastore is unknown.
    """
    # Special name "_VM_DS" maps straight to its well-known url.
    if datastore_name == auth_data_const.VM_DS:
        return auth_data_const.VM_DS_URL
    # Special name "_ALL_DS" maps straight to its well-known url.
    if datastore_name == auth_data_const.ALL_DS:
        return auth_data_const.ALL_DS_URL
    # validate_datastore will refresh the cache if datastore_name is not in cache
    if not validate_datastore(datastore_name):
        return None
    # Query datastore URL from the cached VIM data; get_datastores() returns
    # (datastore_name, datastore_url, dockvol_path) tuples.
    res = [d[1] for d in get_datastores() if d[0] == datastore_name]
    # Guard the lookup instead of indexing blindly: mirrors the sibling
    # get_datastore_name(), which returns None when there is no match, and
    # avoids an IndexError should the cache change between the validation
    # above and this query.
    return res[0] if res else None
def get_datastore_name(datastore_url):
    """Return the datastore name for the given datastore url, or None.

    The special URLs _VM_DS_URL and _ALL_DS_URL map to their fixed names;
    any other url is resolved through the VIM API.
    """
    if datastore_url == auth_data_const.VM_DS_URL:
        return auth_data_const.VM_DS
    if datastore_url == auth_data_const.ALL_DS_URL:
        return auth_data_const.ALL_DS
    # get_datastores() returns a list of tuples
    # (datastore_name, datastore_url, dockvol_path)
    matches = [d[0] for d in get_datastores() if d[1] == datastore_url]
    logging.debug("get_datastore_name: res=%s", matches)
    return matches[0] if matches else None
def get_datastore_url_from_config_path(config_path):
    """Return the datastore url embedded in config_path.

    The path looks like /vmfs/volumes/<datastore_url_name>/... or
    /vmfs/volumes/<datastore_name>/...; the 4th path component (after
    resolving symlinks) identifies the datastore.
    """
    config_ds_url = os.path.join("/vmfs/volumes/",
                                 os.path.realpath(config_path).split("/")[3])
    # Pass the arguments to the logger instead of eagerly %-formatting
    # them: formatting is skipped entirely when DEBUG is disabled.
    logging.debug("get_datastore_url_from_config_path: config_path=%s config_ds_url=%s",
                  config_path, config_ds_url)
    return config_ds_url
def main():
    """Script entry point: initialize logging configuration.

    log_config is a project module; configure() has side effects only.
    """
    log_config.configure()
if __name__ == "__main__":
    main()
| shivanshu21/docker-volume-vsphere | esx_service/utils/vmdk_utils.py | Python | apache-2.0 | 19,851 |
package com.example.testconnectionappmart;
/**
 * Plain data holder for a single appmart service entry: identity, pricing,
 * discount and billing-cycle attributes.
 *
 * NOTE(review): numeric-looking attributes (prices, rates, cycles) are kept
 * as String, presumably matching the wire format they are parsed from —
 * confirm against the API response before changing any of them to numbers.
 */
public class Service {

    // --- identity ---
    private int id;
    private String serviceId;
    private String serviceName;
    private String appName;
    private String developId;
    private String logoImagePath;

    // --- pricing ---
    private String price;
    private String appmartPrice;
    private String policy;
    private String exp;

    // --- discount ---
    private String discountStartDt;
    private String discountEndDt;
    private String discountPrice;
    private String discountAmount;
    private String discountRate;
    private String saveType;
    private String saleType;

    // --- billing cycle / settlement ---
    private String cntCycle;
    private String dayCycle;
    private String monthCycle;
    private String setlType;

    /** Creates an empty service. */
    public Service() {
    }

    /**
     * Creates a service identified by the given service id.
     *
     * @param serviceId identifier of the service
     */
    public Service(String serviceId) {
        this.serviceId = serviceId;
    }

    public int getId() {
        return id;
    }

    public void setId(int id) {
        this.id = id;
    }

    public String getServiceId() {
        return serviceId;
    }

    public void setServiceId(String serviceId) {
        this.serviceId = serviceId;
    }

    public String getServiceName() {
        return serviceName;
    }

    public void setServiceName(String serviceName) {
        this.serviceName = serviceName;
    }

    public String getAppName() {
        return appName;
    }

    public void setAppName(String appName) {
        this.appName = appName;
    }

    public String getDevelopId() {
        return developId;
    }

    public void setDevelopId(String developId) {
        this.developId = developId;
    }

    public String getLogoImagePath() {
        return logoImagePath;
    }

    public void setLogoImagePath(String logoImagePath) {
        this.logoImagePath = logoImagePath;
    }

    public String getPrice() {
        return price;
    }

    public void setPrice(String price) {
        this.price = price;
    }

    public String getAppmartPrice() {
        return appmartPrice;
    }

    public void setAppmartPrice(String appmartPrice) {
        this.appmartPrice = appmartPrice;
    }

    public String getPolicy() {
        return policy;
    }

    public void setPolicy(String policy) {
        this.policy = policy;
    }

    public String getExp() {
        return exp;
    }

    public void setExp(String exp) {
        this.exp = exp;
    }

    public String getDiscountStartDt() {
        return discountStartDt;
    }

    public void setDiscountStartDt(String discountStartDt) {
        this.discountStartDt = discountStartDt;
    }

    public String getDiscountEndDt() {
        return discountEndDt;
    }

    public void setDiscountEndDt(String discountEndDt) {
        this.discountEndDt = discountEndDt;
    }

    public String getDiscountPrice() {
        return discountPrice;
    }

    public void setDiscountPrice(String discountPrice) {
        this.discountPrice = discountPrice;
    }

    public String getDiscountAmount() {
        return discountAmount;
    }

    public void setDiscountAmount(String discountAmount) {
        this.discountAmount = discountAmount;
    }

    public String getDiscountRate() {
        return discountRate;
    }

    public void setDiscountRate(String discountRate) {
        this.discountRate = discountRate;
    }

    public String getSaveType() {
        return saveType;
    }

    public void setSaveType(String saveType) {
        this.saveType = saveType;
    }

    public String getSaleType() {
        return saleType;
    }

    public void setSaleType(String saleType) {
        this.saleType = saleType;
    }

    public String getCntCycle() {
        return cntCycle;
    }

    public void setCntCycle(String cntCycle) {
        this.cntCycle = cntCycle;
    }

    public String getDayCycle() {
        return dayCycle;
    }

    public void setDayCycle(String dayCycle) {
        this.dayCycle = dayCycle;
    }

    public String getMonthCycle() {
        return monthCycle;
    }

    public void setMonthCycle(String monthCycle) {
        this.monthCycle = monthCycle;
    }

    public String getSetlType() {
        return setlType;
    }

    public void setSetlType(String setlType) {
        this.setlType = setlType;
    }
}
| info-appmart/OthersMethods | src/com/example/testconnectionappmart/Service.java | Java | apache-2.0 | 3,650 |
package com.animerom.filemanager.commands.shell;
import com.animerom.filemanager.commands.ChangePermissionsExecutable;
import com.animerom.filemanager.console.CommandNotFoundException;
import com.animerom.filemanager.console.ExecutionException;
import com.animerom.filemanager.console.InsufficientPermissionsException;
import com.animerom.filemanager.model.MountPoint;
import com.animerom.filemanager.model.Permissions;
import com.animerom.filemanager.util.MountPointHelper;
import java.text.ParseException;
/**
 * Command that changes the permission bits of a file system object by
 * invoking the shell "chmod" program.
 *
 * {@link "http://unixhelp.ed.ac.uk/CGI/man-cgi?chmod"}
 */
public class ChangePermissionsCommand
    extends SyncResultProgram implements ChangePermissionsExecutable {

    private static final String ID = "chmod"; //$NON-NLS-1$

    // Outcome of the command; set by parse() after the program has run.
    private Boolean mRet;

    // Path of the object whose permissions are being changed.
    private final String mFileName;

    /**
     * Creates a new {@code ChangePermissionsCommand}.
     *
     * @param fileName The name of the file or directory to change
     * @param newPermissions The new permissions to apply to the object
     * @throws InvalidCommandDefinitionException If the command has an invalid definition
     */
    public ChangePermissionsCommand(
            String fileName, Permissions newPermissions) throws InvalidCommandDefinitionException {
        super(ID, newPermissions.toOctalString(), fileName);
        this.mFileName = fileName;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void parse(String in, String err) throws ParseException {
        // chmod produces no output that needs parsing; reaching this point
        // means the program ran, so record success.
        this.mRet = Boolean.TRUE;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Boolean getResult() {
        return this.mRet;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void checkExitCode(int exitCode)
            throws InsufficientPermissionsException, CommandNotFoundException, ExecutionException {
        if (exitCode == 0) {
            return;
        }
        throw new ExecutionException("exitcode != 0"); //$NON-NLS-1$
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public MountPoint getSrcWritableMountPoint() {
        // chmod has no source object; only the destination must be writable.
        return null;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public MountPoint getDstWritableMountPoint() {
        return MountPointHelper.getMountPointFromDirectory(this.mFileName);
    }
}
| AnimeROM/android_package_AnimeManager | src/com/animerom/filemanager/commands/shell/ChangePermissionsCommand.java | Java | apache-2.0 | 2,364 |
<?php
/**
* This example updates a proposal's notes. To determine which proposals exist,
* run GetAllProposals.php.
*
* PHP version 5
*
* Copyright 2014, Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @package GoogleApiAdsDfp
* @subpackage v201702
* @category WebServices
* @copyright 2014, Google Inc. All Rights Reserved.
* @license http://www.apache.org/licenses/LICENSE-2.0 Apache License,
* Version 2.0
*/
error_reporting(E_STRICT | E_ALL);

// You can set the include path to src directory or reference
// DfpUser.php directly via require_once.
// $path = '/path/to/dfp_api_php_lib/src';
$path = dirname(__FILE__) . '/../../../../src';
set_include_path(get_include_path() . PATH_SEPARATOR . $path);

require_once 'Google/Api/Ads/Dfp/Lib/DfpUser.php';
require_once 'Google/Api/Ads/Dfp/Util/v201702/StatementBuilder.php';
require_once dirname(__FILE__) . '/../../../Common/ExampleUtils.php';

// Set the ID of the proposal to update.
$proposalId = 'INSERT_PROPOSAL_ID_HERE';

try {
  // Get DfpUser from credentials in "../auth.ini"
  // relative to the DfpUser.php file's directory.
  $user = new DfpUser();

  // Log SOAP XML request and response.
  $user->LogDefaults();

  // Get the ProposalService.
  $proposalService = $user->GetService('ProposalService', 'v201702');

  // Create a statement to select a single proposal by ID.
  $statementBuilder = new StatementBuilder();
  $statementBuilder->Where('id = :id')
      ->OrderBy('id ASC')
      ->Limit(1)
      ->WithBindVariableValue('id', $proposalId);

  // Get the proposal.
  $page = $proposalService->getProposalsByStatement(
      $statementBuilder->ToStatement());

  // Guard against an empty result set: dereferencing results[0] on a page
  // with no results would previously cause a fatal error on the
  // internalNotes assignment below.
  if (empty($page->results)) {
    throw new Exception(sprintf('No proposal found with ID %s.', $proposalId));
  }
  $proposal = $page->results[0];

  // Update the proposal's notes.
  $proposal->internalNotes = 'Proposal needs further review before approval.';

  // Update the proposal on the server.
  $proposals = $proposalService->updateProposals(array($proposal));

  foreach ($proposals as $updatedProposal) {
    printf("Proposal with ID %d and name '%s' was updated.\n",
        $updatedProposal->id, $updatedProposal->name);
  }
} catch (OAuth2Exception $e) {
  ExampleUtils::CheckForOAuth2Errors($e);
} catch (ValidationException $e) {
  ExampleUtils::CheckForOAuth2Errors($e);
} catch (Exception $e) {
  printf("%s\n", $e->getMessage());
}
| Getsidecar/googleads-php-lib | examples/Dfp/v201702/ProposalService/UpdateProposals.php | PHP | apache-2.0 | 2,856 |
/*
* Copyright © 2014 - 2019 Leipzig University (Database Research Group)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradoop.flink.model.impl.operators.matching.single.cypher.common.functions;
import org.gradoop.common.model.impl.id.GradoopId;
import org.gradoop.flink.model.impl.operators.matching.single.cypher.functions.ReverseEdgeEmbedding;
import org.gradoop.flink.model.impl.operators.matching.single.cypher.pojos.Embedding;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class ReverseEdgeEmbeddingTest {

  @Test
  public void testReversingAnEdgeEmbedding() throws Exception {
    // Build an edge embedding [source, edge, target].
    GradoopId sourceId = GradoopId.get();
    GradoopId edgeId = GradoopId.get();
    GradoopId targetId = GradoopId.get();

    Embedding edgeEmbedding = new Embedding();
    edgeEmbedding.add(sourceId);
    edgeEmbedding.add(edgeId);
    edgeEmbedding.add(targetId);

    Embedding reversed = new ReverseEdgeEmbedding().map(edgeEmbedding);

    // Source and target positions must be swapped; the edge id stays put.
    assertEquals(targetId, reversed.getId(0));
    assertEquals(edgeId, reversed.getId(1));
    assertEquals(sourceId, reversed.getId(2));
  }
}
| rostam/gradoop | gradoop-flink/src/test/java/org/gradoop/flink/model/impl/operators/matching/single/cypher/common/functions/ReverseEdgeEmbeddingTest.java | Java | apache-2.0 | 1,568 |
/*
* Copyright 2013 Gunnar Kappei.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.opengis.gml;
/**
 * An XML MultiSurfaceDomainType(@http://www.opengis.net/gml).
 *
 * This is a complex type.
 */
// NOTE: XMLBeans-generated schema binding (see the typeSystemForClassLoader
// call below). Do not edit by hand — regenerate from the GML schema instead.
public interface MultiSurfaceDomainType extends net.opengis.gml.DomainSetType
{
    // Schema type handle resolved from the compiled XMLBeans type system.
    public static final org.apache.xmlbeans.SchemaType type = (org.apache.xmlbeans.SchemaType)
        org.apache.xmlbeans.XmlBeans.typeSystemForClassLoader(MultiSurfaceDomainType.class.getClassLoader(), "schemaorg_apache_xmlbeans.system.s6E28D279B6C224D74769DB8B98AF1665").resolveHandle("multisurfacedomaintype70a9type");
    /**
     * Gets the "MultiSurface" element
     */
    net.opengis.gml.MultiSurfaceType getMultiSurface();
    /**
     * True if has "MultiSurface" element
     */
    boolean isSetMultiSurface();
    /**
     * Sets the "MultiSurface" element
     */
    void setMultiSurface(net.opengis.gml.MultiSurfaceType multiSurface);
    /**
     * Appends and returns a new empty "MultiSurface" element
     */
    net.opengis.gml.MultiSurfaceType addNewMultiSurface();
    /**
     * Unsets the "MultiSurface" element
     */
    void unsetMultiSurface();
    /**
     * A factory class with static methods for creating instances
     * of this type.
     */
    public static final class Factory
    {
        // Creation of empty instances.
        public static net.opengis.gml.MultiSurfaceDomainType newInstance() {
          return (net.opengis.gml.MultiSurfaceDomainType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().newInstance( type, null ); }
        public static net.opengis.gml.MultiSurfaceDomainType newInstance(org.apache.xmlbeans.XmlOptions options) {
          return (net.opengis.gml.MultiSurfaceDomainType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().newInstance( type, options ); }
        // Parsing from the various supported input sources; each overload
        // has a variant taking XmlOptions.
        /** @param xmlAsString the string value to parse */
        public static net.opengis.gml.MultiSurfaceDomainType parse(java.lang.String xmlAsString) throws org.apache.xmlbeans.XmlException {
          return (net.opengis.gml.MultiSurfaceDomainType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( xmlAsString, type, null ); }
        public static net.opengis.gml.MultiSurfaceDomainType parse(java.lang.String xmlAsString, org.apache.xmlbeans.XmlOptions options) throws org.apache.xmlbeans.XmlException {
          return (net.opengis.gml.MultiSurfaceDomainType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( xmlAsString, type, options ); }
        /** @param file the file from which to load an xml document */
        public static net.opengis.gml.MultiSurfaceDomainType parse(java.io.File file) throws org.apache.xmlbeans.XmlException, java.io.IOException {
          return (net.opengis.gml.MultiSurfaceDomainType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( file, type, null ); }
        public static net.opengis.gml.MultiSurfaceDomainType parse(java.io.File file, org.apache.xmlbeans.XmlOptions options) throws org.apache.xmlbeans.XmlException, java.io.IOException {
          return (net.opengis.gml.MultiSurfaceDomainType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( file, type, options ); }
        public static net.opengis.gml.MultiSurfaceDomainType parse(java.net.URL u) throws org.apache.xmlbeans.XmlException, java.io.IOException {
          return (net.opengis.gml.MultiSurfaceDomainType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( u, type, null ); }
        public static net.opengis.gml.MultiSurfaceDomainType parse(java.net.URL u, org.apache.xmlbeans.XmlOptions options) throws org.apache.xmlbeans.XmlException, java.io.IOException {
          return (net.opengis.gml.MultiSurfaceDomainType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( u, type, options ); }
        public static net.opengis.gml.MultiSurfaceDomainType parse(java.io.InputStream is) throws org.apache.xmlbeans.XmlException, java.io.IOException {
          return (net.opengis.gml.MultiSurfaceDomainType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( is, type, null ); }
        public static net.opengis.gml.MultiSurfaceDomainType parse(java.io.InputStream is, org.apache.xmlbeans.XmlOptions options) throws org.apache.xmlbeans.XmlException, java.io.IOException {
          return (net.opengis.gml.MultiSurfaceDomainType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( is, type, options ); }
        public static net.opengis.gml.MultiSurfaceDomainType parse(java.io.Reader r) throws org.apache.xmlbeans.XmlException, java.io.IOException {
          return (net.opengis.gml.MultiSurfaceDomainType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( r, type, null ); }
        public static net.opengis.gml.MultiSurfaceDomainType parse(java.io.Reader r, org.apache.xmlbeans.XmlOptions options) throws org.apache.xmlbeans.XmlException, java.io.IOException {
          return (net.opengis.gml.MultiSurfaceDomainType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( r, type, options ); }
        public static net.opengis.gml.MultiSurfaceDomainType parse(javax.xml.stream.XMLStreamReader sr) throws org.apache.xmlbeans.XmlException {
          return (net.opengis.gml.MultiSurfaceDomainType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( sr, type, null ); }
        public static net.opengis.gml.MultiSurfaceDomainType parse(javax.xml.stream.XMLStreamReader sr, org.apache.xmlbeans.XmlOptions options) throws org.apache.xmlbeans.XmlException {
          return (net.opengis.gml.MultiSurfaceDomainType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( sr, type, options ); }
        public static net.opengis.gml.MultiSurfaceDomainType parse(org.w3c.dom.Node node) throws org.apache.xmlbeans.XmlException {
          return (net.opengis.gml.MultiSurfaceDomainType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( node, type, null ); }
        public static net.opengis.gml.MultiSurfaceDomainType parse(org.w3c.dom.Node node, org.apache.xmlbeans.XmlOptions options) throws org.apache.xmlbeans.XmlException {
          return (net.opengis.gml.MultiSurfaceDomainType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( node, type, options ); }
        // Deprecated XMLInputStream-based variants kept for binary
        // compatibility with older XMLBeans consumers.
        /** @deprecated {@link org.apache.xmlbeans.xml.stream.XMLInputStream} */
        @Deprecated
        public static net.opengis.gml.MultiSurfaceDomainType parse(org.apache.xmlbeans.xml.stream.XMLInputStream xis) throws org.apache.xmlbeans.XmlException, org.apache.xmlbeans.xml.stream.XMLStreamException {
          return (net.opengis.gml.MultiSurfaceDomainType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( xis, type, null ); }
        /** @deprecated {@link org.apache.xmlbeans.xml.stream.XMLInputStream} */
        @Deprecated
        public static net.opengis.gml.MultiSurfaceDomainType parse(org.apache.xmlbeans.xml.stream.XMLInputStream xis, org.apache.xmlbeans.XmlOptions options) throws org.apache.xmlbeans.XmlException, org.apache.xmlbeans.xml.stream.XMLStreamException {
          return (net.opengis.gml.MultiSurfaceDomainType) org.apache.xmlbeans.XmlBeans.getContextTypeLoader().parse( xis, type, options ); }
        /** @deprecated {@link org.apache.xmlbeans.xml.stream.XMLInputStream} */
        @Deprecated
        public static org.apache.xmlbeans.xml.stream.XMLInputStream newValidatingXMLInputStream(org.apache.xmlbeans.xml.stream.XMLInputStream xis) throws org.apache.xmlbeans.XmlException, org.apache.xmlbeans.xml.stream.XMLStreamException {
          return org.apache.xmlbeans.XmlBeans.getContextTypeLoader().newValidatingXMLInputStream( xis, type, null ); }
        /** @deprecated {@link org.apache.xmlbeans.xml.stream.XMLInputStream} */
        @Deprecated
        public static org.apache.xmlbeans.xml.stream.XMLInputStream newValidatingXMLInputStream(org.apache.xmlbeans.xml.stream.XMLInputStream xis, org.apache.xmlbeans.XmlOptions options) throws org.apache.xmlbeans.XmlException, org.apache.xmlbeans.xml.stream.XMLStreamException {
          return org.apache.xmlbeans.XmlBeans.getContextTypeLoader().newValidatingXMLInputStream( xis, type, options ); }
        private Factory() { } // No instance of this class allowed
    }
}
| moosbusch/xbLIDO | src/net/opengis/gml/MultiSurfaceDomainType.java | Java | apache-2.0 | 8,916 |
package o;
import android.content.Context;
import android.content.res.Resources;
import android.graphics.Bitmap;
import android.graphics.drawable.Drawable;
import android.util.TypedValue;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AbsListView.LayoutParams;
import android.widget.BaseAdapter;
import android.widget.ImageView;
import com.quizup.core.QuizApplication;
import com.quizup.core.activities.WallpaperActivity;
import java.lang.ref.WeakReference;
import java.util.ArrayList;
// NOTE(review): decompiled, obfuscated code (unicode identifiers, raw
// decompiler labels such as "break label140;"). It may not recompile as-is;
// treat as a read-only reference of the original bytecode.
public final class ฯ extends BaseAdapter
{
  // Wallpaper entries backing this adapter (populated in the constructor).
  public ArrayList<Υ> ˊ;
  // Hosting activity, used as Context for view creation.
  private WallpaperActivity ˋ;
  // Placeholder bitmap shown while a thumbnail loads asynchronously.
  private Bitmap ˎ;
  // Cell size in pixels (120dp converted via TypedValue below).
  private int ˏ;
  public ฯ(WallpaperActivity paramWallpaperActivity)
  {
    this.ˋ = paramWallpaperActivity;
    this.ˊ = ϟ.ˊ(QuizApplication.ᐝ());
    // 120dp -> px using the system display metrics.
    this.ˏ = ((int)TypedValue.applyDimension(1, 120.0F, Resources.getSystem().getDisplayMetrics()));
    Υ localΥ = new Υ(paramWallpaperActivity);
    localΥ.ˊ = "file://wallpaper:pattern_1:purple";
    this.ˎ = ϟ.ˊ(paramWallpaperActivity, localΥ, this.ˏ);
  }
  public final int getCount()
  {
    return this.ˊ.size();
  }
  public final long getItemId(int paramInt)
  {
    // Stable ids are not used by this adapter.
    return 0L;
  }
  public final View getView(int paramInt, View paramView, ViewGroup paramViewGroup)
  {
    // Recycle the convertView as an ImageView, or create a new square cell.
    ImageView localImageView2;
    if (paramView == null)
    {
      ImageView localImageView1 = new ImageView(this.ˋ);
      localImageView2 = localImageView1;
      localImageView1.setLayoutParams(new AbsListView.LayoutParams(this.ˏ, this.ˏ));
    }
    else
    {
      localImageView2 = (ImageView)paramView;
    }
    // i != 0 when the item's bitmap is not yet loaded — presumably triggers
    // the async thumbnail load below; confirm against the ʇ task class.
    int i;
    if (((Υ)this.ˊ.get(paramInt)).ˋ == null)
      i = 1;
    else
      i = 0;
    if (i != 0)
    {
      Υ localΥ = (Υ)this.ˊ.get(paramInt);
      ImageView localImageView3 = localImageView2;
      if (localImageView3 != null)
      {
        Drawable localDrawable = localImageView3.getDrawable();
        if ((localDrawable instanceof ฯ.if))
        {
          localʇ1 = (ʇ)((ฯ.if)localDrawable).ˊ.get();
          break label140;
        }
      }
      ʇ localʇ1 = null;
      label140: ʇ localʇ2 = localʇ1;
      // Cancel a running task bound to this cell when it targets a
      // different item (decompiler-flattened control flow).
      if (localʇ1 != null)
        if (localʇ2.ˊ.equals(localΥ))
        {
          localʇ2.cancel(true);
        }
        else
        {
          j = 0;
          break label181;
        }
      int j = 1;
      label181: if (j != 0)
      {
        ʇ localʇ3 = new ʇ(this.ˋ, localImageView3, localΥ, this.ˏ);
        localImageView3.setImageDrawable(new ฯ.if(this.ˋ.getResources(), this.ˎ, localʇ3));
        localʇ3.execute(new Void[0]);
      }
      return localImageView2;
    }
    // Bitmap already present: bind it directly.
    localImageView2.setImageBitmap(((Υ)this.ˊ.get(paramInt)).ˋ);
    return localImageView2;
  }
}
/* Location: /Users/vikas/Documents/Mhacks_Real_app/classes-dex2jar.jar
* Qualified Name: o.ฯ
* JD-Core Version: 0.6.2
*/ | mmmsplay10/QuizUpWinner | quizup/o/ฯ.java | Java | apache-2.0 | 2,925 |
<?php
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This layout outputs events in a JSON-encoded GELF format.
*
* This class was originally contributed by Dmitry Ulyanov.
*
* ## Configurable parameters: ##
*
* - **host** - Server on which logs are collected.
* - **shortMessageLength** - Maximum length of short message.
* - **locationInfo** - If set to true, adds the file name and line number at
* which the log statement originated. Slightly slower, defaults to false.
*
* @package log4php
* @subpackage layouts
* @since 2.4.0
* @license http://www.apache.org/licenses/LICENSE-2.0 Apache License, Version 2.0
* @link http://logging.apache.org/log4php/docs/layouts/html.html Layout documentation
* @link http://github.com/d-ulyanov/log4php-graylog2 Dmitry Ulyanov's original submission.
* @link http://graylog2.org/about/gelf GELF documentation.
*/
class LoggerLayoutGelf extends LoggerLayout {

	/**
	 * GELF log levels, mirroring syslog priorities.
	 */
	const LEVEL_EMERGENCY = 0;
	const LEVEL_ALERT = 1;
	const LEVEL_CRITICAL = 2;
	const LEVEL_ERROR = 3;
	const LEVEL_WARNING = 4;
	const LEVEL_NOTICE = 5;
	const LEVEL_INFO = 6;
	const LEVEL_DEBUG = 7;

	/**
	 * Version of the Graylog2 GELF protocol emitted by this layout
	 * (1.1 since 11/2013).
	 */
	const GELF_PROTOCOL_VERSION = '1.1';

	/**
	 * Whether file/line/class/method of the logging call are included.
	 * @var boolean
	 */
	protected $locationInfo = false;

	/**
	 * Maximum number of characters kept in the short_message field.
	 * @var int
	 */
	protected $shortMessageLength = 255;

	/**
	 * Host name reported in the GELF 'host' field.
	 * @var string
	 */
	protected $host;

	/**
	 * Translation table from log4php level ints to GELF levels.
	 * @var array
	 */
	protected $levelMap = array(
		LoggerLevel::TRACE => self::LEVEL_DEBUG,
		LoggerLevel::DEBUG => self::LEVEL_DEBUG,
		LoggerLevel::INFO => self::LEVEL_INFO,
		LoggerLevel::WARN => self::LEVEL_WARNING,
		LoggerLevel::ERROR => self::LEVEL_ERROR,
		LoggerLevel::FATAL => self::LEVEL_CRITICAL,
	);

	public function activateOptions() {
		// Fall back to the machine's host name when none was configured.
		$configuredHost = $this->getHost();
		if (!$configuredHost) {
			$this->setHost(gethostname());
		}
		return parent::activateOptions();
	}

	/**
	 * Renders a logging event as a JSON-encoded GELF message.
	 *
	 * @param LoggerLoggingEvent $event
	 * @return string
	 */
	public function format(LoggerLoggingEvent $event) {
		$gelfMessage = array(
			// Basic fields
			'version' => self::GELF_PROTOCOL_VERSION,
			'host' => $this->getHost(),
			'short_message' => $this->getShortMessage($event),
			'full_message' => $this->getFullMessage($event),
			'timestamp' => $event->getTimeStamp(),
			'level' => $this->getGelfLevel($event->getLevel()),
			// Additional fields
			'_facility' => $event->getLoggerName(),
			'_thread' => $event->getThreadName(),
		);

		if ($this->getLocationInfo()) {
			$gelfMessage += $this->getEventLocationFields($event);
		}
		$gelfMessage += $this->getEventMDCFields($event);

		return json_encode($gelfMessage);
	}

	/**
	 * Builds the location-related additional fields for an event.
	 *
	 * @param LoggerLoggingEvent $event
	 * @return array
	 */
	public function getEventLocationFields(LoggerLoggingEvent $event) {
		$location = $event->getLocationInformation();
		return array(
			'_file' => $location->getFileName(),
			'_line' => $location->getLineNumber(),
			'_class' => $location->getClassName(),
			'_method' => $location->getMethodName()
		);
	}

	/**
	 * Converts the event's MDC map into GELF additional fields,
	 * dropping keys whose resulting name is not valid.
	 *
	 * @param LoggerLoggingEvent $event
	 * @return array
	 */
	public function getEventMDCFields(LoggerLoggingEvent $event) {
		$mdcFields = array();
		foreach ($event->getMDCMap() as $key => $value) {
			$candidateName = "_".$key;
			if ($this->isAdditionalFieldNameValid($candidateName)) {
				$mdcFields[$candidateName] = $value;
			}
		}
		return $mdcFields;
	}

	/**
	 * Checks whether a field name is valid per the GELF specification
	 * (underscore prefix, word chars/dots/dashes, and never '_id').
	 *
	 * @param string $fieldName
	 * @return bool
	 */
	public function isAdditionalFieldNameValid($fieldName) {
		if ($fieldName == '_id') {
			return false;
		}
		return (bool) preg_match("@^_[\w\.\-]*$@", $fieldName);
	}

	/**
	 * Sets the 'locationInfo' parameter.
	 * @param boolean $locationInfo
	 */
	public function setLocationInfo($locationInfo) {
		$this->setBoolean('locationInfo', $locationInfo);
	}

	/**
	 * Returns the value of the 'locationInfo' parameter.
	 * @return boolean
	 */
	public function getLocationInfo() {
		return $this->locationInfo;
	}

	/**
	 * Returns the (truncated, UTF-8-sanitized) short message for an event.
	 *
	 * @param LoggerLoggingEvent $event
	 * @return string
	 */
	public function getShortMessage(LoggerLoggingEvent $event) {
		$truncated = mb_substr($event->getRenderedMessage(), 0, $this->getShortMessageLength());
		return $this->cleanNonUtfSymbols($truncated);
	}

	/**
	 * Returns the full (UTF-8-sanitized) message for an event.
	 *
	 * @param LoggerLoggingEvent $event
	 * @return string
	 */
	public function getFullMessage(LoggerLoggingEvent $event) {
		$rendered = $event->getRenderedMessage();
		return $this->cleanNonUtfSymbols($rendered);
	}

	/**
	 * Maps a log4php level to the corresponding GELF level;
	 * unknown levels fall back to LEVEL_ALERT.
	 *
	 * @param LoggerLevel $level
	 * @return int
	 */
	public function getGelfLevel(LoggerLevel $level) {
		$levelInt = $level->toInt();
		return isset($this->levelMap[$levelInt])
			? $this->levelMap[$levelInt]
			: self::LEVEL_ALERT;
	}

	/**
	 * @param int $shortMessageLength
	 */
	public function setShortMessageLength($shortMessageLength) {
		$this->setPositiveInteger('shortMessageLength', $shortMessageLength);
	}

	/**
	 * @return int
	 */
	public function getShortMessageLength() {
		return $this->shortMessageLength;
	}

	/**
	 * @param string $host
	 */
	public function setHost($host) {
		$this->setString('host', $host);
	}

	/**
	 * @return string
	 */
	public function getHost() {
		return $this->host;
	}

	/**
	 * Replaces byte sequences that are not valid UTF-8 with '?'.
	 *
	 * @param string $message
	 * @return string
	 */
	protected function cleanNonUtfSymbols($message) {
		/**
		 * Reject overly long 2 byte sequences, as well as characters
		 * above U+10000 and replace with ?
		 */
		$message = preg_replace(
			'/[\x00-\x08\x10\x0B\x0C\x0E-\x19\x7F]'.
			'|[\x00-\x7F][\x80-\xBF]+'.
			'|([\xC0\xC1]|[\xF0-\xFF])[\x80-\xBF]*'.
			'|[\xC2-\xDF]((?![\x80-\xBF])|[\x80-\xBF]{2,})'.
			'|[\xE0-\xEF](([\x80-\xBF](?![\x80-\xBF]))|(?![\x80-\xBF]{2})|[\x80-\xBF]{3,})/S',
			'?',
			$message
		);

		/**
		 * Reject overly long 3 byte sequences and UTF-16 surrogates
		 * and replace with ?
		 */
		$message = preg_replace(
			'/\xE0[\x80-\x9F][\x80-\xBF]'.
			'|\xED[\xA0-\xBF][\x80-\xBF]/S',
			'?',
			$message
		);

		return $message;
	}
}
| d-ulyanov/log4php-graylog2 | src/main/php/layouts/LoggerLayoutGelf.php | PHP | apache-2.0 | 8,053 |
package com.datagre.apps.omicron.core.dto;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.ToString;
/**
 * Created by zengxiaobo on 2017/3/24.
 */
// DTO describing a configuration-change notification. Accessors, equals and
// toString are generated by the Lombok annotations below.
@Data
@NoArgsConstructor
@ToString
public class OmicronConfigNotification {
    // Name of the namespace whose configuration changed.
    private String namespaceName;
    // Identifier of this notification — presumably assigned by the config
    // server per change; confirm against the producer before relying on order.
    private long notificationId;
    public OmicronConfigNotification(String namespaceName, long notificationId) {
        this.namespaceName = namespaceName;
        this.notificationId = notificationId;
    }
}
| ycaihua/omicron | omicron-core/src/main/java/com/datagre/apps/omicron/core/dto/OmicronConfigNotification.java | Java | apache-2.0 | 476 |
<?php
namespace MonologCreator\Processor;
/**
* Class ExtraFieldProcessor
*
* Allows adding additional high-level or special fields to the log output.
*
* @package MonologCreator\Processor
* @author Sebastian Götze <s.goetze@bigpoint.net>
*/
class ExtraFieldProcessor implements \Monolog\Processor\ProcessorInterface
{
/**
* Array to hold additional fields
*
* @var array
*/
private $extraFields = array();
public function __construct(array $extraFields = array())
{
$this->extraFields = $extraFields;
}
/**
* Invoke processor
*
* Adds fields to record before returning it.
*
* @param array $record
* @return array
*/
public function __invoke(array $record)
{
if (false === \is_array($record['extra'])) {
$record['extra'] = array();
}
// Add fields to record
$record['extra'] = \array_merge($record['extra'], $this->extraFields);
return $record;
}
}
| Bigpoint/monolog-creator | src/MonologCreator/Processor/ExtraFieldProcessor.php | PHP | apache-2.0 | 1,014 |
#include "common/router/upstream_request.h"
#include <chrono>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include "envoy/event/dispatcher.h"
#include "envoy/event/timer.h"
#include "envoy/grpc/status.h"
#include "envoy/http/conn_pool.h"
#include "envoy/runtime/runtime.h"
#include "envoy/upstream/cluster_manager.h"
#include "envoy/upstream/upstream.h"
#include "common/common/assert.h"
#include "common/common/empty_string.h"
#include "common/common/enum_to_int.h"
#include "common/common/scope_tracker.h"
#include "common/common/utility.h"
#include "common/grpc/common.h"
#include "common/http/codes.h"
#include "common/http/header_map_impl.h"
#include "common/http/headers.h"
#include "common/http/message_impl.h"
#include "common/http/utility.h"
#include "common/network/application_protocol.h"
#include "common/network/transport_socket_options_impl.h"
#include "common/network/upstream_server_name.h"
#include "common/network/upstream_subject_alt_names.h"
#include "common/router/config_impl.h"
#include "common/router/debug_config.h"
#include "common/router/router.h"
#include "common/stream_info/uint32_accessor_impl.h"
#include "common/tracing/http_tracer_impl.h"
#include "extensions/common/proxy_protocol/proxy_protocol_header.h"
#include "extensions/filters/http/well_known_names.h"
namespace Envoy {
namespace Router {
// Constructs an upstream request owned by the parent router filter.
// Initializes all state flags, captures the start time from the dispatcher's
// time source, and optionally spawns a child tracing span for this egress
// attempt.
UpstreamRequest::UpstreamRequest(RouterFilterInterface& parent,
std::unique_ptr<GenericConnPool>&& conn_pool)
: parent_(parent), conn_pool_(std::move(conn_pool)), grpc_rq_success_deferred_(false),
stream_info_(parent_.callbacks()->dispatcher().timeSource()),
start_time_(parent_.callbacks()->dispatcher().timeSource().monotonicTime()),
calling_encode_headers_(false), upstream_canary_(false), decode_complete_(false),
encode_complete_(false), encode_trailers_(false), retried_(false), awaiting_headers_(true),
outlier_detection_timeout_recorded_(false),
create_per_try_timeout_on_request_complete_(false), paused_for_connect_(false),
record_timeout_budget_(parent_.cluster()->timeoutBudgetStats().has_value()) {
if (parent_.config().start_child_span_) {
span_ = parent_.callbacks()->activeSpan().spawnChild(
parent_.callbacks()->tracingConfig(), "router " + parent.cluster()->name() + " egress",
parent.timeSource().systemTime());
if (parent.attemptCount() != 1) {
// This is a retry request, add this metadata to span.
span_->setTag(Tracing::Tags::get().RetryCount, std::to_string(parent.attemptCount() - 1));
}
}
// Propagate the downstream health-check flag so upstream stream info can
// distinguish health-check traffic.
stream_info_.healthCheck(parent_.callbacks()->streamInfo().healthCheck());
// Record the upstream protocol when the conn pool already knows it.
if (conn_pool_->protocol().has_value()) {
stream_info_.protocol(conn_pool_->protocol().value());
}
}
// Tears down the upstream request: finalizes the tracing span, disables any
// outstanding timers, clears the request encoder, records timeout-budget
// stats, emits upstream access logs, and unwinds any flow-control
// read-disables still held against the downstream connection.
UpstreamRequest::~UpstreamRequest() {
if (span_ != nullptr) {
// Finalize the child span with whatever response headers/trailers were
// captured for logging.
Tracing::HttpTracerUtility::finalizeUpstreamSpan(*span_, upstream_headers_.get(),
upstream_trailers_.get(), stream_info_,
Tracing::EgressConfig::get());
}
if (per_try_timeout_ != nullptr) {
// Allows for testing.
per_try_timeout_->disableTimer();
}
if (max_stream_duration_timer_ != nullptr) {
max_stream_duration_timer_->disableTimer();
}
clearRequestEncoder();
// If desired, fire the per-try histogram when the UpstreamRequest
// completes.
if (record_timeout_budget_) {
Event::Dispatcher& dispatcher = parent_.callbacks()->dispatcher();
const MonotonicTime end_time = dispatcher.timeSource().monotonicTime();
const std::chrono::milliseconds response_time =
std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time_);
Upstream::ClusterTimeoutBudgetStatsOptRef tb_stats = parent_.cluster()->timeoutBudgetStats();
tb_stats->get().upstream_rq_timeout_budget_per_try_percent_used_.recordValue(
FilterUtility::percentageOfTimeout(response_time, parent_.timeout().per_try_timeout_));
}
// Seal timing/stream info before emitting the upstream access logs below.
stream_info_.setUpstreamTiming(upstream_timing_);
stream_info_.onRequestComplete();
for (const auto& upstream_log : parent_.config().upstream_logs_) {
upstream_log->log(parent_.downstreamHeaders(), upstream_headers_.get(),
upstream_trailers_.get(), stream_info_);
}
// Undo every outstanding read-disable so the downstream connection is not
// left paused by a destroyed request; keep the drain stat balanced.
while (downstream_data_disabled_ != 0) {
parent_.callbacks()->onDecoderFilterBelowWriteBufferLowWatermark();
parent_.cluster()->stats().upstream_flow_control_drained_total_.inc();
--downstream_data_disabled_;
}
}
// Forwards a 100-continue response from the upstream to the parent router
// filter. Only ever called with a 100 status (asserted below).
void UpstreamRequest::decode100ContinueHeaders(Http::ResponseHeaderMapPtr&& headers) {
ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher());
ASSERT(100 == Http::Utility::getResponseStatus(*headers));
parent_.onUpstream100ContinueHeaders(std::move(headers), *this);
}
// Handles response headers from the upstream: drops 1xx responses (other than
// 101), records first-byte timing and the response code, captures headers for
// access logging when configured, resumes a paused CONNECT request on a 200,
// and hands the headers to the parent router filter.
void UpstreamRequest::decodeHeaders(Http::ResponseHeaderMapPtr&& headers, bool end_stream) {
ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher());
// We drop 1xx other than 101 on the floor; 101 upgrade headers need to be passed to the client as
// part of the final response. 100-continue headers are handled in onUpstream100ContinueHeaders.
//
// We could in principle handle other headers here, but this might result in the double invocation
// of decodeHeaders() (once for informational, again for non-informational), which is likely an
// easy to miss corner case in the filter and HCM contract.
//
// This filtering is done early in upstream request, unlike 100 coalescing which is performed in
// the router filter, since the filtering only depends on the state of a single upstream, and we
// don't want to confuse accounting such as onFirstUpstreamRxByteReceived() with informational
// headers.
const uint64_t response_code = Http::Utility::getResponseStatus(*headers);
if (Http::CodeUtility::is1xx(response_code) &&
response_code != enumToInt(Http::Code::SwitchingProtocols)) {
return;
}
// TODO(rodaine): This is actually measuring after the headers are parsed and not the first
// byte.
upstream_timing_.onFirstUpstreamRxByteReceived(parent_.callbacks()->dispatcher().timeSource());
maybeEndDecode(end_stream);
awaiting_headers_ = false;
// Only copy the headers when upstream access logs will need them.
if (!parent_.config().upstream_logs_.empty()) {
upstream_headers_ = Http::createHeaderMap<Http::ResponseHeaderMapImpl>(*headers);
}
stream_info_.response_code_ = static_cast<uint32_t>(response_code);
// A 200 response to a CONNECT request means the tunnel is established:
// flush the buffered body/trailers that were held back in encodeData().
if (paused_for_connect_ && response_code == 200) {
encodeBodyAndTrailers();
paused_for_connect_ = false;
}
parent_.onUpstreamHeaders(response_code, std::move(headers), *this, end_stream);
}
// Handles response body data from the upstream: updates byte accounting and
// completion state, then forwards the data to the parent router filter.
void UpstreamRequest::decodeData(Buffer::Instance& data, bool end_stream) {
ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher());
maybeEndDecode(end_stream);
stream_info_.addBytesReceived(data.length());
parent_.onUpstreamData(data, *this, end_stream);
}
// Handles response trailers from the upstream. Trailers always end the
// stream, so decode is marked complete; trailers are copied only when
// upstream access logs need them.
void UpstreamRequest::decodeTrailers(Http::ResponseTrailerMapPtr&& trailers) {
ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher());
maybeEndDecode(true);
if (!parent_.config().upstream_logs_.empty()) {
upstream_trailers_ = Http::createHeaderMap<Http::ResponseTrailerMapImpl>(*trailers);
}
parent_.onUpstreamTrailers(std::move(trailers), *this);
}
// Returns the route entry resolved by the parent router filter.
// Precondition: a route has been selected (the pointer is dereferenced).
const RouteEntry& UpstreamRequest::routeEntry() const { return *parent_.routeEntry(); }
// Returns the downstream connection from the parent filter's callbacks.
// Precondition: the connection pointer is non-null.
const Network::Connection& UpstreamRequest::connection() const {
return *parent_.callbacks()->connection();
}
// Forwards upstream METADATA frames straight to the parent router filter.
void UpstreamRequest::decodeMetadata(Http::MetadataMapPtr&& metadata_map) {
parent_.onUpstreamMetadata(std::move(metadata_map));
}
// Marks the response as fully received when the upstream signals end of
// stream: records the last-upstream-rx-byte timestamp and flips the
// decode-complete flag. No-op otherwise.
void UpstreamRequest::maybeEndDecode(bool end_stream) {
  if (!end_stream) {
    return;
  }
  upstream_timing_.onLastUpstreamRxByteReceived(parent_.callbacks()->dispatcher().timeSource());
  decode_complete_ = true;
}
// Records the chosen upstream host on this request's stream info, on the
// downstream stream info, and notifies the parent filter.
void UpstreamRequest::onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) {
stream_info_.onUpstreamHostSelected(host);
upstream_host_ = host;
parent_.callbacks()->streamInfo().onUpstreamHostSelected(host);
parent_.onUpstreamHostSelected(host);
}
// Begins sending the request upstream: records whether headers complete the
// request, then asks the connection pool for a stream. The headers are
// actually encoded later, in onPoolReady(), once a connection is available.
void UpstreamRequest::encodeHeaders(bool end_stream) {
ASSERT(!encode_complete_);
encode_complete_ = end_stream;
conn_pool_->newStream(this);
}
// Sends request body data upstream. While no upstream stream exists yet (or
// forwarding is paused awaiting a CONNECT response), data is buffered in a
// watermark buffer that applies downstream flow control; otherwise it is
// proxied directly.
void UpstreamRequest::encodeData(Buffer::Instance& data, bool end_stream) {
ASSERT(!encode_complete_);
encode_complete_ = end_stream;
if (!upstream_ || paused_for_connect_) {
ENVOY_STREAM_LOG(trace, "buffering {} bytes", *parent_.callbacks(), data.length());
if (!buffered_request_body_) {
// Lazily create the buffer; its high/low watermark callbacks pause and
// resume reads from the downstream connection.
buffered_request_body_ = std::make_unique<Buffer::WatermarkBuffer>(
[this]() -> void { this->enableDataFromDownstreamForFlowControl(); },
[this]() -> void { this->disableDataFromDownstreamForFlowControl(); },
[]() -> void { /* TODO(adisuissa): Handle overflow watermark */ });
buffered_request_body_->setWatermarks(parent_.callbacks()->decoderBufferLimit());
}
buffered_request_body_->move(data);
} else {
// Metadata must have been flushed before any data frame is proxied.
ASSERT(downstream_metadata_map_vector_.empty());
ENVOY_STREAM_LOG(trace, "proxying {} bytes", *parent_.callbacks(), data.length());
stream_info_.addBytesSent(data.length());
upstream_->encodeData(data, end_stream);
if (end_stream) {
upstream_timing_.onLastUpstreamTxByteSent(parent_.callbacks()->dispatcher().timeSource());
}
}
}
// Sends request trailers upstream. Trailers always complete the request; if
// no upstream stream exists yet they are sent later by encodeBodyAndTrailers()
// (the encode_trailers_ flag records that they are pending).
void UpstreamRequest::encodeTrailers(const Http::RequestTrailerMap& trailers) {
ASSERT(!encode_complete_);
encode_complete_ = true;
encode_trailers_ = true;
if (!upstream_) {
ENVOY_STREAM_LOG(trace, "buffering trailers", *parent_.callbacks());
} else {
// Metadata must have been flushed before trailers are proxied.
ASSERT(downstream_metadata_map_vector_.empty());
ENVOY_STREAM_LOG(trace, "proxying trailers", *parent_.callbacks());
upstream_->encodeTrailers(trailers);
upstream_timing_.onLastUpstreamTxByteSent(parent_.callbacks()->dispatcher().timeSource());
}
}
// Sends a METADATA map upstream, or queues it for later (flushed in
// encodeBodyAndTrailers()) if the upstream stream is not ready yet.
void UpstreamRequest::encodeMetadata(Http::MetadataMapPtr&& metadata_map_ptr) {
if (!upstream_) {
ENVOY_STREAM_LOG(trace, "upstream_ not ready. Store metadata_map to encode later: {}",
*parent_.callbacks(), *metadata_map_ptr);
downstream_metadata_map_vector_.emplace_back(std::move(metadata_map_ptr));
} else {
ENVOY_STREAM_LOG(trace, "Encode metadata: {}", *parent_.callbacks(), *metadata_map_ptr);
Http::MetadataMapVector metadata_map_vector;
metadata_map_vector.emplace_back(std::move(metadata_map_ptr));
upstream_->encodeMetadata(metadata_map_vector);
}
}
// Handles an upstream stream reset: tags the tracing span, clears the
// encoder, and notifies the parent. If the reset arrives re-entrantly while
// encodeHeaders() is on the stack, the reset is deferred and replayed later
// by encodeBodyAndTrailers().
void UpstreamRequest::onResetStream(Http::StreamResetReason reason,
absl::string_view transport_failure_reason) {
ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher());
if (span_ != nullptr) {
// Add tags about reset.
span_->setTag(Tracing::Tags::get().Error, Tracing::Tags::get().True);
span_->setTag(Tracing::Tags::get().ErrorReason, Http::Utility::resetReasonToString(reason));
}
clearRequestEncoder();
awaiting_headers_ = false;
if (!calling_encode_headers_) {
stream_info_.setResponseFlag(Filter::streamResetReasonToResponseFlag(reason));
parent_.onUpstreamReset(reason, transport_failure_reason, *this);
} else {
// Re-entrant reset during encodeHeaders(); record it for deferred
// handling once the encode call unwinds.
deferred_reset_reason_ = reason;
}
}
// Locally cancels this upstream request: cancels any pending conn-pool
// stream, or resets the established upstream stream. No-op when the request
// already completed in both directions.
void UpstreamRequest::resetStream() {
// Don't reset the stream if we're already done with it.
if (encode_complete_ && decode_complete_) {
return;
}
if (span_ != nullptr) {
// Add tags about the cancellation.
span_->setTag(Tracing::Tags::get().Canceled, Tracing::Tags::get().True);
}
if (conn_pool_->cancelAnyPendingStream()) {
ENVOY_STREAM_LOG(debug, "canceled pool request", *parent_.callbacks());
// A pending pool stream and an established upstream are mutually
// exclusive states.
ASSERT(!upstream_);
}
if (upstream_) {
ENVOY_STREAM_LOG(debug, "resetting pool request", *parent_.callbacks());
upstream_->resetStream();
clearRequestEncoder();
}
}
// Arms the per-try timeout timer if one is configured (> 0). Must be called
// at most once per upstream request (asserted).
void UpstreamRequest::setupPerTryTimeout() {
  ASSERT(!per_try_timeout_);
  const auto& per_try = parent_.timeout().per_try_timeout_;
  if (per_try.count() <= 0) {
    return;
  }
  per_try_timeout_ =
      parent_.callbacks()->dispatcher().createTimer([this]() -> void { onPerTryTimeout(); });
  per_try_timeout_->enableTimer(per_try);
}
// Fired when the per-try timeout timer expires. Only escalates to the parent
// filter if nothing has been sent downstream yet; otherwise the global
// timeout governs.
void UpstreamRequest::onPerTryTimeout() {
// If we've sent anything downstream, ignore the per try timeout and let the response continue
// up to the global timeout
if (!parent_.downstreamResponseStarted()) {
ENVOY_STREAM_LOG(debug, "upstream per try timeout", *parent_.callbacks());
stream_info_.setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout);
parent_.onPerTryTimeout(*this);
} else {
ENVOY_STREAM_LOG(debug,
"ignored upstream per try timeout due to already started downstream response",
*parent_.callbacks());
}
}
// Translates a connection pool failure into a stream reset so the rest of
// the router code can handle pool failures and upstream resets uniformly.
void UpstreamRequest::onPoolFailure(ConnectionPool::PoolFailureReason reason,
                                    absl::string_view transport_failure_reason,
                                    Upstream::HostDescriptionConstSharedPtr host) {
  // Map the pool failure reason onto the equivalent stream reset reason.
  const Http::StreamResetReason reset_reason = [reason]() {
    switch (reason) {
    case ConnectionPool::PoolFailureReason::Overflow:
      return Http::StreamResetReason::Overflow;
    case ConnectionPool::PoolFailureReason::RemoteConnectionFailure:
    case ConnectionPool::PoolFailureReason::LocalConnectionFailure:
      return Http::StreamResetReason::ConnectionFailure;
    case ConnectionPool::PoolFailureReason::Timeout:
      return Http::StreamResetReason::LocalReset;
    }
    // Unreachable for valid enum values; matches the original's default.
    return Http::StreamResetReason::ConnectionFailure;
  }();

  // Mimic an upstream reset.
  onUpstreamHostSelected(host);
  onResetStream(reset_reason, transport_failure_reason);
}
// Fired when the connection pool hands us an upstream stream. Records host
// and connection properties on stream info, arms per-try and max-duration
// timers, encodes the (possibly rewritten) request headers, and then flushes
// any buffered body/trailers unless paused awaiting a CONNECT response.
void UpstreamRequest::onPoolReady(
std::unique_ptr<GenericUpstream>&& upstream, Upstream::HostDescriptionConstSharedPtr host,
const Network::Address::InstanceConstSharedPtr& upstream_local_address,
const StreamInfo::StreamInfo& info) {
// This may be called under an existing ScopeTrackerScopeState but it will unwind correctly.
ScopeTrackerScopeState scope(&parent_.callbacks()->scope(), parent_.callbacks()->dispatcher());
ENVOY_STREAM_LOG(debug, "pool ready", *parent_.callbacks());
upstream_ = std::move(upstream);
if (parent_.requestVcluster()) {
// The cluster increases its upstream_rq_total_ counter right before firing this onPoolReady
// callback. Hence, the upstream request increases the virtual cluster's upstream_rq_total_ stat
// here.
parent_.requestVcluster()->stats().upstream_rq_total_.inc();
}
host->outlierDetector().putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess);
onUpstreamHostSelected(host);
stream_info_.setUpstreamFilterState(std::make_shared<StreamInfo::FilterStateImpl>(
info.filterState().parent()->parent(), StreamInfo::FilterState::LifeSpan::Request));
stream_info_.setUpstreamLocalAddress(upstream_local_address);
parent_.callbacks()->streamInfo().setUpstreamLocalAddress(upstream_local_address);
stream_info_.setUpstreamSslConnection(info.downstreamSslConnection());
parent_.callbacks()->streamInfo().setUpstreamSslConnection(info.downstreamSslConnection());
// Only start the per-try timeout now if the full downstream request has
// been received; otherwise defer it to request completion.
if (parent_.downstreamEndStream()) {
setupPerTryTimeout();
} else {
create_per_try_timeout_on_request_complete_ = true;
}
// Make sure the connection manager will inform the downstream watermark manager when the
// downstream buffers are overrun. This may result in immediate watermark callbacks referencing
// the encoder.
parent_.callbacks()->addDownstreamWatermarkCallbacks(downstream_watermark_manager_);
// Guard against re-entrant resets during header encoding; see
// onResetStream() / deferred_reset_reason_.
calling_encode_headers_ = true;
auto* headers = parent_.downstreamHeaders();
if (parent_.routeEntry()->autoHostRewrite() && !host->hostname().empty()) {
parent_.downstreamHeaders()->setHost(host->hostname());
}
if (span_ != nullptr) {
// Propagate trace context to the upstream via request headers.
span_->injectContext(*parent_.downstreamHeaders());
}
upstream_timing_.onFirstUpstreamTxByteSent(parent_.callbacks()->dispatcher().timeSource());
// Make sure that when we are forwarding CONNECT payload we do not do so until
// the upstream has accepted the CONNECT request.
if (conn_pool_->protocol().has_value() &&
headers->getMethodValue() == Http::Headers::get().MethodValues.Connect) {
paused_for_connect_ = true;
}
if (upstream_host_->cluster().commonHttpProtocolOptions().has_max_stream_duration()) {
const auto max_stream_duration = std::chrono::milliseconds(DurationUtil::durationToMilliseconds(
upstream_host_->cluster().commonHttpProtocolOptions().max_stream_duration()));
if (max_stream_duration.count()) {
max_stream_duration_timer_ = parent_.callbacks()->dispatcher().createTimer(
[this]() -> void { onStreamMaxDurationReached(); });
max_stream_duration_timer_->enableTimer(max_stream_duration);
}
}
upstream_->encodeHeaders(*parent_.downstreamHeaders(), shouldSendEndStream());
calling_encode_headers_ = false;
if (!paused_for_connect_) {
encodeBodyAndTrailers();
}
}
// Flushes everything queued behind header encoding: a deferred reset (if one
// occurred re-entrantly), then buffered metadata, body, and trailers, in that
// frame order.
void UpstreamRequest::encodeBodyAndTrailers() {
// It is possible to get reset in the middle of an encodeHeaders() call. This happens for
// example in the HTTP/2 codec if the frame cannot be encoded for some reason. This should never
// happen but it's unclear if we have covered all cases so protect against it and test for it.
// One specific example of a case where this happens is if we try to encode a total header size
// that is too big in HTTP/2 (64K currently).
if (deferred_reset_reason_) {
onResetStream(deferred_reset_reason_.value(), absl::string_view());
} else {
// Encode metadata after headers and before any other frame type.
if (!downstream_metadata_map_vector_.empty()) {
ENVOY_STREAM_LOG(debug, "Send metadata onPoolReady. {}", *parent_.callbacks(),
downstream_metadata_map_vector_);
upstream_->encodeMetadata(downstream_metadata_map_vector_);
downstream_metadata_map_vector_.clear();
// Metadata frames cannot carry end-stream; send an empty data frame to
// close the stream when needed.
if (shouldSendEndStream()) {
Buffer::OwnedImpl empty_data("");
upstream_->encodeData(empty_data, true);
}
}
if (buffered_request_body_) {
stream_info_.addBytesSent(buffered_request_body_->length());
// End the stream with the body only if no trailers are pending.
upstream_->encodeData(*buffered_request_body_, encode_complete_ && !encode_trailers_);
}
if (encode_trailers_) {
upstream_->encodeTrailers(*parent_.downstreamTrailers());
}
if (encode_complete_) {
upstream_timing_.onLastUpstreamTxByteSent(parent_.callbacks()->dispatcher().timeSource());
}
}
}
// Fired when the configured max stream duration elapses: bump the cluster
// stat and let the parent filter decide whether to retry.
void UpstreamRequest::onStreamMaxDurationReached() {
upstream_host_->cluster().stats().upstream_rq_max_duration_reached_.inc();
// The upstream had closed then try to retry along with retry policy.
parent_.onStreamMaxDurationReached(*this);
}
// Releases the upstream stream. Watermark callbacks are unsubscribed first so
// no downstream watermark events reference the encoder after it is gone.
void UpstreamRequest::clearRequestEncoder() {
// Before clearing the encoder, unsubscribe from callbacks.
if (upstream_) {
parent_.callbacks()->removeDownstreamWatermarkCallbacks(downstream_watermark_manager_);
}
upstream_.reset();
}
// Downstream write buffers are overrun: pause reads from the upstream stream
// so the response stops accumulating.
void UpstreamRequest::DownstreamWatermarkManager::onAboveWriteBufferHighWatermark() {
ASSERT(parent_.upstream_);
// There are two states we should get this callback in: 1) the watermark was
// hit due to writes from a different filter instance over a shared
// downstream connection, or 2) the watermark was hit due to THIS filter
// instance writing back the "winning" upstream request. In either case we
// can disable reads from upstream.
ASSERT(!parent_.parent_.finalUpstreamRequest() ||
&parent_ == parent_.parent_.finalUpstreamRequest());
// The downstream connection is overrun. Pause reads from upstream.
// If there are multiple calls to readDisable either the codec (H2) or the underlying
// Network::Connection (H1) will handle reference counting.
parent_.parent_.cluster()->stats().upstream_flow_control_paused_reading_total_.inc();
parent_.upstream_->readDisable(true);
}
// Downstream write buffers drained below the low watermark: resume reads from
// the upstream stream.
void UpstreamRequest::DownstreamWatermarkManager::onBelowWriteBufferLowWatermark() {
ASSERT(parent_.upstream_);
// One source of connection blockage has buffer available. Pass this on to the stream, which
// will resume reads if this was the last remaining high watermark.
parent_.parent_.cluster()->stats().upstream_flow_control_resumed_reading_total_.inc();
parent_.upstream_->readDisable(false);
}
// The buffered request body hit its high watermark: pause reads from the
// downstream connection and count the outstanding disable so the destructor
// can unwind it.
void UpstreamRequest::disableDataFromDownstreamForFlowControl() {
// If there is only one upstream request, we can be assured that
// disabling reads will not slow down other upstream requests. If we've
// already seen the full downstream request (downstream_end_stream_) then
// disabling reads is a noop.
// This assert condition must be true because
// parent_.upstreamRequests().size() can only be greater than 1 in the
// case of a per-try-timeout with hedge_on_per_try_timeout enabled, and
// the per try timeout timer is started only after downstream_end_stream_
// is true.
ASSERT(parent_.upstreamRequests().size() == 1 || parent_.downstreamEndStream());
parent_.cluster()->stats().upstream_flow_control_backed_up_total_.inc();
parent_.callbacks()->onDecoderFilterAboveWriteBufferHighWatermark();
++downstream_data_disabled_;
}
// The buffered request body drained below its low watermark: resume reads
// from the downstream connection and decrement the outstanding-disable count.
void UpstreamRequest::enableDataFromDownstreamForFlowControl() {
// If there is only one upstream request, we can be assured that
// disabling reads will not overflow any write buffers in other upstream
// requests. If we've already seen the full downstream request
// (downstream_end_stream_) then enabling reads is a noop.
// This assert condition must be true because
// parent_.upstreamRequests().size() can only be greater than 1 in the
// case of a per-try-timeout with hedge_on_per_try_timeout enabled, and
// the per try timeout timer is started only after downstream_end_stream_
// is true.
ASSERT(parent_.upstreamRequests().size() == 1 || parent_.downstreamEndStream());
parent_.cluster()->stats().upstream_flow_control_drained_total_.inc();
parent_.callbacks()->onDecoderFilterBelowWriteBufferLowWatermark();
// Defensive guard in release builds: never let the counter go negative.
ASSERT(downstream_data_disabled_ != 0);
if (downstream_data_disabled_ > 0) {
--downstream_data_disabled_;
}
}
} // namespace Router
} // namespace Envoy
| envoyproxy/envoy-wasm | source/common/router/upstream_request.cc | C++ | apache-2.0 | 22,548 |
package com.planet_ink.coffee_mud.Abilities.Spells;
import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.core.collections.*;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Commands.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Libraries.interfaces.*;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;
import java.util.*;
/*
Copyright 2001-2022 Bo Zimmerman
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
 * Spell_Erase - an alteration spell that removes written content from an
 * item: the spell list of a Scroll, or the readable text of any other
 * readable item.
 */
public class Spell_Erase extends Spell
{
@Override
public String ID()
{
return "Spell_Erase";
}
// Localized display name, resolved once at class-load time.
private final static String localizedName = CMLib.lang().L("Erase Scroll");
@Override
public String name()
{
return localizedName;
}
// This spell may only target items.
@Override
protected int canTargetCode()
{
return CAN_ITEMS;
}
@Override
public int classificationCode()
{
return Ability.ACODE_SPELL|Ability.DOMAIN_ALTERATION;
}
// Neither beneficial nor malicious toward its target.
@Override
public int abstractQuality()
{
return Ability.QUALITY_INDIFFERENT;
}
/**
 * Casts the spell: resolves the targeted item, rejects targets that are
 * neither scrolls nor readable, and on a successful proficiency check
 * blanks the scroll's spell list or the item's readable text.
 *
 * @param mob the caster
 * @param commands the parsed command words naming the target
 * @param givenTarget an explicit target, if any
 * @param auto true when cast automatically (no casting cost/fizzle)
 * @param asLevel cast the spell as this level, if non-zero
 * @return true if the spell succeeded
 */
@Override
public boolean invoke(final MOB mob, final List<String> commands, final Physical givenTarget, final boolean auto, final int asLevel)
{
if((commands.size()<1)&&(givenTarget==null))
{
mob.tell(L("Erase what?."));
return false;
}
final Item target=getTarget(mob,mob.location(),givenTarget,commands,Wearable.FILTER_ANY);
if(target==null)
return false;
// Valid targets are scrolls (even unreadable ones) or any readable item.
if(!(target instanceof Scroll)&&(!target.isReadable()))
{
mob.tell(L("You can't erase that."));
return false;
}
if(!super.invoke(mob,commands,givenTarget,auto,asLevel))
return false;
final boolean success=proficiencyCheck(mob,0,auto);
if(success)
{
// Broadcast the cast message to the room; only apply the effect if the
// room allows the message.
final CMMsg msg=CMClass.getMsg(mob,target,this,verbalCastCode(mob,target,auto),auto?L("The words on <T-NAME> fade."):L("^S<S-NAME> whisper(s), and then rub(s) on <T-NAMESELF>, making the words fade.^?"));
if(mob.location().okMessage(mob,msg))
{
mob.location().send(mob,msg);
if(target instanceof Scroll)
((Scroll)target).setSpellList("");
else
target.setReadableText("");
}
}
else
beneficialWordsFizzle(mob,target,L("<S-NAME> whisper(s), and then rub(s) on <T-NAMESELF>, but nothing happens."));
// return whether it worked
return success;
}
}
| bozimmerman/CoffeeMud | com/planet_ink/coffee_mud/Abilities/Spells/Spell_Erase.java | Java | apache-2.0 | 3,338 |
/*******************************************************************************
* Copyright 2020 Tremolo Security, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package com.tremolosecurity.unison.gitlab.provisioning.targets;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.beanutils.BeanUtils;
import org.apache.http.Header;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.config.CookieSpecs;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.BasicHttpClientConnectionManager;
import org.apache.http.message.BasicHeader;
import org.apache.http.util.EntityUtils;
import org.apache.logging.log4j.Logger;
import org.gitlab4j.api.GitLabApi;
import org.gitlab4j.api.GitLabApiException;
import org.gitlab4j.api.GroupApi;
import org.gitlab4j.api.UserApi;
import org.gitlab4j.api.models.AccessLevel;
import org.gitlab4j.api.models.Group;
import org.gitlab4j.api.models.Identity;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.json.simple.parser.ParseException;
import com.tremolosecurity.config.util.ConfigManager;
import com.tremolosecurity.provisioning.core.ProvisioningException;
import com.tremolosecurity.provisioning.core.User;
import com.tremolosecurity.provisioning.core.UserStoreProviderWithAddGroup;
import com.tremolosecurity.provisioning.core.Workflow;
import com.tremolosecurity.provisioning.util.GenPasswd;
import com.tremolosecurity.provisioning.core.ProvisioningUtil.ActionType;
import com.tremolosecurity.saml.Attribute;
public class GitlabUserProvider implements UserStoreProviderWithAddGroup {
static Logger logger = org.apache.logging.log4j.LogManager.getLogger(GitlabUserProvider.class.getName());
ConfigManager cfgMgr;
String name;
String token;
String url;
GitLabApi gitLabApi;
UserApi userApi;
GroupApi groupApi;
BeanUtils beanUtils = new BeanUtils();
public static final String GITLAB_IDENTITIES = "com.tremolosecurity.unison.gitlab.itentities";
public static final String GITLAB_GROUP_ENTITLEMENTS = "com.tremolosecurity.unison.gitlab.group-entitlements";
/**
 * Creates a GitLab user from the Unison user object: copies the requested
 * attributes onto a GitLab4J user via bean properties, creates the account
 * with a random password, waits for the account to materialize, links any
 * federated identities via the raw GitLab REST API, and adds the user to
 * the requested groups. All actions are written to the provisioning audit
 * log against the workflow/approval in the request map.
 */
@Override
public void createUser(User user, Set<String> attributes, Map<String, Object> request)
throws ProvisioningException {
int approvalID = 0;
if (request.containsKey("APPROVAL_ID")) {
approvalID = (Integer) request.get("APPROVAL_ID");
}
Workflow workflow = (Workflow) request.get("WORKFLOW");
org.gitlab4j.api.models.User newUser = new org.gitlab4j.api.models.User();
newUser.setUsername(user.getUserID());
// Copy each requested attribute onto the GitLab user object; attribute
// names must match bean property names on the GitLab4J model.
for (String attrName : attributes) {
Attribute attr = user.getAttribs().get(attrName);
if (attr != null) {
try {
this.beanUtils.setProperty(newUser, attrName, attr.getValues().get(0));
} catch (IllegalAccessException | InvocationTargetException e) {
throw new ProvisioningException("Could not set " + attrName + " for " + user.getUserID(),e);
}
}
}
try {
// Random 50-character password; resetPassword=false.
this.userApi.createUser(newUser, new GenPasswd(50).getPassword(), false);
} catch (GitLabApiException e) {
throw new ProvisioningException("Could not create user",e);
}
// Poll for the created account (up to ~10 seconds, 1s intervals) since
// creation may not be immediately visible.
newUser = this.findUserByName(user.getUserID());
int numTries = 0;
while (newUser == null) {
if (numTries > 10) {
throw new ProvisioningException("User " + user.getUserID() + " never created");
}
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
}
newUser = this.findUserByName(user.getUserID());
numTries++;
}
this.cfgMgr.getProvisioningEngine().logAction(this.name,true, ActionType.Add, approvalID, workflow, "id", newUser.getId().toString());
for (String attrName : attributes) {
Attribute attr = user.getAttribs().get(attrName);
if (attr != null) {
this.cfgMgr.getProvisioningEngine().logAction(this.name,false, ActionType.Add, approvalID, workflow, attrName, attr.getValues().get(0));
}
}
// Link federated identities. GitLab4J has no identity API here, so this
// goes straight to the REST endpoint with the private token header.
List<GitlabFedIdentity> ids = (List<GitlabFedIdentity>) request.get(GitlabUserProvider.GITLAB_IDENTITIES);
if (ids != null) {
ArrayList<Header> defheaders = new ArrayList<Header>();
defheaders.add(new BasicHeader("Private-Token", this.token));
BasicHttpClientConnectionManager bhcm = new BasicHttpClientConnectionManager(
cfgMgr.getHttpClientSocketRegistry());
RequestConfig rc = RequestConfig.custom().setCookieSpec(CookieSpecs.STANDARD).setRedirectsEnabled(false)
.build();
CloseableHttpClient http = HttpClients.custom()
.setConnectionManager(bhcm)
.setDefaultHeaders(defheaders)
.setDefaultRequestConfig(rc)
.build();
try {
for (GitlabFedIdentity id : ids) {
// PUT /api/v4/users/:id?provider=...&extern_uid=... attaches the
// external identity to the freshly created account.
HttpPut getmembers = new HttpPut(new StringBuilder().append(this.url).append("/api/v4/users/").append(newUser.getId()).append("?provider=").append(id.getProvider()).append("&extern_uid=").append(URLEncoder.encode(user.getUserID(), "UTF-8")).toString());
CloseableHttpResponse resp = http.execute(getmembers);
if (resp.getStatusLine().getStatusCode() != 200) {
throw new IOException("Invalid response " + resp.getStatusLine().getStatusCode());
}
this.cfgMgr.getProvisioningEngine().logAction(this.name,false, ActionType.Add, approvalID, workflow, "identity-provider", id.getProvider());
this.cfgMgr.getProvisioningEngine().logAction(this.name,false, ActionType.Add, approvalID, workflow, "identity-externid", id.getExternalUid());
}
} catch (IOException e) {
throw new ProvisioningException("Could not set identity",e);
} finally {
try {
http.close();
} catch (IOException e) {
}
bhcm.close();
}
}
// Group memberships: access level comes from the entitlements map when
// present, otherwise defaults to DEVELOPER. Unknown groups are only
// warned about, not treated as errors.
HashMap<String,Integer> groupmap = (HashMap<String, Integer>) request.get(GitlabUserProvider.GITLAB_GROUP_ENTITLEMENTS);
if (groupmap == null) {
groupmap = new HashMap<String, Integer>();
}
for (String group : user.getGroups()) {
try {
Group groupObj = this.findGroupByName(group);
if (groupObj == null) {
logger.warn("Group " + group + " does not exist");
} else {
int accessLevel = AccessLevel.DEVELOPER.ordinal();
if (groupmap.containsKey(group)) {
accessLevel = groupmap.get(group);
}
this.groupApi.addMember(groupObj.getId(), newUser.getId(), accessLevel);
this.cfgMgr.getProvisioningEngine().logAction(this.name,false, ActionType.Add, approvalID, workflow, "group", group);
}
} catch (GitLabApiException e) {
throw new ProvisioningException("Could not find group " + group,e);
}
}
}
/**
 * Intentionally a no-op: this provider does not manage GitLab passwords.
 * NOTE(review): createUser() assigns a random password and links federated
 * identities, so accounts presumably authenticate externally — confirm.
 */
@Override
public void setUserPassword(User user, Map<String, Object> request) throws ProvisioningException {
// TODO Auto-generated method stub
}
/**
 * Synchronizes a Unison user into GitLab: attributes, federated identities
 * and group memberships. Creates the account first if it does not exist.
 *
 * @param user       desired state of the user
 * @param addOnly    if true, attributes/memberships are only added, never removed
 * @param attributes names of the attributes to synchronize
 * @param request    workflow context ("APPROVAL_ID", "WORKFLOW", identity/entitlement maps)
 * @throws ProvisioningException if GitLab cannot be queried or updated
 */
@Override
public void syncUser(User user, boolean addOnly, Set<String> attributes, Map<String, Object> request)
        throws ProvisioningException {
    // Desired identities, captured BEFORE findUser() overwrites the same
    // request key with the identities currently stored in GitLab.
    List<GitlabFedIdentity> ids = (List<GitlabFedIdentity>) request.get(GitlabUserProvider.GITLAB_IDENTITIES);
    int approvalID = 0;
    if (request.containsKey("APPROVAL_ID")) {
        approvalID = (Integer) request.get("APPROVAL_ID");
    }
    Workflow workflow = (Workflow) request.get("WORKFLOW");
    User fromGitlab = this.findUser(user.getUserID(), attributes, request);
    if (fromGitlab == null) {
        // Account does not exist yet; delegate and stop.
        this.createUser(user, attributes, request);
        return;
    }
    // Identities as they exist in GitLab right now (findUser republished them).
    List<GitlabFedIdentity> idsFromGitlab = (List<GitlabFedIdentity>) request.get(GitlabUserProvider.GITLAB_IDENTITIES);
    // Diff the requested attributes against GitLab's current values.
    HashMap<String,String> toSet = new HashMap<String,String>();
    HashSet<String> toDelete = new HashSet<String>();
    for (String attrName : attributes) {
        Attribute attrFromGitlab = fromGitlab.getAttribs().get(attrName);
        Attribute attrIn = user.getAttribs().get(attrName);
        if ((attrIn != null && attrFromGitlab == null) || (attrIn != null && attrFromGitlab != null && ! attrIn.getValues().get(0).equals(attrFromGitlab.getValues().get(0)))) {
            toSet.put(attrName, attrIn.getValues().get(0));
        } else if (! addOnly) {
            if (attrIn == null && attrFromGitlab != null) {
                toDelete.add(attrName);
            }
        }
    }
    org.gitlab4j.api.models.User toSave = this.findUserByName(user.getUserID());
    for (String attrName : toSet.keySet()) {
        try {
            this.beanUtils.setProperty(toSave, attrName, toSet.get(attrName));
        } catch (IllegalAccessException | InvocationTargetException e) {
            throw new ProvisioningException("Could not update user " + user.getUserID(), e);
        }
    }
    for (String attrName : toDelete) {
        try {
            // No "unset" available; clearing writes an empty string.
            this.beanUtils.setProperty(toSave, attrName, "");
        } catch (IllegalAccessException | InvocationTargetException e) {
            throw new ProvisioningException("Could not update user " + user.getUserID(), e);
        }
    }
    if (ids != null) {
        // Add any desired federated identity GitLab does not already have.
        // Identities go over the raw REST API (PUT /api/v4/users/:id).
        ArrayList<Header> defheaders = new ArrayList<Header>();
        defheaders.add(new BasicHeader("Private-Token", this.token));
        BasicHttpClientConnectionManager bhcm = new BasicHttpClientConnectionManager(
                cfgMgr.getHttpClientSocketRegistry());
        RequestConfig rc = RequestConfig.custom().setCookieSpec(CookieSpecs.STANDARD).setRedirectsEnabled(false)
                .build();
        CloseableHttpClient http = HttpClients.custom()
                .setConnectionManager(bhcm)
                .setDefaultHeaders(defheaders)
                .setDefaultRequestConfig(rc)
                .build();
        try {
            for (GitlabFedIdentity id : ids) {
                boolean found = false;
                for (GitlabFedIdentity idfromgl : idsFromGitlab) {
                    if (id.getExternalUid().equals(idfromgl.getExternalUid()) && id.getProvider().equals(idfromgl.getProvider())) {
                        found = true;
                        break;
                    }
                }
                if (! found) {
                    HttpPut getmembers = new HttpPut(new StringBuilder().append(this.url).append("/api/v4/users/").append(toSave.getId()).append("?provider=").append(id.getProvider()).append("&extern_uid=").append(URLEncoder.encode(user.getUserID(), "UTF-8")).toString());
                    CloseableHttpResponse resp = http.execute(getmembers);
                    if (resp.getStatusLine().getStatusCode() != 200) {
                        throw new IOException("Invalid response " + resp.getStatusLine().getStatusCode());
                    }
                    this.cfgMgr.getProvisioningEngine().logAction(this.name, false, ActionType.Add, approvalID, workflow, "identity-provider", id.getProvider());
                    this.cfgMgr.getProvisioningEngine().logAction(this.name, false, ActionType.Add, approvalID, workflow, "identity-externid", id.getExternalUid());
                }
            }
        } catch (IOException e) {
            throw new ProvisioningException("Could not set identity", e);
        } finally {
            try {
                http.close();
            } catch (IOException e) {
                // best-effort close; nothing useful to do here
            }
            bhcm.close();
        }
    }
    try {
        this.userApi.updateUser(toSave, null);
    } catch (GitLabApiException e) {
        throw new ProvisioningException("Could not save user " + user.getUserID(), e);
    }
    // Audit every attribute change after the save succeeded.
    for (String attrName : toSet.keySet()) {
        this.cfgMgr.getProvisioningEngine().logAction(this.name, false, ActionType.Replace, approvalID, workflow, attrName, toSet.get(attrName));
    }
    for (String attrName : toDelete) {
        this.cfgMgr.getProvisioningEngine().logAction(this.name, false, ActionType.Replace, approvalID, workflow, attrName, "");
    }
    HashMap<String,Integer> groupmap = (HashMap<String, Integer>) request.get(GitlabUserProvider.GITLAB_GROUP_ENTITLEMENTS);
    if (groupmap == null) {
        groupmap = new HashMap<String, Integer>();
    }
    // Add memberships present on the Unison user but missing in GitLab.
    for (String inGroup : user.getGroups()) {
        if (! fromGitlab.getGroups().contains(inGroup)) {
            try {
                Group groupObj = this.findGroupByName(inGroup);
                if (groupObj == null) {
                    logger.warn("Group " + inGroup + " does not exist");
                } else {
                    int accessLevel = AccessLevel.DEVELOPER.ordinal();
                    if (groupmap.containsKey(inGroup)) {
                        accessLevel = groupmap.get(inGroup);
                    }
                    this.groupApi.addMember(groupObj.getId(), toSave.getId(), accessLevel);
                    this.cfgMgr.getProvisioningEngine().logAction(this.name, false, ActionType.Add, approvalID, workflow, "group", inGroup);
                }
            } catch (GitLabApiException e) {
                if (e.getMessage().equalsIgnoreCase("Member already exists")) {
                    // Already a member: not an error for sync purposes.
                    continue;
                } else {
                    throw new ProvisioningException("Could not find group " + inGroup, e);
                }
            }
        }
    }
    if (! addOnly) {
        // Remove memberships GitLab has but the Unison user does not.
        for (String groupFromGitlab : fromGitlab.getGroups()) {
            if (! user.getGroups().contains(groupFromGitlab)) {
                try {
                    Group groupObj = this.findGroupByName(groupFromGitlab);
                    if (groupObj == null) {
                        logger.warn("Group " + groupFromGitlab + " does not exist");
                    } else {
                        this.groupApi.removeMember(groupObj.getId(), toSave.getId());
                        this.cfgMgr.getProvisioningEngine().logAction(this.name, false, ActionType.Delete, approvalID, workflow, "group", groupFromGitlab);
                    }
                } catch (GitLabApiException e) {
                    // Fix: preserve the root cause (it was dropped here before,
                    // unlike every other catch block in this method).
                    throw new ProvisioningException("Could not find group " + groupFromGitlab, e);
                }
            }
        }
    }
}
/**
 * Deletes the GitLab account corresponding to the given Unison user.
 * A user that cannot be found in GitLab is silently ignored.
 *
 * @param user    the user to delete
 * @param request workflow context ("APPROVAL_ID", "WORKFLOW")
 * @throws ProvisioningException if the lookup or delete call fails
 */
@Override
public void deleteUser(User user, Map<String, Object> request) throws ProvisioningException {
    int approval = 0;
    if (request.containsKey("APPROVAL_ID")) {
        approval = (Integer) request.get("APPROVAL_ID");
    }
    Workflow wf = (Workflow) request.get("WORKFLOW");
    org.gitlab4j.api.models.User gitlabUser = this.findUserByName(user.getUserID());
    if (gitlabUser == null) {
        // Nothing to delete.
        return;
    }
    try {
        // second argument is the hard-delete flag — NOTE(review): confirm
        // against the gitlab4j UserApi javadoc that false is intended here
        this.userApi.deleteUser(gitlabUser.getId(), false);
    } catch (GitLabApiException e) {
        throw new ProvisioningException("Could not delete " + user.getUserID(), e);
    }
    this.cfgMgr.getProvisioningEngine().logAction(this.name, true, ActionType.Delete, approval, wf, "id", gitlabUser.getId().toString());
}
/**
 * Loads a user from GitLab and maps it into a Unison {@link User}:
 * the requested bean attributes, the user's federated identities (published
 * into the request under {@code GITLAB_IDENTITIES}) and the user's group
 * memberships (read from the raw REST memberships endpoint).
 *
 * @param userID     the GitLab username to look up
 * @param attributes bean property names to copy onto the Unison user
 * @param request    workflow context; receives the identity list as a side effect
 * @return the mapped user, or null if no such user exists in GitLab
 * @throws ProvisioningException if GitLab cannot be queried
 */
@Override
public User findUser(String userID, Set<String> attributes, Map<String, Object> request)
        throws ProvisioningException {
    org.gitlab4j.api.models.User fromGitlab = findUserByName(userID);
    if (fromGitlab == null) {
        return null;
    }
    User forUnison = new User(userID);
    for (String attrName : attributes) {
        try {
            String val = beanUtils.getProperty(fromGitlab, attrName);
            if (val != null) {
                Attribute attr = new Attribute(attrName, val);
                forUnison.getAttribs().put(attrName, attr);
            }
        } catch (IllegalAccessException | InvocationTargetException | NoSuchMethodException e) {
            throw new ProvisioningException("Couldn't load attribute " + attrName, e);
        }
    }
    if (fromGitlab.getIdentities() != null) {
        // Publish the current federated identities so syncUser can diff them
        // against the desired state.
        ArrayList<GitlabFedIdentity> ids = new ArrayList<GitlabFedIdentity>();
        for (Identity fedid : fromGitlab.getIdentities()) {
            GitlabFedIdentity id = new GitlabFedIdentity();
            id.setExternalUid(fedid.getExternUid());
            id.setProvider(fedid.getProvider());
            ids.add(id);
        }
        request.put(GitlabUserProvider.GITLAB_IDENTITIES, ids);
    }
    // Group memberships come from the raw REST API (GET .../memberships).
    ArrayList<Header> defheaders = new ArrayList<Header>();
    defheaders.add(new BasicHeader("Private-Token", this.token));
    BasicHttpClientConnectionManager bhcm = new BasicHttpClientConnectionManager(
            cfgMgr.getHttpClientSocketRegistry());
    RequestConfig rc = RequestConfig.custom().setCookieSpec(CookieSpecs.STANDARD).setRedirectsEnabled(false)
            .build();
    CloseableHttpClient http = HttpClients.custom()
            .setConnectionManager(bhcm)
            .setDefaultHeaders(defheaders)
            .setDefaultRequestConfig(rc)
            .build();
    try {
        HttpGet getmembers = new HttpGet(new StringBuilder().append(this.url).append("/api/v4/users/").append(fromGitlab.getId()).append("/memberships").toString());
        CloseableHttpResponse resp = http.execute(getmembers);
        try {
            if (resp.getStatusLine().getStatusCode() != 200) {
                throw new IOException("Invalid response " + resp.getStatusLine().getStatusCode());
            }
            String json = EntityUtils.toString(resp.getEntity());
            JSONArray members = (JSONArray) new JSONParser().parse(json);
            for (Object o : members) {
                JSONObject member = (JSONObject) o;
                String sourceType = (String) member.get("source_type");
                String sourceName = (String) member.get("source_name");
                if (sourceType.equalsIgnoreCase("Namespace")) {
                    forUnison.getGroups().add(sourceName);
                }
            }
        } finally {
            // Fix: the response was never closed before, leaking the pooled
            // connection held by the connection manager.
            resp.close();
        }
    } catch (IOException | ParseException e) {
        throw new ProvisioningException("Could not get group members", e);
    } finally {
        try {
            http.close();
        } catch (IOException e) {
            // best-effort close
        }
        bhcm.close();
    }
    return forUnison;
}
/**
 * Looks up a GitLab user by username.
 * <p>
 * {@code userApi.findUsers} performs a search that can return multiple hits;
 * when it does, only users whose username exactly equals {@code userID} are
 * considered, and more than one exact match is an error.
 * <p>
 * NOTE(review): when exactly one search hit comes back, it is returned
 * without verifying its username equals {@code userID} — a single fuzzy
 * match would be accepted. Confirm whether that is intentional.
 *
 * @param userID the username to look up
 * @return the matching GitLab user, or null if none found
 * @throws ProvisioningException if the search fails or the name is ambiguous
 */
private org.gitlab4j.api.models.User findUserByName(String userID) throws ProvisioningException {
    org.gitlab4j.api.models.User fromGitlab;
    try {
        List<org.gitlab4j.api.models.User> users = this.userApi.findUsers(userID);
        if (users.size() == 0) {
            return null;
        } else if (users.size() > 1) {
            // Multiple search hits: keep only exact username matches.
            int count = 0;
            org.gitlab4j.api.models.User foundUser = null;
            for (org.gitlab4j.api.models.User user : users) {
                if (user.getUsername().equals(userID)) {
                    count++;
                    foundUser = user;
                }
            }
            if (count > 1) {
                throw new ProvisioningException(userID + " maps to multiple users");
            } else if (count == 0) {
                return null;
            } else {
                return foundUser;
            }
        } else {
            fromGitlab = users.get(0);
        }
    } catch (GitLabApiException e) {
        throw new ProvisioningException("Could not load user",e);
    }
    return fromGitlab;
}
/**
 * Initializes the provider from the Unison target configuration.
 * Required configuration attributes: "token" (GitLab private token) and
 * "url" (GitLab base URL).
 */
@Override
public void init(Map<String, Attribute> cfg, ConfigManager cfgMgr, String name) throws ProvisioningException {
    String apiToken = cfg.get("token").getValues().get(0);
    String baseUrl = cfg.get("url").getValues().get(0);
    this.token = apiToken;
    this.url = baseUrl;
    this.name = name;
    this.cfgMgr = cfgMgr;
    // One shared gitlab4j client backs both the user and group APIs.
    GitLabApi api = new GitLabApi(baseUrl, apiToken);
    this.gitLabApi = api;
    this.userApi = new UserApi(api);
    this.groupApi = new GroupApi(api);
}
/**
 * Creates a GitLab group with the given name (used as both name and path).
 * Idempotent: does nothing if a group with that name already exists.
 *
 * @param additionalAttributes extra bean properties to set on the new group
 * @throws ProvisioningException if a property cannot be set or the create fails
 */
@Override
public void addGroup(String name, Map<String, String> additionalAttributes, User user, Map<String, Object> request)
        throws ProvisioningException {
    if (this.isGroupExists(name, null, request)) {
        // Already present: nothing to do.
        return;
    }
    int approval = 0;
    if (request.containsKey("APPROVAL_ID")) {
        approval = (Integer) request.get("APPROVAL_ID");
    }
    Workflow wf = (Workflow) request.get("WORKFLOW");
    Group newGroup = new Group();
    newGroup.setName(name);
    newGroup.setPath(name);
    // Apply any extra bean properties supplied by the workflow.
    for (Map.Entry<String, String> prop : additionalAttributes.entrySet()) {
        try {
            this.beanUtils.setProperty(newGroup, prop.getKey(), prop.getValue());
        } catch (IllegalAccessException | InvocationTargetException e) {
            throw new ProvisioningException("Could not set properties", e);
        }
    }
    try {
        this.groupApi.addGroup(newGroup);
    } catch (GitLabApiException e) {
        throw new ProvisioningException("Could not create group " + name, e);
    }
    this.cfgMgr.getProvisioningEngine().logAction(this.name, true, ActionType.Add, approval, wf, "group-object", name);
}
/**
 * Deletes the GitLab group with the given name.
 * Idempotent: does nothing if the group does not exist.
 */
@Override
public void deleteGroup(String name, User user, Map<String, Object> request) throws ProvisioningException {
    if (! this.isGroupExists(name, null, request)) {
        // Already gone: nothing to do.
        return;
    }
    int approval = 0;
    if (request.containsKey("APPROVAL_ID")) {
        approval = (Integer) request.get("APPROVAL_ID");
    }
    Workflow wf = (Workflow) request.get("WORKFLOW");
    try {
        this.groupApi.deleteGroup(name);
    } catch (GitLabApiException e) {
        throw new ProvisioningException("Could not delete group " + name, e);
    }
    this.cfgMgr.getProvisioningEngine().logAction(this.name, true, ActionType.Delete, approval, wf, "group-object", name);
}
/**
 * Checks whether a GitLab group with the given name exists.
 * The {@code user} argument is not used.
 *
 * @throws ProvisioningException if the group search fails
 */
@Override
public boolean isGroupExists(String name, User user, Map<String, Object> request) throws ProvisioningException {
    try {
        return this.findGroupByName(name) != null;
    } catch (GitLabApiException e) {
        throw new ProvisioningException("Could not search for groups", e);
    }
}
/**
 * Finds a GitLab group by name, comparing case-insensitively.
 *
 * @param name the group name to look for
 * @return the first group whose name matches, or null if none does
 * @throws GitLabApiException if the group search fails
 */
public Group findGroupByName(String name) throws GitLabApiException {
    for (Group candidate : this.groupApi.getGroups(name)) {
        if (candidate.getName().equalsIgnoreCase(name)) {
            return candidate;
        }
    }
    return null;
}
/**
 * @return the underlying gitlab4j API client created in {@code init}
 */
public GitLabApi getApi() {
    return this.gitLabApi;
}
/**
 * @return the Unison target name this provider was initialized with
 */
public String getName() {
    return this.name;
}
/**
 * Closes the underlying gitlab4j API client, releasing its resources.
 */
@Override
public void shutdown() throws ProvisioningException {
    this.gitLabApi.close();
}
}
| TremoloSecurity/OpenUnison | unison/unison-applications-gitlab/src/main/java/com/tremolosecurity/unison/gitlab/provisioning/targets/GitlabUserProvider.java | Java | apache-2.0 | 21,837 |
package com.xebia.xtime.test.shared.model;
import android.os.Parcel;
import com.xebia.xtime.shared.model.WorkType;
import junit.framework.TestCase;
/**
 * Unit tests for {@code WorkType}: value equality and Parcelable round-trip.
 */
public class WorkTypeTest extends TestCase {

    private WorkType mWorkType;

    @Override
    protected void setUp() throws Exception {
        super.setUp();
        mWorkType = new WorkType("id", "name");
    }

    public void testEquals() {
        // Value equality requires both id and name to match.
        assertEquals(mWorkType, new WorkType("id", "name"));
        assertFalse(mWorkType.equals(new WorkType("not id", "name")));
        assertFalse(mWorkType.equals(new WorkType("id", "not name")));
        // equals must be null-safe and reject unrelated types.
        assertFalse(mWorkType.equals(null));
        assertFalse(mWorkType.equals("id"));
    }

    public void testParcelable() {
        // Marshal into one Parcel, unmarshal from another, and compare.
        Parcel in = Parcel.obtain();
        Parcel out = Parcel.obtain();
        WorkType result = null;
        try {
            in.writeParcelable(mWorkType, 0);
            byte[] bytes = in.marshall();
            out.unmarshall(bytes, 0, bytes.length);
            out.setDataPosition(0);
            result = out.readParcelable(WorkType.class.getClassLoader());
        } finally {
            // Always return Parcels to the pool, even on failure.
            in.recycle();
            out.recycle();
        }
        assertNotNull(result);
        assertEquals(mWorkType, result);
    }
}
| smuldr/xtime-android | app/src/androidTest/java/com/xebia/xtime/test/shared/model/WorkTypeTest.java | Java | apache-2.0 | 1,185 |
package com.github.gserv.serv.wx.message;
/**
 * Runtime exception raised when an incoming XML message cannot be parsed
 * (消息解析异常).
 *
 * @author shiying
 */
public class XmlMessageParseException extends RuntimeException {

    private static final long serialVersionUID = 6648701329524322380L;

    /** Creates an exception with no detail message and no cause. */
    public XmlMessageParseException() {
        super();
    }

    /** Creates an exception with a detail message and an underlying cause. */
    public XmlMessageParseException(String message, Throwable cause) {
        super(message, cause);
    }

    /** Creates an exception with a detail message only. */
    public XmlMessageParseException(String message) {
        super(message);
    }

    /** Creates an exception wrapping an underlying cause. */
    public XmlMessageParseException(Throwable cause) {
        super(cause);
    }
}
| gserv/serv | serv-wx/src/main/java/com/github/gserv/serv/wx/message/XmlMessageParseException.java | Java | apache-2.0 | 700 |
/*
* Copyright (c) 2020 EmeraldPay Inc, All Rights Reserved.
* Copyright (c) 2016-2017 Infinitape Inc, All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.emeraldpay.etherjar.abi;
import java.math.BigDecimal;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
 * Unsigned fixed-point ABI type, canonical form {@code ufixedMxN} where M is
 * the bit width of the integer part and N the bit width of the fractional
 * part. Bare {@code ufixed} is an alias for {@code ufixed128x128}.
 */
public class UFixedType extends DecimalType {

    public final static UFixedType DEFAULT = new UFixedType();

    // Pre-built instances for the common symmetric sizes (MxM).
    final static Map<Integer, UFixedType> CACHED_INSTANCES =
        Stream.of(8, 16, 32, 64, 128).collect(Collectors.collectingAndThen(
            Collectors.toMap(Function.identity(), UFixedType::new), Collections::unmodifiableMap));

    final static String NAME_PREFIX = "ufixed";

    // Matches bare "ufixed" or explicit "ufixed<M>x<N>" with 1-3 digit sizes.
    final static Pattern NAME_PATTERN = Pattern.compile("ufixed((\\d{1,3})x(\\d{1,3}))?");

    /**
     * Try to parse a {@link UFixedType} string representation (either canonical form or not).
     *
     * @param str a string
     * @return a {@link UFixedType} instance is packed as {@link Optional} value,
     * or {@link Optional#empty()} instead
     * @throws NullPointerException if a {@code str} is {@code null}
     * @throws IllegalArgumentException if a {@link UFixedType} has invalid input
     * @see #getCanonicalName()
     */
    public static Optional<UFixedType> from(String str) {
        if (!str.startsWith(NAME_PREFIX))
            return Optional.empty();
        Matcher matcher = NAME_PATTERN.matcher(str);
        if (!matcher.matches())
            throw new IllegalArgumentException("Wrong 'ufixed' type format: " + str);
        if (Objects.isNull(matcher.group(1)))
            return Optional.of(DEFAULT);
        int mBits = Integer.parseInt(matcher.group(2));
        int nBits = Integer.parseInt(matcher.group(3));
        // Reuse a cached instance only for the pre-built symmetric sizes.
        return Optional.of(mBits == nBits && CACHED_INSTANCES.containsKey(mBits) ?
            CACHED_INSTANCES.get(mBits) : new UFixedType(mBits, nBits));
    }

    private final BigDecimal minValue;
    private final BigDecimal maxValue;
    private final NumericType numericType;

    /** Creates the default {@code ufixed128x128} type. */
    public UFixedType() {
        this(128, 128);
    }

    /** Creates a symmetric {@code ufixed} type with {@code bits} for each part. */
    public UFixedType(int bits) {
        this(bits, bits);
    }

    /**
     * Creates a {@code ufixedMxN} type. The backing unsigned integer
     * representation uses mBits + nBits bits; the decimal bounds are derived
     * by shifting out the nBits fractional bits.
     */
    public UFixedType(int mBits, int nBits) {
        super(mBits, nBits);
        numericType = new UIntType(mBits + nBits);
        minValue = new BigDecimal(
            numericType.getMinValue().shiftRight(nBits));
        maxValue = new BigDecimal(
            numericType.getMaxValue().shiftRight(nBits));
    }

    @Override
    public BigDecimal getMinValue() {
        return minValue;
    }

    @Override
    public BigDecimal getMaxValue() {
        return maxValue;
    }

    @Override
    public NumericType getNumericType() {
        return numericType;
    }

    @Override
    public String getCanonicalName() {
        return String.format("ufixed%dx%d", getMBits(), getNBits());
    }
}
| ethereumproject/etherjar | etherjar-abi/src/main/java/io/emeraldpay/etherjar/abi/UFixedType.java | Java | apache-2.0 | 3,629 |
<?php
/**
 * Item detail view: renders a single shop item with its image, visible
 * options, price and an add-to-cart action, plus the category sidebar.
 *
 * @var \Ecommerce\Item $item ;
 */
?>
<div class="ecommerce">
    <div class="row">
        <div class="col-md-3 item-sidebar">
            <div class="sidebar-block">
                <div class="items">
                    <?php /* category tree widget for the sidebar */ $this->widget('Ecommerce\categorys'); ?>
                </div>
            </div>
        </div>
        <div class="col-md-9">
            <div class="detail_item content">
                <div class="row">
                    <div class="col-sm-5">
                        <img src="<?= Statics::file($item->image ? $item->image->path : false, '350x800'); ?>"
                             class="img-responsive"/>
                    </div>
                    <div class="col-sm-7">
                        <h1><?= $item->name(); ?></h1>
                        <ul class="item-options">
                            <?php
                            // Render only options that are flagged visible and have a value.
                            foreach ($item->options as $param) {
                                if (!$param->item_option_view || !$param->value) {
                                    continue;
                                }
                                // For select-type options the stored value is a key into
                                // the option's item list; resolve it to its display value.
                                if ($param->item_option_type == 'select') {
                                    if (empty($param->option->items[$param->value])) {
                                        continue;
                                    }
                                    $value = $param->option->items[$param->value]->value;
                                } else {
                                    $value = $param->value;
                                }
                                $paramName = $param->item_option_name;
                                echo "<li>{$paramName}: {$value} {$param->item_option_postfix}</li>";
                            }
                            ?>
                        </ul>
                        <div class="item-actions">
                            <div class="item-price">
                                <span class="item-price-caption">Цена: </span>
                                <span class="item-price-amount"><?= number_format($item->getPrice()->price, 2, '.', ' '); ?></span>
                                <span class="item-price-currency">руб</span>
                            </div>
                            <div class="btn btn-primary item-addtocart"
                                 onclick="inji.Ecommerce.Cart.addItem(<?= $item->getPrice()->id; ?>, 1);">
                                <i class="glyphicon glyphicon-shopping-cart"></i> Добавить в корзину
                            </div>
                        </div>
                    </div>
                </div>
                <div class="row">
                    <div class="col-xs-12">
                        <div class="item-description">
                            <?= $item->description; ?>
                        </div>
                    </div>
                </div>
            </div>
        </div>
    </div>
</div> | injitools/cms-Inji | system/modules/Ecommerce/appControllers/content/view.php | PHP | apache-2.0 | 3,001 |
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using Google.Apis.Appengine.v1.Data;
using GoogleCloudExtension.Utils;
namespace GoogleCloudExtension.CloudExplorerSources.Gae
{
/// <summary>
/// This class represents a GAE version in the Properties Window.
/// Each property is surfaced to the Visual Studio Properties grid through the
/// localized category/display-name attributes.
/// </summary>
internal class VersionItem : PropertyWindowItemBase
{
    // Backing App Engine version model; every property below reads from it.
    private readonly Version _version;

    public VersionItem(Version version) : base(className: Resources.CloudExplorerGaeVersionCategory, componentName: version.Id)
    {
        _version = version;
    }

    [LocalizedCategory(nameof(Resources.CloudExplorerGaeVersionCategory))]
    public string Name => _version.Name;

    [LocalizedCategory(nameof(Resources.CloudExplorerGaeVersionCategory))]
    public string Id => _version.Id;

    [LocalizedCategory(nameof(Resources.CloudExplorerGaeVersionCategory))]
    public string Status => _version.ServingStatus;

    [LocalizedCategory(nameof(Resources.CloudExplorerGaeVersionCategory))]
    public string Deployer => _version.CreatedBy;

    [LocalizedCategory(nameof(Resources.CloudExplorerGaeVersionCategory))]
    public string Url => _version.VersionUrl;

    [LocalizedCategory(nameof(Resources.CloudExplorerGaeVersionCategory))]
    public string Runtime => _version.Runtime;

    [LocalizedCategory(nameof(Resources.CloudExplorerGaeVersionCategory))]
    public string Environment => _version.Env;

    [LocalizedDisplayName(nameof(Resources.CloudExplorerGaeVersionInstanceClassDisplayName))]
    [LocalizedCategory(nameof(Resources.CloudExplorerGaeVersionCategory))]
    public string InstanceClass => _version.InstanceClass;

    [LocalizedDisplayName(nameof(Resources.CloudExplorerGaeVersionCreationTimeDisplayName))]
    [LocalizedCategory(nameof(Resources.CloudExplorerGaeVersionCategory))]
    public string CreationTime => _version.CreateTime;

    [LocalizedDisplayName(nameof(Resources.CloudExplorerGaeVersionVirtualMachineDisplayName))]
    [LocalizedCategory(nameof(Resources.CloudExplorerGaeVersionCategory))]
    public bool? VirtualMachine => _version.Vm;

    // Resource settings are nullable: absent when the version defines none.
    [LocalizedCategory(nameof(Resources.CloudExplorerGaeVersionResourcesCategory))]
    public double? CPU => _version.Resources?.Cpu;

    [LocalizedDisplayName(nameof(Resources.CloudExplorerGaeVersionResoucesDiskDisplayName))]
    [LocalizedCategory(nameof(Resources.CloudExplorerGaeVersionResourcesCategory))]
    public double? Disk => _version.Resources?.DiskGb;

    [LocalizedDisplayName(nameof(Resources.CloudExplorerGaeVersionResoucesMemoryDisplayName))]
    [LocalizedCategory(nameof(Resources.CloudExplorerGaeVersionResourcesCategory))]
    public double? Memory => _version.Resources?.MemoryGb;

    // Shown as the item's caption in the Properties Window.
    public override string ToString() => _version.Id;
}
}
| ivannaranjo/google-cloud-visualstudio | GoogleCloudExtension/GoogleCloudExtension/CloudExplorerSources/Gae/VersionItem.cs | C# | apache-2.0 | 3,463 |