Dataset Preview
The full dataset viewer is not available (click to read why). Only showing a preview of the rows.
The dataset generation failed
Error code: DatasetGenerationError
Exception: ArrowInvalid
Message: JSON parse error: Missing a closing quotation mark in string. in row 910
Traceback: Traceback (most recent call last):
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/packaged_modules/json/json.py", line 145, in _generate_tables
dataset = json.load(f)
File "/usr/local/lib/python3.9/json/__init__.py", line 293, in load
return loads(fp.read(),
File "/usr/local/lib/python3.9/json/__init__.py", line 346, in loads
return _default_decoder.decode(s)
File "/usr/local/lib/python3.9/json/decoder.py", line 340, in decode
raise JSONDecodeError("Extra data", s, end)
json.decoder.JSONDecodeError: Extra data: line 2 column 1 (char 836)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1995, in _prepare_split_single
for _, table in generator:
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/packaged_modules/json/json.py", line 148, in _generate_tables
raise e
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/packaged_modules/json/json.py", line 122, in _generate_tables
pa_table = paj.read_json(
File "pyarrow/_json.pyx", line 308, in pyarrow._json.read_json
File "pyarrow/error.pxi", line 154, in pyarrow.lib.pyarrow_internal_check_status
File "pyarrow/error.pxi", line 91, in pyarrow.lib.check_status
pyarrow.lib.ArrowInvalid: JSON parse error: Missing a closing quotation mark in string. in row 910
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1529, in compute_config_parquet_and_info_response
parquet_operations = convert_to_parquet(builder)
File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1154, in convert_to_parquet
builder.download_and_prepare(
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1027, in download_and_prepare
self._download_and_prepare(
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1122, in _download_and_prepare
self._prepare_split(split_generator, **prepare_split_kwargs)
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1882, in _prepare_split
for job_id, done, content in self._prepare_split_single(
File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 2038, in _prepare_split_single
raise DatasetGenerationError("An error occurred while generating the dataset") from e
datasets.exceptions.DatasetGenerationError: An error occurred while generating the datasetNeed help to make the dataset viewer work? Make sure to review how to configure the dataset viewer, and open a discussion for direct support.
text
string | meta
dict |
|---|---|
package com.kza.common.annotations;
import java.lang.annotation.*;
/**
 * Marker annotation flagging a type or method for logging — presumably
 * consumed by a logging interceptor/aspect elsewhere in the project
 * (confirm against usage). Carries no members; its presence is the signal.
 * Retained at runtime and inherited by subclasses.
 */
@Documented
@Inherited
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.TYPE, ElementType.METHOD})
public @interface Logged {
}
|
{
"content_hash": "cd81e1c73df94f02f0ad62b9d9924e97",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 47,
"avg_line_length": 17.571428571428573,
"alnum_prop": 0.7479674796747967,
"repo_name": "Qhongk/rsite",
"id": "476015b6b932daff48ba6511004319c554c7d2d3",
"size": "246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rcommon/src/main/java/com/kza/common/annotations/Logged.java",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "59"
},
{
"name": "Java",
"bytes": "120175"
}
]
}
|
// AUTOGENERATED FILE - DO NOT MODIFY!
// This file generated by Djinni from example.djinni
package com.dropbox.textsort;
// Listener interface generated by Djinni from the `update(items: item_list)`
// IDL declaration; implemented elsewhere to receive text-sort updates.
// NOTE(review): the superinterface name "SuperListtener" looks misspelled, but
// it is declared outside this file — confirm against the djinni IDL before renaming.
/*package*/ interface TextboxListener extends SuperListtener {
    // Interface version constant exposed to implementors.
    public static final int VERSION = 1;
    public static final String STRINGTEST = "123123";
    /** Generated from IDL: update(items: item_list); */
    public void updateNew(ItemList items);
}
|
{
"content_hash": "f2cf0f8c4428bb8bde614cd6c7d9134b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 62,
"avg_line_length": 28.076923076923077,
"alnum_prop": 0.726027397260274,
"repo_name": "ragnraok/djinni",
"id": "6e02a4145e6555ff5be2b41b60915b2cd406797c",
"size": "365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/generated-src/java/com/dropbox/textsort/TextboxListener.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "364279"
},
{
"name": "CMake",
"bytes": "2802"
},
{
"name": "Java",
"bytes": "258252"
},
{
"name": "Lex",
"bytes": "4569"
},
{
"name": "Makefile",
"bytes": "3281"
},
{
"name": "Objective-C",
"bytes": "102038"
},
{
"name": "Objective-C++",
"bytes": "145442"
},
{
"name": "Python",
"bytes": "6187"
},
{
"name": "Scala",
"bytes": "215930"
},
{
"name": "Shell",
"bytes": "12633"
}
]
}
|
# Travis CI server provisioning: installs Apache + PHP 5 (with optional DB
# driver), creates the test database, and ensures Composer is available.
set -e
set -x
# Positional args: database backend ("postgres", "mysqli" or "none") and PHP version.
DB=$1
TRAVIS_PHP_VERSION=$2
# Packages update
sudo apt-get update -qq
# Install Apache, PHP and DB support if any
if [ "$DB" == "postgres" ]
then
sudo apt-get -qq -y --force-yes install apache2 libapache2-mod-php5 php5-pgsql php5-curl > /dev/null
elif [ "$DB" == "mysqli" ]
then
sudo apt-get -qq -y --force-yes install apache2 libapache2-mod-php5 php5-mysql php5-curl > /dev/null
else
sudo apt-get -qq -y --force-yes install apache2 libapache2-mod-php5 php5-curl > /dev/null
fi
# clean up
sudo apt-get -qq -y autoremove > /dev/null
# Apache webserver configuration
# NOTE(review): this sed expression is not a valid substitution — "/var/www" is
# an address with no command, so sed will fail and `set -e` aborts the script.
# The intended edit (likely changing the vhost docroot or AllowOverride) appears
# lost — confirm against repository history before fixing.
sudo sed -i -e "/var/www" /etc/apache2/sites-available/default
sudo a2enmod rewrite > /dev/null
sudo a2enmod actions > /dev/null
sudo a2enmod headers > /dev/null
# Restart Apache to take effect
sudo /etc/init.d/apache2 restart
# Setup a database if we are installing
if [ "$DB" == "postgres" ]
then
psql -c "DROP DATABASE IF EXISTS elkarte_test;" -U postgres
psql -c "create database elkarte_test;" -U postgres
elif [ "$DB" == "mysqli" ]
then
mysql -e "DROP DATABASE IF EXISTS elkarte_test;" -uroot
mysql -e "create database IF NOT EXISTS elkarte_test;" -uroot
fi
# Install or Update Composer
if [ "$DB" != "none" ]
then
# Probe for an existing composer binary; exit status decides install vs update.
composer -v > /dev/null 2>&1
COMPOSER_IS_INSTALLED=$?
if [ $COMPOSER_IS_INSTALLED -ne 0 ]
then
echo "Installing Composer"
curl -sS https://getcomposer.org/installer | sudo php -- --install-dir=/usr/local/bin --filename=composer
else
echo "Updating Composer"
composer self-update
fi
fi
|
{
"content_hash": "4fb7263b426d84d20cd74f1614ca2116",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 113,
"avg_line_length": 27.189655172413794,
"alnum_prop": 0.6791376030437539,
"repo_name": "wizardaf/Elkarte",
"id": "c74c4b7da4c2a3f5f1ad5562289049593667d6e7",
"size": "1718",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "tests/travis-ci/setup-server.sh",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "279"
},
{
"name": "CSS",
"bytes": "330227"
},
{
"name": "HTML",
"bytes": "45"
},
{
"name": "JavaScript",
"bytes": "588241"
},
{
"name": "PHP",
"bytes": "8097003"
},
{
"name": "Shell",
"bytes": "6608"
}
]
}
|
module Basquiat
  # Rails integration: hooks Basquiat's configuration into the Rails boot cycle.
  class Railtie < ::Rails::Railtie
    # Runs during initialization, before after_initialize callbacks fire.
    initializer 'load_basquiat_configuration' do
      # Mirror the Rails environment so Basquiat loads the matching config section.
      ENV['BASQUIAT_ENV'] = Rails.env
      Basquiat.configure do |config|
        # Rails.root is a Pathname; + builds the path to the app's basquiat.yml.
        config.config_file = Rails.root + 'config/basquiat.yml'
      end
    end
    # Once the app is fully initialized, reload the classes named in the config.
    config.after_initialize do
      Basquiat.configuration.reload_classes
    end
  end
end
|
{
"content_hash": "62477cf89b99a9eeb284629dde2b804a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 63,
"avg_line_length": 25.071428571428573,
"alnum_prop": 0.6752136752136753,
"repo_name": "VAGAScom/basquiat",
"id": "df5ac5f45f231ec4a60ebcdf530e396b6bf6097c",
"size": "382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/basquiat/rails/railtie.rb",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ruby",
"bytes": "72060"
},
{
"name": "Shell",
"bytes": "796"
}
]
}
|
package org.apache.lens.server.query;
import org.apache.lens.api.query.QueryHandle;
import org.apache.lens.api.query.QueryStatus;
import org.apache.lens.server.api.error.LensException;
import org.apache.lens.server.api.events.AsyncEventListener;
import org.apache.lens.server.api.events.LensEventService;
import org.apache.lens.server.api.query.QueryContext;
import org.apache.lens.server.api.query.QueryEnded;
import org.apache.lens.server.stats.event.query.QueryDriverStatistics;
import org.apache.lens.server.stats.event.query.QueryExecutionStatistics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Top level class which handles all Query Events.
*/
public class QueryExecutionStatisticsGenerator extends AsyncEventListener<QueryEnded> {

  /** The Constant LOG. */
  private static final Logger LOG = LoggerFactory.getLogger(QueryExecutionStatisticsGenerator.class);

  /** Query service used to resolve the context of a finished query. */
  private final QueryExecutionServiceImpl queryService;

  /** Event service the generated statistics event is published to. */
  private final LensEventService eventService;

  /**
   * Instantiates a new query execution statistics generator.
   *
   * @param queryService the query service
   * @param eventService the event service
   */
  public QueryExecutionStatisticsGenerator(QueryExecutionServiceImpl queryService, LensEventService eventService) {
    this.queryService = queryService;
    this.eventService = eventService;
  }

  /**
   * Builds a {@link QueryExecutionStatistics} event from the ended query's
   * context and publishes it on the event service. CLOSED queries and queries
   * whose context can no longer be found are skipped.
   *
   * @param ended the query-ended event to generate statistics for
   */
  @Override
  public void process(QueryEnded ended) {
    if (ended.getCurrentValue() == QueryStatus.Status.CLOSED) {
      return;
    }
    QueryHandle handle = ended.getQueryHandle();
    QueryExecutionStatistics event = new QueryExecutionStatistics(System.currentTimeMillis());
    QueryContext ctx = queryService.getQueryContext(handle);
    if (ctx == null) {
      // Parameterized logging avoids string concatenation when WARN is disabled.
      LOG.warn("Could not find the context for {} for event:{}. No stat generated", handle, ended.getCurrentValue());
      return;
    }
    event.setEndTime(ctx.getEndTime());
    event.setStartTime(ctx.getLaunchTime());
    event.setStatus(ctx.getStatus());
    event.setCause(ended.getCause() != null ? ended.getCause() : "");
    event.setResult(ctx.getResultSetPath());
    event.setUserQuery(ctx.getUserQuery());
    event.setSessionId(ctx.getLensSessionIdentifier());
    event.setHandle(ctx.getQueryHandle().toString());
    event.setSubmitter(ctx.getSubmittedUser());
    event.setClusterUser(ctx.getClusterUser());
    event.setSubmissionTime(ctx.getSubmissionTime());
    QueryDriverStatistics driverStats = new QueryDriverStatistics();
    driverStats.setDriverQuery(ctx.getSelectedDriverQuery());
    driverStats.setStartTime(ctx.getDriverStatus().getDriverStartTime());
    // NOTE(review): end time is also set from getDriverStartTime() — looks like a
    // copy-paste slip (a driver end/finish-time getter seems intended). Behavior
    // kept as-is; confirm against the DriverStatus API before changing.
    driverStats.setEndTime(ctx.getDriverStatus().getDriverStartTime());
    event.setDriverStats(driverStats);
    try {
      // Parameterized DEBUG logging defers event.toString() until DEBUG is
      // actually enabled, so the explicit isDebugEnabled() guard is unnecessary.
      LOG.debug("Notifying Statistics {}", event);
      eventService.notifyEvent(event);
    } catch (LensException e) {
      LOG.warn("Unable to notify Execution statistics", e);
    }
  }
}
|
{
"content_hash": "630620574a6bc968d6b054b16f433a92",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 115,
"avg_line_length": 37.151162790697676,
"alnum_prop": 0.7402190923317684,
"repo_name": "kamaldeep-ebay/lens",
"id": "b57bc640bee018c2b7da560c2c4bcad5e228d41a",
"size": "4003",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lens-server/src/main/java/org/apache/lens/server/query/QueryExecutionStatisticsGenerator.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "10377"
},
{
"name": "Java",
"bytes": "4547758"
},
{
"name": "JavaScript",
"bytes": "282078"
},
{
"name": "Shell",
"bytes": "14016"
}
]
}
|
StatsD is a front-end proxy for the Graphite/Carbon metrics server,
originally written by Etsy's Erik Kastner. It is based on ideas from
Flickr and on a blog post by Cal Henderson titled "Counting and
Timing." The server was written in Node, though there have been
implementations in other languages since then.
|
{
"content_hash": "435fd19427175f6495e9c99a830c59f0",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 68,
"avg_line_length": 59.8,
"alnum_prop": 0.8093645484949833,
"repo_name": "etsy/statsd",
"id": "c96b0fb07ae2206f5034ef054218186a3a935aca",
"size": "318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/history.md",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "150437"
},
{
"name": "Perl",
"bytes": "1451"
},
{
"name": "Shell",
"bytes": "3690"
}
]
}
|
"""
Django settings for example project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h2ju0ozs*f5hf^e+w)ldcz)v=x5vnw37z#tw-(uu$van&$l+r+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
'userroles',
)
USER_ROLES = (
'manager',
'moderator',
'client',
)
MANAGER_ROLES = (
'staff_manager', 'business_manager'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'example.urls'
WSGI_APPLICATION = 'example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
{
"content_hash": "451774ff124cb85aea866a86d2bfb7bc",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 71,
"avg_line_length": 22.870967741935484,
"alnum_prop": 0.7089797837329572,
"repo_name": "laginha/django-user-roles",
"id": "325687036d094b57a31d3217f4d3ddd7e41511b2",
"size": "2127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/example/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20421"
}
]
}
|
package gov.nih.nci.nbia.datamodel;
import gov.nih.nci.nbia.beans.searchresults.ImageResultWrapper;
import java.util.ArrayList;
import java.util.List;
import javax.faces.model.DataModel;
import javax.faces.model.ListDataModel;
// Adapts a flat list of thumbnail images (ImageResultWrapper) into the pair of
// row/column DataModels that an ICEfaces grid component iterates over. The
// row and column models hold stringified indices ("0".."n-1"); the current
// cursor position of both models is flattened back to an index into the list.
public class IcefacesRowColumnDataModel implements IcefacesRowColumnDataModelInterface {

    public IcefacesRowColumnDataModel(List<ImageResultWrapper> thumbnailImageDto)
    {
        this.thumbnailImageDto = thumbnailImageDto;
        generateDataModels();
    }

    // Image at the current row/column cursor, or null when the cursor is not on
    // a row/column or the flattened index falls past the end of the list.
    public ImageResultWrapper getCellValue()
    {
        if (rowDataModel.isRowAvailable() && columnDataModel.isRowAvailable() ){
            String row = (String)rowDataModel.getRowData();
            int currentRow = Integer.parseInt(row);
            Object column = (String)columnDataModel.getRowData();
            int currentColumn = ((List<String>)columnDataModel.getWrappedData()).indexOf(column);
            // Flatten (row, column): algebraically equal to currentRow*actureColumns + currentColumn.
            intValue = currentRow + currentColumn + currentRow*(actureColumns -1);
            if (intValue <= thumbnailImageDto.size()-1)
            {
                return thumbnailImageDto.get(intValue);
            }
        }
        return null;
    }

    // True when the current cursor maps to a real image; as a side effect,
    // getRegisteredId refreshes the 1-based image label for that cell.
    public boolean getCellVisibility()
    {
        if (rowDataModel.isRowAvailable() && columnDataModel.isRowAvailable())
        {
            String row = (String)rowDataModel.getRowData();
            int currentRow = Integer.parseInt(row);
            Object column = (String)columnDataModel.getRowData();
            int currentColumn = ((List<String>)columnDataModel.getWrappedData()).indexOf(column);
            return getRegisteredId(currentRow, currentColumn);
        }
        return false;
    }

    public DataModel getColumnDataModel() {
        return columnDataModel;
    }

    public void setColumnDataModel(DataModel columnDataModel) {
        this.columnDataModel = columnDataModel;
    }

    public DataModel getRowDataModel() {
        return rowDataModel;
    }

    public void setRowDataModel(DataModel rowDataModel) {
        this.rowDataModel = rowDataModel;
    }

    public String getImageLabel() {
        return imageLabel;
    }

    public void setImageLabel(String imageLabel) {
        this.imageLabel = imageLabel;
    }

    public int getColumns() {
        return columns;
    }

    public void setColumns(int columns) {
        this.columns = columns;
    }

    public int getRows() {
        return rows;
    }

    public void setRows(int rows) {
        this.rows = rows;
    }

    // Paginator is shown only when the list holds more images than one
    // 4-row page can display.
    public boolean getShowPaginator()
    {
        int totalImage = 4*columns;
        if (thumbnailImageDto.size() < totalImage)
        {
            return false;
        }
        else
        {
            return true;
        }
    }

    ///////////////////////////////////////////////////////PRIVATE/////////////////////////////////////
    private DataModel columnDataModel;
    private DataModel rowDataModel;
    private List<ImageResultWrapper> thumbnailImageDto;
    private int columns = 5;        // requested column count
    private int actureColumns = 5;  // actual column count (capped by list size in generateDataModels)
    private int rows = 4;
    private int intValue = 0;       // scratch: last computed flat index
    private String imageLabel;

    // True when (r, c) maps inside the list; also sets imageLabel to the
    // 1-based flat index of that cell.
    private boolean getRegisteredId(int r, int c)
    {
        intValue = r + c + r*(actureColumns -1);
        if (intValue > thumbnailImageDto.size()-1)
        {
            return false;
        }
        this.setImageLabel(""+(intValue +1));
        return true;
    }

    // rows = ceil(size / actureColumns), computed with integer arithmetic.
    private void calculateRows()
    {
        rows = thumbnailImageDto.size() / actureColumns;
        if (thumbnailImageDto.size() % actureColumns != 0)
        {
            rows += 1;
        }
    }

    // Builds (or refreshes) the row and column DataModels. Existing models are
    // reused via setWrappedData so ICEfaces component bindings stay intact.
    // NOTE(review): calculateRows() runs before actureColumns is capped below,
    // so the first row count uses the default column width — confirm intended.
    private void generateDataModels()
    {
        List<String> rowList = new ArrayList<String>();
        calculateRows();
        for (int i = 0; i < rows; i++)
        {
            rowList.add(""+i);
        }
        if (rowDataModel == null)
        {
            rowDataModel = new ListDataModel(rowList);
        }
        else
        {
            rowDataModel.setWrappedData(rowList);
        }
        List<String> columnList = new ArrayList<String>();
        if (thumbnailImageDto.size() < columns)
        {
            actureColumns = thumbnailImageDto.size();
        }
        else
        {
            actureColumns = columns;
        }
        for (int i = 0; i < actureColumns; i++)
        {
            columnList.add(""+i);
        }
        if (columnDataModel == null)
        {
            columnDataModel = new ListDataModel(columnList);
        }
        else
        {
            columnDataModel.setWrappedData(columnList);
        }
    }
}
|
{
"content_hash": "29145c0971abe4742d2fe98a13f2f285",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 100,
"avg_line_length": 22.065573770491802,
"alnum_prop": 0.6572560673600792,
"repo_name": "NCIP/national-biomedical-image-archive",
"id": "fca80a125501480d96452cdb0550afc4b5e49ed6",
"size": "4245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "software/nbia-web/src/gov/nih/nci/nbia/datamodel/IcefacesRowColumnDataModel.java",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "300"
},
{
"name": "CSS",
"bytes": "24518"
},
{
"name": "Groovy",
"bytes": "38152"
},
{
"name": "HTML",
"bytes": "683022"
},
{
"name": "Java",
"bytes": "4480887"
},
{
"name": "JavaScript",
"bytes": "109669"
},
{
"name": "PLSQL",
"bytes": "35795"
},
{
"name": "Perl",
"bytes": "4919"
},
{
"name": "Shell",
"bytes": "744"
},
{
"name": "XSLT",
"bytes": "215237"
}
]
}
|
import shortAnswer from './short_answer';
import languages from '../../../../constants/language_types';
// Unit test for the QBank short-answer serializer: checks that feedback text
// and the expected-length attribute survive the serialization merge.
describe('short answer serializer', () => {
  let item;
  let newItemAttr;
  let result;
  beforeEach(() => {
    // Source item as it arrives from the QBank API.
    item = {
      question: {
        choices: [{
          id: 'id14a6824a-79f2-4c00-ac6a-b41cbb64db45',
          text: 'the bus',
          wordType: 'noun',
        },
        {
          id: 'id969e920d-6d22-4d06-b4ac-40a821e350c6',
          text: 'the airport',
          wordType: 'noun',
        }],
        correctFeedback: {
          text: 'correctText',
          fileIds: {},
          id: '1',
        },
      },
      language: languages.languageTypeId.english,
    };
    // Updated attributes to be merged/serialized over the source item.
    newItemAttr = {
      question: {
        choices: [{},
        {}],
        correctFeedback: {
          text: 'correctText',
          fileIds: {},
          id: '1',
        },
        expectedLines: 15,
        expectedLength: 200,
        maxStrings: 20,
      },
    };
    result = shortAnswer(item, newItemAttr);
  });
  it('scrubs the question and answer', () => {
    // Serialized answers carry the feedback text; the question carries expectedLength.
    expect(result.answers[0].feedback.text).toEqual(newItemAttr.question.correctFeedback.text);
    expect(result.question.expectedLength).toEqual(newItemAttr.question.expectedLength);
  });
});
|
{
"content_hash": "14b7334442efc3e26cf517649f4eeedd",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 95,
"avg_line_length": 25.274509803921568,
"alnum_prop": 0.5275407292474786,
"repo_name": "atomicjolt/OpenAssessmentsClient",
"id": "7eed83fe73f62beac89e3293977fc125ce35c551",
"size": "1289",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "client/js/middleware/serialization/qbank/serializers/short_answer.spec.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "77344"
},
{
"name": "HTML",
"bytes": "3927"
},
{
"name": "JavaScript",
"bytes": "408397"
},
{
"name": "Shell",
"bytes": "3085"
}
]
}
|
SETUP="/vagrant/vagrant-setup"
PPWD=$PWD
if [ -d "/usr/share/hbase" ]
then
echo "HBase is already installed, nothing done!"
else
source $SETUP/include.sh
# yum install -y bzip2 gzip lzo-devel zlib-devel
source $SETUP/hadoop277.sh
source $SETUP/zookeeper3410.sh
if ps ax | grep -v grep | grep $SERVICE > /dev/null
then
echo "hadoop service is running"
else
echo "Starting hadoop service"
service hadoop start
fi
cd /usr/share
HBASE_FILE="hbase-2.1.0"
wget_and_untar http://apache.mirror.anlx.net/hbase/2.1.0/ $HBASE_FILE-bin.tar.gz
mkdir hbase
mkdir hbase/logs
cd $HBASE_FILE
mv * ../hbase
cd ..
rmdir $HBASE_FILE
echo -e "\nLD_LIBRARY_PATH=/usr/local/lib\nexport LD_LIBRARY_PATH\n" >> ./hbase/conf/hbase-env.sh
rm ./hbase/conf/hbase-site.xml
cp $SETUP/hbase/conf/hbase-site.xml ./hbase/conf/hbase-site.xml
perl -pi -e "s/# export HBASE_MANAGES_ZK=true/export HBASE_MANAGES_ZK=false/g" ./hbase/conf/hbase-env.sh
perl -pi -e "s/# export JAVA_HOME=\x2Fusr\x2Fjava\x2Fjdk1.8.0\x2F/export JAVA_HOME=\x2Fusr\x2Fjava\x2Flatest\x2F/g" ./hbase/conf/hbase-env.sh
mkdir /usr/share/hbase/zookeeper
ln -s /usr/share/hadoop/etc/hadoop/hdfs-site.xml /usr/share/hbase/conf/hdfs-site.xml
chown -Rf hadoop.hadoop hbase
chmod +r hbase
chmod +x hbase/conf
chmod -R +r hbase/conf
cp $SETUP/hbase/init.d/hbase /etc/init.d/hbase
chmod 755 /etc/init.d/hbase
echo "Creating -hbase directory in HDFS"
su - hadoop -c "/usr/share/hadoop/bin/hadoop fs -mkdir /hbase"
cd $PPWD
# Open Port 16020
iptables -A INPUT -p tcp --dport 16020 -j ACCEPT
service iptables save
systemctl restart iptables
fi
|
{
"content_hash": "89ac7dd584b2cace76dc313cbb997a6b",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 142,
"avg_line_length": 27.05,
"alnum_prop": 0.7202711028958718,
"repo_name": "sergiomt/centorion",
"id": "8dc2cbcf22c145d1fd038426911843871f5dc430",
"size": "1699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vagrant-setup/hbase210.sh",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "62822"
},
{
"name": "Shell",
"bytes": "118421"
}
]
}
|
import React from 'react';
import { storiesOf } from '@storybook/react-native';
import { withKnobs } from '@storybook/addon-knobs';
import Wrapper from './../../Wrapper';
import { Example as Stack } from './basic';
import { Example as Divider } from './divider';
// Registers the Stack stories with React Native Storybook: knobs support plus
// a shared Wrapper decorator around every story.
storiesOf('Stack', module)
  .addDecorator(withKnobs)
  .addDecorator((getStory: any) => <Wrapper>{getStory()}</Wrapper>)
  .add('Basic', () => <Stack />)
  .add('Divider', () => <Divider />);
|
{
"content_hash": "326a7d0698ca916d02a429dd94fc1cfb",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 67,
"avg_line_length": 38.083333333333336,
"alnum_prop": 0.6542669584245077,
"repo_name": "GeekyAnts/NativeBase",
"id": "8c3bfa3e0ad2647b78765cd391f77866a8255bbd",
"size": "457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/storybook/stories/components/primitives/Stack/index.tsx",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2083"
},
{
"name": "Java",
"bytes": "6282"
},
{
"name": "JavaScript",
"bytes": "19861"
},
{
"name": "Objective-C",
"bytes": "4511"
},
{
"name": "Ruby",
"bytes": "826"
},
{
"name": "Shell",
"bytes": "1654"
},
{
"name": "Starlark",
"bytes": "602"
},
{
"name": "TypeScript",
"bytes": "1420485"
}
]
}
|
# Reads and updates Postfix's `mynetworks` setting via postconf. The implicit
# 127.0.0.0/8 loopback entry is managed internally and hidden from callers.
class PostfixMynetworks
  require 'ipaddr'

  # CIDR blocks configured as trusted networks (excluding 127.0.0.0/8).
  attr_accessor :networks
  # errors: boolean validation flag; error_messages: human-readable failures.
  attr_reader :errors, :error_messages

  def initialize
    @networks = get_mynetworks
  end

  # Validates every configured network. Returns false on the first invalid
  # entry, so error_messages holds at most one message per call.
  def valid?
    @errors = false
    @error_messages = Array.new
    @networks.each do |network|
      return false unless valid_cidr?(network)
    end
    true
  end

  # Persists the networks via postconf and reloads Postfix. Returns false
  # without touching the system when validation fails.
  # (Fixes two defects in the original: an unreachable `break` after
  # `return false`, and an unbalanced trailing quote in the postconf command
  # that would have produced a shell syntax error.)
  def save
    return false unless valid?
    # Quote the whole value so the space-separated list reaches postconf as
    # one argument.
    Sudo.exec("/usr/local/sbin/postconf -e mynetworks='127.0.0.0/8 #{@networks.join(' ')}'")
    Sudo.exec("/usr/local/sbin/postfix reload")
    true
  end

  private

  # Current mynetworks from postconf, minus the implicit loopback block.
  def get_mynetworks
    mynetworks = %x{/usr/local/sbin/postconf -h mynetworks}.split(/[,\s]/)
    mynetworks.delete("127.0.0.0/8")
    mynetworks
  end

  # True when +cidr+ parses as an IP/CIDR whose address part is already in
  # canonical form; records an error message and returns false otherwise.
  def valid_cidr?(cidr)
    cidr =~ /(.*)\/(.*)/
    addr = $1 ? $1 : cidr
    unless addr == IPAddr.new(cidr).to_s
      @errors = true
      @error_messages << "#{cidr} is not a valid CIDR block, you might meant to enter " + IPAddr.new(cidr).to_s
      return false
    end
    true
  rescue
    # IPAddr.new raises on unparseable input; report it as plainly invalid.
    @errors = true
    @error_messages << "#{cidr} is invalid"
    false
  end
end
|
{
"content_hash": "7de542698fb0ae74f72b2f6100d5c459",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 113,
"avg_line_length": 21.10909090909091,
"alnum_prop": 0.5968992248062015,
"repo_name": "dagn/mailserv",
"id": "fe2343b60edefa247709bfa4a9c306bf35f0c122",
"size": "1161",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "admin/lib/postfix_mynetworks.rb",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2501"
},
{
"name": "CSS",
"bytes": "48827"
},
{
"name": "HTML",
"bytes": "76717"
},
{
"name": "JavaScript",
"bytes": "6967"
},
{
"name": "Nginx",
"bytes": "2883"
},
{
"name": "PHP",
"bytes": "62046"
},
{
"name": "Ruby",
"bytes": "196265"
},
{
"name": "Shell",
"bytes": "27106"
}
]
}
|
require 'xml/mapping_extensions'
require 'stash/wrapper/identifier_type'
module Stash
  module Wrapper
    # Mapping class for `<st:identifier>`. The `type` XML attribute maps to a
    # typesafe IdentifierType enum; the element's text content is the value.
    class Identifier
      include ::XML::Mapping

      typesafe_enum_node :type, '@type', class: IdentifierType, default_value: nil
      text_node :value, '.', default_value: nil

      # Creates a new {Identifier}
      #
      # @param type [IdentifierType] the identifier scheme
      # @param value [String] the identifier value; must be non-blank
      # @raise [ArgumentError] if type is not an IdentifierType or value is blank
      def initialize(type:, value:)
        raise ArgumentError, "Identifier type does not appear to be an IdentifierType: #{type || 'nil'}" unless type.is_a?(IdentifierType)
        raise ArgumentError, "Identifier value does not appear to be a non-empty string: #{value.inspect}" if value.to_s.strip.empty?
        self.type = type
        self.value = value
      end

      # The value rendered through the type's formatting rules.
      def formatted
        type.format(value)
      end
    end
  end
end
|
{
"content_hash": "504f73854976cb275f6ca0cdda3ef982",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 138,
"avg_line_length": 30.074074074074073,
"alnum_prop": 0.6600985221674877,
"repo_name": "CDLUC3/stash",
"id": "ce322e2efbdcd0fd4a01e6705c583451e39ba919",
"size": "812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stash-wrapper/lib/stash/wrapper/identifier.rb",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2245445"
},
{
"name": "HTML",
"bytes": "638313"
},
{
"name": "JavaScript",
"bytes": "263949"
},
{
"name": "Ruby",
"bytes": "1229318"
},
{
"name": "Shell",
"bytes": "1138"
}
]
}
|
package kuberuntime
import (
"path/filepath"
"reflect"
"sort"
"testing"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/credentialprovider"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
apitest "k8s.io/kubernetes/pkg/kubelet/apis/cri/testing"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
)
var (
	// fakeCreatedAt is the fixed creation timestamp stamped on every fake
	// sandbox and container built by the helpers below.
	fakeCreatedAt int64 = 1
)
// createTestRuntimeManager builds a runtime manager backed by fake runtime and
// image services, using a default (empty) BasicDockerKeyring for credentials.
func createTestRuntimeManager() (*apitest.FakeRuntimeService, *apitest.FakeImageService, *kubeGenericRuntimeManager, error) {
	return customTestRuntimeManager(&credentialprovider.BasicDockerKeyring{})
}
// customTestRuntimeManager is like createTestRuntimeManager but lets the caller
// supply the docker keyring used for image-pull credentials.
func customTestRuntimeManager(keyring *credentialprovider.BasicDockerKeyring) (*apitest.FakeRuntimeService, *apitest.FakeImageService, *kubeGenericRuntimeManager, error) {
	fakeRuntimeService := apitest.NewFakeRuntimeService()
	fakeImageService := apitest.NewFakeImageService()
	// Only an empty machineInfo is needed here, because in unit test all containers are besteffort,
	// data in machineInfo is not used. If burstable containers are used in unit test in the future,
	// we may want to set memory capacity.
	machineInfo := &cadvisorapi.MachineInfo{}
	osInterface := &containertest.FakeOS{}
	manager, err := newFakeKubeRuntimeManager(fakeRuntimeService, fakeImageService, machineInfo, osInterface, &containertest.FakeRuntimeHelper{}, keyring)
	return fakeRuntimeService, fakeImageService, manager, err
}
// sandboxTemplate is a sandbox template to create fake sandbox.
type sandboxTemplate struct {
	pod       *v1.Pod                    // pod the sandbox belongs to
	attempt   uint32                     // sandbox attempt number
	createdAt int64                      // creation timestamp for the fake
	state     runtimeapi.PodSandboxState // desired sandbox state
}
// containerTemplate is a container template to create fake container.
type containerTemplate struct {
	pod            *v1.Pod                   // pod the container belongs to
	container      *v1.Container             // container spec to fake
	sandboxAttempt uint32                    // attempt of the enclosing sandbox
	attempt        int                       // container attempt number
	createdAt      int64                     // creation timestamp for the fake
	state          runtimeapi.ContainerState // desired container state
}
// makeAndSetFakePod is a helper function to create and set one fake sandbox for a pod and
// one fake container for each of its container. The fakes are installed into
// fakeRuntime and also returned to the caller.
func makeAndSetFakePod(t *testing.T, m *kubeGenericRuntimeManager, fakeRuntime *apitest.FakeRuntimeService,
	pod *v1.Pod) (*apitest.FakePodSandbox, []*apitest.FakeContainer) {
	// One READY sandbox for the pod itself.
	sandbox := makeFakePodSandbox(t, m, sandboxTemplate{
		pod:       pod,
		createdAt: fakeCreatedAt,
		state:     runtimeapi.PodSandboxState_SANDBOX_READY,
	})
	var containers []*apitest.FakeContainer
	// Every container (regular and init) shares the same RUNNING template.
	newTemplate := func(c *v1.Container) containerTemplate {
		return containerTemplate{
			pod:       pod,
			container: c,
			createdAt: fakeCreatedAt,
			state:     runtimeapi.ContainerState_CONTAINER_RUNNING,
		}
	}
	for i := range pod.Spec.Containers {
		containers = append(containers, makeFakeContainer(t, m, newTemplate(&pod.Spec.Containers[i])))
	}
	for i := range pod.Spec.InitContainers {
		containers = append(containers, makeFakeContainer(t, m, newTemplate(&pod.Spec.InitContainers[i])))
	}
	fakeRuntime.SetFakeSandboxes([]*apitest.FakePodSandbox{sandbox})
	fakeRuntime.SetFakeContainers(containers)
	return sandbox, containers
}
// makeFakePodSandbox creates a fake pod sandbox based on a sandbox template.
// The sandbox ID is derived from the generated sandbox config's metadata.
func makeFakePodSandbox(t *testing.T, m *kubeGenericRuntimeManager, template sandboxTemplate) *apitest.FakePodSandbox {
	config, err := m.generatePodSandboxConfig(template.pod, template.attempt)
	assert.NoError(t, err, "generatePodSandboxConfig for sandbox template %+v", template)
	podSandboxID := apitest.BuildSandboxName(config.Metadata)
	return &apitest.FakePodSandbox{
		PodSandboxStatus: runtimeapi.PodSandboxStatus{
			Id:        podSandboxID,
			Metadata:  config.Metadata,
			State:     template.state,
			CreatedAt: template.createdAt,
			Network: &runtimeapi.PodSandboxNetworkStatus{
				Ip: apitest.FakePodSandboxIP,
			},
			Labels: config.Labels,
		},
	}
}
// makeFakePodSandboxes creates a group of fake pod sandboxes based on the sandbox templates.
// The function guarantees the order of the fake pod sandboxes is the same with the templates.
func makeFakePodSandboxes(t *testing.T, m *kubeGenericRuntimeManager, templates []sandboxTemplate) []*apitest.FakePodSandbox {
	var fakePodSandboxes []*apitest.FakePodSandbox
	for _, template := range templates {
		fakePodSandboxes = append(fakePodSandboxes, makeFakePodSandbox(t, m, template))
	}
	return fakePodSandboxes
}
// makeFakeContainer constructs a fake container from the given container
// template, attached (via SandboxID) to the sandbox of the template's pod.
func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template containerTemplate) *apitest.FakeContainer {
	sandboxConfig, err := m.generatePodSandboxConfig(template.pod, template.sandboxAttempt)
	assert.NoError(t, err, "generatePodSandboxConfig for container template %+v", template)
	containerConfig, _, err := m.generateContainerConfig(template.container, template.pod, template.attempt, "", template.container.Image)
	assert.NoError(t, err, "generateContainerConfig for container template %+v", template)

	sandboxID := apitest.BuildSandboxName(sandboxConfig.Metadata)
	return &apitest.FakeContainer{
		ContainerStatus: runtimeapi.ContainerStatus{
			Id:          apitest.BuildContainerName(containerConfig.Metadata, sandboxID),
			Metadata:    containerConfig.Metadata,
			Image:       containerConfig.Image,
			ImageRef:    containerConfig.Image.Image,
			CreatedAt:   template.createdAt,
			State:       template.state,
			Labels:      containerConfig.Labels,
			Annotations: containerConfig.Annotations,
			LogPath:     filepath.Join(sandboxConfig.GetLogDirectory(), containerConfig.GetLogPath()),
		},
		SandboxID: sandboxID,
	}
}
// makeFakeContainers creates a group of fake containers from the given
// templates. The returned slice preserves the order of the templates.
func makeFakeContainers(t *testing.T, m *kubeGenericRuntimeManager, templates []containerTemplate) []*apitest.FakeContainer {
	var result []*apitest.FakeContainer
	for i := range templates {
		result = append(result, makeFakeContainer(t, m, templates[i]))
	}
	return result
}
// makeTestContainer returns a minimal API container with only the name and
// image populated.
func makeTestContainer(name, image string) v1.Container {
	c := v1.Container{}
	c.Name = name
	c.Image = image
	return c
}
// makeTestPod returns a minimal API pod with the given identity and the
// supplied containers as its regular containers.
func makeTestPod(podName, podNamespace, podUID string, containers []v1.Container) *v1.Pod {
	pod := &v1.Pod{}
	pod.ObjectMeta = metav1.ObjectMeta{
		UID:       types.UID(podUID),
		Name:      podName,
		Namespace: podNamespace,
	}
	pod.Spec = v1.PodSpec{Containers: containers}
	return pod
}
// verifyPods reports whether the two pod slices are equal, ignoring the order
// of pods and of the containers within each pod. Note that it normalizes both
// arguments by sorting them in place.
func verifyPods(a, b []*kubecontainer.Pod) bool {
	if len(a) != len(b) {
		return false
	}
	// Sort containers within each pod, then the pods themselves by UID, so
	// the deep comparison below is order-insensitive.
	for _, pods := range [][]*kubecontainer.Pod{a, b} {
		for _, pod := range pods {
			sort.Sort(containersByID(pod.Containers))
		}
		sort.Sort(podsByID(pods))
	}
	return reflect.DeepEqual(a, b)
}
// verifyFakeContainerList returns the set of container IDs currently known to
// the fake runtime, and whether that set equals the expected set.
func verifyFakeContainerList(fakeRuntime *apitest.FakeRuntimeService, expected sets.String) (sets.String, bool) {
	actual := sets.NewString()
	for _, container := range fakeRuntime.Containers {
		actual.Insert(container.Id)
	}
	return actual, actual.Equal(expected)
}
// cRecord captures only the container fields of interest for status
// verification: the container name, its restart attempt, and its state.
type cRecord struct {
	name    string
	attempt uint32
	state   runtimeapi.ContainerState
}
// cRecordList sorts cRecords by container name, breaking ties by attempt
// number, so expected and actual lists can be compared deterministically.
type cRecordList []*cRecord

func (r cRecordList) Len() int      { return len(r) }
func (r cRecordList) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r cRecordList) Less(i, j int) bool {
	if r[i].name == r[j].name {
		return r[i].attempt < r[j].attempt
	}
	return r[i].name < r[j].name
}
// verifyContainerStatuses asserts that the containers known to the fake
// runtime match the expected records, comparing order-independently.
func verifyContainerStatuses(t *testing.T, runtime *apitest.FakeRuntimeService, expected []*cRecord, desc string) {
	actual := []*cRecord{}
	for _, status := range runtime.Containers {
		record := &cRecord{
			name:    status.Metadata.Name,
			attempt: status.Metadata.Attempt,
			state:   status.State,
		}
		actual = append(actual, record)
	}
	// Normalize ordering on both sides before comparing.
	sort.Sort(cRecordList(expected))
	sort.Sort(cRecordList(actual))
	assert.Equal(t, expected, actual, desc)
}
// TestNewKubeRuntimeManager verifies that the test runtime manager can be
// constructed without error.
func TestNewKubeRuntimeManager(t *testing.T) {
	_, _, _, createErr := createTestRuntimeManager()
	assert.NoError(t, createErr)
}
// TestVersion verifies that the runtime manager reports the expected
// kube runtime API version.
func TestVersion(t *testing.T) {
	_, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	v, versionErr := m.Version()
	assert.NoError(t, versionErr)
	assert.Equal(t, kubeRuntimeAPIVersion, v.String())
}
// TestContainerRuntimeType verifies that the runtime manager reports the fake
// runtime's name as its type.
func TestContainerRuntimeType(t *testing.T) {
	_, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)
	assert.Equal(t, apitest.FakeRuntimeName, m.Type())
}
// TestGetPodStatus verifies that GetPodStatus reports the identity and
// sandbox IP of a pod whose sandbox and containers exist in the fake runtime.
func TestGetPodStatus(t *testing.T) {
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:            "foo1",
					Image:           "busybox",
					ImagePullPolicy: v1.PullIfNotPresent,
				},
				{
					Name:            "foo2",
					Image:           "busybox",
					ImagePullPolicy: v1.PullIfNotPresent,
				},
			},
		},
	}

	// Register a fake sandbox and fake containers for the pod.
	makeAndSetFakePod(t, m, fakeRuntime, pod)

	podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)
	assert.Equal(t, pod.UID, podStatus.ID)
	assert.Equal(t, pod.Name, podStatus.Name)
	assert.Equal(t, pod.Namespace, podStatus.Namespace)
	assert.Equal(t, apitest.FakePodSandboxIP, podStatus.IP)
}
// TestGetPods verifies that GetPods reconstructs the expected
// kubecontainer.Pod (containers plus sandbox) from the fake runtime state.
func TestGetPods(t *testing.T) {
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "foo1",
					Image: "busybox",
				},
				{
					Name:  "foo2",
					Image: "busybox",
				},
			},
		},
	}

	// Set fake sandbox and fake containers to fakeRuntime.
	fakeSandbox, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod)

	// Convert the fakeContainers to kubecontainer.Container so they can be
	// compared against what GetPods returns.
	containers := make([]*kubecontainer.Container, len(fakeContainers))
	for i := range containers {
		fakeContainer := fakeContainers[i]
		c, err := m.toKubeContainer(&runtimeapi.Container{
			Id:          fakeContainer.Id,
			Metadata:    fakeContainer.Metadata,
			State:       fakeContainer.State,
			Image:       fakeContainer.Image,
			ImageRef:    fakeContainer.ImageRef,
			Labels:      fakeContainer.Labels,
			Annotations: fakeContainer.Annotations,
		})
		if err != nil {
			t.Fatalf("unexpected error %v", err)
		}
		containers[i] = c
	}
	// Convert fakeSandbox to kubecontainer.Container as well.
	sandbox, err := m.sandboxToKubeContainer(&runtimeapi.PodSandbox{
		Id:          fakeSandbox.Id,
		Metadata:    fakeSandbox.Metadata,
		State:       fakeSandbox.State,
		CreatedAt:   fakeSandbox.CreatedAt,
		Labels:      fakeSandbox.Labels,
		Annotations: fakeSandbox.Annotations,
	})
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	expected := []*kubecontainer.Pod{
		{
			ID:         types.UID("12345678"),
			Name:       "foo",
			Namespace:  "new",
			Containers: []*kubecontainer.Container{containers[0], containers[1]},
			Sandboxes:  []*kubecontainer.Container{sandbox},
		},
	}
	actual, err := m.GetPods(false)
	assert.NoError(t, err)
	// verifyPods compares order-insensitively (it sorts both sides).
	if !verifyPods(expected, actual) {
		t.Errorf("expected %#v, got %#v", expected, actual)
	}
}
// TestKillPod verifies that KillPod transitions the pod's sandbox to
// NOTREADY and all of its containers to EXITED, without removing them from
// the fake runtime.
func TestKillPod(t *testing.T) {
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "foo1",
					Image: "busybox",
				},
				{
					Name:  "foo2",
					Image: "busybox",
				},
			},
		},
	}

	// Set fake sandbox and fake containers to fakeRuntime.
	fakeSandbox, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod)

	// Convert the fakeContainers to kubecontainer.Container.
	// NOTE(review): Annotations are not copied here, unlike in TestGetPods —
	// confirm this omission is intentional.
	containers := make([]*kubecontainer.Container, len(fakeContainers))
	for i := range containers {
		fakeContainer := fakeContainers[i]
		c, err := m.toKubeContainer(&runtimeapi.Container{
			Id:       fakeContainer.Id,
			Metadata: fakeContainer.Metadata,
			State:    fakeContainer.State,
			Image:    fakeContainer.Image,
			ImageRef: fakeContainer.ImageRef,
			Labels:   fakeContainer.Labels,
		})
		if err != nil {
			t.Fatalf("unexpected error %v", err)
		}
		containers[i] = c
	}
	// Build the running pod that KillPod acts on.
	runningPod := kubecontainer.Pod{
		ID:         pod.UID,
		Name:       pod.Name,
		Namespace:  pod.Namespace,
		Containers: []*kubecontainer.Container{containers[0], containers[1]},
		Sandboxes: []*kubecontainer.Container{
			{
				ID: kubecontainer.ContainerID{
					ID:   fakeSandbox.Id,
					Type: apitest.FakeRuntimeName,
				},
			},
		},
	}

	err = m.KillPod(pod, runningPod, nil)
	assert.NoError(t, err)
	// The sandbox and both containers still exist, but must all be stopped.
	assert.Equal(t, 2, len(fakeRuntime.Containers))
	assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
	for _, sandbox := range fakeRuntime.Sandboxes {
		assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, sandbox.State)
	}
	for _, c := range fakeRuntime.Containers {
		assert.Equal(t, runtimeapi.ContainerState_CONTAINER_EXITED, c.State)
	}
}
// TestSyncPod verifies that syncing a brand-new pod creates one ready
// sandbox, pulls both images, and starts both containers running.
func TestSyncPod(t *testing.T) {
	fakeRuntime, fakeImage, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:            "foo1",
					Image:           "busybox",
					ImagePullPolicy: v1.PullIfNotPresent,
				},
				{
					Name:            "foo2",
					Image:           "alpine",
					ImagePullPolicy: v1.PullIfNotPresent,
				},
			},
		},
	}

	backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
	result := m.SyncPod(pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff)
	assert.NoError(t, result.Error())

	// Expect one ready sandbox, two pulled images, two running containers.
	assert.Equal(t, 2, len(fakeRuntime.Containers))
	assert.Equal(t, 2, len(fakeImage.Images))
	assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
	for _, sandbox := range fakeRuntime.Sandboxes {
		assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_READY, sandbox.State)
	}
	for _, c := range fakeRuntime.Containers {
		assert.Equal(t, runtimeapi.ContainerState_CONTAINER_RUNNING, c.State)
	}
}
// TestPruneInitContainers verifies that pruneInitContainersBeforeStart keeps
// only the most recent exited instance of each init container and removes
// the older and unknown-state ones.
func TestPruneInitContainers(t *testing.T) {
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)
	init1 := makeTestContainer("init1", "busybox")
	init2 := makeTestContainer("init2", "busybox")
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{init1, init2},
		},
	}
	// Multiple historical instances per init container; per the expectation
	// below, only templates[0] (newest exited init1) and templates[2]
	// (newest exited init2) should survive pruning.
	templates := []containerTemplate{
		{pod: pod, container: &init1, attempt: 3, createdAt: 3, state: runtimeapi.ContainerState_CONTAINER_EXITED},
		{pod: pod, container: &init1, attempt: 2, createdAt: 2, state: runtimeapi.ContainerState_CONTAINER_EXITED},
		{pod: pod, container: &init2, attempt: 1, createdAt: 1, state: runtimeapi.ContainerState_CONTAINER_EXITED},
		{pod: pod, container: &init1, attempt: 1, createdAt: 1, state: runtimeapi.ContainerState_CONTAINER_UNKNOWN},
		{pod: pod, container: &init2, attempt: 0, createdAt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
		{pod: pod, container: &init1, attempt: 0, createdAt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
	}
	fakes := makeFakeContainers(t, m, templates)
	fakeRuntime.SetFakeContainers(fakes)
	podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)

	m.pruneInitContainersBeforeStart(pod, podStatus)
	expectedContainers := sets.NewString(fakes[0].Id, fakes[2].Id)
	if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok {
		t.Errorf("expected %v, got %v", expectedContainers, actual)
	}
}
// TestSyncPodWithInitContainers walks a pod with one init container and two
// app containers through the init lifecycle in four phases: start the init
// container, wait for it, start the app containers once it exits, and
// restart initialization after the sandbox dies.
func TestSyncPodWithInitContainers(t *testing.T) {
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)
	initContainers := []v1.Container{
		{
			Name:            "init1",
			Image:           "init",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
	}
	containers := []v1.Container{
		{
			Name:            "foo1",
			Image:           "busybox",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
		{
			Name:            "foo2",
			Image:           "alpine",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers:     containers,
			InitContainers: initContainers,
		},
	}

	backOff := flowcontrol.NewBackOff(time.Second, time.Minute)

	// 1. should only create the init container.
	podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)
	result := m.SyncPod(pod, podStatus, []v1.Secret{}, backOff)
	assert.NoError(t, result.Error())
	expected := []*cRecord{
		{name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING},
	}
	verifyContainerStatuses(t, fakeRuntime, expected, "start only the init container")

	// 2. should not create app container because init container is still running.
	podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)
	result = m.SyncPod(pod, podStatus, []v1.Secret{}, backOff)
	assert.NoError(t, result.Error())
	verifyContainerStatuses(t, fakeRuntime, expected, "init container still running; do nothing")

	// 3. should create all app containers because init container finished.
	// Stop init container instance 0.
	sandboxIDs, err := m.getSandboxIDByPodUID(pod.UID, nil)
	require.NoError(t, err)
	sandboxID := sandboxIDs[0]
	initID0, err := fakeRuntime.GetContainerID(sandboxID, initContainers[0].Name, 0)
	require.NoError(t, err)
	fakeRuntime.StopContainer(initID0, 0)
	// Sync again.
	podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)
	result = m.SyncPod(pod, podStatus, []v1.Secret{}, backOff)
	assert.NoError(t, result.Error())
	expected = []*cRecord{
		{name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
		{name: containers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING},
		{name: containers[1].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING},
	}
	verifyContainerStatuses(t, fakeRuntime, expected, "init container completed; all app containers should be running")

	// 4. should restart the init container if needed to create a new podsandbox.
	// Stop the pod sandbox.
	fakeRuntime.StopPodSandbox(sandboxID)
	// Sync again.
	podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)
	result = m.SyncPod(pod, podStatus, []v1.Secret{}, backOff)
	assert.NoError(t, result.Error())
	expected = []*cRecord{
		// The first init container instance is purged and no longer visible.
		// The second (attempt == 1) instance has been started and is running.
		{name: initContainers[0].Name, attempt: 1, state: runtimeapi.ContainerState_CONTAINER_RUNNING},
		// All containers are killed.
		{name: containers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
		{name: containers[1].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
	}
	verifyContainerStatuses(t, fakeRuntime, expected, "kill all app containers, purge the existing init container, and restart a new one")
}
// makeBasePodAndStatus is a helper that returns a basic pod (three app
// containers foo1..foo3) and a matching runtime status in which the sandbox
// is ready (with IP 10.0.0.1) and every container is running.
func makeBasePodAndStatus() (*v1.Pod, *kubecontainer.PodStatus) {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "foo-ns",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "foo1",
					Image: "busybox",
				},
				{
					Name:  "foo2",
					Image: "busybox",
				},
				{
					Name:  "foo3",
					Image: "busybox",
				},
			},
		},
	}
	status := &kubecontainer.PodStatus{
		ID:        pod.UID,
		Name:      pod.Name,
		Namespace: pod.Namespace,
		SandboxStatuses: []*runtimeapi.PodSandboxStatus{
			{
				Id:       "sandboxID",
				State:    runtimeapi.PodSandboxState_SANDBOX_READY,
				Metadata: &runtimeapi.PodSandboxMetadata{Name: pod.Name, Namespace: pod.Namespace, Uid: "sandboxuid", Attempt: uint32(0)},
				Network:  &runtimeapi.PodSandboxNetworkStatus{Ip: "10.0.0.1"},
			},
		},
		// Each container status carries the hash of its own spec so the sync
		// logic sees the containers as unchanged.
		ContainerStatuses: []*kubecontainer.ContainerStatus{
			{
				ID:   kubecontainer.ContainerID{ID: "id1"},
				Name: "foo1", State: kubecontainer.ContainerStateRunning,
				Hash: kubecontainer.HashContainer(&pod.Spec.Containers[0]),
			},
			{
				ID:   kubecontainer.ContainerID{ID: "id2"},
				Name: "foo2", State: kubecontainer.ContainerStateRunning,
				Hash: kubecontainer.HashContainer(&pod.Spec.Containers[1]),
			},
			{
				ID:   kubecontainer.ContainerID{ID: "id3"},
				Name: "foo3", State: kubecontainer.ContainerStateRunning,
				Hash: kubecontainer.HashContainer(&pod.Spec.Containers[2]),
			},
		},
	}
	return pod, status
}
// TestComputePodActions is a table-driven test of computePodActions for
// regular (non-init) containers: each case mutates the base pod and/or
// status and checks the resulting pod-level actions.
func TestComputePodActions(t *testing.T) {
	_, _, m, err := createTestRuntimeManager()
	require.NoError(t, err)

	// Creating a pair of reference pod and status for the test cases to
	// refer to the specific fields.
	basePod, baseStatus := makeBasePodAndStatus()
	noAction := podActions{
		SandboxID:         baseStatus.SandboxStatuses[0].Id,
		ContainersToStart: []int{},
		ContainersToKill:  map[kubecontainer.ContainerID]containerToKillInfo{},
	}

	for desc, test := range map[string]struct {
		mutatePodFn    func(*v1.Pod)
		mutateStatusFn func(*kubecontainer.PodStatus)
		actions        podActions
	}{
		"everying is good; do nothing": {
			actions: noAction,
		},
		"start pod sandbox and all containers for a new pod": {
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				// No container or sandbox exists.
				status.SandboxStatuses = []*runtimeapi.PodSandboxStatus{}
				status.ContainerStatuses = []*kubecontainer.ContainerStatus{}
			},
			actions: podActions{
				KillPod:           true,
				CreateSandbox:     true,
				Attempt:           uint32(0),
				ContainersToStart: []int{0, 1, 2},
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"restart exited containers if RestartPolicy == Always": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				// The first container completed, restart it,
				status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[0].ExitCode = 0

				// The second container exited with failure, restart it,
				status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[1].ExitCode = 111
			},
			actions: podActions{
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToStart: []int{0, 1},
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"restart failed containers if RestartPolicy == OnFailure": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				// The first container completed, don't restart it,
				status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[0].ExitCode = 0

				// The second container exited with failure, restart it,
				status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[1].ExitCode = 111
			},
			actions: podActions{
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToStart: []int{1},
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"don't restart containers if RestartPolicy == Never": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				// Don't restart any containers.
				status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[0].ExitCode = 0
				status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[1].ExitCode = 111
			},
			actions: noAction,
		},
		"Kill pod and recreate everything if the pod sandbox is dead, and RestartPolicy == Always": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
			},
			actions: podActions{
				KillPod:           true,
				CreateSandbox:     true,
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				Attempt:           uint32(1),
				ContainersToStart: []int{0, 1, 2},
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"Kill pod and recreate all containers (except for the succeeded one) if the pod sandbox is dead, and RestartPolicy == OnFailure": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
				status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[1].ExitCode = 0
			},
			actions: podActions{
				KillPod:           true,
				CreateSandbox:     true,
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				Attempt:           uint32(1),
				ContainersToStart: []int{0, 2},
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"Kill pod and recreate all containers if the PodSandbox does not have an IP": {
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.SandboxStatuses[0].Network.Ip = ""
			},
			actions: podActions{
				KillPod:           true,
				CreateSandbox:     true,
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				Attempt:           uint32(1),
				ContainersToStart: []int{0, 1, 2},
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"Kill and recreate the container if the container's spec changed": {
			mutatePodFn: func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyAlways
			},
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				// A hash mismatch means the running container no longer
				// matches its spec.
				status.ContainerStatuses[1].Hash = uint64(432423432)
			},
			actions: podActions{
				SandboxID:        baseStatus.SandboxStatuses[0].Id,
				ContainersToKill: getKillMap(basePod, baseStatus, []int{1}),
				ContainersToStart: []int{1},
			},
			// TODO: Add a test case for containers which failed the liveness
			// check. Will need to fake the liveness check result.
		},
		"Verify we do not create a pod sandbox if no ready sandbox for pod with RestartPolicy=Never and all containers exited": {
			mutatePodFn: func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyNever
			},
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				// no ready sandbox
				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
				status.SandboxStatuses[0].Metadata.Attempt = uint32(1)
				// all containers exited
				for i := range status.ContainerStatuses {
					status.ContainerStatuses[i].State = kubecontainer.ContainerStateExited
					status.ContainerStatuses[i].ExitCode = 0
				}
			},
			actions: podActions{
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				Attempt:           uint32(2),
				CreateSandbox:     false,
				KillPod:           true,
				ContainersToStart: []int{},
				ContainersToKill:  map[kubecontainer.ContainerID]containerToKillInfo{},
			},
		},
		"Kill and recreate the container if the container is in unknown state": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[1].State = kubecontainer.ContainerStateUnknown
			},
			actions: podActions{
				SandboxID:        baseStatus.SandboxStatuses[0].Id,
				ContainersToKill: getKillMap(basePod, baseStatus, []int{1}),
				ContainersToStart: []int{1},
			},
		},
	} {
		// Each case gets a fresh pod/status so mutations do not leak between
		// cases.
		pod, status := makeBasePodAndStatus()
		if test.mutatePodFn != nil {
			test.mutatePodFn(pod)
		}
		if test.mutateStatusFn != nil {
			test.mutateStatusFn(status)
		}
		actions := m.computePodActions(pod, status)
		verifyActions(t, &test.actions, &actions, desc)
	}
}
// getKillMap builds the expected ContainersToKill map for the regular
// containers at the given indexes of the pod spec.
func getKillMap(pod *v1.Pod, status *kubecontainer.PodStatus, cIndexes []int) map[kubecontainer.ContainerID]containerToKillInfo {
	killMap := make(map[kubecontainer.ContainerID]containerToKillInfo)
	for _, idx := range cIndexes {
		killMap[status.ContainerStatuses[idx].ID] = containerToKillInfo{
			container: &pod.Spec.Containers[idx],
			name:      pod.Spec.Containers[idx].Name,
		}
	}
	return killMap
}
// getKillMapWithInitContainers builds the expected ContainersToKill map for
// the init containers at the given indexes of the pod spec.
func getKillMapWithInitContainers(pod *v1.Pod, status *kubecontainer.PodStatus, cIndexes []int) map[kubecontainer.ContainerID]containerToKillInfo {
	killMap := make(map[kubecontainer.ContainerID]containerToKillInfo)
	for _, idx := range cIndexes {
		killMap[status.ContainerStatuses[idx].ID] = containerToKillInfo{
			container: &pod.Spec.InitContainers[idx],
			name:      pod.Spec.InitContainers[idx].Name,
		}
	}
	return killMap
}
// verifyActions compares the expected and actual podActions after blanking
// the free-form kill messages, which are not part of the expectation.
func verifyActions(t *testing.T, expected, actual *podActions, desc string) {
	if actual.ContainersToKill != nil {
		// Clear the message field since we don't need to verify the message.
		for id, info := range actual.ContainersToKill {
			info.message = ""
			actual.ContainersToKill[id] = info
		}
	}
	assert.Equal(t, expected, actual, desc)
}
// TestComputePodActionsWithInitContainers is a table-driven test of
// computePodActions for pods that have init containers, covering the
// restart-policy handling of failed and unknown-state init containers.
func TestComputePodActionsWithInitContainers(t *testing.T) {
	_, _, m, err := createTestRuntimeManager()
	require.NoError(t, err)

	// Creating a pair of reference pod and status for the test cases to
	// refer to the specific fields.
	basePod, baseStatus := makeBasePodAndStatusWithInitContainers()
	noAction := podActions{
		SandboxID:         baseStatus.SandboxStatuses[0].Id,
		ContainersToStart: []int{},
		ContainersToKill:  map[kubecontainer.ContainerID]containerToKillInfo{},
	}

	for desc, test := range map[string]struct {
		mutatePodFn    func(*v1.Pod)
		mutateStatusFn func(*kubecontainer.PodStatus)
		actions        podActions
	}{
		"initialization completed; start all containers": {
			actions: podActions{
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToStart: []int{0, 1, 2},
				ContainersToKill:  getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"initialization in progress; do nothing": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[2].State = kubecontainer.ContainerStateRunning
			},
			actions: noAction,
		},
		"Kill pod and restart the first init container if the pod sandbox is dead": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
			},
			actions: podActions{
				KillPod:                  true,
				CreateSandbox:            true,
				SandboxID:                baseStatus.SandboxStatuses[0].Id,
				Attempt:                  uint32(1),
				NextInitContainerToStart: &basePod.Spec.InitContainers[0],
				ContainersToStart:        []int{},
				ContainersToKill:         getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"initialization failed; restart the last init container if RestartPolicy == Always": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[2].ExitCode = 137
			},
			actions: podActions{
				SandboxID:                baseStatus.SandboxStatuses[0].Id,
				NextInitContainerToStart: &basePod.Spec.InitContainers[2],
				ContainersToStart:        []int{},
				ContainersToKill:         getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"initialization failed; restart the last init container if RestartPolicy == OnFailure": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[2].ExitCode = 137
			},
			actions: podActions{
				SandboxID:                baseStatus.SandboxStatuses[0].Id,
				NextInitContainerToStart: &basePod.Spec.InitContainers[2],
				ContainersToStart:        []int{},
				ContainersToKill:         getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"initialization failed; kill pod if RestartPolicy == Never": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[2].ExitCode = 137
			},
			actions: podActions{
				KillPod:           true,
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToStart: []int{},
				ContainersToKill:  getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"init container state unknown; kill and recreate the last init container if RestartPolicy == Always": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[2].State = kubecontainer.ContainerStateUnknown
			},
			actions: podActions{
				SandboxID:                baseStatus.SandboxStatuses[0].Id,
				NextInitContainerToStart: &basePod.Spec.InitContainers[2],
				ContainersToStart:        []int{},
				ContainersToKill:         getKillMapWithInitContainers(basePod, baseStatus, []int{2}),
			},
		},
		"init container state unknown; kill and recreate the last init container if RestartPolicy == OnFailure": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[2].State = kubecontainer.ContainerStateUnknown
			},
			actions: podActions{
				SandboxID:                baseStatus.SandboxStatuses[0].Id,
				NextInitContainerToStart: &basePod.Spec.InitContainers[2],
				ContainersToStart:        []int{},
				ContainersToKill:         getKillMapWithInitContainers(basePod, baseStatus, []int{2}),
			},
		},
		"init container state unknown; kill pod if RestartPolicy == Never": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[2].State = kubecontainer.ContainerStateUnknown
			},
			actions: podActions{
				KillPod:           true,
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToStart: []int{},
				ContainersToKill:  getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
	} {
		// Each case gets a fresh pod/status so mutations do not leak between
		// cases.
		pod, status := makeBasePodAndStatusWithInitContainers()
		if test.mutatePodFn != nil {
			test.mutatePodFn(pod)
		}
		if test.mutateStatusFn != nil {
			test.mutateStatusFn(status)
		}
		actions := m.computePodActions(pod, status)
		verifyActions(t, &test.actions, &actions, desc)
	}
}
// makeBasePodAndStatusWithInitContainers returns the base pod/status pair
// extended with three init containers; the original container statuses are
// replaced by exited statuses for those init containers.
func makeBasePodAndStatusWithInitContainers() (*v1.Pod, *kubecontainer.PodStatus) {
	pod, status := makeBasePodAndStatus()
	pod.Spec.InitContainers = []v1.Container{
		{
			Name:  "init1",
			Image: "bar-image",
		},
		{
			Name:  "init2",
			Image: "bar-image",
		},
		{
			Name:  "init3",
			Image: "bar-image",
		},
	}
	// Replace the original statuses of the containers with those for the init
	// containers. Each status carries the hash of its OWN init container spec
	// (the previous code copy-pasted InitContainers[0] for all three entries,
	// which made the recorded hashes for init2/init3 wrong).
	status.ContainerStatuses = []*kubecontainer.ContainerStatus{
		{
			ID:   kubecontainer.ContainerID{ID: "initid1"},
			Name: "init1", State: kubecontainer.ContainerStateExited,
			Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]),
		},
		{
			ID:   kubecontainer.ContainerID{ID: "initid2"},
			Name: "init2", State: kubecontainer.ContainerStateExited,
			Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[1]),
		},
		{
			ID:   kubecontainer.ContainerID{ID: "initid3"},
			Name: "init3", State: kubecontainer.ContainerStateExited,
			Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[2]),
		},
	}
	return pod, status
}
|
{
"content_hash": "56f5c7ddb0e9c9db24322532bd543088",
"timestamp": "",
"source": "github",
"line_count": 1066,
"max_line_length": 171,
"avg_line_length": 34.61632270168855,
"alnum_prop": 0.7028806807403594,
"repo_name": "Stackdriver/heapster",
"id": "8de68a7e4bc5f02ba6bda6943c864ef1035eeae5",
"size": "37470",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "387"
},
{
"name": "Go",
"bytes": "792333"
},
{
"name": "Makefile",
"bytes": "11470"
},
{
"name": "Shell",
"bytes": "12097"
}
]
}
|
package com.karaokekeyboard.dict;
import java.util.Comparator;
/**
 * An immutable-by-convention word/frequency pair (a unigram). Natural
 * ordering ({@link #compareTo}) is by frequency only; use
 * {@link UnigramStrComparatorByString} to order alphabetically instead.
 */
public class UnigramStr implements Comparable<UnigramStr> {

    // Unicode code-point range of Thai signs that are not alphabet letters
    // (U+0E2F..U+0E5B); characters in this range are skipped when picking
    // the "initial" character of the string.
    private static final int FIRST_NON_ALPHABET_THAI = 0x0E2F;
    private static final int LAST_NON_ALPHABET_THAI = 0x0E5B;

    private String mStr; // the word itself
    private int mFreq;   // occurrence frequency of the word

    /**
     * Creates a unigram.
     *
     * @param str  the word
     * @param freq its occurrence frequency
     */
    public UnigramStr(String str, int freq) {
        setString(str);
        setFrequency(freq);
    }

    /** Returns the word followed by its frequency, e.g. {@code "foo (3)"}. */
    @Override
    public String toString() {
        return this.mStr + " (" + this.mFreq + ")";
    }

    public int getFrequency() {
        return mFreq;
    }

    public void setFrequency(int mFreq) {
        this.mFreq = mFreq;
    }

    public String getString() {
        return mStr;
    }

    public void setString(String mStr) {
        this.mStr = mStr;
    }

    /**
     * Returns the first character of the word that is not a Thai
     * non-alphabet sign; if every character falls in the non-alphabet range,
     * falls back to the very first character.
     *
     * @throws StringIndexOutOfBoundsException if the word is empty
     */
    public char getStringInitial() {
        for (int i = 0; i < this.mStr.length(); i++) {
            char c = this.mStr.charAt(i);
            if (c < FIRST_NON_ALPHABET_THAI || c > LAST_NON_ALPHABET_THAI) {
                return c;
            }
        }
        return this.mStr.charAt(0);
    }

    /** Orders unigrams by ascending frequency. */
    @Override
    public int compareTo(UnigramStr ustr) {
        return Integer.compare(this.mFreq, ustr.mFreq);
    }

    /** Orders unigrams alphabetically by their word. */
    public static class UnigramStrComparatorByString implements Comparator<UnigramStr> {
        @Override
        public int compare(UnigramStr arg0, UnigramStr arg1) {
            return arg0.getString().compareTo(arg1.getString());
        }
    }
}
|
{
"content_hash": "c04015be6530599672988f780fdbf8d8",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 84,
"avg_line_length": 20.943661971830984,
"alnum_prop": 0.6745124411566913,
"repo_name": "jewkub/KaraokeKeyboard",
"id": "f4d89965cff9084d10aa9bdadd82fa220c75d610",
"size": "1487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "softkeyboard/src/main/java/com/karaokekeyboard/dict/UnigramStr.java",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "156740"
}
]
}
|
package com.xeiam.xchange.clevercoin.service.polling;
import java.io.IOException;
import java.math.BigDecimal;
import java.util.Arrays;
import java.util.List;
import si.mazi.rescu.RestProxyFactory;
import com.xeiam.xchange.Exchange;
import com.xeiam.xchange.clevercoin.CleverCoinAuthenticated;
import com.xeiam.xchange.clevercoin.dto.account.CleverCoinBalance;
import com.xeiam.xchange.clevercoin.dto.account.CleverCoinDepositAddress;
import com.xeiam.xchange.clevercoin.dto.account.CleverCoinRippleDepositAddress;
import com.xeiam.xchange.clevercoin.dto.account.CleverCoinWithdrawal;
import com.xeiam.xchange.clevercoin.dto.account.DepositTransaction;
import com.xeiam.xchange.clevercoin.dto.account.WithdrawalRequest;
import com.xeiam.xchange.clevercoin.service.CleverCoinDigest;
import com.xeiam.xchange.exceptions.ExchangeException;
/**
 * Raw (unchecked-DTO) access to CleverCoin account endpoints: balances,
 * Bitcoin withdrawals and deposit addresses.
 *
 * @author Karsten Nilsen &amp; Konstantin Indjov
 */
public class CleverCoinAccountServiceRaw extends CleverCoinBasePollingService {

  private final CleverCoinDigest signatureCreator;
  // Renamed from "CleverCoinAuthenticated" so the field no longer shadows its own type name.
  private final CleverCoinAuthenticated cleverCoinAuthenticated;

  /**
   * Constructor
   *
   * @param exchange the exchange whose specification supplies the SSL URI, API key and secret
   */
  protected CleverCoinAccountServiceRaw(Exchange exchange) {
    super(exchange);
    this.cleverCoinAuthenticated = RestProxyFactory.createProxy(CleverCoinAuthenticated.class, exchange.getExchangeSpecification().getSslUri());
    this.signatureCreator = CleverCoinDigest.createInstance(exchange.getExchangeSpecification().getSecretKey(), exchange.getExchangeSpecification().getApiKey());
  }

  /**
   * Fetches all account balances.
   *
   * @throws IOException on transport failure
   */
  public CleverCoinBalance[] getCleverCoinBalance() throws IOException {
    return cleverCoinAuthenticated.getBalance(exchange.getExchangeSpecification().getApiKey(), signatureCreator,
        exchange.getNonceFactory());
  }

  /**
   * Withdraws Bitcoin funds to the given address.
   *
   * @param amount  amount including the 0.0001 BTC network fee
   * @param address destination Bitcoin address
   * @throws ExchangeException if the API reports an error
   * @throws IOException on transport failure
   */
  public CleverCoinWithdrawal withdrawCleverCoinFunds(BigDecimal amount, final String address) throws IOException {
    final CleverCoinWithdrawal response = cleverCoinAuthenticated.withdrawBitcoin(exchange.getExchangeSpecification().getApiKey(), signatureCreator,
        exchange.getNonceFactory(), amount, address);
    if (response.getError() != null) {
      throw new ExchangeException("Withdrawing funds from CleverCoin failed: " + response.getError());
    }
    return response;
  }

  /**
   * Requests a Bitcoin deposit address for this account.
   *
   * @throws ExchangeException if the API reports an error
   * @throws IOException on transport failure
   */
  public CleverCoinDepositAddress getCleverCoinBitcoinDepositAddress() throws IOException {
    final CleverCoinDepositAddress response = cleverCoinAuthenticated.getBitcoinDepositAddress(exchange.getExchangeSpecification().getApiKey(),
        signatureCreator, exchange.getNonceFactory());
    if (response.getError() != null) {
      throw new ExchangeException("Requesting Bitcoin deposit address failed: " + response.getError());
    }
    return response;
  }
}
|
{
"content_hash": "b6d7cbdf4d3deb45178ff67b875c20e5",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 161,
"avg_line_length": 39.38028169014085,
"alnum_prop": 0.7993562231759657,
"repo_name": "nivertech/XChange",
"id": "dff34e81edde3c21c8a2c6608345e0e2c92d7efb",
"size": "2796",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "xchange-clevercoin/src/main/java/com/xeiam/xchange/clevercoin/service/polling/CleverCoinAccountServiceRaw.java",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "4604003"
}
]
}
|
package org.apache.flink.runtime.taskexecutor;
import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.metrics.MetricGroup;
import org.apache.flink.runtime.broadcast.BroadcastVariableManager;
import org.apache.flink.runtime.clusterframework.types.AllocationID;
import org.apache.flink.runtime.io.disk.iomanager.IOManager;
import org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync;
import org.apache.flink.runtime.io.network.TaskEventDispatcher;
import org.apache.flink.runtime.memory.MemoryManager;
import org.apache.flink.runtime.shuffle.ShuffleEnvironment;
import org.apache.flink.runtime.shuffle.ShuffleEnvironmentContext;
import org.apache.flink.runtime.shuffle.ShuffleServiceLoader;
import org.apache.flink.runtime.state.TaskExecutorLocalStateStoresManager;
import org.apache.flink.runtime.taskexecutor.slot.TaskSlotTable;
import org.apache.flink.runtime.taskexecutor.slot.TaskSlotTableImpl;
import org.apache.flink.runtime.taskexecutor.slot.TimerService;
import org.apache.flink.runtime.taskmanager.Task;
import org.apache.flink.runtime.taskmanager.UnresolvedTaskManagerLocation;
import org.apache.flink.runtime.util.ExecutorThreadFactory;
import org.apache.flink.util.ExceptionUtils;
import org.apache.flink.util.FlinkException;
import org.apache.flink.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledThreadPoolExecutor;
/**
* Container for {@link TaskExecutor} services such as the {@link MemoryManager}, {@link IOManager},
* {@link ShuffleEnvironment}. All services are exclusive to a single {@link TaskExecutor}.
* Consequently, the respective {@link TaskExecutor} is responsible for closing them.
*/
public class TaskManagerServices {
	private static final Logger LOG = LoggerFactory.getLogger(TaskManagerServices.class);
	@VisibleForTesting
	public static final String LOCAL_STATE_SUB_DIRECTORY_ROOT = "localState";
	/** TaskManager services. */
	private final UnresolvedTaskManagerLocation unresolvedTaskManagerLocation;
	private final long managedMemorySize;
	private final IOManager ioManager;
	private final ShuffleEnvironment<?, ?> shuffleEnvironment;
	private final KvStateService kvStateService;
	private final BroadcastVariableManager broadcastVariableManager;
	private final TaskSlotTable<Task> taskSlotTable;
	private final JobManagerTable jobManagerTable;
	private final JobLeaderService jobLeaderService;
	private final TaskExecutorLocalStateStoresManager taskManagerStateStore;
	private final TaskEventDispatcher taskEventDispatcher;
	private final ExecutorService ioExecutor;
	/**
	 * Bundles already-constructed services; only stores the references (see
	 * {@link #fromConfiguration} for how they are built and started). Every
	 * reference argument must be non-null.
	 */
	TaskManagerServices(
		UnresolvedTaskManagerLocation unresolvedTaskManagerLocation,
		long managedMemorySize,
		IOManager ioManager,
		ShuffleEnvironment<?, ?> shuffleEnvironment,
		KvStateService kvStateService,
		BroadcastVariableManager broadcastVariableManager,
		TaskSlotTable<Task> taskSlotTable,
		JobManagerTable jobManagerTable,
		JobLeaderService jobLeaderService,
		TaskExecutorLocalStateStoresManager taskManagerStateStore,
		TaskEventDispatcher taskEventDispatcher,
		ExecutorService ioExecutor) {
		this.unresolvedTaskManagerLocation = Preconditions.checkNotNull(unresolvedTaskManagerLocation);
		this.managedMemorySize = managedMemorySize;
		this.ioManager = Preconditions.checkNotNull(ioManager);
		this.shuffleEnvironment = Preconditions.checkNotNull(shuffleEnvironment);
		this.kvStateService = Preconditions.checkNotNull(kvStateService);
		this.broadcastVariableManager = Preconditions.checkNotNull(broadcastVariableManager);
		this.taskSlotTable = Preconditions.checkNotNull(taskSlotTable);
		this.jobManagerTable = Preconditions.checkNotNull(jobManagerTable);
		this.jobLeaderService = Preconditions.checkNotNull(jobLeaderService);
		this.taskManagerStateStore = Preconditions.checkNotNull(taskManagerStateStore);
		this.taskEventDispatcher = Preconditions.checkNotNull(taskEventDispatcher);
		this.ioExecutor = Preconditions.checkNotNull(ioExecutor);
	}
	// --------------------------------------------------------------------------------------------
	//  Getter/Setter
	// --------------------------------------------------------------------------------------------
	long getManagedMemorySize() {
		return managedMemorySize;
	}
	public IOManager getIOManager() {
		return ioManager;
	}
	public ShuffleEnvironment<?, ?> getShuffleEnvironment() {
		return shuffleEnvironment;
	}
	public KvStateService getKvStateService() {
		return kvStateService;
	}
	public UnresolvedTaskManagerLocation getUnresolvedTaskManagerLocation() {
		return unresolvedTaskManagerLocation;
	}
	public BroadcastVariableManager getBroadcastVariableManager() {
		return broadcastVariableManager;
	}
	public TaskSlotTable<Task> getTaskSlotTable() {
		return taskSlotTable;
	}
	public JobManagerTable getJobManagerTable() {
		return jobManagerTable;
	}
	public JobLeaderService getJobLeaderService() {
		return jobLeaderService;
	}
	public TaskExecutorLocalStateStoresManager getTaskManagerStateStore() {
		return taskManagerStateStore;
	}
	public TaskEventDispatcher getTaskEventDispatcher() {
		return taskEventDispatcher;
	}
	public Executor getIOExecutor() {
		return ioExecutor;
	}
	// --------------------------------------------------------------------------------------------
	//  Shut down method
	// --------------------------------------------------------------------------------------------
	/**
	 * Shuts the {@link TaskExecutor} services down.
	 */
	public void shutDown() throws FlinkException {
		// Best-effort shutdown: every service is attempted even if an earlier one
		// fails. The first exception is kept; later ones are attached as suppressed
		// exceptions via ExceptionUtils.firstOrSuppressed, and a single FlinkException
		// wrapping them is thrown at the end.
		Exception exception = null;
		try {
			taskManagerStateStore.shutdown();
		} catch (Exception e) {
			exception = e;
		}
		try {
			ioManager.close();
		} catch (Exception e) {
			exception = ExceptionUtils.firstOrSuppressed(e, exception);
		}
		try {
			shuffleEnvironment.close();
		} catch (Exception e) {
			exception = ExceptionUtils.firstOrSuppressed(e, exception);
		}
		try {
			kvStateService.shutdown();
		} catch (Exception e) {
			exception = ExceptionUtils.firstOrSuppressed(e, exception);
		}
		try {
			taskSlotTable.close();
		} catch (Exception e) {
			exception = ExceptionUtils.firstOrSuppressed(e, exception);
		}
		try {
			jobLeaderService.stop();
		} catch (Exception e) {
			exception = ExceptionUtils.firstOrSuppressed(e, exception);
		}
		try {
			// graceful shutdown: already-submitted IO tasks are allowed to finish
			ioExecutor.shutdown();
		} catch (Exception e) {
			exception = ExceptionUtils.firstOrSuppressed(e, exception);
		}
		taskEventDispatcher.clearAll();
		if (exception != null) {
			throw new FlinkException("Could not properly shut down the TaskManager services.", exception);
		}
	}
	// --------------------------------------------------------------------------------------------
	//  Static factory methods for task manager services
	// --------------------------------------------------------------------------------------------
	/**
	 * Creates and returns the task manager services.
	 *
	 * @param taskManagerServicesConfiguration task manager configuration
	 * @param taskManagerMetricGroup metric group of the task manager
	 * @param taskIOExecutor executor for async IO operations
	 * @return task manager components
	 * @throws Exception
	 */
	public static TaskManagerServices fromConfiguration(
			TaskManagerServicesConfiguration taskManagerServicesConfiguration,
			MetricGroup taskManagerMetricGroup,
			Executor taskIOExecutor) throws Exception {
		// pre-start checks
		checkTempDirs(taskManagerServicesConfiguration.getTmpDirPaths());
		final TaskEventDispatcher taskEventDispatcher = new TaskEventDispatcher();
		// start the I/O manager, it will create some temp directories.
		final IOManager ioManager = new IOManagerAsync(taskManagerServicesConfiguration.getTmpDirPaths());
		final ShuffleEnvironment<?, ?> shuffleEnvironment = createShuffleEnvironment(
			taskManagerServicesConfiguration,
			taskEventDispatcher,
			taskManagerMetricGroup);
		// the shuffle environment must be started before the task manager location is
		// built, because the actual listening data port is only known after start()
		final int listeningDataPort = shuffleEnvironment.start();
		final KvStateService kvStateService = KvStateService.fromConfiguration(taskManagerServicesConfiguration);
		kvStateService.start();
		final UnresolvedTaskManagerLocation unresolvedTaskManagerLocation = new UnresolvedTaskManagerLocation(
			taskManagerServicesConfiguration.getResourceID(),
			taskManagerServicesConfiguration.getExternalAddress(),
			// we expose the task manager location with the listening port
			// iff the external data port is not explicitly defined
			taskManagerServicesConfiguration.getExternalDataPort() > 0 ?
				taskManagerServicesConfiguration.getExternalDataPort() :
				listeningDataPort);
		final BroadcastVariableManager broadcastVariableManager = new BroadcastVariableManager();
		final TaskSlotTable<Task> taskSlotTable = createTaskSlotTable(
			taskManagerServicesConfiguration.getNumberOfSlots(),
			taskManagerServicesConfiguration.getTaskExecutorResourceSpec(),
			taskManagerServicesConfiguration.getTimerServiceShutdownTimeout(),
			taskManagerServicesConfiguration.getPageSize());
		final JobManagerTable jobManagerTable = new JobManagerTable();
		final JobLeaderService jobLeaderService = new JobLeaderService(unresolvedTaskManagerLocation, taskManagerServicesConfiguration.getRetryingRegistrationConfiguration());
		final String[] stateRootDirectoryStrings = taskManagerServicesConfiguration.getLocalRecoveryStateRootDirectories();
		final File[] stateRootDirectoryFiles = new File[stateRootDirectoryStrings.length];
		// local state is kept under <stateRootDir>/localState for each configured root
		for (int i = 0; i < stateRootDirectoryStrings.length; ++i) {
			stateRootDirectoryFiles[i] = new File(stateRootDirectoryStrings[i], LOCAL_STATE_SUB_DIRECTORY_ROOT);
		}
		final TaskExecutorLocalStateStoresManager taskStateManager = new TaskExecutorLocalStateStoresManager(
			taskManagerServicesConfiguration.isLocalRecoveryEnabled(),
			stateRootDirectoryFiles,
			taskIOExecutor);
		final ExecutorService ioExecutor = Executors.newSingleThreadExecutor(new ExecutorThreadFactory("taskexecutor-io"));
		return new TaskManagerServices(
			unresolvedTaskManagerLocation,
			taskManagerServicesConfiguration.getManagedMemorySize().getBytes(),
			ioManager,
			shuffleEnvironment,
			kvStateService,
			broadcastVariableManager,
			taskSlotTable,
			jobManagerTable,
			jobLeaderService,
			taskStateManager,
			taskEventDispatcher,
			ioExecutor);
	}
	/** Builds the slot table, backed by a single-threaded timer service for slot timeouts. */
	private static TaskSlotTable<Task> createTaskSlotTable(
			final int numberOfSlots,
			final TaskExecutorResourceSpec taskExecutorResourceSpec,
			final long timerServiceShutdownTimeout,
			final int pageSize) {
		final TimerService<AllocationID> timerService = new TimerService<>(
			new ScheduledThreadPoolExecutor(1),
			timerServiceShutdownTimeout);
		return new TaskSlotTableImpl<>(
			numberOfSlots,
			TaskExecutorResourceUtils.generateTotalAvailableResourceProfile(taskExecutorResourceSpec),
			TaskExecutorResourceUtils.generateDefaultSlotResourceProfile(taskExecutorResourceSpec, numberOfSlots),
			pageSize,
			timerService);
	}
	/** Loads the configured shuffle service factory and creates the (not yet started) shuffle environment. */
	private static ShuffleEnvironment<?, ?> createShuffleEnvironment(
			TaskManagerServicesConfiguration taskManagerServicesConfiguration,
			TaskEventDispatcher taskEventDispatcher,
			MetricGroup taskManagerMetricGroup) throws FlinkException {
		final ShuffleEnvironmentContext shuffleEnvironmentContext = new ShuffleEnvironmentContext(
			taskManagerServicesConfiguration.getConfiguration(),
			taskManagerServicesConfiguration.getResourceID(),
			taskManagerServicesConfiguration.getNetworkMemorySize(),
			taskManagerServicesConfiguration.isLocalCommunicationOnly(),
			taskManagerServicesConfiguration.getBindAddress(),
			taskEventDispatcher,
			taskManagerMetricGroup);
		return ShuffleServiceLoader
			.loadShuffleServiceFactory(taskManagerServicesConfiguration.getConfiguration())
			.createShuffleEnvironment(shuffleEnvironmentContext);
	}
	/**
	 * Validates that all the directories denoted by the strings do actually exist or can be created, are proper
	 * directories (not files), and are writable.
	 *
	 * @param tmpDirs The array of directory paths to check.
	 * @throws IOException Thrown if any of the directories does not exist and cannot be created or is not writable
	 *                     or is a file, rather than a directory.
	 */
	private static void checkTempDirs(String[] tmpDirs) throws IOException {
		for (String dir : tmpDirs) {
			if (dir != null && !dir.equals("")) {
				File file = new File(dir);
				if (!file.exists()) {
					if (!file.mkdirs()) {
						throw new IOException("Temporary file directory " + file.getAbsolutePath() + " does not exist and could not be created.");
					}
				}
				if (!file.isDirectory()) {
					throw new IOException("Temporary file directory " + file.getAbsolutePath() + " is not a directory.");
				}
				if (!file.canWrite()) {
					throw new IOException("Temporary file directory " + file.getAbsolutePath() + " is not writable.");
				}
				if (LOG.isInfoEnabled()) {
					// '>> 30' converts bytes to whole GiB
					long totalSpaceGb = file.getTotalSpace() >> 30;
					long usableSpaceGb = file.getUsableSpace() >> 30;
					double usablePercentage = (double) usableSpaceGb / totalSpaceGb * 100;
					String path = file.getAbsolutePath();
					LOG.info(String.format("Temporary file directory '%s': total %d GB, " + "usable %d GB (%.2f%% usable)",
						path, totalSpaceGb, usableSpaceGb, usablePercentage));
				}
			} else {
				// NOTE(review): "#$id" looks like leftover Scala-style string interpolation that
				// is never substituted in Java, so the message does not say which entry was null.
				// Consider fixing the message upstream (a full fix would change a runtime string).
				throw new IllegalArgumentException("Temporary file directory #$id is null.");
			}
		}
	}
}
|
{
"content_hash": "071456f72895670418de47441adc16d1",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 169,
"avg_line_length": 37.611111111111114,
"alnum_prop": 0.7595273264401773,
"repo_name": "bowenli86/flink",
"id": "1ef4929a24e968f23f3c3561499b6cbeea593460",
"size": "14345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flink-runtime/src/main/java/org/apache/flink/runtime/taskexecutor/TaskManagerServices.java",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4588"
},
{
"name": "CSS",
"bytes": "58146"
},
{
"name": "Clojure",
"bytes": "93329"
},
{
"name": "Dockerfile",
"bytes": "12142"
},
{
"name": "FreeMarker",
"bytes": "25294"
},
{
"name": "HTML",
"bytes": "108358"
},
{
"name": "Java",
"bytes": "51972721"
},
{
"name": "JavaScript",
"bytes": "1829"
},
{
"name": "Makefile",
"bytes": "5134"
},
{
"name": "Python",
"bytes": "1014734"
},
{
"name": "Scala",
"bytes": "13770008"
},
{
"name": "Shell",
"bytes": "513941"
},
{
"name": "TSQL",
"bytes": "123113"
},
{
"name": "TypeScript",
"bytes": "246974"
}
]
}
|
using System.Collections.Generic;
using System.ComponentModel;
using System.Windows.Input;
using Tailviewer.Api;
using Tailviewer.BusinessLogic.Searches;
namespace Tailviewer.Ui.LogView
{
	/// <summary>
	///     View model contract for the "find all" panel of the log view: exposes the
	///     search being performed, the log source holding its results and the lines
	///     the user selected. Raises <see cref="INotifyPropertyChanged.PropertyChanged" />
	///     when any of these properties change.
	/// </summary>
	public interface IFindAllViewModel
		: INotifyPropertyChanged
	{
		/// <summary>
		///     The log lines currently selected in the find-all result view.
		/// </summary>
		IEnumerable<LogLineIndex> SelectedLogLines { get; set; }

		/// <summary>
		///     The log source presenting the results of <see cref="Search" />.
		/// </summary>
		ILogSource LogSource { get; }

		/// <summary>
		///     The search being executed against the log source.
		/// </summary>
		ILogSourceSearch Search { get; }

		/// <summary>
		///     The term to search for; settable by the view.
		/// </summary>
		string SearchTerm { get; set; }

		/// <summary>
		///     Whether the find-all panel should be visible.
		///     NOTE(review): presumed from the name - verify against the view binding.
		/// </summary>
		bool Show { get; }

		/// <summary>
		///     A human-readable error message, if any.
		/// </summary>
		string ErrorMessage { get; }

		/// <summary>
		///     Whether there are currently no results to display.
		///     NOTE(review): presumed from the name - verify against the implementation.
		/// </summary>
		bool IsEmpty { get; }

		/// <summary>
		///     Command that closes the find-all panel.
		/// </summary>
		ICommand CloseCommand { get; }
	}
}
|
{
"content_hash": "0ec8634ec9d3cf9ec8f4459ba8422d9d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 58,
"avg_line_length": 25.19047619047619,
"alnum_prop": 0.7485822306238186,
"repo_name": "Kittyfisto/SharpTail",
"id": "017f341fad777f6b4b7443bd0b16f794335c7755",
"size": "531",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/Tailviewer/Ui/LogView/IFindAllViewModel.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "16902"
},
{
"name": "C#",
"bytes": "3219332"
},
{
"name": "PureBasic",
"bytes": "751"
}
]
}
|
namespace Microsoft.Azure.Management.ApiManagement
{
using Microsoft.Rest;
using Microsoft.Rest.Azure;
using Models;
using System.Collections;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
/// <summary>
/// Extension methods for PrivateEndpointConnectionOperations.
/// </summary>
public static partial class PrivateEndpointConnectionOperationsExtensions
{
/// <summary>
/// Lists all private endpoint connections of the API Management service
/// instance.
/// </summary>
/// <param name='operations'>
/// The operations group for this extension method.
/// </param>
/// <param name='resourceGroupName'>
/// The name of the resource group.
/// </param>
/// <param name='serviceName'>
/// The name of the API Management service.
/// </param>
public static IEnumerable<PrivateEndpointConnection> ListByService(this IPrivateEndpointConnectionOperations operations, string resourceGroupName, string serviceName)
{
return operations.ListByServiceAsync(resourceGroupName, serviceName).GetAwaiter().GetResult();
}
/// <summary>
/// Lists all private endpoint connections of the API Management service
/// instance.
/// </summary>
/// <param name='operations'>
/// The operations group for this extension method.
/// </param>
/// <param name='resourceGroupName'>
/// The name of the resource group.
/// </param>
/// <param name='serviceName'>
/// The name of the API Management service.
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
public static async Task<IEnumerable<PrivateEndpointConnection>> ListByServiceAsync(this IPrivateEndpointConnectionOperations operations, string resourceGroupName, string serviceName, CancellationToken cancellationToken = default(CancellationToken))
{
using (var _result = await operations.ListByServiceWithHttpMessagesAsync(resourceGroupName, serviceName, null, cancellationToken).ConfigureAwait(false))
{
return _result.Body;
}
}
/// <summary>
/// Gets the details of the Private Endpoint Connection specified by its
/// identifier.
/// </summary>
/// <param name='operations'>
/// The operations group for this extension method.
/// </param>
/// <param name='resourceGroupName'>
/// The name of the resource group.
/// </param>
/// <param name='serviceName'>
/// The name of the API Management service.
/// </param>
/// <param name='privateEndpointConnectionName'>
/// Name of the private endpoint connection.
/// </param>
public static PrivateEndpointConnection GetByName(this IPrivateEndpointConnectionOperations operations, string resourceGroupName, string serviceName, string privateEndpointConnectionName)
{
return operations.GetByNameAsync(resourceGroupName, serviceName, privateEndpointConnectionName).GetAwaiter().GetResult();
}
/// <summary>
/// Gets the details of the Private Endpoint Connection specified by its
/// identifier.
/// </summary>
/// <param name='operations'>
/// The operations group for this extension method.
/// </param>
/// <param name='resourceGroupName'>
/// The name of the resource group.
/// </param>
/// <param name='serviceName'>
/// The name of the API Management service.
/// </param>
/// <param name='privateEndpointConnectionName'>
/// Name of the private endpoint connection.
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
public static async Task<PrivateEndpointConnection> GetByNameAsync(this IPrivateEndpointConnectionOperations operations, string resourceGroupName, string serviceName, string privateEndpointConnectionName, CancellationToken cancellationToken = default(CancellationToken))
{
using (var _result = await operations.GetByNameWithHttpMessagesAsync(resourceGroupName, serviceName, privateEndpointConnectionName, null, cancellationToken).ConfigureAwait(false))
{
return _result.Body;
}
}
/// <summary>
/// Creates a new Private Endpoint Connection or updates an existing one.
/// </summary>
/// <param name='operations'>
/// The operations group for this extension method.
/// </param>
/// <param name='resourceGroupName'>
/// The name of the resource group.
/// </param>
/// <param name='serviceName'>
/// The name of the API Management service.
/// </param>
/// <param name='privateEndpointConnectionName'>
/// Name of the private endpoint connection.
/// </param>
/// <param name='privateEndpointConnectionRequest'>
/// </param>
public static PrivateEndpointConnection CreateOrUpdate(this IPrivateEndpointConnectionOperations operations, string resourceGroupName, string serviceName, string privateEndpointConnectionName, PrivateEndpointConnectionRequest privateEndpointConnectionRequest)
{
return operations.CreateOrUpdateAsync(resourceGroupName, serviceName, privateEndpointConnectionName, privateEndpointConnectionRequest).GetAwaiter().GetResult();
}
/// <summary>
/// Creates a new Private Endpoint Connection or updates an existing one.
/// </summary>
/// <param name='operations'>
/// The operations group for this extension method.
/// </param>
/// <param name='resourceGroupName'>
/// The name of the resource group.
/// </param>
/// <param name='serviceName'>
/// The name of the API Management service.
/// </param>
/// <param name='privateEndpointConnectionName'>
/// Name of the private endpoint connection.
/// </param>
/// <param name='privateEndpointConnectionRequest'>
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
public static async Task<PrivateEndpointConnection> CreateOrUpdateAsync(this IPrivateEndpointConnectionOperations operations, string resourceGroupName, string serviceName, string privateEndpointConnectionName, PrivateEndpointConnectionRequest privateEndpointConnectionRequest, CancellationToken cancellationToken = default(CancellationToken))
{
using (var _result = await operations.CreateOrUpdateWithHttpMessagesAsync(resourceGroupName, serviceName, privateEndpointConnectionName, privateEndpointConnectionRequest, null, cancellationToken).ConfigureAwait(false))
{
return _result.Body;
}
}
/// <summary>
/// Deletes the specified Private Endpoint Connection.
/// </summary>
/// <param name='operations'>
/// The operations group for this extension method.
/// </param>
/// <param name='resourceGroupName'>
/// The name of the resource group.
/// </param>
/// <param name='serviceName'>
/// The name of the API Management service.
/// </param>
/// <param name='privateEndpointConnectionName'>
/// Name of the private endpoint connection.
/// </param>
public static void Delete(this IPrivateEndpointConnectionOperations operations, string resourceGroupName, string serviceName, string privateEndpointConnectionName)
{
operations.DeleteAsync(resourceGroupName, serviceName, privateEndpointConnectionName).GetAwaiter().GetResult();
}
/// <summary>
/// Deletes the specified Private Endpoint Connection.
/// </summary>
/// <param name='operations'>
/// The operations group for this extension method.
/// </param>
/// <param name='resourceGroupName'>
/// The name of the resource group.
/// </param>
/// <param name='serviceName'>
/// The name of the API Management service.
/// </param>
/// <param name='privateEndpointConnectionName'>
/// Name of the private endpoint connection.
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
public static async Task DeleteAsync(this IPrivateEndpointConnectionOperations operations, string resourceGroupName, string serviceName, string privateEndpointConnectionName, CancellationToken cancellationToken = default(CancellationToken))
{
(await operations.DeleteWithHttpMessagesAsync(resourceGroupName, serviceName, privateEndpointConnectionName, null, cancellationToken).ConfigureAwait(false)).Dispose();
}
/// <summary>
/// Description for Gets the private link resources
/// </summary>
/// <param name='operations'>
/// The operations group for this extension method.
/// </param>
/// <param name='resourceGroupName'>
/// The name of the resource group.
/// </param>
/// <param name='serviceName'>
/// The name of the API Management service.
/// </param>
public static PrivateLinkResourceListResult ListPrivateLinkResources(this IPrivateEndpointConnectionOperations operations, string resourceGroupName, string serviceName)
{
return operations.ListPrivateLinkResourcesAsync(resourceGroupName, serviceName).GetAwaiter().GetResult();
}
/// <summary>
/// Description for Gets the private link resources
/// </summary>
/// <param name='operations'>
/// The operations group for this extension method.
/// </param>
/// <param name='resourceGroupName'>
/// The name of the resource group.
/// </param>
/// <param name='serviceName'>
/// The name of the API Management service.
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
public static async Task<PrivateLinkResourceListResult> ListPrivateLinkResourcesAsync(this IPrivateEndpointConnectionOperations operations, string resourceGroupName, string serviceName, CancellationToken cancellationToken = default(CancellationToken))
{
using (var _result = await operations.ListPrivateLinkResourcesWithHttpMessagesAsync(resourceGroupName, serviceName, null, cancellationToken).ConfigureAwait(false))
{
return _result.Body;
}
}
/// <summary>
/// Description for Gets the private link resources
/// </summary>
/// <param name='operations'>
/// The operations group for this extension method.
/// </param>
/// <param name='resourceGroupName'>
/// The name of the resource group.
/// </param>
/// <param name='serviceName'>
/// The name of the API Management service.
/// </param>
/// <param name='privateLinkSubResourceName'>
/// Name of the private link resource.
/// </param>
public static PrivateLinkResource GetPrivateLinkResource(this IPrivateEndpointConnectionOperations operations, string resourceGroupName, string serviceName, string privateLinkSubResourceName)
{
return operations.GetPrivateLinkResourceAsync(resourceGroupName, serviceName, privateLinkSubResourceName).GetAwaiter().GetResult();
}
/// <summary>
/// Description for Gets the private link resources
/// </summary>
/// <param name='operations'>
/// The operations group for this extension method.
/// </param>
/// <param name='resourceGroupName'>
/// The name of the resource group.
/// </param>
/// <param name='serviceName'>
/// The name of the API Management service.
/// </param>
/// <param name='privateLinkSubResourceName'>
/// Name of the private link resource.
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
public static async Task<PrivateLinkResource> GetPrivateLinkResourceAsync(this IPrivateEndpointConnectionOperations operations, string resourceGroupName, string serviceName, string privateLinkSubResourceName, CancellationToken cancellationToken = default(CancellationToken))
{
using (var _result = await operations.GetPrivateLinkResourceWithHttpMessagesAsync(resourceGroupName, serviceName, privateLinkSubResourceName, null, cancellationToken).ConfigureAwait(false))
{
return _result.Body;
}
}
/// <summary>
/// Creates a new Private Endpoint Connection or updates an existing one.
/// </summary>
/// <param name='operations'>
/// The operations group for this extension method.
/// </param>
/// <param name='resourceGroupName'>
/// The name of the resource group.
/// </param>
/// <param name='serviceName'>
/// The name of the API Management service.
/// </param>
/// <param name='privateEndpointConnectionName'>
/// Name of the private endpoint connection.
/// </param>
/// <param name='privateEndpointConnectionRequest'>
/// </param>
public static PrivateEndpointConnection BeginCreateOrUpdate(this IPrivateEndpointConnectionOperations operations, string resourceGroupName, string serviceName, string privateEndpointConnectionName, PrivateEndpointConnectionRequest privateEndpointConnectionRequest)
{
return operations.BeginCreateOrUpdateAsync(resourceGroupName, serviceName, privateEndpointConnectionName, privateEndpointConnectionRequest).GetAwaiter().GetResult();
}
/// <summary>
/// Creates a new Private Endpoint Connection or updates an existing one.
/// </summary>
/// <param name='operations'>
/// The operations group for this extension method.
/// </param>
/// <param name='resourceGroupName'>
/// The name of the resource group.
/// </param>
/// <param name='serviceName'>
/// The name of the API Management service.
/// </param>
/// <param name='privateEndpointConnectionName'>
/// Name of the private endpoint connection.
/// </param>
/// <param name='privateEndpointConnectionRequest'>
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
public static async Task<PrivateEndpointConnection> BeginCreateOrUpdateAsync(this IPrivateEndpointConnectionOperations operations, string resourceGroupName, string serviceName, string privateEndpointConnectionName, PrivateEndpointConnectionRequest privateEndpointConnectionRequest, CancellationToken cancellationToken = default(CancellationToken))
{
using (var _result = await operations.BeginCreateOrUpdateWithHttpMessagesAsync(resourceGroupName, serviceName, privateEndpointConnectionName, privateEndpointConnectionRequest, null, cancellationToken).ConfigureAwait(false))
{
return _result.Body;
}
}
/// <summary>
/// Deletes the specified Private Endpoint Connection.
/// </summary>
/// <param name='operations'>
/// The operations group for this extension method.
/// </param>
/// <param name='resourceGroupName'>
/// The name of the resource group.
/// </param>
/// <param name='serviceName'>
/// The name of the API Management service.
/// </param>
/// <param name='privateEndpointConnectionName'>
/// Name of the private endpoint connection.
/// </param>
public static void BeginDelete(this IPrivateEndpointConnectionOperations operations, string resourceGroupName, string serviceName, string privateEndpointConnectionName)
{
    // Synchronous wrapper: blocks until the async delete completes.
    // GetAwaiter().GetResult() rethrows the original exception rather than
    // wrapping it in an AggregateException (unlike Task.Wait / .Result).
    operations.BeginDeleteAsync(resourceGroupName, serviceName, privateEndpointConnectionName).GetAwaiter().GetResult();
}
/// <summary>
/// Deletes the specified Private Endpoint Connection.
/// </summary>
/// <param name='operations'>
/// The operations group for this extension method.
/// </param>
/// <param name='resourceGroupName'>
/// The name of the resource group.
/// </param>
/// <param name='serviceName'>
/// The name of the API Management service.
/// </param>
/// <param name='privateEndpointConnectionName'>
/// Name of the private endpoint connection.
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
public static async Task BeginDeleteAsync(this IPrivateEndpointConnectionOperations operations, string resourceGroupName, string serviceName, string privateEndpointConnectionName, CancellationToken cancellationToken = default(CancellationToken))
{
    // The delete operation returns no body, so the HTTP response wrapper is
    // disposed immediately once the call completes.
    (await operations.BeginDeleteWithHttpMessagesAsync(resourceGroupName, serviceName, privateEndpointConnectionName, null, cancellationToken).ConfigureAwait(false)).Dispose();
}
}
}
|
{
"content_hash": "94070f70055ab35c32c12a8d8cbedcb9",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 359,
"avg_line_length": 50.854881266490764,
"alnum_prop": 0.5930787589498807,
"repo_name": "Azure/azure-sdk-for-net",
"id": "75e31d2b0c6ef085fd10a295dbba1425dda1775e",
"size": "19627",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/apimanagement/Microsoft.Azure.Management.ApiManagement/src/Generated/PrivateEndpointConnectionOperationsExtensions.cs",
"mode": "33188",
"license": "mit",
"language": []
}
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.6.0_12) on Sun Feb 08 17:27:09 PST 2009 -->
<META http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<TITLE>
Uses of Class net.sourceforge.pmd.util.designer.MyPrintStream (PMD 4.2.5 API)
</TITLE>
<META NAME="date" CONTENT="2009-02-08">
<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../../../stylesheet.css" TITLE="Style">
<SCRIPT type="text/javascript">
function windowTitle()
{
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class net.sourceforge.pmd.util.designer.MyPrintStream (PMD 4.2.5 API)";
}
}
</SCRIPT>
<NOSCRIPT>
</NOSCRIPT>
</HEAD>
<BODY BGCOLOR="white" onload="windowTitle();">
<HR>
<!-- ========= START OF TOP NAVBAR ======= -->
<A NAME="navbar_top"><!-- --></A>
<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_top_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../net/sourceforge/pmd/util/designer/MyPrintStream.html" title="class in net.sourceforge.pmd.util.designer"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../../index.html?net/sourceforge/pmd/util/designer/class-use/MyPrintStream.html" target="_top"><B>FRAMES</B></A>
<A HREF="MyPrintStream.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_top"></A>
<!-- ========= END OF TOP NAVBAR ========= -->
<HR>
<CENTER>
<H2>
<B>Uses of Class<br>net.sourceforge.pmd.util.designer.MyPrintStream</B></H2>
</CENTER>
No usage of net.sourceforge.pmd.util.designer.MyPrintStream
<P>
<HR>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<A NAME="navbar_bottom"><!-- --></A>
<A HREF="#skip-navbar_bottom" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_bottom_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../net/sourceforge/pmd/util/designer/MyPrintStream.html" title="class in net.sourceforge.pmd.util.designer"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../../index.html?net/sourceforge/pmd/util/designer/class-use/MyPrintStream.html" target="_top"><B>FRAMES</B></A>
<A HREF="MyPrintStream.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_bottom"></A>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<HR>
Copyright © 2002-2009 InfoEther. All Rights Reserved.
</BODY>
</HTML>
|
{
"content_hash": "87203f7cd4b9f4cd38340a691152caba",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 240,
"avg_line_length": 42.813793103448276,
"alnum_prop": 0.6163015463917526,
"repo_name": "deleidos/digitaledge-platform",
"id": "0df4991c1748c8c20dded72af123e484e23bff65",
"size": "6208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commons/buildtools/pmd/docs/apidocs/net/sourceforge/pmd/util/designer/class-use/MyPrintStream.html",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "16315580"
},
{
"name": "Batchfile",
"bytes": "15678"
},
{
"name": "C",
"bytes": "26042"
},
{
"name": "CSS",
"bytes": "846559"
},
{
"name": "Groovy",
"bytes": "93743"
},
{
"name": "HTML",
"bytes": "36583222"
},
{
"name": "Java",
"bytes": "33127586"
},
{
"name": "JavaScript",
"bytes": "2030589"
},
{
"name": "Nginx",
"bytes": "3934"
},
{
"name": "Perl",
"bytes": "330290"
},
{
"name": "Python",
"bytes": "54288"
},
{
"name": "Ruby",
"bytes": "5133"
},
{
"name": "Shell",
"bytes": "2482631"
},
{
"name": "XSLT",
"bytes": "978664"
}
]
}
|
<?xml version="1.0" encoding="ISO-8859-1"?>
<!--
~ Copyright 2010 Ning, Inc.
~
~ Ning licenses this file to you under the Apache License, version 2.0
~ (the "License"); you may not use this file except in compliance with the
~ License. You may obtain a copy of the License at:
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
~ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
~ License for the specific language governing permissions and limitations
~ under the License.
-->
<project name="Collector">
<publishDate position="right"/>
<version position="right"/>
<body>
<links>
<item name="Maven Central" href="http://repo1.maven.org/maven2/com/ning/"/>
</links>
<menu name="Dwarf">
<item name="Dwarf project" href="http://pierre.github.com/dwarf"/>
<!--<item name="FAQ" href="/faq.html"/>-->
<item name="Monitoring via JMX" href="/monitoring.html"/>
</menu>
<menu ref="modules"/>
<menu ref="reports"/>
</body>
</project>
|
{
"content_hash": "d963afd76ec6c95c4cba514dca04d7d5",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 87,
"avg_line_length": 34.47222222222222,
"alnum_prop": 0.6293311845286059,
"repo_name": "pierre/collector",
"id": "999d2b6ac6c67692eda56184601342b9baeee7c9",
"size": "1241",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/site/site.xml",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3324"
},
{
"name": "Java",
"bytes": "514252"
},
{
"name": "JavaScript",
"bytes": "31693"
},
{
"name": "Perl",
"bytes": "249"
},
{
"name": "Python",
"bytes": "170394"
},
{
"name": "Ruby",
"bytes": "2120"
},
{
"name": "Shell",
"bytes": "1459"
}
]
}
|
from datetime import datetime
from corehq.apps.users.models import CommCareUser
from corehq.apps.sms.api import send_sms_to_verified_number
from corehq.util.translation import localize
from custom.ilsgateway.models import SupplyPointStatus, SupplyPointStatusTypes, SupplyPointStatusValues, \
DeliveryGroupReport
from custom.ilsgateway.tanzania.handlers.keyword import KeywordHandler
from custom.ilsgateway.tanzania.reminders import SUBMITTED_NOTIFICATION_MSD, SUBMITTED_CONFIRM, \
SUBMITTED_REMINDER_DISTRICT, SUBMITTED_INVALID_QUANTITY
class RandrHandler(KeywordHandler):
    """Handles the R&R (report & requisition) submission keyword."""

    def handle(self):
        return self._handle()

    def help(self):
        return self._handle(help=True)

    def _send_submission_alert_to_msd(self, params):
        # Notify every MSD-role user in the domain that has a verified number.
        msd_users = [u for u in CommCareUser.by_domain(self.domain)
                     if u.user_data.get('role', None) == 'MSD']
        for msd_user in msd_users:
            verified_number = msd_user.get_verified_number()
            if not verified_number:
                continue
            # Send the alert in the recipient's own language.
            with localize(msd_user.get_language_code()):
                send_sms_to_verified_number(verified_number, SUBMITTED_NOTIFICATION_MSD % params)

    def _handle(self, help=False):
        """Record the R&R submission for the sender's location.

        Facilities get a simple confirmation; districts also record per-group
        delivery quantities and alert MSD users. Always returns True.
        """
        location = self.user.location
        status_type = None
        if location.location_type_name == 'FACILITY':
            status_type = SupplyPointStatusTypes.R_AND_R_FACILITY
            self.respond(SUBMITTED_CONFIRM, sp_name=location.name, contact_name=self.user.name)
        elif location.location_type_name == 'DISTRICT':
            if help:
                quantities = [0, 0, 0]
                self.respond(SUBMITTED_REMINDER_DISTRICT)
            else:
                # Message format is "<kw> A <qty> B <qty> C <qty>": quantities
                # sit at the odd argument positions.
                quantities = [self.args[1], self.args[3], self.args[5]]
                # Reject the whole submission on the first non-numeric value.
                for quantity in quantities:
                    try:
                        int(quantity)
                    except ValueError:
                        self.respond(SUBMITTED_INVALID_QUANTITY % {'number': quantity})
                        return True
                # One report row per delivery group, in A/B/C order.
                for group_name, group_quantity in zip('ABC', quantities):
                    DeliveryGroupReport.objects.create(
                        location_id=location.get_id,
                        quantity=group_quantity,
                        message=self.msg.couch_id,
                        delivery_group=group_name
                    )
                self.respond(
                    SUBMITTED_CONFIRM,
                    sp_name=location.name,
                    contact_name=self.user.first_name + " " + self.user.last_name
                )
            status_type = SupplyPointStatusTypes.R_AND_R_DISTRICT
            params = {
                'district_name': location.name,
                'group_a': quantities[0],
                'group_b': quantities[1],
                'group_c': quantities[2]
            }
            self._send_submission_alert_to_msd(params)
        # NOTE(review): for any other location type status_type stays None and
        # is persisted as-is — presumably intentional; confirm with callers.
        SupplyPointStatus.objects.create(location_id=location.get_id,
                                         status_type=status_type,
                                         status_value=SupplyPointStatusValues.SUBMITTED,
                                         status_date=datetime.utcnow())
        return True
|
{
"content_hash": "fcb67966963dfa5258c84d27882bb52d",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 109,
"avg_line_length": 42.642857142857146,
"alnum_prop": 0.5577889447236181,
"repo_name": "qedsoftware/commcare-hq",
"id": "eb046c89eb84d74e324bc09b0ce6f6d7d5a7f103",
"size": "3582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "custom/ilsgateway/tanzania/handlers/randr.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
]
}
|
namespace ScmDataAccess
{
    /// <summary>
    /// A stock record for one part type: the quantity on hand and the level
    /// at which more should be ordered.
    /// </summary>
    public class InventoryItem
    {
        /// <summary>Identifier of the part type this record tracks.</summary>
        public int PartTypeId { get; set; }
        /// <summary>The part type — presumably populated by the data layer
        /// from <see cref="PartTypeId"/>; confirm with the ORM mapping.</summary>
        public PartType Part { get; set; }
        /// <summary>Quantity currently in inventory.</summary>
        public int Count { get; set; }
        /// <summary>Stock level that should trigger a reorder.</summary>
        public int OrderThreshold { get; set; }
    }
}
|
{
"content_hash": "4b873f9a059927264b88c08f598497fb",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 43,
"avg_line_length": 22.4,
"alnum_prop": 0.6428571428571429,
"repo_name": "dmetzgar/dotnetcoreinaction",
"id": "59d1542232a47c69a5f34ba86a6291b95a8c6b78",
"size": "226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chapter6/DapperDi/ScmDataAccess/InventoryItem.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "95461"
}
]
}
|
// Binds the manager to |contents| and caches the client's driver.
// |client| may be NULL; in that case |driver_| is left NULL as well, so
// methods that dereference it must not be called.
PasswordGenerationManager::PasswordGenerationManager(
    content::WebContents* contents,
    PasswordManagerClient* client)
    : web_contents_(contents),
      observer_(NULL),
      client_(client),
      driver_(NULL) {
  if (client)
    driver_ = client->GetDriver();
}
// Empty destructor: no raw pointer member is deleted here, so ownership of
// all of them lies elsewhere.
PasswordGenerationManager::~PasswordGenerationManager() {}
// Registers a popup observer used by tests; pass NULL to clear it.
void PasswordGenerationManager::SetTestObserver(
    autofill::PasswordGenerationPopupObserver* observer) {
  observer_ = observer;
}
void PasswordGenerationManager::DetectAccountCreationForms(
const std::vector<autofill::FormStructure*>& forms) {
std::vector<autofill::FormData> account_creation_forms;
for (std::vector<autofill::FormStructure*>::const_iterator form_it =
forms.begin(); form_it != forms.end(); ++form_it) {
autofill::FormStructure* form = *form_it;
for (std::vector<autofill::AutofillField*>::const_iterator field_it =
form->begin(); field_it != form->end(); ++field_it) {
autofill::AutofillField* field = *field_it;
if (field->server_type() == autofill::ACCOUNT_CREATION_PASSWORD) {
account_creation_forms.push_back(form->ToFormData());
break;
}
}
}
if (!account_creation_forms.empty() && IsGenerationEnabled()) {
SendAccountCreationFormsToRenderer(web_contents_->GetRenderViewHost(),
account_creation_forms);
}
}
// In order for password generation to be enabled, we need to make sure:
// (1) Password saving is enabled, and
// (2) Password sync is enabled.
// Returns false (with a debug log) if either precondition fails, or if the
// manager was constructed without a client — in which case |driver_| is NULL
// (see the constructor) and dereferencing it would crash.
bool PasswordGenerationManager::IsGenerationEnabled() const {
  if (!driver_ || !client_) {
    DVLOG(2) << "Generation disabled because no client/driver is attached";
    return false;
  }
  if (!driver_->GetPasswordManager()->IsPasswordManagerSavingEnabled()) {
    DVLOG(2) << "Generation disabled because password saving is disabled";
    return false;
  }
  if (!client_->IsPasswordSyncEnabled()) {
    DVLOG(2) << "Generation disabled because passwords are not being synced";
    return false;
  }
  return true;
}
// Notifies the renderer of the detected account-creation forms by sending an
// AutofillMsg_AccountCreationFormsDetected IPC on |host|'s routing id.
void PasswordGenerationManager::SendAccountCreationFormsToRenderer(
    content::RenderViewHost* host,
    const std::vector<autofill::FormData>& forms) {
  host->Send(new AutofillMsg_AccountCreationFormsDetected(
      host->GetRoutingID(), forms));
}
// Translates |bounds| from coordinates relative to the web-contents view into
// screen space by offsetting with the view container's origin.
gfx::RectF PasswordGenerationManager::GetBoundsInScreenSpace(
    const gfx::RectF& bounds) {
  gfx::Rect client_area;
  web_contents_->GetView()->GetContainerBounds(&client_area);
  return bounds + client_area.OffsetFromOrigin();
}
// Handler for showing the password-generation popup at |bounds| for |form|;
// |max_length| is the maximum password length the field accepts.
// NOTE(review): presumably invoked via IPC from the renderer — confirm.
// Popup UI is not implemented on this platform yet (stub).
void PasswordGenerationManager::OnShowPasswordGenerationPopup(
    const gfx::RectF& bounds,
    int max_length,
    const autofill::PasswordForm& form)
{
  // TODO: Call Popup for Password
}

// Handler for showing the popup that lets the user edit a previously
// generated password. Stub: popup UI not implemented yet.
void PasswordGenerationManager::OnShowPasswordEditingPopup(
    const gfx::RectF& bounds,
    const autofill::PasswordForm& form)
{
  // TODO: Call Popup for Edit Password
}

// Handler requesting that any visible generation popup be dismissed.
void PasswordGenerationManager::OnHidePasswordGenerationPopup()
{
  HidePopup();
}

// Dismisses the popup UI. Stub: popup UI not implemented yet.
void PasswordGenerationManager::HidePopup()
{
  // TODO: Call Popup Hide
}
#endif // TIZEN_AUTOFILL_SUPPORT
|
{
"content_hash": "51a0d5f2badf8949c5c03b8ff92ecec3",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 77,
"avg_line_length": 30.804123711340207,
"alnum_prop": 0.7058232931726908,
"repo_name": "crosswalk-project/chromium-efl",
"id": "de38900e2e273837189af641b8eeeac167deaae0",
"size": "4102",
"binary": false,
"copies": "2",
"ref": "refs/heads/efl/crosswalk-11/40.0.2214.28",
"path": "impl/browser/password_manager/password_generation_manager.cc",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "381829"
},
{
"name": "C++",
"bytes": "2650535"
},
{
"name": "CSS",
"bytes": "2328"
},
{
"name": "Objective-C",
"bytes": "1921"
},
{
"name": "Python",
"bytes": "48748"
},
{
"name": "Shell",
"bytes": "43749"
},
{
"name": "XSLT",
"bytes": "4826"
}
]
}
|
'use strict';

// Babel interop flag: marks this compiled module as an ES module.
Object.defineProperty(exports, "__esModule", {
  value: true
});

// Babel helper: destructures the first `i` elements out of an iterable
// (used below to unpack [packageName, error] pairs from a Map entry).
var _slicedToArray = function () { function sliceIterator(arr, i) { var _arr = []; var _n = true; var _d = false; var _e = undefined; try { for (var _i = arr[Symbol.iterator](), _s; !(_n = (_s = _i.next()).done); _n = true) { _arr.push(_s.value); if (i && _arr.length === i) break; } } catch (err) { _d = true; _e = err; } finally { try { if (!_n && _i["return"]) _i["return"](); } finally { if (_d) throw _e; } } return _arr; } return function (arr, i) { if (Array.isArray(arr)) { return arr; } else if (Symbol.iterator in Object(arr)) { return sliceIterator(arr, i); } else { throw new TypeError("Invalid attempt to destructure non-iterable instance"); } }; }();

// Babel helper: defines prototype/static members with proper property
// descriptors (compiled form of ES2015 `class` method syntax).
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();

// Babel helper: throws if a compiled class constructor is called without `new`.
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
// Progress-notification view for atom-package-deps (Babel-compiled output).
// Shows an Atom info notification with an embedded <progress> element while a
// package's dependencies install, then reports success or failure.
var View = function () {
  function View(name, dependencies) {
    _classCallCheck(this, View);

    this.name = name;
    this.dependencies = dependencies;

    // Dismissable info notification listing the dependencies being installed.
    var notification = atom.notifications.addInfo('Installing ' + name + ' dependencies', {
      detail: 'Installing ' + dependencies.map(function (i) {
        return i.name;
      }).join(', '),
      dismissable: true
    });
    var progress = document.createElement('progress');

    // Public API: dismiss the notification.
    this.dispose = function () {
      notification.dismiss();
    };
    // Public API: called once per finished dependency to advance the bar.
    this.advance = function () {
      progress.value++;
    };

    progress.max = dependencies.length;
    progress.style.width = '100%';

    try {
      // Mount the progress bar inside the notification's detail area.
      var notificationView = atom.views.getView(notification);
      var notificationContent = notificationView.querySelector('.detail-content');

      if (notificationContent) {
        notificationContent.appendChild(progress);
      }
    } catch (_) {/* Notifications package is disabled */}
  }

  _createClass(View, [{
    key: 'complete',
    value: function complete(errors) {
      this.dispose();

      // No failures: report success and bail out.
      if (!errors.size) {
        atom.notifications.addSuccess('Installed ' + this.name + ' dependencies', {
          detail: 'Installed ' + this.dependencies.map(function (i) {
            return i.name;
          }).join(', ')
        });
        return;
      }

      var packages = [];
      // Babel-compiled for..of over `errors` ([packageName, error] entries).
      var _iteratorNormalCompletion = true;
      var _didIteratorError = false;
      var _iteratorError = undefined;

      try {
        for (var _iterator = errors[Symbol.iterator](), _step; !(_iteratorNormalCompletion = (_step = _iterator.next()).done); _iteratorNormalCompletion = true) {
          var _step$value = _slicedToArray(_step.value, 2);

          var packageName = _step$value[0];
          var error = _step$value[1];

          // NOTE(review): ' • ' looks like a mojibake'd bullet ('•') from
          // an encoding round-trip — fix upstream; left byte-identical here.
          packages.push(' • ' + packageName);
          console.error('[Package-Deps] Unable to install', packageName, ', Error:', error && error.stack || error);
        }
      } catch (err) {
        _didIteratorError = true;
        _iteratorError = err;
      } finally {
        try {
          if (!_iteratorNormalCompletion && _iterator.return) {
            _iterator.return();
          }
        } finally {
          if (_didIteratorError) {
            throw _iteratorError;
          }
        }
      }

      atom.notifications.addWarning('Failed to install ' + this.name + ' dependencies', {
        detail: 'These packages were not installed, check your console\nfor more info.\n' + packages.join('\n'),
        dismissable: true
      });
    }
  }]);

  return View;
}();

exports.default = View;
|
{
"content_hash": "4b568f75942449a791ddc641e96b0dc6",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 664,
"avg_line_length": 42.552083333333336,
"alnum_prop": 0.6139534883720931,
"repo_name": "jarednipper/atom-config",
"id": "231aa3ac19627c87a71adcb89ec92069f9c86101",
"size": "4087",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "packages/linter-eslint/node_modules/atom-package-deps/lib/view.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "26416"
},
{
"name": "CoffeeScript",
"bytes": "489295"
},
{
"name": "HTML",
"bytes": "3739"
},
{
"name": "JavaScript",
"bytes": "543861"
},
{
"name": "PHP",
"bytes": "54244"
},
{
"name": "Shell",
"bytes": "7493"
},
{
"name": "Vue",
"bytes": "2964"
}
]
}
|
/**
 * Demonstrates how to use Ext.chart.series.Pie: a donut pie chart with a
 * legend, rotate/highlight interactions, and toolbar actions for changing
 * the theme, regenerating data, and resetting rotation/legend state.
 */
Ext.define('KitchenSink.view.chart.Pie', {
    extend: 'Ext.Panel',
    requires: [
        'Ext.chart.PolarChart',
        'Ext.chart.series.Pie',
        'Ext.chart.interactions.Rotate'
    ],
    // Shared chart view-controller (provides onThemeChange, etc.).
    controller: {
        type: 'chart'
    },
    layout: 'fit',
    items: [{
        xtype: 'toolbar',
        docked: 'top',
        cls: 'charttoolbar',
        items: [{
            xtype: 'spacer'
        }, {
            iconCls: 'x-fa fa-picture-o',
            text: 'Theme',
            handler: 'onThemeChange'
        }, {
            iconCls: 'x-fa fa-refresh',
            text: 'Refresh',
            // Regenerate five random slices in the shared 'Pie' store.
            handler: function() {
                Ext.getStore('Pie').generateData(5);
            }
        }, {
            text: 'Reset',
            handler: function() {
                //ensure the query gets the chart for this kitchensink example
                var chart = this.up().up().down('polar');
                //reset the rotation
                Ext.ComponentQuery.query('series', chart)[0].setRotation(0);
                //reset the legend
                chart.resetLegendStore();
            }
        }]
    }, {
        xtype: 'polar',
        store: 'Pie',
        interactions: ['rotate', 'itemhighlight'],
        legend: {
            position: 'right',
            verticalWidth: 70
        },
        // Desktop gets extra inner padding so highlight margins stay visible.
        innerPadding: Ext.os.is.Desktop ? 40 : 10,
        series: [{
            type: 'pie',
            xField: 'g1',
            label: {
                field: 'name'
            },
            // Hollow center radius, making this a donut chart.
            donut: 30,
            highlightCfg: {
                margin: 20
            },
            style: {
                stroke: 'white',
                miterLimit: 10,
                lineCap: 'miter',
                lineWidth: 2
            }
        }],
        axes: []
    }],
    // Seed the store with initial data when the view is created.
    initialize: function() {
        this.callParent();
        Ext.getStore('Pie').generateData(5);
    }
});
|
{
"content_hash": "bfdfe6c9a095f0d320e0a73516d6fedc",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 78,
"avg_line_length": 25.9873417721519,
"alnum_prop": 0.41061860691670726,
"repo_name": "sqlwang/DeviceManagementSystem",
"id": "9b31728eff0492835fd5842459365422cbdb0d84",
"size": "2053",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "backend/webapp/ext/examples/kitchensink/modern/src/view/chart/Pie.js",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "112"
},
{
"name": "Batchfile",
"bytes": "1541"
},
{
"name": "CSS",
"bytes": "26209291"
},
{
"name": "HTML",
"bytes": "56714"
},
{
"name": "JavaScript",
"bytes": "53945062"
},
{
"name": "PHP",
"bytes": "136411"
},
{
"name": "Python",
"bytes": "3330"
},
{
"name": "Ruby",
"bytes": "11215"
}
]
}
|
<?php
/**
 * Return the length of the List value at key
 *
 * @author Ivan Shumkov
 * @package Rediska
 * @subpackage Commands
 * @version @package_version@
 * @link http://rediska.geometria-lab.net
 * @license http://www.opensource.org/licenses/bsd-license.php
 */
class Rediska_Command_GetListLength extends Rediska_Command_Abstract
{
    /**
     * Build an LLEN command against the connection that owns the key
     *
     * @param string $key Key name
     * @return Rediska_Connection_Exec
     */
    public function create($key)
    {
        $connection = $this->_rediska->getConnectionByKeyName($key);
        // Keys are stored under the configured namespace prefix.
        $namespacedKey = $this->_rediska->getOption('namespace') . $key;

        return new Rediska_Connection_Exec($connection, array('LLEN', $namespacedKey));
    }
}
|
{
"content_hash": "d48ab5294d08ce3676f8de33adacd0da",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 73,
"avg_line_length": 24.966666666666665,
"alnum_prop": 0.6288384512683578,
"repo_name": "webonyx/Rediska",
"id": "6b0f9a81127f2a3363a758be6a6e068f3b48e42b",
"size": "749",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "library/Rediska/Command/GetListLength.php",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PHP",
"bytes": "710824"
}
]
}
|
package boofcv.alg.sfm;
import boofcv.alg.distort.RemoveRadialPtoN_F64;
import boofcv.struct.calib.IntrinsicParameters;
import boofcv.struct.distort.PixelTransform_F32;
import boofcv.struct.image.ImageFloat32;
import boofcv.struct.image.ImageInteger;
import boofcv.struct.image.ImageSingleBand;
import georegression.struct.point.Point2D_F64;
import georegression.struct.point.Point3D_F64;
/**
 * Computes the 3D coordinate of a point seen by a visual camera when paired with a depth
 * sensor. The depth camera stores, for each pixel, the distance along the z-axis of whatever
 * it sees; the Kinect (structured light) and flash ladar (time of flight) are examples of
 * such sensors. Both cameras are assumed to be parallel, sharing the z-axis direction that
 * points straight out of the visual camera, and the pixel mapping between the two cameras is
 * assumed fixed over time.
 *
 * @author Peter Abeles
 */
public abstract class DepthSparse3D<T extends ImageSingleBand> {

   // Depth image currently being sampled (stored by reference, not copied).
   // Protected so implementations of lookupDepth() can read it.
   protected T depthImage;

   // Converts a distorted visual-camera pixel into normalized image coordinates
   private RemoveRadialPtoN_F64 pixelToNorm = new RemoveRadialPtoN_F64();

   // Output: 3D location expressed in the visual camera's coordinate system
   private Point3D_F64 cameraPt = new Point3D_F64();

   // Scratch storage for the normalized image coordinate
   private Point2D_F64 normCoord = new Point2D_F64();

   // Fixed mapping from visual-camera pixels to depth-camera pixels
   private PixelTransform_F32 visualToDepth;

   // Multiplier applied to raw depth values to change their units
   private double depthScale;

   /**
    * Configures parameters
    *
    * @param depthScale Used to change units found in the depth image.
    */
   public DepthSparse3D(double depthScale) {
      this.depthScale = depthScale;
   }

   /**
    * Configures intrinsic camera parameters
    *
    * @param paramVisual Intrinsic parameters of visual camera.
    * @param visualToDepth Transform from visual to depth camera pixel coordinate systems.
    */
   public void configure(IntrinsicParameters paramVisual , PixelTransform_F32 visualToDepth ) {
      this.visualToDepth = visualToDepth;
      pixelToNorm.set(paramVisual.fx, paramVisual.fy, paramVisual.skew,
            paramVisual.cx, paramVisual.cy, paramVisual.radial);
   }

   /**
    * Sets the depth image. A reference is saved internally.
    *
    * @param depthImage Image containing depth information.
    */
   public void setDepthImage(T depthImage) {
      this.depthImage = depthImage;
   }

   /**
    * Given a pixel coordinate in the visual camera, compute the 3D coordinate of that point.
    *
    * @param x x-coordinate of point in visual camera
    * @param y y-coordinate of point in visual camera
    * @return true if a 3D point could be computed and false if not
    */
   public boolean process( int x , int y ) {
      // map the visual pixel into the depth image
      visualToDepth.compute(x, y);
      int dx = (int)visualToDepth.distX;
      int dy = (int)visualToDepth.distY;

      if( !depthImage.isInBounds(dx, dy) )
         return false;

      // a value of zero marks a missing depth measurement
      double depth = lookupDepth(dx, dy);
      if( depth == 0 )
         return false;

      // undo lens distortion, then project out along the view ray
      pixelToNorm.compute(x, y, normCoord);

      cameraPt.z = depth * depthScale;
      cameraPt.x = cameraPt.z * normCoord.x;
      cameraPt.y = cameraPt.z * normCoord.y;

      return true;
   }

   /**
    * The found 3D coordinate of the point in the visual camera coordinate system. Is only
    * valid when {@link #process(int, int)} returns true.
    *
    * @return 3D coordinate of point in visual camera coordinate system
    */
   public Point3D_F64 getWorldPt() {
      return cameraPt;
   }

   /**
    * Internal function which looks up the pixel's depth. Depth is defined as the value of
    * the z-coordinate which is pointing out of the camera. If there is no depth measurement
    * at this location return 0.
    *
    * @param depthX x-coordinate of pixel in depth camera
    * @param depthY y-coordinate of pixel in depth camera
    * @return depth at the specified coordinate
    */
   protected abstract double lookupDepth(int depthX, int depthY);

   /**
    * Implementation for {@link ImageInteger}.
    */
   public static class I<T extends ImageInteger> extends DepthSparse3D<T> {
      public I(double depthScale) {
         super(depthScale);
      }

      @Override
      protected double lookupDepth(int depthX, int depthY) {
         return depthImage.unsafe_get(depthX, depthY);
      }
   }

   /**
    * Implementation for {@link ImageFloat32}.
    */
   public static class F32 extends DepthSparse3D<ImageFloat32> {
      public F32(double depthScale) {
         super(depthScale);
      }

      @Override
      protected double lookupDepth(int depthX, int depthY) {
         return depthImage.unsafe_get(depthX, depthY);
      }
   }
}
|
{
"content_hash": "b73a1f3b39fb52b65115890d995ae065",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 117,
"avg_line_length": 30.36024844720497,
"alnum_prop": 0.737315875613748,
"repo_name": "intrack/BoofCV-master",
"id": "d4265748d505a74ca636ebbe20ed259c084b00a4",
"size": "5568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/sfm/src/boofcv/alg/sfm/DepthSparse3D.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "4638"
},
{
"name": "Java",
"bytes": "8541941"
},
{
"name": "Python",
"bytes": "14673"
}
]
}
|
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings before dispatching the command.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "metrilum.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    # Hand the raw CLI arguments straight to Django's command dispatcher.
    execute_from_command_line(sys.argv)
|
{
"content_hash": "a4dde63753d4c4a308f1a2a06cd9fdd6",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 73,
"avg_line_length": 37,
"alnum_prop": 0.6563706563706564,
"repo_name": "Metrilum/Metrilum",
"id": "671268c04065c7fdcddc33287050d2c8bcc412ef",
"size": "540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "484"
},
{
"name": "JavaScript",
"bytes": "2914"
},
{
"name": "Python",
"bytes": "6117"
}
]
}
|
namespace STB {

//! An HTTP/1.1 client connection.
//
// Wraps a PLT::Socket and implements just enough of HTTP/1.1 to issue a
// request and spool the response body, including chunked transfer coding.
class Http
{
public:
   Http() = default;

   //! Return true if the connection is open
   bool isOpen() const { return socket.isOpen(); }

   //! Open the HTTP connection to the named host
   bool open(const std::string& hostname_)
   {
      hostname = hostname_;
      return socket.connect(hostname_.c_str(), HTTP_PORT);
   }

   //! Close connection
   void close()
   {
      socket.close();
   }

   //! Send an HTTP request and receive the response.
   //
   // \param spooler invoked with each chunk of response body data
   // \param method  HTTP method, e.g. "GET"
   // \param path    request path ("/" is substituted when empty)
   // \param search  optional query string, appended after '?'
   // \return HTTP status code, or 0 on any I/O or parse failure
   unsigned request(std::function<void(const uint8_t*, size_t)> spooler,
                    const std::string&                           method,
                    const std::string&                           path,
                    const std::string&                           search = "")
   {
      // Build the request line: "<METHOD> <path>[?<search>] HTTP/1.1"
      std::string request = method + " ";
      request += path.empty() ? "/" : path;
      if (!search.empty())
      {
         request += "?";
         request += search;
      }
      request += " HTTP/1.1";
      request += HTTP_EOL();

      // Request header fields (Host is mandatory in HTTP/1.1)
      request += "Host: ";
      request += hostname;
      request += ":" + std::to_string(HTTP_PORT);
      request += HTTP_EOL();

      // Empty line terminates the header section
      request += HTTP_EOL();

      if (!socket.write(request.c_str(), request.size())) return 0;

      // Read status line, e.g. "HTTP/1.1 200 OK"
      char line[256];
      if (!socket.gets(line, sizeof(line))) return 0;

      // Extract status code: the token after the first space.
      // FIX: previously "return false" — this function returns unsigned,
      // so the failure value is 0 like every other early exit here.
      char* space = strchr(line, ' ');
      if (space == nullptr) return 0;
      unsigned status = strtoul(space + 1, nullptr, 10);

      unsigned content_length{0};
      bool     chunked{false};

      // Parse response header fields until the blank separator line.
      while (true)
      {
         if (!socket.gets(line, sizeof(line))) return 0;

         // Trim the CRLF terminator (assumes gets() keeps it — matches the
         // original logic; a missing terminator is treated as an error)
         char* eol = strstr(line, HTTP_EOL());
         if (eol == nullptr) return 0;
         *eol = '\0';

         // Blank line => end of header section
         if (strlen(line) == 0) break;

         // Split "Name: value" and skip leading spaces in the value
         char* value = strchr(line, ':');
         if (value != nullptr)
         {
            *value++ = '\0';
            while (*value == ' ') { value++; }

            if (strcmp(line, "Content-Length") == 0)
            {
               content_length = strtoul(value, nullptr, 10);
            }
            else if (strcmp(line, "Transfer-Encoding") == 0)
            {
               chunked = strcmp(value, "chunked") == 0;
            }
         }
      }

      // Read the body: either a single Content-Length run, or a sequence
      // of chunks, each preceded by its hex size and followed by CRLF.
      while (true)
      {
         if (chunked)
         {
            // Chunk-size line (hex); a zero size terminates the body
            if (!socket.gets(line, sizeof(line))) return 0;
            content_length = strtoul(line, nullptr, 16);
            if (content_length == 0)
            {
               // Read final empty line
               if (!socket.gets(line, sizeof(line))) return 0;
               break;
            }
         }

         while (content_length > 0)
         {
            char   buffer[512];
            size_t bytes_read;
            size_t bytes_to_read = std::min(size_t(content_length), sizeof(buffer));
            if (!socket.read(buffer, bytes_to_read, bytes_read)) return 0;
            spooler((const uint8_t*)buffer, bytes_read);
            content_length -= bytes_read;
         }

         if (!chunked) break;

         // Consume the CRLF that trails each chunk's data
         if (!socket.gets(line, sizeof(line))) return 0;
      }

      return status;
   }

   //! Use HTTP to retrieve a file.
   //
   // The connection must already be open; the request is issued on this
   // object's socket. Returns true only on HTTP 200.
   bool getFile(const std::string& path,
                const std::string& out_file)
   {
      // FIX: removed an unused local "STB::Http http;" — it was never used
      // (request() below runs on *this*) and only suggested, misleadingly,
      // that a second connection was involved.
      PLT::File file(nullptr, out_file.c_str());
      if (!file.openForWrite()) return false;

      unsigned status = request([&file](const uint8_t* buffer, size_t size)
                                {
                                   file.write(buffer, size);
                                },
                                "GET",
                                path);

      return 200 == status;
   }

private:
   static const unsigned HTTP_PORT = 80;

   const char* HTTP_EOL() const { return "\r\n"; }

   std::string hostname;
   PLT::Socket socket;
};

} // namespace STB
#endif
|
{
"content_hash": "d734d10931e313ccdc87e213575a4411",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 84,
"avg_line_length": 25.24848484848485,
"alnum_prop": 0.4831973115698512,
"repo_name": "AnotherJohnH/Platform",
"id": "21dcc960c7cbb74795b7b5de4eb8e4214bafd2bb",
"size": "5652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "include/STB/Http.h",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "31298"
},
{
"name": "C",
"bytes": "34587"
},
{
"name": "C++",
"bytes": "1169844"
},
{
"name": "CMake",
"bytes": "75291"
},
{
"name": "Makefile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "17644"
},
{
"name": "Shell",
"bytes": "8749"
}
]
}
|
Sushi bar application built with MEAN technology.
Management of a sushi bar.
# Description
System to manage a virtual sushi bar.
Actors: customer(s), cook, waiter, administrator.
Each new customer is associated with a specified id, from a pool of available id (LIFO).
The customer can:
- check the online menu;
- add items to his/her basket;
- place more than one order;
- pay the bill when he/she is going to leave the bar.
The cook is notified of each new order.
When the related sushi is prepared, the system notifies the waiter, who brings the food to the related customer; at that moment the customer is charged.
Administrator can personalize the menu and supervise the work of the waiter/cook.
Fake payment system.
Ideally the system should represent a typical Asian sushi bar where the customers are sitting on a circular desk around the kitchen and they place an order by a tablet which was given them when they take seat.
# Original Requirement (italian)
progetto: gestione di un sushi bar
consegna: 8 settimane divisi in sprint di 2 settimane ciascuno
descrizione: sistema di gestione di un baracchino del sushi virtuale in
cui ogni cliente può consultare il menù online; una volta registrato
può effettuare l'ordine, indicare il tavolo a cui essere servito e pagare il conto.
Una volta pagato il conto al cuoco compare la comanda da cucinare e servire
al cliente la sua cena/pranzo.
Un amministratore del locale ha a disposizione una dashboard per gestire gli utenti,
personalizzare e comporre il menù con i prezzi delle portate, supervisionare il
lavoro del cuoco.
Piano di consegna.
sprint 1
- registrazione utenti (nominativo, email, password) con conferma via mail
- accesso amministratore da backend
- accesso cuoco da backend
- home page con info del ristorante
sprint 2:
- gestione pietanze e costi da parte dell'amministratore (nome, costo, quantità, descrizione, foto)
- navigazione del menù da parte del cliente e possibilità di comporre il carrello con le pietanze di cena/pranzo
sprint 3:
- processo di pagamento fake per utenti registrati e invio della comanda al cuoco
- gestione delle comande da parte del cuoco: presa in carico e completamento (fake)
- avviso al cliente (tramite mail) che la sua cena è pronta
sprint 4:
- gestione utenti da parte dell'amministratore
- cambio password per password dimenticata
|
{
"content_hash": "cd76c800f0bb14ac292fc681534b7731",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 209,
"avg_line_length": 39.31666666666667,
"alnum_prop": 0.7927087749046206,
"repo_name": "egch/sushi-bar-mean",
"id": "541f07981871342dd936a5fb4c8ebe7ef0ca3628",
"size": "2384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "README.md",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "2633"
}
]
}
|
package com.brainfotech.school2go.entity;
import java.util.Date;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.NamedQuery;
import javax.persistence.Table;
import javax.persistence.Temporal;
import javax.persistence.TemporalType;
/**
 * The persistent class for the volunteer_types database table.
 *
 * Note: field names double as JPA column mappings (only schoolId has an
 * explicit @Column), so renaming a field would change the mapped column.
 */
@Entity
@Table(name = "volunteer_types")
@NamedQuery(name = "VolunteerType.findAll", query = "SELECT v FROM VolunteerType v")
public class VolunteerType extends AbstractIdEntity {
    private static final long serialVersionUID = 1L;

    // Calendar date only — no time component (@Temporal(DATE)).
    @Temporal(TemporalType.DATE)
    private Date date;

    // Free-text description of the volunteering entry.
    private String description;

    // Maps to column "school_id" — presumably a foreign key to a schools
    // table; TODO confirm against the schema.
    @Column(name = "school_id")
    private int schoolId;

    // Type/category label; semantics defined by callers — TODO confirm.
    private String type;

    /** No-arg constructor required by JPA. */
    public VolunteerType() {
    }

    public Date getDate() {
        return this.date;
    }

    public void setDate(Date date) {
        this.date = date;
    }

    public String getDescription() {
        return this.description;
    }

    public void setDescription(String description) {
        this.description = description;
    }

    public int getSchoolId() {
        return this.schoolId;
    }

    public void setSchoolId(int schoolId) {
        this.schoolId = schoolId;
    }

    public String getType() {
        return this.type;
    }

    public void setType(String type) {
        this.type = type;
    }
}
|
{
"content_hash": "1126d442d15945727ba2b5f2d8accbcf",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 84,
"avg_line_length": 19.970149253731343,
"alnum_prop": 0.7047832585949177,
"repo_name": "brainfotech/school2go",
"id": "1c9761015019fcc22181c8d0176fe93758629a5e",
"size": "1338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/java/com/brainfotech/school2go/entity/VolunteerType.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "914821"
},
{
"name": "CoffeeScript",
"bytes": "4704"
},
{
"name": "Erlang",
"bytes": "4855"
},
{
"name": "Go",
"bytes": "6808"
},
{
"name": "Java",
"bytes": "87241"
},
{
"name": "JavaScript",
"bytes": "4163742"
},
{
"name": "PHP",
"bytes": "93122"
},
{
"name": "Perl",
"bytes": "580035"
},
{
"name": "Python",
"bytes": "5596"
},
{
"name": "Shell",
"bytes": "63585"
}
]
}
|
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/gpu/vt_video_encode_accelerator_mac.h"
#include <memory>
#include "base/threading/thread_task_runner_handle.h"
#include "media/base/mac/video_frame_mac.h"
#include "third_party/webrtc/system_wrappers/include/clock.h"
namespace media {
namespace {
// TODO(emircan): Check if we can find the actual system capabilities via
// creating VTCompressionSessions with varying requirements.
// See crbug.com/584784.
const size_t kBitsPerByte = 8;
// Resolution used for the hardware-support probe session in
// GetSupportedProfiles().
const size_t kDefaultResolutionWidth = 640;
const size_t kDefaultResolutionHeight = 480;
// Advertised frame-rate cap (30/1 fps).
const size_t kMaxFrameRateNumerator = 30;
const size_t kMaxFrameRateDenominator = 1;
// Advertised resolution cap.
const size_t kMaxResolutionWidth = 4096;
const size_t kMaxResolutionHeight = 2160;
// Number of bitstream buffers requested from the client in Initialize().
const size_t kNumInputBuffers = 3;
}  // namespace
// Timestamp bookkeeping for a frame submitted to VideoToolbox whose encode
// has not yet completed; ownership is threaded through the completion
// callback as a raw pointer (see EncodeTask / CompressionCallback).
struct VTVideoEncodeAccelerator::InProgressFrameEncode {
  InProgressFrameEncode(base::TimeDelta rtp_timestamp, base::TimeTicks ref_time)
      : timestamp(rtp_timestamp), reference_time(ref_time) {}
  const base::TimeDelta timestamp;
  const base::TimeTicks reference_time;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(InProgressFrameEncode);
};

// Holds an encoded sample while it is posted between threads; the
// CMSampleBufferRef is retained for the lifetime of this object.
struct VTVideoEncodeAccelerator::EncodeOutput {
  EncodeOutput(VTEncodeInfoFlags info_flags,
               CMSampleBufferRef sbuf,
               base::TimeDelta timestamp)
      : info(info_flags),
        sample_buffer(sbuf, base::scoped_policy::RETAIN),
        capture_timestamp(timestamp) {}
  const VTEncodeInfoFlags info;
  const base::ScopedCFTypeRef<CMSampleBufferRef> sample_buffer;
  const base::TimeDelta capture_timestamp;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(EncodeOutput);
};

// A client-provided shared-memory output buffer into which encoded
// bitstream data is copied (see ReturnBitstreamBuffer).
struct VTVideoEncodeAccelerator::BitstreamBufferRef {
  BitstreamBufferRef(int32_t id,
                     std::unique_ptr<base::SharedMemory> shm,
                     size_t size)
      : id(id), shm(std::move(shm)), size(size) {}
  const int32_t id;
  const std::unique_ptr<base::SharedMemory> shm;
  const size_t size;

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(BitstreamBufferRef);
};
// Tuning for |bitrate_adjuster_|:
// .5 is set as a minimum to prevent overcompensating for large temporary
// overshoots. We don't want to degrade video quality too badly.
// .95 is set to prevent oscillations. When a lower bitrate is set on the
// encoder than previously set, its output seems to have a brief period of
// drastically reduced bitrate, so we want to avoid that. In steady state
// conditions, 0.95 seems to give us better overall bitrate over long periods
// of time.
VTVideoEncodeAccelerator::VTVideoEncodeAccelerator()
    : target_bitrate_(0),
      bitrate_adjuster_(webrtc::Clock::GetRealTimeClock(), .5, .95),
      client_task_runner_(base::ThreadTaskRunnerHandle::Get()),
      encoder_thread_("VTEncoderThread"),
      encoder_task_weak_factory_(this) {
  // Weak pointer handed to tasks bound for |encoder_thread_|; invalidated in
  // DestroyTask() to cancel any still-pending work.
  encoder_weak_ptr_ = encoder_task_weak_factory_.GetWeakPtr();
}

VTVideoEncodeAccelerator::~VTVideoEncodeAccelerator() {
  DVLOG(3) << __func__;
  DCHECK(thread_checker_.CalledOnValidThread());

  // Destroy() stops |encoder_thread_| and tears down the session; after it
  // returns no encoder-thread work may remain, which the DCHECKs verify.
  Destroy();
  DCHECK(!encoder_thread_.IsRunning());
  DCHECK(!encoder_task_weak_factory_.HasWeakPtrs());
}
// Probes for hardware H.264 encode support by creating (and immediately
// destroying) a compression session that requires a hardware encoder.
// Returns an empty list when hardware acceleration is unavailable.
VideoEncodeAccelerator::SupportedProfiles
VTVideoEncodeAccelerator::GetSupportedProfiles() {
  DVLOG(3) << __func__;
  DCHECK(thread_checker_.CalledOnValidThread());

  SupportedProfiles profiles;
  const bool rv = CreateCompressionSession(
      video_toolbox::DictionaryWithKeysAndValues(nullptr, nullptr, 0),
      gfx::Size(kDefaultResolutionWidth, kDefaultResolutionHeight), true);
  DestroyCompressionSession();
  if (!rv) {
    VLOG(1)
        << "Hardware encode acceleration is not available on this platform.";
    return profiles;
  }

  // Only H.264 baseline is advertised, capped at kMaxResolution* and
  // kMaxFrameRate* from the anonymous namespace above.
  SupportedProfile profile;
  profile.profile = H264PROFILE_BASELINE;
  profile.max_framerate_numerator = kMaxFrameRateNumerator;
  profile.max_framerate_denominator = kMaxFrameRateDenominator;
  profile.max_resolution = gfx::Size(kMaxResolutionWidth, kMaxResolutionHeight);
  profiles.push_back(profile);
  return profiles;
}

// Validates the requested configuration (I420 in, H.264 baseline out),
// starts the encoder thread, creates the compression session, and asks the
// client for output buffers. Returns false on any failure.
bool VTVideoEncodeAccelerator::Initialize(VideoPixelFormat format,
                                          const gfx::Size& input_visible_size,
                                          VideoCodecProfile output_profile,
                                          uint32_t initial_bitrate,
                                          Client* client) {
  DVLOG(3) << __func__ << ": input_format=" << VideoPixelFormatToString(format)
           << ", input_visible_size=" << input_visible_size.ToString()
           << ", output_profile=" << GetProfileName(output_profile)
           << ", initial_bitrate=" << initial_bitrate;
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(client);

  if (PIXEL_FORMAT_I420 != format) {
    DLOG(ERROR) << "Input format not supported= "
                << VideoPixelFormatToString(format);
    return false;
  }
  if (H264PROFILE_BASELINE != output_profile) {
    DLOG(ERROR) << "Output profile not supported= "
                << GetProfileName(output_profile);
    return false;
  }

  client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client));
  client_ = client_ptr_factory_->GetWeakPtr();
  input_visible_size_ = input_visible_size;
  frame_rate_ = kMaxFrameRateNumerator / kMaxFrameRateDenominator;
  initial_bitrate_ = initial_bitrate;
  // NOTE(review): one byte per visible pixel — presumably ample headroom for
  // a compressed H.264 frame at this size; confirm against worst-case output.
  bitstream_buffer_size_ = input_visible_size.GetArea();

  if (!encoder_thread_.Start()) {
    DLOG(ERROR) << "Failed spawning encoder thread.";
    return false;
  }
  encoder_thread_task_runner_ = encoder_thread_.task_runner();

  if (!ResetCompressionSession()) {
    DLOG(ERROR) << "Failed creating compression session.";
    return false;
  }

  // Request kNumInputBuffers output buffers of the computed size from the
  // client, on the client task runner.
  client_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&Client::RequireBitstreamBuffers, client_, kNumInputBuffers,
                 input_visible_size_, bitstream_buffer_size_));
  return true;
}
// Client-thread entry point: trampolines the frame onto |encoder_thread_|
// for the actual encode (see EncodeTask).
void VTVideoEncodeAccelerator::Encode(const scoped_refptr<VideoFrame>& frame,
                                      bool force_keyframe) {
  DVLOG(3) << __func__;
  DCHECK(thread_checker_.CalledOnValidThread());

  encoder_thread_task_runner_->PostTask(
      FROM_HERE, base::Bind(&VTVideoEncodeAccelerator::EncodeTask,
                            base::Unretained(this), frame, force_keyframe));
}

// Client-thread entry point: validates and maps the shared-memory buffer,
// then hands it to the encoder thread for filling.
void VTVideoEncodeAccelerator::UseOutputBitstreamBuffer(
    const BitstreamBuffer& buffer) {
  DVLOG(3) << __func__ << ": buffer size=" << buffer.size();
  DCHECK(thread_checker_.CalledOnValidThread());

  if (buffer.size() < bitstream_buffer_size_) {
    DLOG(ERROR) << "Output BitstreamBuffer isn't big enough: " << buffer.size()
                << " vs. " << bitstream_buffer_size_;
    client_->NotifyError(kInvalidArgumentError);
    return;
  }

  std::unique_ptr<base::SharedMemory> shm(
      new base::SharedMemory(buffer.handle(), false));
  if (!shm->Map(buffer.size())) {
    DLOG(ERROR) << "Failed mapping shared memory.";
    client_->NotifyError(kPlatformFailureError);
    return;
  }

  std::unique_ptr<BitstreamBufferRef> buffer_ref(
      new BitstreamBufferRef(buffer.id(), std::move(shm), buffer.size()));
  encoder_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&VTVideoEncodeAccelerator::UseOutputBitstreamBufferTask,
                 base::Unretained(this), base::Passed(&buffer_ref)));
}

// Client-thread entry point: forwards the new bitrate/framerate to the
// encoder thread (see RequestEncodingParametersChangeTask).
void VTVideoEncodeAccelerator::RequestEncodingParametersChange(
    uint32_t bitrate,
    uint32_t framerate) {
  DVLOG(3) << __func__ << ": bitrate=" << bitrate
           << ": framerate=" << framerate;
  DCHECK(thread_checker_.CalledOnValidThread());

  encoder_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&VTVideoEncodeAccelerator::RequestEncodingParametersChangeTask,
                 base::Unretained(this), bitrate, framerate));
}

// Cancels client callbacks, then tears down the encoder either via a task on
// the (still running) encoder thread or directly on this thread.
void VTVideoEncodeAccelerator::Destroy() {
  DVLOG(3) << __func__;
  DCHECK(thread_checker_.CalledOnValidThread());

  // Cancel all callbacks.
  client_ptr_factory_.reset();

  if (encoder_thread_.IsRunning()) {
    encoder_thread_task_runner_->PostTask(
        FROM_HERE, base::Bind(&VTVideoEncodeAccelerator::DestroyTask,
                              base::Unretained(this)));
    // Stop() joins the thread, so DestroyTask completes before we return.
    encoder_thread_.Stop();
  } else {
    DestroyTask();
  }
}
// Encoder-thread task: wraps the frame in a CVPixelBuffer, refreshes the
// adjusted bitrate, and submits the frame to VideoToolbox. Timestamp context
// is passed to the completion callback as a raw heap pointer.
void VTVideoEncodeAccelerator::EncodeTask(
    const scoped_refptr<VideoFrame>& frame,
    bool force_keyframe) {
  DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
  DCHECK(compression_session_);
  DCHECK(frame);

  // TODO(emircan): See if we can eliminate a copy here by using
  // CVPixelBufferPool for the allocation of incoming VideoFrames.
  base::ScopedCFTypeRef<CVPixelBufferRef> pixel_buffer =
      WrapVideoFrameInCVPixelBuffer(*frame);
  base::ScopedCFTypeRef<CFDictionaryRef> frame_props =
      video_toolbox::DictionaryWithKeyValue(
          kVTEncodeFrameOptionKey_ForceKeyFrame,
          force_keyframe ? kCFBooleanTrue : kCFBooleanFalse);

  // Prefer the capture reference time from frame metadata; fall back to now.
  base::TimeTicks ref_time;
  if (!frame->metadata()->GetTimeTicks(VideoFrameMetadata::REFERENCE_TIME,
                                       &ref_time)) {
    ref_time = base::TimeTicks::Now();
  }
  auto timestamp_cm =
      CMTimeMake(frame->timestamp().InMicroseconds(), USEC_PER_SEC);
  // Wrap information we'll need after the frame is encoded in a heap object.
  // We'll get the pointer back from the VideoToolbox completion callback.
  std::unique_ptr<InProgressFrameEncode> request(
      new InProgressFrameEncode(frame->timestamp(), ref_time));

  // Update the bitrate if needed.
  SetAdjustedBitrate(bitrate_adjuster_.GetAdjustedBitrateBps());

  // We can pass the ownership of |request| to the encode callback if
  // successful. Otherwise let it fall out of scope.
  OSStatus status = VTCompressionSessionEncodeFrame(
      compression_session_, pixel_buffer, timestamp_cm, CMTime{0, 0, 0, 0},
      frame_props, reinterpret_cast<void*>(request.get()), nullptr);
  if (status != noErr) {
    DLOG(ERROR) << " VTCompressionSessionEncodeFrame failed: " << status;
    NotifyError(kPlatformFailureError);
  } else {
    // Ownership transferred to CompressionCallback; release without deleting.
    CHECK(request.release());
  }
}

// Encoder-thread task: pairs the newly available output buffer with any
// queued encoded output, or stashes it for later.
void VTVideoEncodeAccelerator::UseOutputBitstreamBufferTask(
    std::unique_ptr<BitstreamBufferRef> buffer_ref) {
  DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());

  // If there is already EncodeOutput waiting, copy its output first.
  if (!encoder_output_queue_.empty()) {
    std::unique_ptr<VTVideoEncodeAccelerator::EncodeOutput> encode_output =
        std::move(encoder_output_queue_.front());
    encoder_output_queue_.pop_front();
    ReturnBitstreamBuffer(std::move(encode_output), std::move(buffer_ref));
    return;
  }
  bitstream_buffer_queue_.push_back(std::move(buffer_ref));
}
// Encoder-thread task: applies a new target bitrate and/or frame rate to the
// live compression session. Notifies the client of a platform failure if the
// session no longer exists.
void VTVideoEncodeAccelerator::RequestEncodingParametersChangeTask(
    uint32_t bitrate,
    uint32_t framerate) {
  DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());

  if (!compression_session_) {
    NotifyError(kPlatformFailureError);
    return;
  }

  if (framerate != static_cast<uint32_t>(frame_rate_)) {
    // FIX: record the new rate before pushing it to the session. The
    // previous code passed the stale |frame_rate_| value to the property
    // setter and never stored |framerate|, so the session's expected frame
    // rate was never actually updated. Clamp to at least 1 fps.
    frame_rate_ = framerate > 1 ? framerate : 1;
    video_toolbox::SessionPropertySetter session_property_setter(
        compression_session_);
    session_property_setter.Set(kVTCompressionPropertyKey_ExpectedFrameRate,
                                frame_rate_);
  }

  if (bitrate != static_cast<uint32_t>(target_bitrate_) && bitrate > 0) {
    target_bitrate_ = bitrate;
    bitrate_adjuster_.SetTargetBitrateBps(target_bitrate_);
    SetAdjustedBitrate(bitrate_adjuster_.GetAdjustedBitrateBps());
  }
}
// Pushes the adjuster's current bitrate to the session, updating both the
// average-bitrate property and the per-second data-rate limit. No-op if the
// value has not changed since the last call.
void VTVideoEncodeAccelerator::SetAdjustedBitrate(int32_t bitrate) {
  DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());

  if (bitrate == encoder_set_bitrate_)
    return;

  encoder_set_bitrate_ = bitrate;
  video_toolbox::SessionPropertySetter session_property_setter(
      compression_session_);
  bool rv = session_property_setter.Set(
      kVTCompressionPropertyKey_AverageBitRate, encoder_set_bitrate_);
  // Data-rate limit is expressed in bytes per one-second window.
  rv &= session_property_setter.Set(
      kVTCompressionPropertyKey_DataRateLimits,
      video_toolbox::ArrayWithIntegerAndFloat(
          encoder_set_bitrate_ / kBitsPerByte, 1.0f));
  DLOG_IF(ERROR, !rv)
      << "Couldn't change bitrate parameters of encode session.";
}

// Final teardown; runs either on the encoder thread or, if it was never
// started, on the client thread.
void VTVideoEncodeAccelerator::DestroyTask() {
  DCHECK(thread_checker_.CalledOnValidThread() ||
         (encoder_thread_.IsRunning() &&
          encoder_thread_task_runner_->BelongsToCurrentThread()));

  // Cancel all encoder thread callbacks.
  encoder_task_weak_factory_.InvalidateWeakPtrs();

  // This call blocks until all pending frames are flushed out.
  DestroyCompressionSession();
}

// Reports an error to the client on the client task runner.
void VTVideoEncodeAccelerator::NotifyError(
    VideoEncodeAccelerator::Error error) {
  DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());
  client_task_runner_->PostTask(
      FROM_HERE, base::Bind(&Client::NotifyError, client_, error));
}

// static
// VideoToolbox completion callback: takes back ownership of the
// InProgressFrameEncode allocated in EncodeTask, wraps the sample buffer,
// and reposts to the encoder thread via the weak pointer (so it is dropped
// if the encoder is being destroyed).
void VTVideoEncodeAccelerator::CompressionCallback(void* encoder_opaque,
                                                   void* request_opaque,
                                                   OSStatus status,
                                                   VTEncodeInfoFlags info,
                                                   CMSampleBufferRef sbuf) {
  // This function may be called asynchronously, on a different thread from the
  // one that calls VTCompressionSessionEncodeFrame.
  DVLOG(3) << __func__;

  auto* encoder = reinterpret_cast<VTVideoEncodeAccelerator*>(encoder_opaque);
  DCHECK(encoder);

  // InProgressFrameEncode holds timestamp information of the encoded frame.
  std::unique_ptr<InProgressFrameEncode> frame_info(
      reinterpret_cast<InProgressFrameEncode*>(request_opaque));

  // EncodeOutput holds onto CMSampleBufferRef when posting task between
  // threads.
  std::unique_ptr<EncodeOutput> encode_output(
      new EncodeOutput(info, sbuf, frame_info->timestamp));

  // This method is NOT called on |encoder_thread_|, so we still need to
  // post a task back to it to do work.
  encoder->encoder_thread_task_runner_->PostTask(
      FROM_HERE, base::Bind(&VTVideoEncodeAccelerator::CompressionCallbackTask,
                            encoder->encoder_weak_ptr_, status,
                            base::Passed(&encode_output)));
}
// Encoder-thread continuation of CompressionCallback: pairs the encoded
// output with a waiting client buffer, or queues it until one arrives.
void VTVideoEncodeAccelerator::CompressionCallbackTask(
    OSStatus status,
    std::unique_ptr<EncodeOutput> encode_output) {
  DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());

  if (status != noErr) {
    DLOG(ERROR) << " encode failed: " << status;
    NotifyError(kPlatformFailureError);
    return;
  }

  // If there isn't any BitstreamBuffer to copy into, add it to a queue for
  // later use.
  if (bitstream_buffer_queue_.empty()) {
    encoder_output_queue_.push_back(std::move(encode_output));
    return;
  }

  std::unique_ptr<VTVideoEncodeAccelerator::BitstreamBufferRef> buffer_ref =
      std::move(bitstream_buffer_queue_.front());
  bitstream_buffer_queue_.pop_front();
  ReturnBitstreamBuffer(std::move(encode_output), std::move(buffer_ref));
}

// Copies an encoded sample into a client buffer (converting to Annex B),
// updates the bitrate adjuster with the actual output size, and notifies
// the client. Dropped frames are reported with size 0.
void VTVideoEncodeAccelerator::ReturnBitstreamBuffer(
    std::unique_ptr<EncodeOutput> encode_output,
    std::unique_ptr<VTVideoEncodeAccelerator::BitstreamBufferRef> buffer_ref) {
  DVLOG(3) << __func__;
  DCHECK(encoder_thread_task_runner_->BelongsToCurrentThread());

  if (encode_output->info & kVTEncodeInfo_FrameDropped) {
    DVLOG(2) << " frame dropped";
    client_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&Client::BitstreamBufferReady, client_, buffer_ref->id, 0,
                   false, encode_output->capture_timestamp));
    return;
  }

  // A sample is a keyframe iff its attachments lack the NotSync key.
  auto* sample_attachments = static_cast<CFDictionaryRef>(
      CFArrayGetValueAtIndex(CMSampleBufferGetSampleAttachmentsArray(
                                 encode_output->sample_buffer.get(), true),
                             0));
  const bool keyframe = !CFDictionaryContainsKey(
      sample_attachments, kCMSampleAttachmentKey_NotSync);

  size_t used_buffer_size = 0;
  const bool copy_rv = video_toolbox::CopySampleBufferToAnnexBBuffer(
      encode_output->sample_buffer.get(), keyframe, buffer_ref->size,
      reinterpret_cast<char*>(buffer_ref->shm->memory()), &used_buffer_size);
  if (!copy_rv) {
    DLOG(ERROR) << "Cannot copy output from SampleBuffer to AnnexBBuffer.";
    used_buffer_size = 0;
  }
  // Feed the actual output size back so the adjuster can track overshoot.
  bitrate_adjuster_.Update(used_buffer_size);

  client_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&Client::BitstreamBufferReady, client_, buffer_ref->id,
                 used_buffer_size, keyframe, encode_output->capture_timestamp));
}
// Tears down any existing session and creates + configures a fresh one for
// |input_visible_size_|. Returns false if creation or configuration fails.
bool VTVideoEncodeAccelerator::ResetCompressionSession() {
  DCHECK(thread_checker_.CalledOnValidThread());

  DestroyCompressionSession();

  // Pixel-buffer attributes: GL-compatible, IOSurface-backed, NV12
  // (420v bi-planar video range).
  CFTypeRef attributes_keys[] = {kCVPixelBufferOpenGLCompatibilityKey,
                                 kCVPixelBufferIOSurfacePropertiesKey,
                                 kCVPixelBufferPixelFormatTypeKey};
  const int format[] = {kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange};
  CFTypeRef attributes_values[] = {
      kCFBooleanTrue,
      video_toolbox::DictionaryWithKeysAndValues(nullptr, nullptr, 0).release(),
      video_toolbox::ArrayWithIntegers(format, arraysize(format)).release()};
  const base::ScopedCFTypeRef<CFDictionaryRef> attributes =
      video_toolbox::DictionaryWithKeysAndValues(
          attributes_keys, attributes_values, arraysize(attributes_keys));
  // The dictionary retained the released values above; drop our references.
  for (auto* v : attributes_values)
    CFRelease(v);

  bool session_rv =
      CreateCompressionSession(attributes, input_visible_size_, false);
  if (!session_rv) {
    DestroyCompressionSession();
    return false;
  }

  const bool configure_rv = ConfigureCompressionSession();
  if (configure_rv)
    RequestEncodingParametersChange(initial_bitrate_, frame_rate_);
  return configure_rv;
}

// Creates the VTCompressionSession for H.264 at |input_size|. When
// |require_hw_encoding| is set the session only succeeds with a hardware
// encoder (used by GetSupportedProfiles as a capability probe); otherwise
// hardware is merely enabled/preferred.
bool VTVideoEncodeAccelerator::CreateCompressionSession(
    base::ScopedCFTypeRef<CFDictionaryRef> attributes,
    const gfx::Size& input_size,
    bool require_hw_encoding) {
  DCHECK(thread_checker_.CalledOnValidThread());

  std::vector<CFTypeRef> encoder_keys;
  std::vector<CFTypeRef> encoder_values;
  if (require_hw_encoding) {
    encoder_keys.push_back(
        kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder);
    encoder_values.push_back(kCFBooleanTrue);
  } else {
    encoder_keys.push_back(
        kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder);
    encoder_values.push_back(kCFBooleanTrue);
  }
  base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec =
      video_toolbox::DictionaryWithKeysAndValues(
          encoder_keys.data(), encoder_values.data(), encoder_keys.size());

  // Create the compression session.
  // Note that the encoder object is given to the compression session as the
  // callback context using a raw pointer. The C API does not allow us to use a
  // smart pointer, nor is this encoder ref counted. However, this is still
  // safe, because we 1) we own the compression session and 2) we tear it down
  // safely. When destructing the encoder, the compression session is flushed
  // and invalidated. Internally, VideoToolbox will join all of its threads
  // before returning to the client. Therefore, when control returns to us, we
  // are guaranteed that the output callback will not execute again.
  OSStatus status = VTCompressionSessionCreate(
      kCFAllocatorDefault, input_size.width(), input_size.height(),
      kCMVideoCodecType_H264, encoder_spec, attributes,
      nullptr /* compressedDataAllocator */,
      &VTVideoEncodeAccelerator::CompressionCallback,
      reinterpret_cast<void*>(this), compression_session_.InitializeInto());
  if (status != noErr) {
    DLOG(ERROR) << " VTCompressionSessionCreate failed: " << status;
    return false;
  }
  DVLOG(3) << " VTCompressionSession created with HW encode: "
           << require_hw_encoding << ", input size=" << input_size.ToString();
  return true;
}
// Applies the fixed session properties: baseline profile, real-time mode,
// no frame reordering (no B-frames), and keyframe interval limits.
bool VTVideoEncodeAccelerator::ConfigureCompressionSession() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(compression_session_);

  video_toolbox::SessionPropertySetter session_property_setter(
      compression_session_);
  bool rv = true;
  rv &= session_property_setter.Set(kVTCompressionPropertyKey_ProfileLevel,
                                    kVTProfileLevel_H264_Baseline_AutoLevel);
  rv &= session_property_setter.Set(kVTCompressionPropertyKey_RealTime, true);
  rv &= session_property_setter.Set(
      kVTCompressionPropertyKey_AllowFrameReordering, false);
  // Limit keyframe output to 4 minutes, see crbug.com/658429.
  // (7200 frames at 30fps == 240 seconds; both caps are set.)
  rv &= session_property_setter.Set(
      kVTCompressionPropertyKey_MaxKeyFrameInterval, 7200);
  rv &= session_property_setter.Set(
      kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration, 240);
  DLOG_IF(ERROR, !rv) << " Setting session property failed.";
  return rv;
}

// Invalidates and releases the session, if any. Safe to call from either the
// client thread or the encoder thread (matching DestroyTask's contract).
void VTVideoEncodeAccelerator::DestroyCompressionSession() {
  DCHECK(thread_checker_.CalledOnValidThread() ||
         (encoder_thread_.IsRunning() &&
          encoder_thread_task_runner_->BelongsToCurrentThread()));

  if (compression_session_) {
    VTCompressionSessionInvalidate(compression_session_);
    compression_session_.reset();
  }
}
} // namespace media
|
{
"content_hash": "68a4655a5e3856e4d7a84919f4fd6c4f",
"timestamp": "",
"source": "github",
"line_count": 559,
"max_line_length": 80,
"avg_line_length": 38.04293381037567,
"alnum_prop": 0.696651932662466,
"repo_name": "google-ar/WebARonARCore",
"id": "f5aff7e4c381930a28278b924844c62e7c518bc6",
"size": "21266",
"binary": false,
"copies": "1",
"ref": "refs/heads/webarcore_57.0.2987.5",
"path": "media/gpu/vt_video_encode_accelerator_mac.cc",
"mode": "33188",
"license": "apache-2.0",
"language": []
}
|
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright 2015 Red Hat, Inc. and/or its affiliates
and other contributors as indicated by the @author tags.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/" debug="true">
<appender name="console" class="org.apache.log4j.ConsoleAppender">
<param name="Target" value="System.out"/>
<param name="Threshold" value="DEBUG" />
<layout class="org.apache.log4j.PatternLayout">
<param name="ConversionPattern" value="%-5p %d{dd-MM HH:mm:ss,SSS} (%F:%M:%L) -%m%n" />
</layout>
</appender>
<root>
<priority value ="DEBUG" />
<appender-ref ref="console" />
</root>
</log4j:configuration>
|
{
"content_hash": "bf8147d55170659ac5d5bb68e7edf57f",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 94,
"avg_line_length": 35.270270270270274,
"alnum_prop": 0.6942528735632184,
"repo_name": "jshaughn/hawkular-alerts",
"id": "fb7a6550fa2f5c3e330d7d6fdcb5ae0bd4976d91",
"size": "1305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hawkular-alerts-engine/src/test/resources/log4j.xml",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "20559"
},
{
"name": "Java",
"bytes": "454088"
},
{
"name": "Shell",
"bytes": "1474"
},
{
"name": "XSLT",
"bytes": "20344"
}
]
}
|
package com.particles.android.util;
import static android.opengl.GLES20.GL_LINEAR;
import static android.opengl.GLES20.GL_LINEAR_MIPMAP_LINEAR;
import static android.opengl.GLES20.GL_TEXTURE_2D;
import static android.opengl.GLES20.GL_TEXTURE_MAG_FILTER;
import static android.opengl.GLES20.GL_TEXTURE_MIN_FILTER;
import static android.opengl.GLES20.glBindTexture;
import static android.opengl.GLES20.glDeleteTextures;
import static android.opengl.GLES20.glGenTextures;
import static android.opengl.GLES20.glGenerateMipmap;
import static android.opengl.GLES20.glTexParameteri;
import static android.opengl.GLUtils.texImage2D;
import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.util.Log;
public class TextureHelper {
    private static final String TAG = "TextureHelper";

    /**
     * Loads a texture from a resource ID, returning the OpenGL ID for that
     * texture. Returns 0 if the load failed.
     *
     * @param context    context used to resolve the resource
     * @param resourceId drawable resource to decode and upload
     * @return the OpenGL texture object ID, or 0 on failure
     */
    public static int loadTexture(Context context, int resourceId) {
        final int[] textureIds = new int[1];
        glGenTextures(1, textureIds, 0);

        if (textureIds[0] == 0) {
            if (LoggerConfig.ON) {
                Log.w(TAG, "Could not generate a new OpenGL texture object.");
            }
            return 0;
        }

        // Decode the raw resource without density scaling.
        final BitmapFactory.Options decodeOptions = new BitmapFactory.Options();
        decodeOptions.inScaled = false;
        final Bitmap bitmap = BitmapFactory.decodeResource(
            context.getResources(), resourceId, decodeOptions);

        if (bitmap == null) {
            if (LoggerConfig.ON) {
                Log.w(TAG, "Resource ID " + resourceId
                    + " could not be decoded.");
            }
            glDeleteTextures(1, textureIds, 0);
            return 0;
        }

        // Bind, then set filtering — a default must be set or the texture
        // samples as black — and upload the pixel data.
        glBindTexture(GL_TEXTURE_2D, textureIds[0]);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
            GL_LINEAR_MIPMAP_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        texImage2D(GL_TEXTURE_2D, 0, bitmap, 0);

        // Note: Following call may cause an error to be reported in the
        // ADB log as follows: E/IMGSRV(20095): :0: HardwareMipGen:
        // Failed to generate texture mipmap levels (error=3)
        // No OpenGL error will be encountered (glGetError() will return
        // 0). If this happens, just squash the source image to be
        // square. It will look the same because of texture coordinates,
        // and mipmap generation will work.
        glGenerateMipmap(GL_TEXTURE_2D);

        // Pixel data now lives in GL; release the Java-side copy and unbind
        // so later texture calls don't accidentally modify this texture.
        bitmap.recycle();
        glBindTexture(GL_TEXTURE_2D, 0);

        return textureIds[0];
    }
}
|
{
"content_hash": "5f0d2e056e9d1e48b0b9f22c2caa2877",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 78,
"avg_line_length": 34.82608695652174,
"alnum_prop": 0.6438826466916354,
"repo_name": "sunzhiyuan/opengles2forandroid",
"id": "0175d1040d14a852a28ee88ca356d26041f7c0c3",
"size": "3605",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "code-of-bookopengles2forandroid/code/Particles/src/com/particles/android/util/TextureHelper.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "33319"
},
{
"name": "Java",
"bytes": "964859"
}
]
}
|
End of preview.
No dataset card yet
- Downloads last month
- 5