file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
sr_mulher.py | # !/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
ERP+
"""
__author__ = 'CVtek dev'
__credits__ = []
__version__ = "1.0"
__maintainer__ = "CVTek dev"
__status__ = "Development"
__model_name__ = 'sr_mulher.SRMulher'
import auth, base_models
from orm import *
from form import *
class SRMulher(Model, View):
| def __init__(self, **kargs):
Model.__init__(self, **kargs)
self.__name__ = 'sr_mulher'
self.__title__ ='Inscrição e Identificação da Mulher'
self.__model_name__ = __model_name__
self.__list_edit_mode__ = 'edit'
self.__get_options__ = ['nome'] # define tambem o campo a ser mostrado no m2m, independentemente da descricao no field do m2m
self.__order_by__ = 'sr_mulher.nome'
#choice field com a estrutura de saude
self.numero_inscricao = integer_field(view_order = 1, name = 'Nº de Inscrição', size = 40)
self.nome = string_field(view_order = 2, name = 'Nome Completo', size = 70, onlist = True)
self.data_nascimento = date_field(view_order = 3, name = 'Data Nascimento', size=40, args = 'required', onlist = True)
self.escolaridade = combo_field(view_order = 4, name = 'Escolaridade', size = 40, default = '', options = [('analfabeta','Analfabeta'), ('primaria','Primária'), ('secundaria','Secundária'), ('mais','Mais')], onlist = True)
self.telefone = string_field(view_order = 5, name = 'Telefone', size = 40, onlist = True)
self.endereco_familia = text_field(view_order=6, name='Endereço Familia', size=70, args="rows=30", onlist=False, search=False)
self.endereco_actual = text_field(view_order=7, name='Endereço Fixo Actual', size=70, args="rows=30", onlist=False, search=False)
self.observacoes = text_field(view_order=8, name='Observações', size=80, args="rows=30", onlist=False, search=False)
self.estado = combo_field(view_order = 9, name = 'Estado', size = 40, default = 'active', options = [('active','Activo'), ('canceled','Cancelado')], onlist = True)
| identifier_body | |
sr_mulher.py | # !/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
ERP+
"""
__author__ = 'CVtek dev'
__credits__ = [] | __status__ = "Development"
__model_name__ = 'sr_mulher.SRMulher'
import auth, base_models
from orm import *
from form import *
class SRMulher(Model, View):
def __init__(self, **kargs):
Model.__init__(self, **kargs)
self.__name__ = 'sr_mulher'
self.__title__ ='Inscrição e Identificação da Mulher'
self.__model_name__ = __model_name__
self.__list_edit_mode__ = 'edit'
self.__get_options__ = ['nome'] # define tambem o campo a ser mostrado no m2m, independentemente da descricao no field do m2m
self.__order_by__ = 'sr_mulher.nome'
#choice field com a estrutura de saude
self.numero_inscricao = integer_field(view_order = 1, name = 'Nº de Inscrição', size = 40)
self.nome = string_field(view_order = 2, name = 'Nome Completo', size = 70, onlist = True)
self.data_nascimento = date_field(view_order = 3, name = 'Data Nascimento', size=40, args = 'required', onlist = True)
self.escolaridade = combo_field(view_order = 4, name = 'Escolaridade', size = 40, default = '', options = [('analfabeta','Analfabeta'), ('primaria','Primária'), ('secundaria','Secundária'), ('mais','Mais')], onlist = True)
self.telefone = string_field(view_order = 5, name = 'Telefone', size = 40, onlist = True)
self.endereco_familia = text_field(view_order=6, name='Endereço Familia', size=70, args="rows=30", onlist=False, search=False)
self.endereco_actual = text_field(view_order=7, name='Endereço Fixo Actual', size=70, args="rows=30", onlist=False, search=False)
self.observacoes = text_field(view_order=8, name='Observações', size=80, args="rows=30", onlist=False, search=False)
self.estado = combo_field(view_order = 9, name = 'Estado', size = 40, default = 'active', options = [('active','Activo'), ('canceled','Cancelado')], onlist = True) | __version__ = "1.0"
__maintainer__ = "CVTek dev" | random_line_split |
crypto.ts | import crypto from 'crypto';
import { Faker } from '../faker';
export class Crypto {
private CHARACTERS: string = '0123456789abcdefghijklmnopqrstuvwxyz';
private readonly faker: Faker;
constructor(faker: Faker) {
this.faker = faker;
}
public md5(): string {
const hash = crypto.createHash('md5');
const array = this.CHARACTERS.split('');
const characters = this.faker.Random.assortment(array, 255).join('');
hash.update(characters);
return hash.digest('hex');
}
public sha1(): string {
const hash = crypto.createHash('sha1');
const array = this.CHARACTERS.split('');
const characters = this.faker.Random.assortment(array, 255).join('');
hash.update(characters);
return hash.digest('hex');
}
public | (): string {
const hash = crypto.createHash('sha256');
const array = this.CHARACTERS.split('');
const characters = this.faker.Random.assortment(array, 255).join('');
hash.update(characters);
return hash.digest('hex');
}
}
| sha256 | identifier_name |
crypto.ts | import crypto from 'crypto';
import { Faker } from '../faker';
export class Crypto {
private CHARACTERS: string = '0123456789abcdefghijklmnopqrstuvwxyz';
private readonly faker: Faker;
constructor(faker: Faker) {
this.faker = faker;
}
public md5(): string {
const hash = crypto.createHash('md5');
const array = this.CHARACTERS.split('');
const characters = this.faker.Random.assortment(array, 255).join('');
hash.update(characters);
return hash.digest('hex');
}
public sha1(): string {
const hash = crypto.createHash('sha1');
const array = this.CHARACTERS.split('');
const characters = this.faker.Random.assortment(array, 255).join('');
hash.update(characters);
return hash.digest('hex');
}
public sha256(): string |
}
| {
const hash = crypto.createHash('sha256');
const array = this.CHARACTERS.split('');
const characters = this.faker.Random.assortment(array, 255).join('');
hash.update(characters);
return hash.digest('hex');
} | identifier_body |
crypto.ts | import crypto from 'crypto';
import { Faker } from '../faker';
export class Crypto {
private CHARACTERS: string = '0123456789abcdefghijklmnopqrstuvwxyz';
| constructor(faker: Faker) {
this.faker = faker;
}
public md5(): string {
const hash = crypto.createHash('md5');
const array = this.CHARACTERS.split('');
const characters = this.faker.Random.assortment(array, 255).join('');
hash.update(characters);
return hash.digest('hex');
}
public sha1(): string {
const hash = crypto.createHash('sha1');
const array = this.CHARACTERS.split('');
const characters = this.faker.Random.assortment(array, 255).join('');
hash.update(characters);
return hash.digest('hex');
}
public sha256(): string {
const hash = crypto.createHash('sha256');
const array = this.CHARACTERS.split('');
const characters = this.faker.Random.assortment(array, 255).join('');
hash.update(characters);
return hash.digest('hex');
}
} | private readonly faker: Faker;
| random_line_split |
codeBlockSelectLang.element.tsx | import { EG, p } from '@web-companions/gfc';
import { render } from 'lit-html2';
import { HLJS_LANGUAGES } from './utils';
export const codeBlockSelectLangElement = EG({
props: {
domCodeEl: p.req<HTMLElement>(),
selectedLanguage: p.req<string>(),
onChange: p.req<(language: string | null) => void>(),
},
})(function* (params) {
//
const updateAttributes = (event: Event) => {
const target = event.target as HTMLSelectElement;
params.onChange(target.value === 'null' ? null : target.value);
};
while (true) {
yield render(
<>
<pre>{params.domCodeEl}</pre>
<select class='hljs-codeblock__select' contentEditable={false} value={params.selectedLanguage} onchange={updateAttributes}>
<option value="null" selected={params.selectedLanguage == null}>
auto
</option>
<option disabled={true}>—</option>
{HLJS_LANGUAGES.map((lang) => (
<option key={lang} value={lang} selected={params.selectedLanguage === lang}>
{lang}
</option>
))}
</select>
</>,
this
); | }
}); | random_line_split | |
codeBlockSelectLang.element.tsx | import { EG, p } from '@web-companions/gfc';
import { render } from 'lit-html2';
import { HLJS_LANGUAGES } from './utils';
export const codeBlockSelectLangElement = EG({
props: {
domCodeEl: p.req<HTMLElement>(),
selectedLanguage: p.req<string>(),
onChange: p.req<(language: string | null) => void>(),
},
})(function* (params) {
//
const updateAttributes = (event: Event) => {
const target = event.target as HTMLSelectElement;
params.onChange(target.value === 'null' ? null : target.value);
};
while (true) | );
| {
yield render(
<>
<pre>{params.domCodeEl}</pre>
<select class='hljs-codeblock__select' contentEditable={false} value={params.selectedLanguage} onchange={updateAttributes}>
<option value="null" selected={params.selectedLanguage == null}>
auto
</option>
<option disabled={true}>—</option>
{HLJS_LANGUAGES.map((lang) => (
<option key={lang} value={lang} selected={params.selectedLanguage === lang}>
{lang}
</option>
))}
</select>
</>,
this
);
}
} | conditional_block |
profile-link-text.component.ts | // Copyright 2014 The Oppia Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview Component for creating text links to a user's profile page.
*/
import { Component, Input } from '@angular/core';
import { downgradeComponent } from '@angular/upgrade/static';
import { AppConstants } from 'app.constants';
@Component({
selector: 'profile-link-text',
templateUrl: './profile-link-text.component.html',
styleUrls: []
})
export class ProfileLinkTextComponent {
// These properties are initialized using Angular lifecycle hooks
// and we need to do non-null assertion, for more information see
// https://github.com/oppia/oppia/wiki/Guide-on-defining-types#ts-7-1
@Input() username!: string;
profileUrl = (
'/' + AppConstants.PAGES_REGISTERED_WITH_FRONTEND.PROFILE.ROUTE +
'/' + this.username
);
constructor() {}
isUsernameLinkable(username: string): boolean |
}
angular.module('oppia').directive(
'profileLinkText', downgradeComponent(
{component: ProfileLinkTextComponent}));
| {
return ['admin', 'OppiaMigrationBot'].indexOf(username) === -1;
} | identifier_body |
profile-link-text.component.ts | // Copyright 2014 The Oppia Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview Component for creating text links to a user's profile page.
*/
import { Component, Input } from '@angular/core';
import { downgradeComponent } from '@angular/upgrade/static';
import { AppConstants } from 'app.constants';
@Component({
selector: 'profile-link-text',
templateUrl: './profile-link-text.component.html',
styleUrls: []
})
export class ProfileLinkTextComponent {
// These properties are initialized using Angular lifecycle hooks
// and we need to do non-null assertion, for more information see
// https://github.com/oppia/oppia/wiki/Guide-on-defining-types#ts-7-1
@Input() username!: string;
profileUrl = (
'/' + AppConstants.PAGES_REGISTERED_WITH_FRONTEND.PROFILE.ROUTE +
'/' + this.username
);
constructor() {}
| (username: string): boolean {
return ['admin', 'OppiaMigrationBot'].indexOf(username) === -1;
}
}
angular.module('oppia').directive(
'profileLinkText', downgradeComponent(
{component: ProfileLinkTextComponent}));
| isUsernameLinkable | identifier_name |
profile-link-text.component.ts | // Copyright 2014 The Oppia Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/** | */
import { Component, Input } from '@angular/core';
import { downgradeComponent } from '@angular/upgrade/static';
import { AppConstants } from 'app.constants';
@Component({
selector: 'profile-link-text',
templateUrl: './profile-link-text.component.html',
styleUrls: []
})
export class ProfileLinkTextComponent {
// These properties are initialized using Angular lifecycle hooks
// and we need to do non-null assertion, for more information see
// https://github.com/oppia/oppia/wiki/Guide-on-defining-types#ts-7-1
@Input() username!: string;
profileUrl = (
'/' + AppConstants.PAGES_REGISTERED_WITH_FRONTEND.PROFILE.ROUTE +
'/' + this.username
);
constructor() {}
isUsernameLinkable(username: string): boolean {
return ['admin', 'OppiaMigrationBot'].indexOf(username) === -1;
}
}
angular.module('oppia').directive(
'profileLinkText', downgradeComponent(
{component: ProfileLinkTextComponent})); | * @fileoverview Component for creating text links to a user's profile page. | random_line_split |
block_header.rs | use Encode;
use VarInt;
#[derive(Debug, Encode, PartialEq)]
/// 4 version int32_t Block version information (note, this is signed)
/// 32 prev_block char[32] The hash value of the previous block this particular block references
/// 32 merkle_root char[32] The reference to a Merkle tree collection which is a hash of all transactions related to this block
/// 4 timestamp uint32_t A timestamp recording when this block was created (Will overflow in 2106[2])
/// 4 bits uint32_t The calculated difficulty target being used for this block
/// 4 nonce uint32_t The nonce used to generate this block… to allow variations of the header and compute different hashes
/// 1 txn_count var_int Number of transaction entries, this value is always 0
pub struct Bl |
pub version: i32,
pub prev_block: [u8; 32],
pub merkle_root: [u8; 32],
pub timestamp: u32,
pub bits: u32,
pub nonce: u32,
/// txn_count is a var_int on the wire
pub txn_count: VarInt,
}
| ockHeader { | identifier_name |
block_header.rs | use Encode;
use VarInt;
#[derive(Debug, Encode, PartialEq)]
/// 4 version int32_t Block version information (note, this is signed)
/// 32 prev_block char[32] The hash value of the previous block this particular block references
/// 32 merkle_root char[32] The reference to a Merkle tree collection which is a hash of all transactions related to this block
/// 4 timestamp uint32_t A timestamp recording when this block was created (Will overflow in 2106[2])
/// 4 bits uint32_t The calculated difficulty target being used for this block
/// 4 nonce uint32_t The nonce used to generate this block… to allow variations of the header and compute different hashes
/// 1 txn_count var_int Number of transaction entries, this value is always 0
pub struct BlockHeader {
pub version: i32,
pub prev_block: [u8; 32],
pub merkle_root: [u8; 32],
pub timestamp: u32,
pub bits: u32,
pub nonce: u32,
/// txn_count is a var_int on the wire | } | pub txn_count: VarInt, | random_line_split |
__init__.py | #!/usr/bin/python
#-*- coding:utf-8 -*-
__author__ = 'david'
import numpy as np
import nibabel as nib
import resources as rs
# from vispy import app
from plot import Canvas
import matplotlib.pyplot as plt
import gc
np.random.seed()
class Clarity(object):
def __init__(self,token,imgfile=None,pointsfile=None):
if token not in rs.TOKENS:
raise ValueError("Token %s not found."%(token)) | self._pointsfile = pointsfile
self._img = None # img data
self._points = None # [[x],[y],[z],[v]]
self._shape = None # (x,y,z)
self._max = None # max value
def loadImg(self, path=None, info=False):
if path is None:
path = rs.RAW_DATA_PATH
pathname = path+self._token+".img"
img = nib.load(pathname)
if info:
print(img)
self._img = img.get_data()[:,:,:,0]
self._shape = self._img.shape
self._max = np.max(self._img)
print("Image Loaded: %s"%(pathname))
return self
def loadEqImg(self, path=None, info=False):
if path is None:
path = rs.RAW_DATA_PATH
pathname = path+self._token+".nii"
img = nib.load(pathname)
if info:
print(img)
self._img = img.get_data()[:,:,:,0]
self._shape = self._img.shape
self._max = np.max(self._img)
print("Image Loaded: %s"%(pathname))
return self
def getShape(self):
return self._shape
def getMax(self):
return self._max
def discardImg(self):
del self._img
gc.collect()
return self
def getHistogram(self,bins,range,density=True):
if self._img is None:
raise ValueError("Img haven't loaded, please call loadImg() first.")
return np.histogram(self._img.flatten(), bins=bins, range=range, density=density)
def imgToPoints(self, threshold=0.1, sample=0.5, optimize=True):
if not 0 <= threshold < 1:
raise ValueError("Threshold should be within [0,1).")
if not 0 < sample <= 1:
raise ValueError("Sample rate should be within (0,1].")
if self._img is None:
raise ValueError("Img haven't loaded, please call loadImg() first.")
total = self._shape[0]*self._shape[1]*self._shape[2]
print("Coverting to points...\ntoken=%s\ntotal=%d\nmax=%f\nthreshold=%f\nsample=%f"\
%(self._token,total,self._max,threshold,sample))
print("(This will take couple minutes)")
# threshold
filt = self._img > threshold * self._max
x, y, z = np.where(filt)
v = self._img[filt]
if optimize:
self.discardImg()
v = np.int16(255*(np.float32(v)/np.float32(self._max)))
l = v.shape
print("Above threshold=%d"%(l))
# sample
if sample < 1.0:
filt = np.random.random(size=l) < sample
x = x[filt]
y = y[filt]
z = z[filt]
v = v[filt]
self._points = np.vstack([x,y,z,v])
self._points = np.transpose(self._points)
print("Samples=%d"%(self._points.shape[0]))
print("Finished")
return self
def loadPoints(self,path=None):
if path is None:
path = rs.POINTS_DATA_PATH
pathname = path+self._token+".csv"
self._points = np.loadtxt(pathname,dtype=np.int16,delimiter=',')
print("Points Loaded: %s"%(pathname))
return self
def savePoints(self,path=None):
if self._points is None:
raise ValueError("Points is empty, please call imgToPoints() first.")
if path is None:
path = rs.POINTS_DATA_PATH
pathname = path+self._token+".csv"
np.savetxt(pathname,self._points,fmt='%d',delimiter=',')
return self
def centralize(self):
# Centralize the data
# use mean or median
centerX = np.mean(self._points[:,0])
centerY = np.mean(self._points[:,1])
centerZ = np.mean(self._points[:,2])
self._points[:,0] -= np.int16(centerX)
self._points[:,1] -= np.int16(centerY)
self._points[:,2] -= np.int16(centerZ)
return self
def histogramEqualize(self,scale=30):
# get image histogram
imhist, bins = np.histogram(self._points[:,3],256,density=True)
cdf = imhist.cumsum() # cumulative distribution function
cdf = scale * cdf / cdf[-1] # normalize
# use linear interpolation of cdf to find new pixel values
return np.interp(self._points[:,3],bins[:-1],cdf)
def showHistogram(self,bins=255):
plt.hist(self._points[:,3],bins=bins)
plt.title("%s Points Histogram"%(self._token))
plt.ylabel("count")
plt.xlabel("level")
plt.grid()
plt.show()
def show(self):
if self._points is None:
raise ValueError("Points is empty, please call imgToPoints() first.")
# centralize
self.centralize()
# colors
colors = np.array(np.abs(self._points[:,:3]),dtype=np.float32)
mx = np.max(colors[:,0])
my = np.max(colors[:,1])
mz = np.max(colors[:,2])
brighter = 0.1
colors[:,0]/=mx+brighter
colors[:,1]/=my+brighter
colors[:,2]/=mz+brighter
alpha = np.empty((len(colors[:,0]),1))
alpha.fill(0.8)
colors = np.hstack([colors,alpha])
# sizes
sizes = self.histogramEqualize()
# visualize
c = Canvas(self._points[:,:3],colors,sizes)
app.run()
if __name__ == '__main__':
pass |
self._token = token
self._imgfile = imgfile | random_line_split |
__init__.py | #!/usr/bin/python
#-*- coding:utf-8 -*-
__author__ = 'david'
import numpy as np
import nibabel as nib
import resources as rs
# from vispy import app
from plot import Canvas
import matplotlib.pyplot as plt
import gc
np.random.seed()
class Clarity(object):
def __init__(self,token,imgfile=None,pointsfile=None):
if token not in rs.TOKENS:
raise ValueError("Token %s not found."%(token))
self._token = token
self._imgfile = imgfile
self._pointsfile = pointsfile
self._img = None # img data
self._points = None # [[x],[y],[z],[v]]
self._shape = None # (x,y,z)
self._max = None # max value
def loadImg(self, path=None, info=False):
if path is None:
path = rs.RAW_DATA_PATH
pathname = path+self._token+".img"
img = nib.load(pathname)
if info:
print(img)
self._img = img.get_data()[:,:,:,0]
self._shape = self._img.shape
self._max = np.max(self._img)
print("Image Loaded: %s"%(pathname))
return self
def loadEqImg(self, path=None, info=False):
if path is None:
path = rs.RAW_DATA_PATH
pathname = path+self._token+".nii"
img = nib.load(pathname)
if info:
print(img)
self._img = img.get_data()[:,:,:,0]
self._shape = self._img.shape
self._max = np.max(self._img)
print("Image Loaded: %s"%(pathname))
return self
def getShape(self):
return self._shape
def getMax(self):
return self._max
def discardImg(self):
del self._img
gc.collect()
return self
def getHistogram(self,bins,range,density=True):
if self._img is None:
raise ValueError("Img haven't loaded, please call loadImg() first.")
return np.histogram(self._img.flatten(), bins=bins, range=range, density=density)
def imgToPoints(self, threshold=0.1, sample=0.5, optimize=True):
|
def loadPoints(self,path=None):
if path is None:
path = rs.POINTS_DATA_PATH
pathname = path+self._token+".csv"
self._points = np.loadtxt(pathname,dtype=np.int16,delimiter=',')
print("Points Loaded: %s"%(pathname))
return self
def savePoints(self,path=None):
if self._points is None:
raise ValueError("Points is empty, please call imgToPoints() first.")
if path is None:
path = rs.POINTS_DATA_PATH
pathname = path+self._token+".csv"
np.savetxt(pathname,self._points,fmt='%d',delimiter=',')
return self
def centralize(self):
# Centralize the data
# use mean or median
centerX = np.mean(self._points[:,0])
centerY = np.mean(self._points[:,1])
centerZ = np.mean(self._points[:,2])
self._points[:,0] -= np.int16(centerX)
self._points[:,1] -= np.int16(centerY)
self._points[:,2] -= np.int16(centerZ)
return self
def histogramEqualize(self,scale=30):
# get image histogram
imhist, bins = np.histogram(self._points[:,3],256,density=True)
cdf = imhist.cumsum() # cumulative distribution function
cdf = scale * cdf / cdf[-1] # normalize
# use linear interpolation of cdf to find new pixel values
return np.interp(self._points[:,3],bins[:-1],cdf)
def showHistogram(self,bins=255):
plt.hist(self._points[:,3],bins=bins)
plt.title("%s Points Histogram"%(self._token))
plt.ylabel("count")
plt.xlabel("level")
plt.grid()
plt.show()
def show(self):
if self._points is None:
raise ValueError("Points is empty, please call imgToPoints() first.")
# centralize
self.centralize()
# colors
colors = np.array(np.abs(self._points[:,:3]),dtype=np.float32)
mx = np.max(colors[:,0])
my = np.max(colors[:,1])
mz = np.max(colors[:,2])
brighter = 0.1
colors[:,0]/=mx+brighter
colors[:,1]/=my+brighter
colors[:,2]/=mz+brighter
alpha = np.empty((len(colors[:,0]),1))
alpha.fill(0.8)
colors = np.hstack([colors,alpha])
# sizes
sizes = self.histogramEqualize()
# visualize
c = Canvas(self._points[:,:3],colors,sizes)
app.run()
if __name__ == '__main__':
pass
| if not 0 <= threshold < 1:
raise ValueError("Threshold should be within [0,1).")
if not 0 < sample <= 1:
raise ValueError("Sample rate should be within (0,1].")
if self._img is None:
raise ValueError("Img haven't loaded, please call loadImg() first.")
total = self._shape[0]*self._shape[1]*self._shape[2]
print("Coverting to points...\ntoken=%s\ntotal=%d\nmax=%f\nthreshold=%f\nsample=%f"\
%(self._token,total,self._max,threshold,sample))
print("(This will take couple minutes)")
# threshold
filt = self._img > threshold * self._max
x, y, z = np.where(filt)
v = self._img[filt]
if optimize:
self.discardImg()
v = np.int16(255*(np.float32(v)/np.float32(self._max)))
l = v.shape
print("Above threshold=%d"%(l))
# sample
if sample < 1.0:
filt = np.random.random(size=l) < sample
x = x[filt]
y = y[filt]
z = z[filt]
v = v[filt]
self._points = np.vstack([x,y,z,v])
self._points = np.transpose(self._points)
print("Samples=%d"%(self._points.shape[0]))
print("Finished")
return self | identifier_body |
__init__.py | #!/usr/bin/python
#-*- coding:utf-8 -*-
__author__ = 'david'
import numpy as np
import nibabel as nib
import resources as rs
# from vispy import app
from plot import Canvas
import matplotlib.pyplot as plt
import gc
np.random.seed()
class Clarity(object):
def __init__(self,token,imgfile=None,pointsfile=None):
if token not in rs.TOKENS:
raise ValueError("Token %s not found."%(token))
self._token = token
self._imgfile = imgfile
self._pointsfile = pointsfile
self._img = None # img data
self._points = None # [[x],[y],[z],[v]]
self._shape = None # (x,y,z)
self._max = None # max value
def loadImg(self, path=None, info=False):
if path is None:
path = rs.RAW_DATA_PATH
pathname = path+self._token+".img"
img = nib.load(pathname)
if info:
print(img)
self._img = img.get_data()[:,:,:,0]
self._shape = self._img.shape
self._max = np.max(self._img)
print("Image Loaded: %s"%(pathname))
return self
def loadEqImg(self, path=None, info=False):
if path is None:
path = rs.RAW_DATA_PATH
pathname = path+self._token+".nii"
img = nib.load(pathname)
if info:
print(img)
self._img = img.get_data()[:,:,:,0]
self._shape = self._img.shape
self._max = np.max(self._img)
print("Image Loaded: %s"%(pathname))
return self
def getShape(self):
return self._shape
def getMax(self):
return self._max
def discardImg(self):
del self._img
gc.collect()
return self
def getHistogram(self,bins,range,density=True):
if self._img is None:
raise ValueError("Img haven't loaded, please call loadImg() first.")
return np.histogram(self._img.flatten(), bins=bins, range=range, density=density)
def imgToPoints(self, threshold=0.1, sample=0.5, optimize=True):
if not 0 <= threshold < 1:
raise ValueError("Threshold should be within [0,1).")
if not 0 < sample <= 1:
raise ValueError("Sample rate should be within (0,1].")
if self._img is None:
raise ValueError("Img haven't loaded, please call loadImg() first.")
total = self._shape[0]*self._shape[1]*self._shape[2]
print("Coverting to points...\ntoken=%s\ntotal=%d\nmax=%f\nthreshold=%f\nsample=%f"\
%(self._token,total,self._max,threshold,sample))
print("(This will take couple minutes)")
# threshold
filt = self._img > threshold * self._max
x, y, z = np.where(filt)
v = self._img[filt]
if optimize:
self.discardImg()
v = np.int16(255*(np.float32(v)/np.float32(self._max)))
l = v.shape
print("Above threshold=%d"%(l))
# sample
if sample < 1.0:
filt = np.random.random(size=l) < sample
x = x[filt]
y = y[filt]
z = z[filt]
v = v[filt]
self._points = np.vstack([x,y,z,v])
self._points = np.transpose(self._points)
print("Samples=%d"%(self._points.shape[0]))
print("Finished")
return self
def loadPoints(self,path=None):
if path is None:
path = rs.POINTS_DATA_PATH
pathname = path+self._token+".csv"
self._points = np.loadtxt(pathname,dtype=np.int16,delimiter=',')
print("Points Loaded: %s"%(pathname))
return self
def savePoints(self,path=None):
if self._points is None:
raise ValueError("Points is empty, please call imgToPoints() first.")
if path is None:
path = rs.POINTS_DATA_PATH
pathname = path+self._token+".csv"
np.savetxt(pathname,self._points,fmt='%d',delimiter=',')
return self
def centralize(self):
# Centralize the data
# use mean or median
centerX = np.mean(self._points[:,0])
centerY = np.mean(self._points[:,1])
centerZ = np.mean(self._points[:,2])
self._points[:,0] -= np.int16(centerX)
self._points[:,1] -= np.int16(centerY)
self._points[:,2] -= np.int16(centerZ)
return self
def | (self,scale=30):
# get image histogram
imhist, bins = np.histogram(self._points[:,3],256,density=True)
cdf = imhist.cumsum() # cumulative distribution function
cdf = scale * cdf / cdf[-1] # normalize
# use linear interpolation of cdf to find new pixel values
return np.interp(self._points[:,3],bins[:-1],cdf)
def showHistogram(self,bins=255):
plt.hist(self._points[:,3],bins=bins)
plt.title("%s Points Histogram"%(self._token))
plt.ylabel("count")
plt.xlabel("level")
plt.grid()
plt.show()
def show(self):
if self._points is None:
raise ValueError("Points is empty, please call imgToPoints() first.")
# centralize
self.centralize()
# colors
colors = np.array(np.abs(self._points[:,:3]),dtype=np.float32)
mx = np.max(colors[:,0])
my = np.max(colors[:,1])
mz = np.max(colors[:,2])
brighter = 0.1
colors[:,0]/=mx+brighter
colors[:,1]/=my+brighter
colors[:,2]/=mz+brighter
alpha = np.empty((len(colors[:,0]),1))
alpha.fill(0.8)
colors = np.hstack([colors,alpha])
# sizes
sizes = self.histogramEqualize()
# visualize
c = Canvas(self._points[:,:3],colors,sizes)
app.run()
if __name__ == '__main__':
pass
| histogramEqualize | identifier_name |
__init__.py | #!/usr/bin/python
#-*- coding:utf-8 -*-
__author__ = 'david'
import numpy as np
import nibabel as nib
import resources as rs
# from vispy import app
from plot import Canvas
import matplotlib.pyplot as plt
import gc
np.random.seed()
class Clarity(object):
def __init__(self,token,imgfile=None,pointsfile=None):
if token not in rs.TOKENS:
raise ValueError("Token %s not found."%(token))
self._token = token
self._imgfile = imgfile
self._pointsfile = pointsfile
self._img = None # img data
self._points = None # [[x],[y],[z],[v]]
self._shape = None # (x,y,z)
self._max = None # max value
def loadImg(self, path=None, info=False):
if path is None:
path = rs.RAW_DATA_PATH
pathname = path+self._token+".img"
img = nib.load(pathname)
if info:
print(img)
self._img = img.get_data()[:,:,:,0]
self._shape = self._img.shape
self._max = np.max(self._img)
print("Image Loaded: %s"%(pathname))
return self
def loadEqImg(self, path=None, info=False):
if path is None:
path = rs.RAW_DATA_PATH
pathname = path+self._token+".nii"
img = nib.load(pathname)
if info:
print(img)
self._img = img.get_data()[:,:,:,0]
self._shape = self._img.shape
self._max = np.max(self._img)
print("Image Loaded: %s"%(pathname))
return self
def getShape(self):
return self._shape
def getMax(self):
return self._max
def discardImg(self):
del self._img
gc.collect()
return self
def getHistogram(self,bins,range,density=True):
if self._img is None:
|
return np.histogram(self._img.flatten(), bins=bins, range=range, density=density)
def imgToPoints(self, threshold=0.1, sample=0.5, optimize=True):
if not 0 <= threshold < 1:
raise ValueError("Threshold should be within [0,1).")
if not 0 < sample <= 1:
raise ValueError("Sample rate should be within (0,1].")
if self._img is None:
raise ValueError("Img haven't loaded, please call loadImg() first.")
total = self._shape[0]*self._shape[1]*self._shape[2]
print("Coverting to points...\ntoken=%s\ntotal=%d\nmax=%f\nthreshold=%f\nsample=%f"\
%(self._token,total,self._max,threshold,sample))
print("(This will take couple minutes)")
# threshold
filt = self._img > threshold * self._max
x, y, z = np.where(filt)
v = self._img[filt]
if optimize:
self.discardImg()
v = np.int16(255*(np.float32(v)/np.float32(self._max)))
l = v.shape
print("Above threshold=%d"%(l))
# sample
if sample < 1.0:
filt = np.random.random(size=l) < sample
x = x[filt]
y = y[filt]
z = z[filt]
v = v[filt]
self._points = np.vstack([x,y,z,v])
self._points = np.transpose(self._points)
print("Samples=%d"%(self._points.shape[0]))
print("Finished")
return self
def loadPoints(self,path=None):
if path is None:
path = rs.POINTS_DATA_PATH
pathname = path+self._token+".csv"
self._points = np.loadtxt(pathname,dtype=np.int16,delimiter=',')
print("Points Loaded: %s"%(pathname))
return self
def savePoints(self,path=None):
if self._points is None:
raise ValueError("Points is empty, please call imgToPoints() first.")
if path is None:
path = rs.POINTS_DATA_PATH
pathname = path+self._token+".csv"
np.savetxt(pathname,self._points,fmt='%d',delimiter=',')
return self
def centralize(self):
# Centralize the data
# use mean or median
centerX = np.mean(self._points[:,0])
centerY = np.mean(self._points[:,1])
centerZ = np.mean(self._points[:,2])
self._points[:,0] -= np.int16(centerX)
self._points[:,1] -= np.int16(centerY)
self._points[:,2] -= np.int16(centerZ)
return self
def histogramEqualize(self,scale=30):
# get image histogram
imhist, bins = np.histogram(self._points[:,3],256,density=True)
cdf = imhist.cumsum() # cumulative distribution function
cdf = scale * cdf / cdf[-1] # normalize
# use linear interpolation of cdf to find new pixel values
return np.interp(self._points[:,3],bins[:-1],cdf)
def showHistogram(self,bins=255):
plt.hist(self._points[:,3],bins=bins)
plt.title("%s Points Histogram"%(self._token))
plt.ylabel("count")
plt.xlabel("level")
plt.grid()
plt.show()
def show(self):
if self._points is None:
raise ValueError("Points is empty, please call imgToPoints() first.")
# centralize
self.centralize()
# colors
colors = np.array(np.abs(self._points[:,:3]),dtype=np.float32)
mx = np.max(colors[:,0])
my = np.max(colors[:,1])
mz = np.max(colors[:,2])
brighter = 0.1
colors[:,0]/=mx+brighter
colors[:,1]/=my+brighter
colors[:,2]/=mz+brighter
alpha = np.empty((len(colors[:,0]),1))
alpha.fill(0.8)
colors = np.hstack([colors,alpha])
# sizes
sizes = self.histogramEqualize()
# visualize
c = Canvas(self._points[:,:3],colors,sizes)
app.run()
if __name__ == '__main__':
pass
| raise ValueError("Img haven't loaded, please call loadImg() first.") | conditional_block |
test_universe.py | import xml.etree.ElementTree as ET
import numpy as np
import openmc
import pytest
from tests.unit_tests import assert_unbounded
def test_basic():
c1 = openmc.Cell()
c2 = openmc.Cell()
c3 = openmc.Cell()
u = openmc.Universe(name='cool', cells=(c1, c2, c3))
assert u.name == 'cool'
cells = set(u.cells.values())
assert not (cells ^ {c1, c2, c3})
# Test __repr__
repr(u)
with pytest.raises(TypeError):
u.add_cell(openmc.Material())
with pytest.raises(TypeError): |
u.remove_cell(c3)
cells = set(u.cells.values())
assert not (cells ^ {c1, c2})
u.clear_cells()
assert not set(u.cells)
def test_bounding_box():
cyl1 = openmc.ZCylinder(r=1.0)
cyl2 = openmc.ZCylinder(r=2.0)
c1 = openmc.Cell(region=-cyl1)
c2 = openmc.Cell(region=+cyl1 & -cyl2)
u = openmc.Universe(cells=[c1, c2])
ll, ur = u.bounding_box
assert ll == pytest.approx((-2., -2., -np.inf))
assert ur == pytest.approx((2., 2., np.inf))
u = openmc.Universe()
assert_unbounded(u)
def test_plot(run_in_tmpdir, sphere_model):
m = sphere_model.materials[0]
univ = sphere_model.geometry.root_universe
colors = {m: 'limegreen'}
for basis in ('xy', 'yz', 'xz'):
univ.plot(
basis=basis,
pixels=(10, 10),
color_by='material',
colors=colors,
)
def test_get_nuclides(uo2):
c = openmc.Cell(fill=uo2)
univ = openmc.Universe(cells=[c])
nucs = univ.get_nuclides()
assert nucs == ['U235', 'O16']
def test_cells():
cells = [openmc.Cell() for i in range(5)]
cells2 = [openmc.Cell() for i in range(3)]
cells[0].fill = openmc.Universe(cells=cells2)
u = openmc.Universe(cells=cells)
assert not (set(u.cells.values()) ^ set(cells))
all_cells = set(u.get_all_cells().values())
assert not (all_cells ^ set(cells + cells2))
def test_get_all_materials(cell_with_lattice):
cells, mats, univ, lattice = cell_with_lattice
test_mats = set(univ.get_all_materials().values())
assert not (test_mats ^ set(mats))
def test_get_all_universes():
c1 = openmc.Cell()
u1 = openmc.Universe(cells=[c1])
c2 = openmc.Cell()
u2 = openmc.Universe(cells=[c2])
c3 = openmc.Cell(fill=u1)
c4 = openmc.Cell(fill=u2)
u3 = openmc.Universe(cells=[c3, c4])
univs = set(u3.get_all_universes().values())
assert not (univs ^ {u1, u2})
def test_create_xml(cell_with_lattice):
cells = [openmc.Cell() for i in range(5)]
u = openmc.Universe(cells=cells)
geom = ET.Element('geom')
u.create_xml_subelement(geom)
cell_elems = geom.findall('cell')
assert len(cell_elems) == len(cells)
assert all(c.get('universe') == str(u.id) for c in cell_elems)
assert not (set(c.get('id') for c in cell_elems) ^
set(str(c.id) for c in cells)) | u.add_cells(c1) | random_line_split |
test_universe.py | import xml.etree.ElementTree as ET
import numpy as np
import openmc
import pytest
from tests.unit_tests import assert_unbounded
def test_basic():
c1 = openmc.Cell()
c2 = openmc.Cell()
c3 = openmc.Cell()
u = openmc.Universe(name='cool', cells=(c1, c2, c3))
assert u.name == 'cool'
cells = set(u.cells.values())
assert not (cells ^ {c1, c2, c3})
# Test __repr__
repr(u)
with pytest.raises(TypeError):
u.add_cell(openmc.Material())
with pytest.raises(TypeError):
u.add_cells(c1)
u.remove_cell(c3)
cells = set(u.cells.values())
assert not (cells ^ {c1, c2})
u.clear_cells()
assert not set(u.cells)
def test_bounding_box():
cyl1 = openmc.ZCylinder(r=1.0)
cyl2 = openmc.ZCylinder(r=2.0)
c1 = openmc.Cell(region=-cyl1)
c2 = openmc.Cell(region=+cyl1 & -cyl2)
u = openmc.Universe(cells=[c1, c2])
ll, ur = u.bounding_box
assert ll == pytest.approx((-2., -2., -np.inf))
assert ur == pytest.approx((2., 2., np.inf))
u = openmc.Universe()
assert_unbounded(u)
def test_plot(run_in_tmpdir, sphere_model):
|
def test_get_nuclides(uo2):
c = openmc.Cell(fill=uo2)
univ = openmc.Universe(cells=[c])
nucs = univ.get_nuclides()
assert nucs == ['U235', 'O16']
def test_cells():
cells = [openmc.Cell() for i in range(5)]
cells2 = [openmc.Cell() for i in range(3)]
cells[0].fill = openmc.Universe(cells=cells2)
u = openmc.Universe(cells=cells)
assert not (set(u.cells.values()) ^ set(cells))
all_cells = set(u.get_all_cells().values())
assert not (all_cells ^ set(cells + cells2))
def test_get_all_materials(cell_with_lattice):
cells, mats, univ, lattice = cell_with_lattice
test_mats = set(univ.get_all_materials().values())
assert not (test_mats ^ set(mats))
def test_get_all_universes():
c1 = openmc.Cell()
u1 = openmc.Universe(cells=[c1])
c2 = openmc.Cell()
u2 = openmc.Universe(cells=[c2])
c3 = openmc.Cell(fill=u1)
c4 = openmc.Cell(fill=u2)
u3 = openmc.Universe(cells=[c3, c4])
univs = set(u3.get_all_universes().values())
assert not (univs ^ {u1, u2})
def test_create_xml(cell_with_lattice):
cells = [openmc.Cell() for i in range(5)]
u = openmc.Universe(cells=cells)
geom = ET.Element('geom')
u.create_xml_subelement(geom)
cell_elems = geom.findall('cell')
assert len(cell_elems) == len(cells)
assert all(c.get('universe') == str(u.id) for c in cell_elems)
assert not (set(c.get('id') for c in cell_elems) ^
set(str(c.id) for c in cells))
| m = sphere_model.materials[0]
univ = sphere_model.geometry.root_universe
colors = {m: 'limegreen'}
for basis in ('xy', 'yz', 'xz'):
univ.plot(
basis=basis,
pixels=(10, 10),
color_by='material',
colors=colors,
) | identifier_body |
test_universe.py | import xml.etree.ElementTree as ET
import numpy as np
import openmc
import pytest
from tests.unit_tests import assert_unbounded
def test_basic():
c1 = openmc.Cell()
c2 = openmc.Cell()
c3 = openmc.Cell()
u = openmc.Universe(name='cool', cells=(c1, c2, c3))
assert u.name == 'cool'
cells = set(u.cells.values())
assert not (cells ^ {c1, c2, c3})
# Test __repr__
repr(u)
with pytest.raises(TypeError):
u.add_cell(openmc.Material())
with pytest.raises(TypeError):
u.add_cells(c1)
u.remove_cell(c3)
cells = set(u.cells.values())
assert not (cells ^ {c1, c2})
u.clear_cells()
assert not set(u.cells)
def test_bounding_box():
cyl1 = openmc.ZCylinder(r=1.0)
cyl2 = openmc.ZCylinder(r=2.0)
c1 = openmc.Cell(region=-cyl1)
c2 = openmc.Cell(region=+cyl1 & -cyl2)
u = openmc.Universe(cells=[c1, c2])
ll, ur = u.bounding_box
assert ll == pytest.approx((-2., -2., -np.inf))
assert ur == pytest.approx((2., 2., np.inf))
u = openmc.Universe()
assert_unbounded(u)
def test_plot(run_in_tmpdir, sphere_model):
m = sphere_model.materials[0]
univ = sphere_model.geometry.root_universe
colors = {m: 'limegreen'}
for basis in ('xy', 'yz', 'xz'):
|
def test_get_nuclides(uo2):
c = openmc.Cell(fill=uo2)
univ = openmc.Universe(cells=[c])
nucs = univ.get_nuclides()
assert nucs == ['U235', 'O16']
def test_cells():
cells = [openmc.Cell() for i in range(5)]
cells2 = [openmc.Cell() for i in range(3)]
cells[0].fill = openmc.Universe(cells=cells2)
u = openmc.Universe(cells=cells)
assert not (set(u.cells.values()) ^ set(cells))
all_cells = set(u.get_all_cells().values())
assert not (all_cells ^ set(cells + cells2))
def test_get_all_materials(cell_with_lattice):
cells, mats, univ, lattice = cell_with_lattice
test_mats = set(univ.get_all_materials().values())
assert not (test_mats ^ set(mats))
def test_get_all_universes():
c1 = openmc.Cell()
u1 = openmc.Universe(cells=[c1])
c2 = openmc.Cell()
u2 = openmc.Universe(cells=[c2])
c3 = openmc.Cell(fill=u1)
c4 = openmc.Cell(fill=u2)
u3 = openmc.Universe(cells=[c3, c4])
univs = set(u3.get_all_universes().values())
assert not (univs ^ {u1, u2})
def test_create_xml(cell_with_lattice):
cells = [openmc.Cell() for i in range(5)]
u = openmc.Universe(cells=cells)
geom = ET.Element('geom')
u.create_xml_subelement(geom)
cell_elems = geom.findall('cell')
assert len(cell_elems) == len(cells)
assert all(c.get('universe') == str(u.id) for c in cell_elems)
assert not (set(c.get('id') for c in cell_elems) ^
set(str(c.id) for c in cells))
| univ.plot(
basis=basis,
pixels=(10, 10),
color_by='material',
colors=colors,
) | conditional_block |
test_universe.py | import xml.etree.ElementTree as ET
import numpy as np
import openmc
import pytest
from tests.unit_tests import assert_unbounded
def test_basic():
c1 = openmc.Cell()
c2 = openmc.Cell()
c3 = openmc.Cell()
u = openmc.Universe(name='cool', cells=(c1, c2, c3))
assert u.name == 'cool'
cells = set(u.cells.values())
assert not (cells ^ {c1, c2, c3})
# Test __repr__
repr(u)
with pytest.raises(TypeError):
u.add_cell(openmc.Material())
with pytest.raises(TypeError):
u.add_cells(c1)
u.remove_cell(c3)
cells = set(u.cells.values())
assert not (cells ^ {c1, c2})
u.clear_cells()
assert not set(u.cells)
def test_bounding_box():
cyl1 = openmc.ZCylinder(r=1.0)
cyl2 = openmc.ZCylinder(r=2.0)
c1 = openmc.Cell(region=-cyl1)
c2 = openmc.Cell(region=+cyl1 & -cyl2)
u = openmc.Universe(cells=[c1, c2])
ll, ur = u.bounding_box
assert ll == pytest.approx((-2., -2., -np.inf))
assert ur == pytest.approx((2., 2., np.inf))
u = openmc.Universe()
assert_unbounded(u)
def | (run_in_tmpdir, sphere_model):
m = sphere_model.materials[0]
univ = sphere_model.geometry.root_universe
colors = {m: 'limegreen'}
for basis in ('xy', 'yz', 'xz'):
univ.plot(
basis=basis,
pixels=(10, 10),
color_by='material',
colors=colors,
)
def test_get_nuclides(uo2):
c = openmc.Cell(fill=uo2)
univ = openmc.Universe(cells=[c])
nucs = univ.get_nuclides()
assert nucs == ['U235', 'O16']
def test_cells():
cells = [openmc.Cell() for i in range(5)]
cells2 = [openmc.Cell() for i in range(3)]
cells[0].fill = openmc.Universe(cells=cells2)
u = openmc.Universe(cells=cells)
assert not (set(u.cells.values()) ^ set(cells))
all_cells = set(u.get_all_cells().values())
assert not (all_cells ^ set(cells + cells2))
def test_get_all_materials(cell_with_lattice):
cells, mats, univ, lattice = cell_with_lattice
test_mats = set(univ.get_all_materials().values())
assert not (test_mats ^ set(mats))
def test_get_all_universes():
c1 = openmc.Cell()
u1 = openmc.Universe(cells=[c1])
c2 = openmc.Cell()
u2 = openmc.Universe(cells=[c2])
c3 = openmc.Cell(fill=u1)
c4 = openmc.Cell(fill=u2)
u3 = openmc.Universe(cells=[c3, c4])
univs = set(u3.get_all_universes().values())
assert not (univs ^ {u1, u2})
def test_create_xml(cell_with_lattice):
cells = [openmc.Cell() for i in range(5)]
u = openmc.Universe(cells=cells)
geom = ET.Element('geom')
u.create_xml_subelement(geom)
cell_elems = geom.findall('cell')
assert len(cell_elems) == len(cells)
assert all(c.get('universe') == str(u.id) for c in cell_elems)
assert not (set(c.get('id') for c in cell_elems) ^
set(str(c.id) for c in cells))
| test_plot | identifier_name |
input-names.ts | /**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {WorkspacePath} from '../../update-tool/file-system';
import {findInputsOnElementWithAttr, findInputsOnElementWithTag} from '../html-parsing/angular';
import {ResolvedResource} from '../../update-tool/component-resource-collector';
import {Migration} from '../../update-tool/migration';
import {InputNameUpgradeData} from '../data';
import {findAllSubstringIndices} from '../typescript/literal';
import {getVersionUpgradeData, UpgradeData} from '../upgrade-data';
/**
* Migration that walks through every template or stylesheet and replaces outdated input
* names to the new input name. Selectors in stylesheets could also target input
* bindings declared as static attribute. See for example:
*
* e.g. `<my-component color="primary">` becomes `my-component[color]`
*/
export class InputNamesMigration extends Migration<UpgradeData> {
/** Change data that upgrades to the specified target version. */
data: InputNameUpgradeData[] = getVersionUpgradeData(this, 'inputNames');
// Only enable the migration rule if there is upgrade data.
enabled = this.data.length !== 0;
visitStylesheet(stylesheet: ResolvedResource): void {
this.data.forEach(name => {
const currentSelector = `[${name.replace}]`;
const updatedSelector = `[${name.replaceWith}]`;
findAllSubstringIndices(stylesheet.content, currentSelector)
.map(offset => stylesheet.start + offset)
.forEach( |
visitTemplate(template: ResolvedResource): void {
this.data.forEach(name => {
const limitedTo = name.limitedTo;
const relativeOffsets: number[] = [];
if (limitedTo.attributes) {
relativeOffsets.push(
...findInputsOnElementWithAttr(template.content, name.replace, limitedTo.attributes));
}
if (limitedTo.elements) {
relativeOffsets.push(
...findInputsOnElementWithTag(template.content, name.replace, limitedTo.elements));
}
relativeOffsets.map(offset => template.start + offset)
.forEach(
start => this._replaceInputName(
template.filePath, start, name.replace.length, name.replaceWith));
});
}
private _replaceInputName(filePath: WorkspacePath, start: number, width: number,
newName: string) {
this.fileSystem.edit(filePath)
.remove(start, width)
.insertRight(start, newName);
}
} | start => this._replaceInputName(
stylesheet.filePath, start, currentSelector.length, updatedSelector));
});
} | random_line_split |
input-names.ts | /**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {WorkspacePath} from '../../update-tool/file-system';
import {findInputsOnElementWithAttr, findInputsOnElementWithTag} from '../html-parsing/angular';
import {ResolvedResource} from '../../update-tool/component-resource-collector';
import {Migration} from '../../update-tool/migration';
import {InputNameUpgradeData} from '../data';
import {findAllSubstringIndices} from '../typescript/literal';
import {getVersionUpgradeData, UpgradeData} from '../upgrade-data';
/**
* Migration that walks through every template or stylesheet and replaces outdated input
* names to the new input name. Selectors in stylesheets could also target input
* bindings declared as static attribute. See for example:
*
* e.g. `<my-component color="primary">` becomes `my-component[color]`
*/
export class InputNamesMigration extends Migration<UpgradeData> {
/** Change data that upgrades to the specified target version. */
data: InputNameUpgradeData[] = getVersionUpgradeData(this, 'inputNames');
// Only enable the migration rule if there is upgrade data.
enabled = this.data.length !== 0;
visitStylesheet(stylesheet: ResolvedResource): void {
this.data.forEach(name => {
const currentSelector = `[${name.replace}]`;
const updatedSelector = `[${name.replaceWith}]`;
findAllSubstringIndices(stylesheet.content, currentSelector)
.map(offset => stylesheet.start + offset)
.forEach(
start => this._replaceInputName(
stylesheet.filePath, start, currentSelector.length, updatedSelector));
});
}
| (template: ResolvedResource): void {
this.data.forEach(name => {
const limitedTo = name.limitedTo;
const relativeOffsets: number[] = [];
if (limitedTo.attributes) {
relativeOffsets.push(
...findInputsOnElementWithAttr(template.content, name.replace, limitedTo.attributes));
}
if (limitedTo.elements) {
relativeOffsets.push(
...findInputsOnElementWithTag(template.content, name.replace, limitedTo.elements));
}
relativeOffsets.map(offset => template.start + offset)
.forEach(
start => this._replaceInputName(
template.filePath, start, name.replace.length, name.replaceWith));
});
}
private _replaceInputName(filePath: WorkspacePath, start: number, width: number,
newName: string) {
this.fileSystem.edit(filePath)
.remove(start, width)
.insertRight(start, newName);
}
}
| visitTemplate | identifier_name |
input-names.ts | /**
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {WorkspacePath} from '../../update-tool/file-system';
import {findInputsOnElementWithAttr, findInputsOnElementWithTag} from '../html-parsing/angular';
import {ResolvedResource} from '../../update-tool/component-resource-collector';
import {Migration} from '../../update-tool/migration';
import {InputNameUpgradeData} from '../data';
import {findAllSubstringIndices} from '../typescript/literal';
import {getVersionUpgradeData, UpgradeData} from '../upgrade-data';
/**
* Migration that walks through every template or stylesheet and replaces outdated input
* names to the new input name. Selectors in stylesheets could also target input
* bindings declared as static attribute. See for example:
*
* e.g. `<my-component color="primary">` becomes `my-component[color]`
*/
export class InputNamesMigration extends Migration<UpgradeData> {
/** Change data that upgrades to the specified target version. */
data: InputNameUpgradeData[] = getVersionUpgradeData(this, 'inputNames');
// Only enable the migration rule if there is upgrade data.
enabled = this.data.length !== 0;
visitStylesheet(stylesheet: ResolvedResource): void {
this.data.forEach(name => {
const currentSelector = `[${name.replace}]`;
const updatedSelector = `[${name.replaceWith}]`;
findAllSubstringIndices(stylesheet.content, currentSelector)
.map(offset => stylesheet.start + offset)
.forEach(
start => this._replaceInputName(
stylesheet.filePath, start, currentSelector.length, updatedSelector));
});
}
visitTemplate(template: ResolvedResource): void {
this.data.forEach(name => {
const limitedTo = name.limitedTo;
const relativeOffsets: number[] = [];
if (limitedTo.attributes) {
relativeOffsets.push(
...findInputsOnElementWithAttr(template.content, name.replace, limitedTo.attributes));
}
if (limitedTo.elements) |
relativeOffsets.map(offset => template.start + offset)
.forEach(
start => this._replaceInputName(
template.filePath, start, name.replace.length, name.replaceWith));
});
}
private _replaceInputName(filePath: WorkspacePath, start: number, width: number,
newName: string) {
this.fileSystem.edit(filePath)
.remove(start, width)
.insertRight(start, newName);
}
}
| {
relativeOffsets.push(
...findInputsOnElementWithTag(template.content, name.replace, limitedTo.elements));
} | conditional_block |
util.d.ts | /**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license | export declare function normalizeGenFileSuffix(srcFileSuffix: string): string;
export declare function summaryFileName(fileName: string): string;
export declare function summaryForJitFileName(fileName: string, forceSourceFile?: boolean): string;
export declare function stripSummaryForJitFileSuffix(filePath: string): string;
export declare function summaryForJitName(symbolName: string): string;
export declare function stripSummaryForJitNameSuffix(symbolName: string): string;
export declare function isLoweredSymbol(name: string): boolean;
export declare function createLoweredSymbol(id: number): string; | */
export declare function ngfactoryFilePath(filePath: string, forceSourceFile?: boolean): string;
export declare function stripGeneratedFileSuffix(filePath: string): string;
export declare function isGeneratedFile(filePath: string): boolean;
export declare function splitTypescriptSuffix(path: string, forceSourceFile?: boolean): string[]; | random_line_split |
mod.rs | use super::{Event, Ins, Prop, Mod, FDVar, Propagator, LeXY, GeXY, LeXYC, GeXYC, LeXC, GeXC};
use std::rc::{Rc, Weak};
/// X = Y
pub struct EqXY;
impl EqXY {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>) {
// TODO merge or at least intersect domains
LeXY::new(model.clone(), x.clone(), y.clone());
GeXY::new(model, x, y);
}
}
/// X = Y + C
pub struct EqXYC;
impl EqXYC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) {
// TODO merge or at least intersect domains
LeXYC::new(model.clone(), x.clone(), y.clone(), c);
GeXYC::new(model, x, y, c);
}
}
/// X = C
pub struct EqXC;
impl EqXC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, c: int) {
// TODO merge
LeXC::new(model.clone(), x.clone(), c);
GeXC::new(model, x, c);
}
}
/// X != Y
pub struct NeqXY;
impl NeqXY {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>) {
NeqXYCxy::new(model, x, y, 0);
}
}
/// X != Y + C
pub struct NeqXYC;
impl NeqXYC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) {
NeqXYCxy::new(model, x, y, c);
}
}
/// X != C
pub struct NeqXC;
#[allow(unused_variable)]
impl NeqXC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, c: int) {
x.remove(c);
}
}
struct NeqXYCxy : Prop {
c: int
}
impl NeqXYCxy {
fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) |
fn x(&self) -> Rc<FDVar> {
self.vars.get(0).clone()
}
fn y(&self) -> Rc<FDVar> {
self.vars.get(1).clone()
}
}
impl Propagator for NeqXYCxy {
fn id(&self) -> uint {
self.id
}
fn model(&self) -> Weak<Mod> {
self.model.clone()
}
fn events(&self) -> Vec<(uint, Event)> {
vec![(self.y().id, Ins), (self.x().id, Ins)]
}
fn propagate(&self) -> Vec<uint> {
if self.x().is_instanciated() {
self.unregister();
self.y().remove(self.x().min() - self.c)
}
else if self.y().is_instanciated() {
self.unregister();
self.x().remove(self.y().min() + self.c)
} else {
vec![]
}
}
}
#[cfg(test)]
mod tests;
| {
let id = model.propagators.borrow().len();
let this = NeqXYCxy { model: model.downgrade(), id: id, vars: vec![x, y], c: c};
let p = Rc::new((box this) as Box<Propagator>);
model.add_prop(p);
} | identifier_body |
mod.rs | use super::{Event, Ins, Prop, Mod, FDVar, Propagator, LeXY, GeXY, LeXYC, GeXYC, LeXC, GeXC};
use std::rc::{Rc, Weak};
/// X = Y
pub struct EqXY;
impl EqXY {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>) {
// TODO merge or at least intersect domains
LeXY::new(model.clone(), x.clone(), y.clone());
GeXY::new(model, x, y);
}
}
/// X = Y + C
pub struct EqXYC;
impl EqXYC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) {
// TODO merge or at least intersect domains
LeXYC::new(model.clone(), x.clone(), y.clone(), c);
GeXYC::new(model, x, y, c);
}
}
/// X = C
pub struct EqXC;
impl EqXC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, c: int) {
// TODO merge
LeXC::new(model.clone(), x.clone(), c);
GeXC::new(model, x, c);
}
}
/// X != Y
pub struct NeqXY;
impl NeqXY {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>) {
NeqXYCxy::new(model, x, y, 0);
}
}
/// X != Y + C
pub struct NeqXYC;
impl NeqXYC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) {
NeqXYCxy::new(model, x, y, c);
}
}
/// X != C
pub struct NeqXC;
#[allow(unused_variable)]
impl NeqXC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, c: int) {
x.remove(c);
}
}
struct NeqXYCxy : Prop {
c: int
}
impl NeqXYCxy {
fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) {
let id = model.propagators.borrow().len();
let this = NeqXYCxy { model: model.downgrade(), id: id, vars: vec![x, y], c: c};
let p = Rc::new((box this) as Box<Propagator>);
model.add_prop(p);
}
fn x(&self) -> Rc<FDVar> {
self.vars.get(0).clone()
}
fn y(&self) -> Rc<FDVar> {
self.vars.get(1).clone()
}
}
impl Propagator for NeqXYCxy {
fn id(&self) -> uint {
self.id
}
fn model(&self) -> Weak<Mod> {
self.model.clone()
}
fn events(&self) -> Vec<(uint, Event)> {
vec![(self.y().id, Ins), (self.x().id, Ins)]
}
fn propagate(&self) -> Vec<uint> {
if self.x().is_instanciated() |
else if self.y().is_instanciated() {
self.unregister();
self.x().remove(self.y().min() + self.c)
} else {
vec![]
}
}
}
#[cfg(test)]
mod tests;
| {
self.unregister();
self.y().remove(self.x().min() - self.c)
} | conditional_block |
mod.rs | use super::{Event, Ins, Prop, Mod, FDVar, Propagator, LeXY, GeXY, LeXYC, GeXYC, LeXC, GeXC};
use std::rc::{Rc, Weak};
/// X = Y
pub struct EqXY;
impl EqXY {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>) {
// TODO merge or at least intersect domains
LeXY::new(model.clone(), x.clone(), y.clone());
GeXY::new(model, x, y);
}
}
/// X = Y + C
pub struct EqXYC;
impl EqXYC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) {
// TODO merge or at least intersect domains
LeXYC::new(model.clone(), x.clone(), y.clone(), c);
GeXYC::new(model, x, y, c);
}
}
/// X = C
pub struct EqXC;
impl EqXC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, c: int) {
// TODO merge
LeXC::new(model.clone(), x.clone(), c);
GeXC::new(model, x, c);
}
}
/// X != Y
pub struct NeqXY;
impl NeqXY {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>) {
NeqXYCxy::new(model, x, y, 0);
}
}
/// X != Y + C
pub struct NeqXYC;
impl NeqXYC {
pub fn | (model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) {
NeqXYCxy::new(model, x, y, c);
}
}
/// X != C
pub struct NeqXC;
#[allow(unused_variable)]
impl NeqXC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, c: int) {
x.remove(c);
}
}
struct NeqXYCxy : Prop {
c: int
}
impl NeqXYCxy {
fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) {
let id = model.propagators.borrow().len();
let this = NeqXYCxy { model: model.downgrade(), id: id, vars: vec![x, y], c: c};
let p = Rc::new((box this) as Box<Propagator>);
model.add_prop(p);
}
fn x(&self) -> Rc<FDVar> {
self.vars.get(0).clone()
}
fn y(&self) -> Rc<FDVar> {
self.vars.get(1).clone()
}
}
impl Propagator for NeqXYCxy {
fn id(&self) -> uint {
self.id
}
fn model(&self) -> Weak<Mod> {
self.model.clone()
}
fn events(&self) -> Vec<(uint, Event)> {
vec![(self.y().id, Ins), (self.x().id, Ins)]
}
fn propagate(&self) -> Vec<uint> {
if self.x().is_instanciated() {
self.unregister();
self.y().remove(self.x().min() - self.c)
}
else if self.y().is_instanciated() {
self.unregister();
self.x().remove(self.y().min() + self.c)
} else {
vec![]
}
}
}
#[cfg(test)]
mod tests;
| new | identifier_name |
mod.rs | use super::{Event, Ins, Prop, Mod, FDVar, Propagator, LeXY, GeXY, LeXYC, GeXYC, LeXC, GeXC};
use std::rc::{Rc, Weak};
/// X = Y
pub struct EqXY;
impl EqXY {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>) {
// TODO merge or at least intersect domains
LeXY::new(model.clone(), x.clone(), y.clone());
GeXY::new(model, x, y);
}
}
/// X = Y + C
pub struct EqXYC;
impl EqXYC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) {
// TODO merge or at least intersect domains
LeXYC::new(model.clone(), x.clone(), y.clone(), c);
GeXYC::new(model, x, y, c);
}
}
/// X = C
pub struct EqXC;
impl EqXC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, c: int) {
// TODO merge
LeXC::new(model.clone(), x.clone(), c);
GeXC::new(model, x, c);
}
}
/// X != Y
pub struct NeqXY;
impl NeqXY {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>) {
NeqXYCxy::new(model, x, y, 0);
}
}
/// X != Y + C
pub struct NeqXYC;
impl NeqXYC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) {
NeqXYCxy::new(model, x, y, c);
}
}
/// X != C
pub struct NeqXC;
#[allow(unused_variable)]
impl NeqXC {
pub fn new(model: Rc<Mod>, x: Rc<FDVar>, c: int) {
x.remove(c);
}
}
struct NeqXYCxy : Prop {
c: int
}
impl NeqXYCxy { | let p = Rc::new((box this) as Box<Propagator>);
model.add_prop(p);
}
fn x(&self) -> Rc<FDVar> {
self.vars.get(0).clone()
}
fn y(&self) -> Rc<FDVar> {
self.vars.get(1).clone()
}
}
impl Propagator for NeqXYCxy {
fn id(&self) -> uint {
self.id
}
fn model(&self) -> Weak<Mod> {
self.model.clone()
}
fn events(&self) -> Vec<(uint, Event)> {
vec![(self.y().id, Ins), (self.x().id, Ins)]
}
fn propagate(&self) -> Vec<uint> {
if self.x().is_instanciated() {
self.unregister();
self.y().remove(self.x().min() - self.c)
}
else if self.y().is_instanciated() {
self.unregister();
self.x().remove(self.y().min() + self.c)
} else {
vec![]
}
}
}
#[cfg(test)]
mod tests; | fn new(model: Rc<Mod>, x: Rc<FDVar>, y: Rc<FDVar>, c: int) {
let id = model.propagators.borrow().len();
let this = NeqXYCxy { model: model.downgrade(), id: id, vars: vec![x, y], c: c}; | random_line_split |
unit-test.ts | import gulp = require('gulp');
import path = require('path');
import gulpMerge = require('merge2');
import {PROJECT_ROOT} from '../constants';
import {sequenceTask} from '../task_helpers';
const karma = require('karma');
/** Copies deps for unit tests to the build output. */
gulp.task(':build:test:vendor', function() {
const npmVendorFiles = [
'@angular', 'core-js/client', 'rxjs', 'systemjs/dist', 'zone.js/dist'
];
return gulpMerge(
npmVendorFiles.map(function(root) {
const glob = path.join(root, '**/*.+(js|js.map)');
return gulp.src(path.join('node_modules', glob))
.pipe(gulp.dest(path.join('dist/vendor', root)));
}));
});
/** Builds dependencies for unit tests. */
gulp.task(':test:deps', sequenceTask(
'clean',
[
':build:test:vendor',
':build:components:spec'
]
));
/**
* [Watch task] Build unit test dependencies, and rebuild whenever sources are changed.
* This should only be used when running tests locally.
*/
gulp.task(':test:watch', sequenceTask(':test:deps', ':watch:components:spec'));
/** Build unit test dependencies and then inlines resources (html, css) into the JS output. */
gulp.task(':test:deps:inline', sequenceTask(':test:deps', ':inline-resources'));
/**
* [Watch task] Runs the unit tests, rebuilding and re-testing when sources change.
* Does not inline resources.
*
* This task should be used when running unit tests locally.
*/
gulp.task('test', [':test:watch'], (done: (error?: Error) => void) => {
new karma.Server({
configFile: path.join(PROJECT_ROOT, 'tools/test/karma.conf.js')
}, onKarmaFinished(done)).start();
});
/**
* Runs the unit tests once with inlined resources (html, css). Does not watch for changes.
*
* This task should be used when running tests on the CI server.
*/
gulp.task('test:single-run', [':test:deps:inline'], (done: (error?: Error) => void) => {
new karma.Server({
configFile: path.join(PROJECT_ROOT, 'tools/test/karma.conf.js'),
singleRun: true,
autoWatch: false,
}, onKarmaFinished(done)).start();
});
/** Function to create a karma callback that properly reports to gulp. */
function | (doneFn: (error?: Error) => void) {
return (exitCode: number) => {
// Immediately exit the process if Karma reported errors, because due to
// potential running Saucelabs browsers gulp won't exit properly.
exitCode === 0 ? doneFn() : process.exit(exitCode);
};
}
| onKarmaFinished | identifier_name |
unit-test.ts | import gulp = require('gulp');
import path = require('path');
import gulpMerge = require('merge2');
import {PROJECT_ROOT} from '../constants';
import {sequenceTask} from '../task_helpers';
const karma = require('karma');
/** Copies deps for unit tests to the build output. */
gulp.task(':build:test:vendor', function() {
const npmVendorFiles = [
'@angular', 'core-js/client', 'rxjs', 'systemjs/dist', 'zone.js/dist'
];
return gulpMerge(
npmVendorFiles.map(function(root) {
const glob = path.join(root, '**/*.+(js|js.map)');
return gulp.src(path.join('node_modules', glob))
.pipe(gulp.dest(path.join('dist/vendor', root)));
}));
});
/** Builds dependencies for unit tests. */
gulp.task(':test:deps', sequenceTask(
'clean',
[
':build:test:vendor',
':build:components:spec'
]
));
/**
* [Watch task] Build unit test dependencies, and rebuild whenever sources are changed.
* This should only be used when running tests locally.
*/
gulp.task(':test:watch', sequenceTask(':test:deps', ':watch:components:spec'));
/** Build unit test dependencies and then inlines resources (html, css) into the JS output. */
gulp.task(':test:deps:inline', sequenceTask(':test:deps', ':inline-resources'));
/**
* [Watch task] Runs the unit tests, rebuilding and re-testing when sources change.
* Does not inline resources.
*
* This task should be used when running unit tests locally.
*/
gulp.task('test', [':test:watch'], (done: (error?: Error) => void) => {
new karma.Server({
configFile: path.join(PROJECT_ROOT, 'tools/test/karma.conf.js')
}, onKarmaFinished(done)).start();
});
/**
* Runs the unit tests once with inlined resources (html, css). Does not watch for changes.
*
* This task should be used when running tests on the CI server.
*/
gulp.task('test:single-run', [':test:deps:inline'], (done: (error?: Error) => void) => {
new karma.Server({
configFile: path.join(PROJECT_ROOT, 'tools/test/karma.conf.js'),
singleRun: true,
autoWatch: false,
}, onKarmaFinished(done)).start();
});
/** Function to create a karma callback that properly reports to gulp. */
function onKarmaFinished(doneFn: (error?: Error) => void) | {
return (exitCode: number) => {
// Immediately exit the process if Karma reported errors, because due to
// potential running Saucelabs browsers gulp won't exit properly.
exitCode === 0 ? doneFn() : process.exit(exitCode);
};
} | identifier_body | |
unit-test.ts | import gulp = require('gulp');
import path = require('path');
import gulpMerge = require('merge2');
import {PROJECT_ROOT} from '../constants';
import {sequenceTask} from '../task_helpers';
const karma = require('karma');
/** Copies deps for unit tests to the build output. */
gulp.task(':build:test:vendor', function() {
const npmVendorFiles = [
'@angular', 'core-js/client', 'rxjs', 'systemjs/dist', 'zone.js/dist'
];
return gulpMerge(
npmVendorFiles.map(function(root) {
const glob = path.join(root, '**/*.+(js|js.map)');
return gulp.src(path.join('node_modules', glob))
.pipe(gulp.dest(path.join('dist/vendor', root)));
}));
});
/** Builds dependencies for unit tests. */ | ':build:components:spec'
]
));
/**
* [Watch task] Build unit test dependencies, and rebuild whenever sources are changed.
* This should only be used when running tests locally.
*/
gulp.task(':test:watch', sequenceTask(':test:deps', ':watch:components:spec'));
/** Build unit test dependencies and then inlines resources (html, css) into the JS output. */
gulp.task(':test:deps:inline', sequenceTask(':test:deps', ':inline-resources'));
/**
* [Watch task] Runs the unit tests, rebuilding and re-testing when sources change.
* Does not inline resources.
*
* This task should be used when running unit tests locally.
*/
gulp.task('test', [':test:watch'], (done: (error?: Error) => void) => {
new karma.Server({
configFile: path.join(PROJECT_ROOT, 'tools/test/karma.conf.js')
}, onKarmaFinished(done)).start();
});
/**
* Runs the unit tests once with inlined resources (html, css). Does not watch for changes.
*
* This task should be used when running tests on the CI server.
*/
gulp.task('test:single-run', [':test:deps:inline'], (done: (error?: Error) => void) => {
new karma.Server({
configFile: path.join(PROJECT_ROOT, 'tools/test/karma.conf.js'),
singleRun: true,
autoWatch: false,
}, onKarmaFinished(done)).start();
});
/** Function to create a karma callback that properly reports to gulp. */
function onKarmaFinished(doneFn: (error?: Error) => void) {
return (exitCode: number) => {
// Immediately exit the process if Karma reported errors, because due to
// potential running Saucelabs browsers gulp won't exit properly.
exitCode === 0 ? doneFn() : process.exit(exitCode);
};
} | gulp.task(':test:deps', sequenceTask(
'clean',
[
':build:test:vendor', | random_line_split |
goodsDetails.js | /**
* Created by 殿麒 on 2015/11/3.
*/
function GetQueryString(name)
{
var reg = new RegExp("(^|&)"+ name +"=([^&]*)(&|$)");
var r = window.location.search.substr(1).match(reg);
if(r!=null)return unescape(r[2]); return null;
}
purchase.controller('goodsDetails',function($rootScope,$scope,$location,$cookieStore,goodsCartcookie,purchasePost,getAccessInfo){
var self_url = GetQueryString("productId");
if(self_url!= undefined){
var productId = self_url;
}else{
var productId = $rootScope.GOODSINFO.productId;
}
var data = {
productId:productId,
sign:'sign',
accessInfo:getAccessInfo.accessInfo
}
var path = 'product/detail';
purchasePost.postData(data,path).success(function(data){
var imgs = data.productInfo.images;
// 这里因为数组中是字符串不是对象所以需要想将字符串变为对象
var imgArr = [];
for(var i = 0,len = imgs.length; i < len;i++){
var obj = {src:imgs[i]};
imgArr.push(obj);
}
$scope.imgs = imgArr;
$scope.goodsName = data.productInfo.title; | console.log(data);
$scope.goodsInfo = data.productInfo;
});
$scope.addGoodscart = function(){
var goodscart_list = $cookieStore.get('goodscart_list');
$rootScope.GOODSCART_NUM += 1;
$rootScope.GOODSCART_MONEY += $scope.goodsInfo.price;
// 添加cookie
goodsCartcookie.add_goodsCart_cookie(goodscart_list,$scope.goodsInfo);
}
});
purchase.controller('comment',function($rootScope,$scope,$cookieStore,purchasePost){
var self_url = GetQueryString("productId");
if(self_url!= undefined){
var productId = self_url;
}else{
var productId = $rootScope.GOODSINFO.productId;
}
var requestPageInfo = {
pageNo:1,
pageSize:2
};
var shopId = $rootScope.GOODSINFO.shopId || GetQueryString("shopId") || null;
var data = {
shopId:shopId,
requestPageInfo:requestPageInfo
}
var path = 'shop/reviewList';
purchasePost.postData(data,path).success(function(data){
$scope.totalCount = data["responsePageInfo"].totalCount;
$scope.item_respList = data["item_respList"];
});
}); | $scope.saleNum = data.productInfo.salesCnt;
$scope.limitBuy = data.productInfo.subTitle;
$scope.nowSale = data.productInfo.price;
$scope.originSale = data.productInfo.marketPrice || $scope.nowSale; | random_line_split |
goodsDetails.js | /**
* Created by 殿麒 on 2015/11/3.
*/
function GetQueryString(name)
{
| chase.controller('goodsDetails',function($rootScope,$scope,$location,$cookieStore,goodsCartcookie,purchasePost,getAccessInfo){
var self_url = GetQueryString("productId");
if(self_url!= undefined){
var productId = self_url;
}else{
var productId = $rootScope.GOODSINFO.productId;
}
var data = {
productId:productId,
sign:'sign',
accessInfo:getAccessInfo.accessInfo
}
var path = 'product/detail';
purchasePost.postData(data,path).success(function(data){
var imgs = data.productInfo.images;
// 这里因为数组中是字符串不是对象所以需要想将字符串变为对象
var imgArr = [];
for(var i = 0,len = imgs.length; i < len;i++){
var obj = {src:imgs[i]};
imgArr.push(obj);
}
$scope.imgs = imgArr;
$scope.goodsName = data.productInfo.title;
$scope.saleNum = data.productInfo.salesCnt;
$scope.limitBuy = data.productInfo.subTitle;
$scope.nowSale = data.productInfo.price;
$scope.originSale = data.productInfo.marketPrice || $scope.nowSale;
console.log(data);
$scope.goodsInfo = data.productInfo;
});
$scope.addGoodscart = function(){
var goodscart_list = $cookieStore.get('goodscart_list');
$rootScope.GOODSCART_NUM += 1;
$rootScope.GOODSCART_MONEY += $scope.goodsInfo.price;
// 添加cookie
goodsCartcookie.add_goodsCart_cookie(goodscart_list,$scope.goodsInfo);
}
});
purchase.controller('comment',function($rootScope,$scope,$cookieStore,purchasePost){
var self_url = GetQueryString("productId");
if(self_url!= undefined){
var productId = self_url;
}else{
var productId = $rootScope.GOODSINFO.productId;
}
var requestPageInfo = {
pageNo:1,
pageSize:2
};
var shopId = $rootScope.GOODSINFO.shopId || GetQueryString("shopId") || null;
var data = {
shopId:shopId,
requestPageInfo:requestPageInfo
}
var path = 'shop/reviewList';
purchasePost.postData(data,path).success(function(data){
$scope.totalCount = data["responsePageInfo"].totalCount;
$scope.item_respList = data["item_respList"];
});
}); | var reg = new RegExp("(^|&)"+ name +"=([^&]*)(&|$)");
var r = window.location.search.substr(1).match(reg);
if(r!=null)return unescape(r[2]); return null;
}
pur | identifier_body |
goodsDetails.js | /**
* Created by 殿麒 on 2015/11/3.
*/
function GetQ | e)
{
var reg = new RegExp("(^|&)"+ name +"=([^&]*)(&|$)");
var r = window.location.search.substr(1).match(reg);
if(r!=null)return unescape(r[2]); return null;
}
purchase.controller('goodsDetails',function($rootScope,$scope,$location,$cookieStore,goodsCartcookie,purchasePost,getAccessInfo){
var self_url = GetQueryString("productId");
if(self_url!= undefined){
var productId = self_url;
}else{
var productId = $rootScope.GOODSINFO.productId;
}
var data = {
productId:productId,
sign:'sign',
accessInfo:getAccessInfo.accessInfo
}
var path = 'product/detail';
purchasePost.postData(data,path).success(function(data){
var imgs = data.productInfo.images;
// 这里因为数组中是字符串不是对象所以需要想将字符串变为对象
var imgArr = [];
for(var i = 0,len = imgs.length; i < len;i++){
var obj = {src:imgs[i]};
imgArr.push(obj);
}
$scope.imgs = imgArr;
$scope.goodsName = data.productInfo.title;
$scope.saleNum = data.productInfo.salesCnt;
$scope.limitBuy = data.productInfo.subTitle;
$scope.nowSale = data.productInfo.price;
$scope.originSale = data.productInfo.marketPrice || $scope.nowSale;
console.log(data);
$scope.goodsInfo = data.productInfo;
});
$scope.addGoodscart = function(){
var goodscart_list = $cookieStore.get('goodscart_list');
$rootScope.GOODSCART_NUM += 1;
$rootScope.GOODSCART_MONEY += $scope.goodsInfo.price;
// 添加cookie
goodsCartcookie.add_goodsCart_cookie(goodscart_list,$scope.goodsInfo);
}
});
purchase.controller('comment',function($rootScope,$scope,$cookieStore,purchasePost){
var self_url = GetQueryString("productId");
if(self_url!= undefined){
var productId = self_url;
}else{
var productId = $rootScope.GOODSINFO.productId;
}
var requestPageInfo = {
pageNo:1,
pageSize:2
};
var shopId = $rootScope.GOODSINFO.shopId || GetQueryString("shopId") || null;
var data = {
shopId:shopId,
requestPageInfo:requestPageInfo
}
var path = 'shop/reviewList';
purchasePost.postData(data,path).success(function(data){
$scope.totalCount = data["responsePageInfo"].totalCount;
$scope.item_respList = data["item_respList"];
});
}); | ueryString(nam | identifier_name |
goodsDetails.js | /**
* Created by 殿麒 on 2015/11/3.
*/
function GetQueryString(name)
{
var reg = new RegExp("(^|&)"+ name +"=([^&]*)(&|$)");
var r = window.location.search.substr(1).match(reg);
if(r!=null)return unescape(r[2]); return null;
}
purchase.controller('goodsDetails',function($rootScope,$scope,$location,$cookieStore,goodsCartcookie,purchasePost,getAccessInfo){
var self_url = GetQueryString("productId");
if(self_url!= undefined){
var productId = self_url;
}else{
| var data = {
productId:productId,
sign:'sign',
accessInfo:getAccessInfo.accessInfo
}
var path = 'product/detail';
purchasePost.postData(data,path).success(function(data){
var imgs = data.productInfo.images;
// 这里因为数组中是字符串不是对象所以需要想将字符串变为对象
var imgArr = [];
for(var i = 0,len = imgs.length; i < len;i++){
var obj = {src:imgs[i]};
imgArr.push(obj);
}
$scope.imgs = imgArr;
$scope.goodsName = data.productInfo.title;
$scope.saleNum = data.productInfo.salesCnt;
$scope.limitBuy = data.productInfo.subTitle;
$scope.nowSale = data.productInfo.price;
$scope.originSale = data.productInfo.marketPrice || $scope.nowSale;
console.log(data);
$scope.goodsInfo = data.productInfo;
});
$scope.addGoodscart = function(){
var goodscart_list = $cookieStore.get('goodscart_list');
$rootScope.GOODSCART_NUM += 1;
$rootScope.GOODSCART_MONEY += $scope.goodsInfo.price;
// 添加cookie
goodsCartcookie.add_goodsCart_cookie(goodscart_list,$scope.goodsInfo);
}
});
purchase.controller('comment',function($rootScope,$scope,$cookieStore,purchasePost){
var self_url = GetQueryString("productId");
if(self_url!= undefined){
var productId = self_url;
}else{
var productId = $rootScope.GOODSINFO.productId;
}
var requestPageInfo = {
pageNo:1,
pageSize:2
};
var shopId = $rootScope.GOODSINFO.shopId || GetQueryString("shopId") || null;
var data = {
shopId:shopId,
requestPageInfo:requestPageInfo
}
var path = 'shop/reviewList';
purchasePost.postData(data,path).success(function(data){
$scope.totalCount = data["responsePageInfo"].totalCount;
$scope.item_respList = data["item_respList"];
});
}); | var productId = $rootScope.GOODSINFO.productId;
}
| conditional_block |
package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyAzureMgmtDeploymentmanager(PythonPackage):
"""Microsoft Azure Deployment Manager Client Library for Python."""
homepage = "https://github.com/Azure/azure-sdk-for-python"
pypi = "azure-mgmt-deploymentmanager/azure-mgmt-deploymentmanager-0.2.0.zip"
version('0.2.0', sha256='46e342227993fc9acab1dda42f2eb566b522a8c945ab9d0eea56276b46f6d730')
depends_on('py-setuptools', type='build')
depends_on('py-msrest@0.5.0:', type=('build', 'run'))
depends_on('py-msrestazure@0.4.32:1', type=('build', 'run')) | depends_on('py-azure-common@1.1:1', type=('build', 'run'))
depends_on('py-azure-mgmt-nspkg', when='^python@:2', type=('build', 'run')) | random_line_split | |
package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyAzureMgmtDeploymentmanager(PythonPackage):
| """Microsoft Azure Deployment Manager Client Library for Python."""
homepage = "https://github.com/Azure/azure-sdk-for-python"
pypi = "azure-mgmt-deploymentmanager/azure-mgmt-deploymentmanager-0.2.0.zip"
version('0.2.0', sha256='46e342227993fc9acab1dda42f2eb566b522a8c945ab9d0eea56276b46f6d730')
depends_on('py-setuptools', type='build')
depends_on('py-msrest@0.5.0:', type=('build', 'run'))
depends_on('py-msrestazure@0.4.32:1', type=('build', 'run'))
depends_on('py-azure-common@1.1:1', type=('build', 'run'))
depends_on('py-azure-mgmt-nspkg', when='^python@:2', type=('build', 'run')) | identifier_body | |
package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class | (PythonPackage):
"""Microsoft Azure Deployment Manager Client Library for Python."""
homepage = "https://github.com/Azure/azure-sdk-for-python"
pypi = "azure-mgmt-deploymentmanager/azure-mgmt-deploymentmanager-0.2.0.zip"
version('0.2.0', sha256='46e342227993fc9acab1dda42f2eb566b522a8c945ab9d0eea56276b46f6d730')
depends_on('py-setuptools', type='build')
depends_on('py-msrest@0.5.0:', type=('build', 'run'))
depends_on('py-msrestazure@0.4.32:1', type=('build', 'run'))
depends_on('py-azure-common@1.1:1', type=('build', 'run'))
depends_on('py-azure-mgmt-nspkg', when='^python@:2', type=('build', 'run'))
| PyAzureMgmtDeploymentmanager | identifier_name |
tabs.py | """
This module is essentially a broker to xmodule/tabs.py -- it was originally introduced to
perform some LMS-specific tab display gymnastics for the Entrance Exams feature
"""
from django.conf import settings
from django.utils.translation import ugettext as _, ugettext_noop
from courseware.access import has_access
from courseware.entrance_exams import user_must_complete_entrance_exam
from student.models import UserProfile
from openedx.core.lib.course_tabs import CourseTabPluginManager
from student.models import CourseEnrollment
from xmodule.tabs import CourseTab, CourseTabList, key_checker
from xmodule.tabs import StaticTab
class EnrolledTab(CourseTab):
"""
A base class for any view types that require a user to be enrolled.
"""
@classmethod
def is_enabled(cls, course, user=None):
if user is None:
return True
return bool(CourseEnrollment.is_enrolled(user, course.id) or has_access(user, 'staff', course, course.id))
class CoursewareTab(EnrolledTab):
"""
The main courseware view.
"""
type = 'courseware'
title = ugettext_noop('Courseware')
priority = 10
view_name = 'courseware'
is_movable = False
is_default = False
is_visible_to_sneak_peek = True
class CourseInfoTab(CourseTab):
"""
The course info view.
"""
type = 'course_info'
title = ugettext_noop('Course Info')
priority = 20
view_name = 'info'
tab_id = 'info'
is_movable = False
is_default = False
is_visible_to_sneak_peek = True
@classmethod
def is_enabled(cls, course, user=None):
return True
class SyllabusTab(EnrolledTab):
"""
A tab for the course syllabus.
"""
type = 'syllabus'
title = ugettext_noop('Syllabus')
priority = 30
view_name = 'syllabus'
allow_multiple = True
is_default = False
is_visible_to_sneak_peek = True
@classmethod
def is_enabled(cls, course, user=None):
if not super(SyllabusTab, cls).is_enabled(course, user=user):
return False
return getattr(course, 'syllabus_present', False)
class ProgressTab(EnrolledTab):
"""
The course progress view.
"""
type = 'progress'
title = ugettext_noop('Progress')
priority = 40
view_name = 'progress'
is_hideable = True
is_default = False
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
if not super(ProgressTab, cls).is_enabled(course, user=user):
return False
return not course.hide_progress_tab
class TextbookTabsBase(CourseTab):
"""
Abstract class for textbook collection tabs classes.
"""
# Translators: 'Textbooks' refers to the tab in the course that leads to the course' textbooks
title = ugettext_noop("Textbooks")
is_collection = True
is_default = False
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
return user is None or user.is_authenticated()
@classmethod
def items(cls, course):
"""
A generator for iterating through all the SingleTextbookTab book objects associated with this
collection of textbooks.
"""
raise NotImplementedError()
class TextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all textbook tabs.
"""
type = 'textbooks'
priority = None
view_name = 'book'
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
parent_is_enabled = super(TextbookTabs, cls).is_enabled(course, user)
return settings.FEATURES.get('ENABLE_TEXTBOOK') and parent_is_enabled
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.textbooks):
yield SingleTextbookTab(
name=textbook.title,
tab_id='textbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class PDFTextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all PDF textbook tabs.
"""
type = 'pdf_textbooks'
priority = None
view_name = 'pdf_book'
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.pdf_textbooks):
yield SingleTextbookTab(
name=textbook['tab_title'],
tab_id='pdftextbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class HtmlTextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all Html textbook tabs.
"""
type = 'html_textbooks'
priority = None
view_name = 'html_book'
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.html_textbooks):
yield SingleTextbookTab(
name=textbook['tab_title'],
tab_id='htmltextbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class LinkTab(CourseTab):
"""
Abstract class for tabs that contain external links.
"""
link_value = ''
def __init__(self, tab_dict=None, name=None, link=None):
self.link_value = tab_dict['link'] if tab_dict else link
def link_value_func(_course, _reverse_func):
""" Returns the link_value as the link. """
return self.link_value
self.type = tab_dict['type']
tab_dict['link_func'] = link_value_func
super(LinkTab, self).__init__(tab_dict)
def __getitem__(self, key):
if key == 'link':
return self.link_value
else:
return super(LinkTab, self).__getitem__(key)
def __setitem__(self, key, value):
if key == 'link':
self.link_value = value
else:
super(LinkTab, self).__setitem__(key, value)
def to_json(self):
to_json_val = super(LinkTab, self).to_json()
to_json_val.update({'link': self.link_value})
return to_json_val
def __eq__(self, other):
if not super(LinkTab, self).__eq__(other):
return False
return self.link_value == other.get('link')
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
return True
class ExternalDiscussionCourseTab(LinkTab):
"""
A course tab that links to an external discussion service.
"""
type = 'external_discussion'
# Translators: 'Discussion' refers to the tab in the courseware that leads to the discussion forums
title = ugettext_noop('Discussion')
priority = None
is_default = False
@classmethod
def validate(cls, tab_dict, raise_error=True):
""" Validate that the tab_dict for this course tab has the necessary information to render. """
return (super(ExternalDiscussionCourseTab, cls).validate(tab_dict, raise_error) and
key_checker(['link'])(tab_dict, raise_error))
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
if not super(ExternalDiscussionCourseTab, cls).is_enabled(course, user=user):
return False
return course.discussion_link
class ExternalLinkCourseTab(LinkTab):
"""
A course tab containing an external link.
"""
type = 'external_link'
priority = None
is_default = False # An external link tab is not added to a course by default
allow_multiple = True
@classmethod
def validate(cls, tab_dict, raise_error=True):
""" Validate that the tab_dict for this course tab has the necessary information to render. """
return (super(ExternalLinkCourseTab, cls).validate(tab_dict, raise_error) and
key_checker(['link', 'name'])(tab_dict, raise_error))
class SingleTextbookTab(CourseTab):
"""
A tab representing a single textbook. It is created temporarily when enumerating all textbooks within a
Textbook collection tab. It should not be serialized or persisted.
"""
type = 'single_textbook'
is_movable = False
is_collection_item = True
priority = None
def __init__(self, name, tab_id, view_name, index):
def link_func(course, reverse_func, index=index):
""" Constructs a link for textbooks from a view name, a course, and an index. """
return reverse_func(view_name, args=[unicode(course.id), index])
tab_dict = dict()
tab_dict['name'] = name
tab_dict['tab_id'] = tab_id
tab_dict['link_func'] = link_func
super(SingleTextbookTab, self).__init__(tab_dict)
def to_json(self):
raise NotImplementedError('SingleTextbookTab should not be serialized.')
def get_course_tab_list(request, course):
"""
Retrieves the course tab list from xmodule.tabs and manipulates the set as necessary
"""
user = request.user
is_user_enrolled = user.is_authenticated() and CourseEnrollment.is_enrolled(user, course.id)
xmodule_tab_list = CourseTabList.iterate_displayable(
course,
user=user,
settings=settings,
is_user_authenticated=user.is_authenticated(),
is_user_staff=has_access(user, 'staff', course, course.id),
is_user_enrolled=is_user_enrolled,
is_user_sneakpeek=not UserProfile.has_registered(user),
)
# Now that we've loaded the tabs for this course, perform the Entrance Exam work.
# If the user has to take an entrance exam, we'll need to hide away all but the
# "Courseware" tab. The tab is then renamed as "Entrance Exam".
course_tab_list = []
for tab in xmodule_tab_list:
if user_must_complete_entrance_exam(request, user, course):
# Hide all of the tabs except for 'Courseware'
# Rename 'Courseware' tab to 'Entrance Exam'
if tab.type is not 'courseware':
continue
tab.name = _("Entrance Exam")
course_tab_list.append(tab)
# Add in any dynamic tabs, i.e. those that are not persisted
course_tab_list += _get_dynamic_tabs(course, user)
return course_tab_list
def _get_dynamic_tabs(course, user):
"""
Returns the dynamic tab types for the current user.
Note: dynamic tabs are those that are not persisted in the course, but are
instead added dynamically based upon the user's role.
"""
dynamic_tabs = list()
for tab_type in CourseTabPluginManager.get_tab_types():
if getattr(tab_type, "is_dynamic", False):
|
dynamic_tabs.sort(key=lambda dynamic_tab: dynamic_tab.name)
return dynamic_tabs
| tab = tab_type(dict())
if tab.is_enabled(course, user=user):
dynamic_tabs.append(tab) | conditional_block |
tabs.py | """
This module is essentially a broker to xmodule/tabs.py -- it was originally introduced to
perform some LMS-specific tab display gymnastics for the Entrance Exams feature
"""
from django.conf import settings
from django.utils.translation import ugettext as _, ugettext_noop
from courseware.access import has_access
from courseware.entrance_exams import user_must_complete_entrance_exam
from student.models import UserProfile
from openedx.core.lib.course_tabs import CourseTabPluginManager
from student.models import CourseEnrollment
from xmodule.tabs import CourseTab, CourseTabList, key_checker
from xmodule.tabs import StaticTab
class EnrolledTab(CourseTab):
"""
A base class for any view types that require a user to be enrolled.
"""
@classmethod
def is_enabled(cls, course, user=None):
if user is None:
return True
return bool(CourseEnrollment.is_enrolled(user, course.id) or has_access(user, 'staff', course, course.id))
class CoursewareTab(EnrolledTab):
"""
The main courseware view.
"""
type = 'courseware'
title = ugettext_noop('Courseware')
priority = 10
view_name = 'courseware'
is_movable = False
is_default = False
is_visible_to_sneak_peek = True
class CourseInfoTab(CourseTab):
"""
The course info view.
"""
type = 'course_info'
title = ugettext_noop('Course Info')
priority = 20
view_name = 'info'
tab_id = 'info'
is_movable = False
is_default = False
is_visible_to_sneak_peek = True
@classmethod
def is_enabled(cls, course, user=None):
return True
class SyllabusTab(EnrolledTab):
"""
A tab for the course syllabus.
"""
type = 'syllabus'
title = ugettext_noop('Syllabus')
priority = 30
view_name = 'syllabus'
allow_multiple = True
is_default = False
is_visible_to_sneak_peek = True
@classmethod
def is_enabled(cls, course, user=None):
if not super(SyllabusTab, cls).is_enabled(course, user=user):
return False
return getattr(course, 'syllabus_present', False)
class ProgressTab(EnrolledTab):
"""
The course progress view.
"""
type = 'progress'
title = ugettext_noop('Progress')
priority = 40
view_name = 'progress'
is_hideable = True
is_default = False
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
if not super(ProgressTab, cls).is_enabled(course, user=user):
return False
return not course.hide_progress_tab
class TextbookTabsBase(CourseTab):
"""
Abstract class for textbook collection tabs classes.
"""
# Translators: 'Textbooks' refers to the tab in the course that leads to the course' textbooks
title = ugettext_noop("Textbooks")
is_collection = True
is_default = False
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
return user is None or user.is_authenticated()
@classmethod
def items(cls, course):
"""
A generator for iterating through all the SingleTextbookTab book objects associated with this
collection of textbooks.
"""
raise NotImplementedError()
class TextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all textbook tabs.
"""
type = 'textbooks'
priority = None
view_name = 'book'
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
parent_is_enabled = super(TextbookTabs, cls).is_enabled(course, user)
return settings.FEATURES.get('ENABLE_TEXTBOOK') and parent_is_enabled
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.textbooks):
yield SingleTextbookTab(
name=textbook.title,
tab_id='textbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class PDFTextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all PDF textbook tabs.
"""
type = 'pdf_textbooks'
priority = None
view_name = 'pdf_book'
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.pdf_textbooks):
yield SingleTextbookTab(
name=textbook['tab_title'],
tab_id='pdftextbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class HtmlTextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all Html textbook tabs.
"""
type = 'html_textbooks'
priority = None
view_name = 'html_book'
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.html_textbooks):
yield SingleTextbookTab(
name=textbook['tab_title'],
tab_id='htmltextbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class LinkTab(CourseTab):
"""
Abstract class for tabs that contain external links.
"""
link_value = ''
def __init__(self, tab_dict=None, name=None, link=None):
self.link_value = tab_dict['link'] if tab_dict else link
def link_value_func(_course, _reverse_func):
""" Returns the link_value as the link. """
return self.link_value
self.type = tab_dict['type']
tab_dict['link_func'] = link_value_func
super(LinkTab, self).__init__(tab_dict)
def __getitem__(self, key):
if key == 'link':
return self.link_value
else:
return super(LinkTab, self).__getitem__(key)
def __setitem__(self, key, value):
if key == 'link':
self.link_value = value
else:
super(LinkTab, self).__setitem__(key, value)
def to_json(self):
to_json_val = super(LinkTab, self).to_json()
to_json_val.update({'link': self.link_value})
return to_json_val
def __eq__(self, other):
if not super(LinkTab, self).__eq__(other):
return False
return self.link_value == other.get('link')
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
return True
class ExternalDiscussionCourseTab(LinkTab):
"""
A course tab that links to an external discussion service.
"""
type = 'external_discussion'
# Translators: 'Discussion' refers to the tab in the courseware that leads to the discussion forums
title = ugettext_noop('Discussion')
priority = None
is_default = False
@classmethod
def validate(cls, tab_dict, raise_error=True):
""" Validate that the tab_dict for this course tab has the necessary information to render. """
return (super(ExternalDiscussionCourseTab, cls).validate(tab_dict, raise_error) and
key_checker(['link'])(tab_dict, raise_error))
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
if not super(ExternalDiscussionCourseTab, cls).is_enabled(course, user=user):
return False
return course.discussion_link
class ExternalLinkCourseTab(LinkTab):
|
class SingleTextbookTab(CourseTab):
"""
A tab representing a single textbook. It is created temporarily when enumerating all textbooks within a
Textbook collection tab. It should not be serialized or persisted.
"""
type = 'single_textbook'
is_movable = False
is_collection_item = True
priority = None
def __init__(self, name, tab_id, view_name, index):
def link_func(course, reverse_func, index=index):
""" Constructs a link for textbooks from a view name, a course, and an index. """
return reverse_func(view_name, args=[unicode(course.id), index])
tab_dict = dict()
tab_dict['name'] = name
tab_dict['tab_id'] = tab_id
tab_dict['link_func'] = link_func
super(SingleTextbookTab, self).__init__(tab_dict)
def to_json(self):
raise NotImplementedError('SingleTextbookTab should not be serialized.')
def get_course_tab_list(request, course):
"""
Retrieves the course tab list from xmodule.tabs and manipulates the set as necessary
"""
user = request.user
is_user_enrolled = user.is_authenticated() and CourseEnrollment.is_enrolled(user, course.id)
xmodule_tab_list = CourseTabList.iterate_displayable(
course,
user=user,
settings=settings,
is_user_authenticated=user.is_authenticated(),
is_user_staff=has_access(user, 'staff', course, course.id),
is_user_enrolled=is_user_enrolled,
is_user_sneakpeek=not UserProfile.has_registered(user),
)
# Now that we've loaded the tabs for this course, perform the Entrance Exam work.
# If the user has to take an entrance exam, we'll need to hide away all but the
# "Courseware" tab. The tab is then renamed as "Entrance Exam".
course_tab_list = []
for tab in xmodule_tab_list:
if user_must_complete_entrance_exam(request, user, course):
# Hide all of the tabs except for 'Courseware'
# Rename 'Courseware' tab to 'Entrance Exam'
if tab.type is not 'courseware':
continue
tab.name = _("Entrance Exam")
course_tab_list.append(tab)
# Add in any dynamic tabs, i.e. those that are not persisted
course_tab_list += _get_dynamic_tabs(course, user)
return course_tab_list
def _get_dynamic_tabs(course, user):
"""
Returns the dynamic tab types for the current user.
Note: dynamic tabs are those that are not persisted in the course, but are
instead added dynamically based upon the user's role.
"""
dynamic_tabs = list()
for tab_type in CourseTabPluginManager.get_tab_types():
if getattr(tab_type, "is_dynamic", False):
tab = tab_type(dict())
if tab.is_enabled(course, user=user):
dynamic_tabs.append(tab)
dynamic_tabs.sort(key=lambda dynamic_tab: dynamic_tab.name)
return dynamic_tabs
| """
A course tab containing an external link.
"""
type = 'external_link'
priority = None
is_default = False # An external link tab is not added to a course by default
allow_multiple = True
@classmethod
def validate(cls, tab_dict, raise_error=True):
""" Validate that the tab_dict for this course tab has the necessary information to render. """
return (super(ExternalLinkCourseTab, cls).validate(tab_dict, raise_error) and
key_checker(['link', 'name'])(tab_dict, raise_error)) | identifier_body |
tabs.py | """
This module is essentially a broker to xmodule/tabs.py -- it was originally introduced to
perform some LMS-specific tab display gymnastics for the Entrance Exams feature
"""
from django.conf import settings
from django.utils.translation import ugettext as _, ugettext_noop
from courseware.access import has_access
from courseware.entrance_exams import user_must_complete_entrance_exam
from student.models import UserProfile
from openedx.core.lib.course_tabs import CourseTabPluginManager
from student.models import CourseEnrollment
from xmodule.tabs import CourseTab, CourseTabList, key_checker
from xmodule.tabs import StaticTab
class EnrolledTab(CourseTab):
"""
A base class for any view types that require a user to be enrolled.
"""
@classmethod
def is_enabled(cls, course, user=None):
if user is None:
return True
return bool(CourseEnrollment.is_enrolled(user, course.id) or has_access(user, 'staff', course, course.id))
class CoursewareTab(EnrolledTab):
"""
The main courseware view.
"""
type = 'courseware'
title = ugettext_noop('Courseware')
priority = 10
view_name = 'courseware'
is_movable = False
is_default = False
is_visible_to_sneak_peek = True
class CourseInfoTab(CourseTab):
"""
The course info view.
"""
type = 'course_info'
title = ugettext_noop('Course Info')
priority = 20
view_name = 'info'
tab_id = 'info'
is_movable = False
is_default = False
is_visible_to_sneak_peek = True
@classmethod
def | (cls, course, user=None):
return True
class SyllabusTab(EnrolledTab):
"""
A tab for the course syllabus.
"""
type = 'syllabus'
title = ugettext_noop('Syllabus')
priority = 30
view_name = 'syllabus'
allow_multiple = True
is_default = False
is_visible_to_sneak_peek = True
@classmethod
def is_enabled(cls, course, user=None):
if not super(SyllabusTab, cls).is_enabled(course, user=user):
return False
return getattr(course, 'syllabus_present', False)
class ProgressTab(EnrolledTab):
"""
The course progress view.
"""
type = 'progress'
title = ugettext_noop('Progress')
priority = 40
view_name = 'progress'
is_hideable = True
is_default = False
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
if not super(ProgressTab, cls).is_enabled(course, user=user):
return False
return not course.hide_progress_tab
class TextbookTabsBase(CourseTab):
"""
Abstract class for textbook collection tabs classes.
"""
# Translators: 'Textbooks' refers to the tab in the course that leads to the course' textbooks
title = ugettext_noop("Textbooks")
is_collection = True
is_default = False
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
return user is None or user.is_authenticated()
@classmethod
def items(cls, course):
"""
A generator for iterating through all the SingleTextbookTab book objects associated with this
collection of textbooks.
"""
raise NotImplementedError()
class TextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all textbook tabs.
"""
type = 'textbooks'
priority = None
view_name = 'book'
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
parent_is_enabled = super(TextbookTabs, cls).is_enabled(course, user)
return settings.FEATURES.get('ENABLE_TEXTBOOK') and parent_is_enabled
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.textbooks):
yield SingleTextbookTab(
name=textbook.title,
tab_id='textbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class PDFTextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all PDF textbook tabs.
"""
type = 'pdf_textbooks'
priority = None
view_name = 'pdf_book'
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.pdf_textbooks):
yield SingleTextbookTab(
name=textbook['tab_title'],
tab_id='pdftextbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class HtmlTextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all Html textbook tabs.
"""
type = 'html_textbooks'
priority = None
view_name = 'html_book'
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.html_textbooks):
yield SingleTextbookTab(
name=textbook['tab_title'],
tab_id='htmltextbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class LinkTab(CourseTab):
"""
Abstract class for tabs that contain external links.
"""
link_value = ''
def __init__(self, tab_dict=None, name=None, link=None):
self.link_value = tab_dict['link'] if tab_dict else link
def link_value_func(_course, _reverse_func):
""" Returns the link_value as the link. """
return self.link_value
self.type = tab_dict['type']
tab_dict['link_func'] = link_value_func
super(LinkTab, self).__init__(tab_dict)
def __getitem__(self, key):
if key == 'link':
return self.link_value
else:
return super(LinkTab, self).__getitem__(key)
def __setitem__(self, key, value):
if key == 'link':
self.link_value = value
else:
super(LinkTab, self).__setitem__(key, value)
def to_json(self):
to_json_val = super(LinkTab, self).to_json()
to_json_val.update({'link': self.link_value})
return to_json_val
def __eq__(self, other):
if not super(LinkTab, self).__eq__(other):
return False
return self.link_value == other.get('link')
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
return True
class ExternalDiscussionCourseTab(LinkTab):
"""
A course tab that links to an external discussion service.
"""
type = 'external_discussion'
# Translators: 'Discussion' refers to the tab in the courseware that leads to the discussion forums
title = ugettext_noop('Discussion')
priority = None
is_default = False
@classmethod
def validate(cls, tab_dict, raise_error=True):
""" Validate that the tab_dict for this course tab has the necessary information to render. """
return (super(ExternalDiscussionCourseTab, cls).validate(tab_dict, raise_error) and
key_checker(['link'])(tab_dict, raise_error))
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
if not super(ExternalDiscussionCourseTab, cls).is_enabled(course, user=user):
return False
return course.discussion_link
class ExternalLinkCourseTab(LinkTab):
"""
A course tab containing an external link.
"""
type = 'external_link'
priority = None
is_default = False # An external link tab is not added to a course by default
allow_multiple = True
@classmethod
def validate(cls, tab_dict, raise_error=True):
""" Validate that the tab_dict for this course tab has the necessary information to render. """
return (super(ExternalLinkCourseTab, cls).validate(tab_dict, raise_error) and
key_checker(['link', 'name'])(tab_dict, raise_error))
class SingleTextbookTab(CourseTab):
"""
A tab representing a single textbook. It is created temporarily when enumerating all textbooks within a
Textbook collection tab. It should not be serialized or persisted.
"""
type = 'single_textbook'
is_movable = False
is_collection_item = True
priority = None
def __init__(self, name, tab_id, view_name, index):
def link_func(course, reverse_func, index=index):
""" Constructs a link for textbooks from a view name, a course, and an index. """
return reverse_func(view_name, args=[unicode(course.id), index])
tab_dict = dict()
tab_dict['name'] = name
tab_dict['tab_id'] = tab_id
tab_dict['link_func'] = link_func
super(SingleTextbookTab, self).__init__(tab_dict)
def to_json(self):
raise NotImplementedError('SingleTextbookTab should not be serialized.')
def get_course_tab_list(request, course):
"""
Retrieves the course tab list from xmodule.tabs and manipulates the set as necessary
"""
user = request.user
is_user_enrolled = user.is_authenticated() and CourseEnrollment.is_enrolled(user, course.id)
xmodule_tab_list = CourseTabList.iterate_displayable(
course,
user=user,
settings=settings,
is_user_authenticated=user.is_authenticated(),
is_user_staff=has_access(user, 'staff', course, course.id),
is_user_enrolled=is_user_enrolled,
is_user_sneakpeek=not UserProfile.has_registered(user),
)
# Now that we've loaded the tabs for this course, perform the Entrance Exam work.
# If the user has to take an entrance exam, we'll need to hide away all but the
# "Courseware" tab. The tab is then renamed as "Entrance Exam".
course_tab_list = []
for tab in xmodule_tab_list:
if user_must_complete_entrance_exam(request, user, course):
# Hide all of the tabs except for 'Courseware'
# Rename 'Courseware' tab to 'Entrance Exam'
if tab.type is not 'courseware':
continue
tab.name = _("Entrance Exam")
course_tab_list.append(tab)
# Add in any dynamic tabs, i.e. those that are not persisted
course_tab_list += _get_dynamic_tabs(course, user)
return course_tab_list
def _get_dynamic_tabs(course, user):
"""
Returns the dynamic tab types for the current user.
Note: dynamic tabs are those that are not persisted in the course, but are
instead added dynamically based upon the user's role.
"""
dynamic_tabs = list()
for tab_type in CourseTabPluginManager.get_tab_types():
if getattr(tab_type, "is_dynamic", False):
tab = tab_type(dict())
if tab.is_enabled(course, user=user):
dynamic_tabs.append(tab)
dynamic_tabs.sort(key=lambda dynamic_tab: dynamic_tab.name)
return dynamic_tabs
| is_enabled | identifier_name |
tabs.py | """
This module is essentially a broker to xmodule/tabs.py -- it was originally introduced to
perform some LMS-specific tab display gymnastics for the Entrance Exams feature
"""
from django.conf import settings
from django.utils.translation import ugettext as _, ugettext_noop
from courseware.access import has_access
from courseware.entrance_exams import user_must_complete_entrance_exam
from student.models import UserProfile
from openedx.core.lib.course_tabs import CourseTabPluginManager
from student.models import CourseEnrollment
from xmodule.tabs import CourseTab, CourseTabList, key_checker
from xmodule.tabs import StaticTab
class EnrolledTab(CourseTab):
"""
A base class for any view types that require a user to be enrolled.
"""
@classmethod
def is_enabled(cls, course, user=None):
if user is None:
return True
return bool(CourseEnrollment.is_enrolled(user, course.id) or has_access(user, 'staff', course, course.id))
class CoursewareTab(EnrolledTab):
"""
The main courseware view.
"""
type = 'courseware'
title = ugettext_noop('Courseware')
priority = 10
view_name = 'courseware'
is_movable = False
is_default = False
is_visible_to_sneak_peek = True
class CourseInfoTab(CourseTab):
"""
The course info view.
"""
type = 'course_info'
title = ugettext_noop('Course Info')
priority = 20
view_name = 'info'
tab_id = 'info'
is_movable = False
is_default = False
is_visible_to_sneak_peek = True
@classmethod
def is_enabled(cls, course, user=None):
return True
class SyllabusTab(EnrolledTab):
"""
A tab for the course syllabus.
"""
type = 'syllabus'
title = ugettext_noop('Syllabus')
priority = 30
view_name = 'syllabus'
allow_multiple = True
is_default = False
is_visible_to_sneak_peek = True
@classmethod
def is_enabled(cls, course, user=None):
if not super(SyllabusTab, cls).is_enabled(course, user=user):
return False
return getattr(course, 'syllabus_present', False)
class ProgressTab(EnrolledTab):
"""
The course progress view.
"""
type = 'progress'
title = ugettext_noop('Progress')
priority = 40
view_name = 'progress'
is_hideable = True
is_default = False
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
if not super(ProgressTab, cls).is_enabled(course, user=user):
return False
return not course.hide_progress_tab
class TextbookTabsBase(CourseTab):
"""
Abstract class for textbook collection tabs classes.
"""
# Translators: 'Textbooks' refers to the tab in the course that leads to the course' textbooks
title = ugettext_noop("Textbooks")
is_collection = True
is_default = False
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
return user is None or user.is_authenticated()
@classmethod
def items(cls, course):
"""
A generator for iterating through all the SingleTextbookTab book objects associated with this
collection of textbooks.
"""
raise NotImplementedError()
class TextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all textbook tabs.
"""
type = 'textbooks'
priority = None
view_name = 'book'
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
parent_is_enabled = super(TextbookTabs, cls).is_enabled(course, user)
return settings.FEATURES.get('ENABLE_TEXTBOOK') and parent_is_enabled
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.textbooks):
yield SingleTextbookTab(
name=textbook.title,
tab_id='textbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class PDFTextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all PDF textbook tabs.
"""
type = 'pdf_textbooks'
priority = None
view_name = 'pdf_book'
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.pdf_textbooks):
yield SingleTextbookTab(
name=textbook['tab_title'],
tab_id='pdftextbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class HtmlTextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all Html textbook tabs.
"""
type = 'html_textbooks'
priority = None
view_name = 'html_book'
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.html_textbooks):
yield SingleTextbookTab(
name=textbook['tab_title'],
tab_id='htmltextbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class LinkTab(CourseTab):
"""
Abstract class for tabs that contain external links.
"""
link_value = ''
def __init__(self, tab_dict=None, name=None, link=None):
self.link_value = tab_dict['link'] if tab_dict else link
def link_value_func(_course, _reverse_func):
""" Returns the link_value as the link. """
return self.link_value
self.type = tab_dict['type']
tab_dict['link_func'] = link_value_func
super(LinkTab, self).__init__(tab_dict)
def __getitem__(self, key):
if key == 'link':
return self.link_value
else:
return super(LinkTab, self).__getitem__(key)
def __setitem__(self, key, value):
if key == 'link':
self.link_value = value
else:
super(LinkTab, self).__setitem__(key, value)
def to_json(self):
to_json_val = super(LinkTab, self).to_json()
to_json_val.update({'link': self.link_value})
return to_json_val
def __eq__(self, other):
if not super(LinkTab, self).__eq__(other):
return False
return self.link_value == other.get('link')
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
return True
class ExternalDiscussionCourseTab(LinkTab):
"""
A course tab that links to an external discussion service.
"""
type = 'external_discussion'
# Translators: 'Discussion' refers to the tab in the courseware that leads to the discussion forums
title = ugettext_noop('Discussion')
priority = None
is_default = False
@classmethod
def validate(cls, tab_dict, raise_error=True):
""" Validate that the tab_dict for this course tab has the necessary information to render. """
return (super(ExternalDiscussionCourseTab, cls).validate(tab_dict, raise_error) and
key_checker(['link'])(tab_dict, raise_error))
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
if not super(ExternalDiscussionCourseTab, cls).is_enabled(course, user=user):
return False
return course.discussion_link
class ExternalLinkCourseTab(LinkTab):
"""
A course tab containing an external link.
"""
type = 'external_link'
priority = None
is_default = False # An external link tab is not added to a course by default
allow_multiple = True
@classmethod
def validate(cls, tab_dict, raise_error=True):
""" Validate that the tab_dict for this course tab has the necessary information to render. """
return (super(ExternalLinkCourseTab, cls).validate(tab_dict, raise_error) and
key_checker(['link', 'name'])(tab_dict, raise_error))
class SingleTextbookTab(CourseTab):
"""
A tab representing a single textbook. It is created temporarily when enumerating all textbooks within a
Textbook collection tab. It should not be serialized or persisted.
"""
type = 'single_textbook'
is_movable = False
is_collection_item = True
priority = None
def __init__(self, name, tab_id, view_name, index):
def link_func(course, reverse_func, index=index):
""" Constructs a link for textbooks from a view name, a course, and an index. """
return reverse_func(view_name, args=[unicode(course.id), index])
tab_dict = dict()
tab_dict['name'] = name
tab_dict['tab_id'] = tab_id
tab_dict['link_func'] = link_func
super(SingleTextbookTab, self).__init__(tab_dict)
def to_json(self):
raise NotImplementedError('SingleTextbookTab should not be serialized.')
def get_course_tab_list(request, course):
"""
Retrieves the course tab list from xmodule.tabs and manipulates the set as necessary
"""
user = request.user
is_user_enrolled = user.is_authenticated() and CourseEnrollment.is_enrolled(user, course.id)
xmodule_tab_list = CourseTabList.iterate_displayable(
course,
user=user,
settings=settings,
is_user_authenticated=user.is_authenticated(),
is_user_staff=has_access(user, 'staff', course, course.id),
is_user_enrolled=is_user_enrolled,
is_user_sneakpeek=not UserProfile.has_registered(user),
)
# Now that we've loaded the tabs for this course, perform the Entrance Exam work.
# If the user has to take an entrance exam, we'll need to hide away all but the
# "Courseware" tab. The tab is then renamed as "Entrance Exam".
course_tab_list = []
for tab in xmodule_tab_list: | # Rename 'Courseware' tab to 'Entrance Exam'
if tab.type is not 'courseware':
continue
tab.name = _("Entrance Exam")
course_tab_list.append(tab)
# Add in any dynamic tabs, i.e. those that are not persisted
course_tab_list += _get_dynamic_tabs(course, user)
return course_tab_list
def _get_dynamic_tabs(course, user):
"""
Returns the dynamic tab types for the current user.
Note: dynamic tabs are those that are not persisted in the course, but are
instead added dynamically based upon the user's role.
"""
dynamic_tabs = list()
for tab_type in CourseTabPluginManager.get_tab_types():
if getattr(tab_type, "is_dynamic", False):
tab = tab_type(dict())
if tab.is_enabled(course, user=user):
dynamic_tabs.append(tab)
dynamic_tabs.sort(key=lambda dynamic_tab: dynamic_tab.name)
return dynamic_tabs | if user_must_complete_entrance_exam(request, user, course):
# Hide all of the tabs except for 'Courseware' | random_line_split |
ModelSpec.js | var Model = require('../lib/Model');
var Properties = require('../lib/Properties');
describe('Model', function() {
it('should assign properties to the object', function () {
var aModel = new Model('test', {
a: Properties.string
}, {a: 'test'});
expect(aModel.a).toEqual('test');
});
it('should rename properties with specified attribute', function () {
var aModel = new Model('test', {
a: Properties.string('b')
}, {b: 'test'});
expect(aModel.a).toEqual('test');
expect(aModel.b).toBeUndefined();
});
it('should have a method toString that returns the model name', function () {
var aModel = new Model('test', {
a: Properties.string
}, {a: 'test'});
expect(aModel.toString()).toEqual('[test]');
});
it('should have a method toJSON that returns a JSON representation of model', function () {
var aModel = new Model('test', {
a: Properties.string
}, {a: 'test'});
expect(aModel.toJSON()).toEqual({a: 'test'});
});
| }); | random_line_split | |
get-with-headers.py | #!/usr/bin/env python
"""This does HTTP GET requests given a host:port and path and returns
a subset of the headers plus the body of the result."""
from __future__ import absolute_import, print_function
import json
import os
import sys
from edenscm.mercurial import util
httplib = util.httplib
try:
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
except ImportError:
pass
twice = False
if "--twice" in sys.argv:
sys.argv.remove("--twice")
twice = True
headeronly = False
if "--headeronly" in sys.argv:
sys.argv.remove("--headeronly")
headeronly = True
formatjson = False
if "--json" in sys.argv:
sys.argv.remove("--json")
formatjson = True
hgproto = None
if "--hgproto" in sys.argv:
idx = sys.argv.index("--hgproto")
hgproto = sys.argv[idx + 1]
sys.argv.pop(idx)
sys.argv.pop(idx)
tag = None
def | (host, path, show):
assert not path.startswith("/"), path
global tag
headers = {}
if tag:
headers["If-None-Match"] = tag
if hgproto:
headers["X-HgProto-1"] = hgproto
conn = httplib.HTTPConnection(host)
conn.request("GET", "/" + path, None, headers)
response = conn.getresponse()
print(response.status, response.reason)
if show[:1] == ["-"]:
show = sorted(h for h, v in response.getheaders() if h.lower() not in show)
for h in [h.lower() for h in show]:
if response.getheader(h, None) is not None:
print("%s: %s" % (h, response.getheader(h)))
if not headeronly:
print()
data = response.read()
# Pretty print JSON. This also has the beneficial side-effect
# of verifying emitted JSON is well-formed.
if formatjson:
# json.dumps() will print trailing newlines. Eliminate them
# to make tests easier to write.
data = json.loads(data)
lines = json.dumps(data, sort_keys=True, indent=2).splitlines()
for line in lines:
print(line.rstrip())
else:
sys.stdout.write(data)
if twice and response.getheader("ETag", None):
tag = response.getheader("ETag")
return response.status
status = request(sys.argv[1], sys.argv[2], sys.argv[3:])
if twice:
status = request(sys.argv[1], sys.argv[2], sys.argv[3:])
if 200 <= status <= 305:
sys.exit(0)
sys.exit(1)
| request | identifier_name |
get-with-headers.py | #!/usr/bin/env python
"""This does HTTP GET requests given a host:port and path and returns
a subset of the headers plus the body of the result."""
from __future__ import absolute_import, print_function
import json
import os
import sys
from edenscm.mercurial import util
httplib = util.httplib
try:
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
except ImportError:
pass
twice = False
if "--twice" in sys.argv:
sys.argv.remove("--twice")
twice = True
headeronly = False
if "--headeronly" in sys.argv:
sys.argv.remove("--headeronly")
headeronly = True
formatjson = False
if "--json" in sys.argv:
sys.argv.remove("--json")
formatjson = True
hgproto = None
if "--hgproto" in sys.argv:
idx = sys.argv.index("--hgproto")
hgproto = sys.argv[idx + 1]
sys.argv.pop(idx)
sys.argv.pop(idx)
tag = None
def request(host, path, show):
|
status = request(sys.argv[1], sys.argv[2], sys.argv[3:])
if twice:
status = request(sys.argv[1], sys.argv[2], sys.argv[3:])
if 200 <= status <= 305:
sys.exit(0)
sys.exit(1)
| assert not path.startswith("/"), path
global tag
headers = {}
if tag:
headers["If-None-Match"] = tag
if hgproto:
headers["X-HgProto-1"] = hgproto
conn = httplib.HTTPConnection(host)
conn.request("GET", "/" + path, None, headers)
response = conn.getresponse()
print(response.status, response.reason)
if show[:1] == ["-"]:
show = sorted(h for h, v in response.getheaders() if h.lower() not in show)
for h in [h.lower() for h in show]:
if response.getheader(h, None) is not None:
print("%s: %s" % (h, response.getheader(h)))
if not headeronly:
print()
data = response.read()
# Pretty print JSON. This also has the beneficial side-effect
# of verifying emitted JSON is well-formed.
if formatjson:
# json.dumps() will print trailing newlines. Eliminate them
# to make tests easier to write.
data = json.loads(data)
lines = json.dumps(data, sort_keys=True, indent=2).splitlines()
for line in lines:
print(line.rstrip())
else:
sys.stdout.write(data)
if twice and response.getheader("ETag", None):
tag = response.getheader("ETag")
return response.status | identifier_body |
get-with-headers.py | #!/usr/bin/env python
"""This does HTTP GET requests given a host:port and path and returns
a subset of the headers plus the body of the result."""
from __future__ import absolute_import, print_function
import json
import os
import sys
from edenscm.mercurial import util
httplib = util.httplib
try:
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
except ImportError:
pass
twice = False
if "--twice" in sys.argv:
sys.argv.remove("--twice") | if "--headeronly" in sys.argv:
sys.argv.remove("--headeronly")
headeronly = True
formatjson = False
if "--json" in sys.argv:
sys.argv.remove("--json")
formatjson = True
hgproto = None
if "--hgproto" in sys.argv:
idx = sys.argv.index("--hgproto")
hgproto = sys.argv[idx + 1]
sys.argv.pop(idx)
sys.argv.pop(idx)
tag = None
def request(host, path, show):
assert not path.startswith("/"), path
global tag
headers = {}
if tag:
headers["If-None-Match"] = tag
if hgproto:
headers["X-HgProto-1"] = hgproto
conn = httplib.HTTPConnection(host)
conn.request("GET", "/" + path, None, headers)
response = conn.getresponse()
print(response.status, response.reason)
if show[:1] == ["-"]:
show = sorted(h for h, v in response.getheaders() if h.lower() not in show)
for h in [h.lower() for h in show]:
if response.getheader(h, None) is not None:
print("%s: %s" % (h, response.getheader(h)))
if not headeronly:
print()
data = response.read()
# Pretty print JSON. This also has the beneficial side-effect
# of verifying emitted JSON is well-formed.
if formatjson:
# json.dumps() will print trailing newlines. Eliminate them
# to make tests easier to write.
data = json.loads(data)
lines = json.dumps(data, sort_keys=True, indent=2).splitlines()
for line in lines:
print(line.rstrip())
else:
sys.stdout.write(data)
if twice and response.getheader("ETag", None):
tag = response.getheader("ETag")
return response.status
status = request(sys.argv[1], sys.argv[2], sys.argv[3:])
if twice:
status = request(sys.argv[1], sys.argv[2], sys.argv[3:])
if 200 <= status <= 305:
sys.exit(0)
sys.exit(1) | twice = True
headeronly = False | random_line_split |
get-with-headers.py | #!/usr/bin/env python
"""This does HTTP GET requests given a host:port and path and returns
a subset of the headers plus the body of the result."""
from __future__ import absolute_import, print_function
import json
import os
import sys
from edenscm.mercurial import util
httplib = util.httplib
try:
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
except ImportError:
pass
twice = False
if "--twice" in sys.argv:
sys.argv.remove("--twice")
twice = True
headeronly = False
if "--headeronly" in sys.argv:
sys.argv.remove("--headeronly")
headeronly = True
formatjson = False
if "--json" in sys.argv:
sys.argv.remove("--json")
formatjson = True
hgproto = None
if "--hgproto" in sys.argv:
idx = sys.argv.index("--hgproto")
hgproto = sys.argv[idx + 1]
sys.argv.pop(idx)
sys.argv.pop(idx)
tag = None
def request(host, path, show):
assert not path.startswith("/"), path
global tag
headers = {}
if tag:
headers["If-None-Match"] = tag
if hgproto:
headers["X-HgProto-1"] = hgproto
conn = httplib.HTTPConnection(host)
conn.request("GET", "/" + path, None, headers)
response = conn.getresponse()
print(response.status, response.reason)
if show[:1] == ["-"]:
show = sorted(h for h, v in response.getheaders() if h.lower() not in show)
for h in [h.lower() for h in show]:
if response.getheader(h, None) is not None:
print("%s: %s" % (h, response.getheader(h)))
if not headeronly:
print()
data = response.read()
# Pretty print JSON. This also has the beneficial side-effect
# of verifying emitted JSON is well-formed.
if formatjson:
# json.dumps() will print trailing newlines. Eliminate them
# to make tests easier to write.
|
else:
sys.stdout.write(data)
if twice and response.getheader("ETag", None):
tag = response.getheader("ETag")
return response.status
status = request(sys.argv[1], sys.argv[2], sys.argv[3:])
if twice:
status = request(sys.argv[1], sys.argv[2], sys.argv[3:])
if 200 <= status <= 305:
sys.exit(0)
sys.exit(1)
| data = json.loads(data)
lines = json.dumps(data, sort_keys=True, indent=2).splitlines()
for line in lines:
print(line.rstrip()) | conditional_block |
task_7_6.py | #Задача 7. Вариант 6
#компьютер загадывает название одного из семи городов России, имеющих действующий метрополитен, а игрок должен его угадать.
#Борщёва В.О
#28.03.2016
import random
subways=('Москва','Санкт-Петербург','Нижний Новгород','Новосибирск','Самара','Екатеринбург','Казань')
subway=random.randint(0,6)
rand=subways[subway]
ball=100
print('я загадал один город,имеющий дейстующий метрополитен')
#print(rand)
otvet=0
while (otvet)!=(rand):
otvet=input("Введите один из городов:")
if(otvet)!=(rand):
print("Вы не угадали. Попробуйте снова.")
ball/=2
elif (otvet)==(rand):
print("Ваш счет:"+ str(ball))
break
input(" Нажмите Enter для выхода")
| conditional_block | ||
task_7_6.py | #Задача 7. Вариант 6
#компьютер загадывает название одного из семи городов России, имеющих действующий метрополитен, а игрок должен его угадать.
#Борщёва В.О
#28.03.2016
import random
subways=('Москва','Санкт-Петербург','Нижний Новгород','Новосибирск','Самара','Екатеринбург','Казань')
subway=random.randint(0,6)
rand=subways[subway]
ball=100
print('я загадал один город,имеющий дейстующий метрополитен')
#print(rand)
otvet=0
while (otvet)!=(rand):
otvet=input("Введите один из городов:")
if(otvet)!=(rand):
print("Вы не угадали. Попробуйте снова.")
| elif (otvet)==(rand):
print("Ваш счет:"+ str(ball))
break
input(" Нажмите Enter для выхода") | ball/=2
| random_line_split |
config.rs | extern crate serde_json;
use log::LogLevelFilter;
use logger::MetricsLoggerFactory;
use logger::MetricsLogger;
use self::serde_json::Value;
use std::error::Error;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::collections::BTreeMap;
// This is the config file that reads all the json from metricsconfig.json. We can initially use
// an environment variable to locate this file or can be passed in.
// The worker thread and the app thread will both read from this file.
#[allow(non_upper_case_globals)]
const logger: fn() -> &'static MetricsLogger = MetricsLoggerFactory::get_logger;
/// Holds the parsed contents of a JSON configuration file (e.g.
/// metricsconfig.json). `init` must be called to load a file before any
/// getter is used; the getters panic when no data has been parsed yet.
pub struct Config {
    // `None` until `init`/`parse_json` has stored the top-level JSON object.
    parsed_json: Option<BTreeMap<String, Value>>,
}
impl Config {
pub fn new() -> Config {
Config { parsed_json: None }
}
pub fn create_and_write_json(&mut self, file_name: &str, json: &str) {
logger().log(LogLevelFilter::Debug,
format!("file: {}", file_name).as_str());
let f = File::create(file_name);
match f {
Ok(mut t) => {
let _ = t.write(json.as_bytes());
}
Err(e) => panic!("cannot open file: {}", e),
};
}
pub fn init(&mut self, file_name: &str) -> bool {
// TODO: Need to make this look at env variable or take a path to the file.
logger().log(LogLevelFilter::Debug,
format!("config file: {}", file_name).as_str());
let path = Path::new(file_name);
let display = path.display();
// Open the path in read-only mode.
let mut file = match File::open(&path) {
Err(why) => {
logger().log(LogLevelFilter::Error,
format!("couldn't open {}: {}", display, Error::description(&why))
.as_str());
return false;
}
Ok(file) => file,
};
// Read the file contents into a string, returns `io::Result<usize>`
let mut s = String::new();
match file.read_to_string(&mut s) {
Err(why) => {
logger().log(LogLevelFilter::Error, format!("Error: {}", why).as_str());
return false;
}
Ok(_) => {
logger().log(LogLevelFilter::Debug,
format!("file contains: {}", s).as_str())
}
}
self.parse_json(s);
true
}
fn parse_json(&mut self, json_string: String) {
// It's ok to unwrap here because if something is wrong here, we want to
// know and expose the bug.
let data: Value = serde_json::from_str(&json_string).unwrap();
self.parsed_json = Some(data.as_object().unwrap().clone());
}
pub fn get(&mut self, key: &str) -> Option<Value> {
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
if val == None {
None
} else {
Some(val.unwrap().clone())
}
} else {
panic!("Data not parsed");
}
}
pub fn get_string(&mut self, key: &str) -> String {
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
match val {
Some(v) => {
let nv = v.clone();
match nv {
Value::String(nv) => nv.clone(),
_ => panic!("Expected a String Value"),
}
},
None => panic!("Value not found"),
}
} else {
panic!("Data not parsed");
}
}
pub fn get_u64(&mut self, key: &str) -> u64 {
println!("Getting u64 value for {}", key);
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
match val {
Some(v) => {
let nv = v.clone();
match nv {
Value::U64(nv) => nv.clone(),
_ => panic!("Expected a u64"),
}
},
None => panic!("Value not found"),
}
} else {
panic!("Data not parsed");
}
}
}
#[cfg(not(feature = "integration"))]
#[cfg(test)]
describe! config_file_found {
    // Round-trip: write a sample config to disk, then init() should find,
    // open and parse it successfully.
    it "should open the config file when it exists" {
        use std::fs;
        let mut cfg = Config::new();
        // Create sample config file
        let file = "test.json";
        cfg.create_and_write_json(file, "{\"cid\": \"123456\"}");
        let found = cfg.init(file);
        // No longer need the sample config file, delete it
        match fs::remove_file(file) {
            Ok(_) => println!("deleted file {}", file),
            Err(e) => println!("Error deleting {}: {}", file, e)
        }
        assert_eq!(found, true);
    }

    // init() reports failure via its bool return (it must not panic) when
    // the file does not exist.
    it "should return false if config file not found" {
        let mut cfg = Config::new();
        let found = cfg.init("nosuchfile.json");
        assert_eq!(found, false);
    }
}
#[cfg(not(feature = "integration"))]
#[cfg(test)]
describe! parsing_file {
before_each {
// If the import is removed, it will not compile, but it gives a warning
// unless you have the following line. Most likely a compiler bug.
#[allow(unused_imports)]
use config::serde_json::Value;
let s = r#"{ "sendInterval": 10,
"saveInterval": 2,
"startTime": 0,
"savePath": "testSavePath",
"logPath": "/Volumes/development/metrics_controller/log" }"#.to_string();
let mut cfg = Config::new();
cfg.parse_json(s);
}
it "get_u64 should return a u64 for an existing key" {
let start_time = cfg.get_u64("startTime");
assert_eq!(start_time, 0);
} |
failing "get_u64 should fail for a missing key" {
cfg.get_u64("start22Time");
}
it "get_string should return a string for an existing key" {
let save_path: String = cfg.get_string("savePath").to_string();
assert_eq!(save_path, "testSavePath");
}
failing "get_string should fail for a missing key" {
cfg.get_string("save22Path").to_string();
}
it "get should return a value for an existing key" {
match cfg.get("sendInterval") {
Some(v) => assert_eq!(v, Value::U64(10)),
None => {
assert!(false);
},
}
}
it "get should return None for a missing key" {
let val: Option<Value> = cfg.get("send22Interval");
match val {
Some(_) => assert!(false),
None => {
assert!(true);
},
}
}
} | random_line_split | |
config.rs | extern crate serde_json;
use log::LogLevelFilter;
use logger::MetricsLoggerFactory;
use logger::MetricsLogger;
use self::serde_json::Value;
use std::error::Error;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::collections::BTreeMap;
// This is the config file that reads all the json from metricsconfig.json. We can initially use
// an environment variable to locate this file or can be passed in.
// The worker thread and the app thread will both read from this file.
#[allow(non_upper_case_globals)]
const logger: fn() -> &'static MetricsLogger = MetricsLoggerFactory::get_logger;
pub struct Config {
parsed_json: Option<BTreeMap<String, Value>>,
}
impl Config {
pub fn new() -> Config {
Config { parsed_json: None }
}
pub fn create_and_write_json(&mut self, file_name: &str, json: &str) {
logger().log(LogLevelFilter::Debug,
format!("file: {}", file_name).as_str());
let f = File::create(file_name);
match f {
Ok(mut t) => {
let _ = t.write(json.as_bytes());
}
Err(e) => panic!("cannot open file: {}", e),
};
}
pub fn init(&mut self, file_name: &str) -> bool |
fn parse_json(&mut self, json_string: String) {
// It's ok to unwrap here because if something is wrong here, we want to
// know and expose the bug.
let data: Value = serde_json::from_str(&json_string).unwrap();
self.parsed_json = Some(data.as_object().unwrap().clone());
}
pub fn get(&mut self, key: &str) -> Option<Value> {
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
if val == None {
None
} else {
Some(val.unwrap().clone())
}
} else {
panic!("Data not parsed");
}
}
pub fn get_string(&mut self, key: &str) -> String {
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
match val {
Some(v) => {
let nv = v.clone();
match nv {
Value::String(nv) => nv.clone(),
_ => panic!("Expected a String Value"),
}
},
None => panic!("Value not found"),
}
} else {
panic!("Data not parsed");
}
}
pub fn get_u64(&mut self, key: &str) -> u64 {
println!("Getting u64 value for {}", key);
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
match val {
Some(v) => {
let nv = v.clone();
match nv {
Value::U64(nv) => nv.clone(),
_ => panic!("Expected a u64"),
}
},
None => panic!("Value not found"),
}
} else {
panic!("Data not parsed");
}
}
}
#[cfg(not(feature = "integration"))]
#[cfg(test)]
describe! config_file_found {
it "should open the config file when it exists" {
use std::fs;
let mut cfg = Config::new();
// Create sample config file
let file = "test.json";
cfg.create_and_write_json(file, "{\"cid\": \"123456\"}");
let found = cfg.init(file);
// No longer need the sample config file, delete it
match fs::remove_file(file) {
Ok(_) => println!("deleted file {}", file),
Err(e) => println!("Error deleting {}: {}", file, e)
}
assert_eq!(found, true);
}
it "should return false if config file not found" {
let mut cfg = Config::new();
let found = cfg.init("nosuchfile.json");
assert_eq!(found, false);
}
}
#[cfg(not(feature = "integration"))]
#[cfg(test)]
describe! parsing_file {
before_each {
// If the import is removed, it will not compile, but it gives a warning
// unless you have the following line. Most likely a compiler bug.
#[allow(unused_imports)]
use config::serde_json::Value;
let s = r#"{ "sendInterval": 10,
"saveInterval": 2,
"startTime": 0,
"savePath": "testSavePath",
"logPath": "/Volumes/development/metrics_controller/log" }"#.to_string();
let mut cfg = Config::new();
cfg.parse_json(s);
}
it "get_u64 should return a u64 for an existing key" {
let start_time = cfg.get_u64("startTime");
assert_eq!(start_time, 0);
}
failing "get_u64 should fail for a missing key" {
cfg.get_u64("start22Time");
}
it "get_string should return a string for an existing key" {
let save_path: String = cfg.get_string("savePath").to_string();
assert_eq!(save_path, "testSavePath");
}
failing "get_string should fail for a missing key" {
cfg.get_string("save22Path").to_string();
}
it "get should return a value for an existing key" {
match cfg.get("sendInterval") {
Some(v) => assert_eq!(v, Value::U64(10)),
None => {
assert!(false);
},
}
}
it "get should return None for a missing key" {
let val: Option<Value> = cfg.get("send22Interval");
match val {
Some(_) => assert!(false),
None => {
assert!(true);
},
}
}
}
| {
// TODO: Need to make this look at env variable or take a path to the file.
logger().log(LogLevelFilter::Debug,
format!("config file: {}", file_name).as_str());
let path = Path::new(file_name);
let display = path.display();
// Open the path in read-only mode.
let mut file = match File::open(&path) {
Err(why) => {
logger().log(LogLevelFilter::Error,
format!("couldn't open {}: {}", display, Error::description(&why))
.as_str());
return false;
}
Ok(file) => file,
};
// Read the file contents into a string, returns `io::Result<usize>`
let mut s = String::new();
match file.read_to_string(&mut s) {
Err(why) => {
logger().log(LogLevelFilter::Error, format!("Error: {}", why).as_str());
return false;
}
Ok(_) => {
logger().log(LogLevelFilter::Debug,
format!("file contains: {}", s).as_str())
}
}
self.parse_json(s);
true
} | identifier_body |
config.rs | extern crate serde_json;
use log::LogLevelFilter;
use logger::MetricsLoggerFactory;
use logger::MetricsLogger;
use self::serde_json::Value;
use std::error::Error;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::collections::BTreeMap;
// This is the config file that reads all the json from metricsconfig.json. We can initially use
// an environment variable to locate this file or can be passed in.
// The worker thread and the app thread will both read from this file.
#[allow(non_upper_case_globals)]
const logger: fn() -> &'static MetricsLogger = MetricsLoggerFactory::get_logger;
pub struct Config {
parsed_json: Option<BTreeMap<String, Value>>,
}
impl Config {
pub fn new() -> Config {
Config { parsed_json: None }
}
pub fn create_and_write_json(&mut self, file_name: &str, json: &str) {
logger().log(LogLevelFilter::Debug,
format!("file: {}", file_name).as_str());
let f = File::create(file_name);
match f {
Ok(mut t) => {
let _ = t.write(json.as_bytes());
}
Err(e) => panic!("cannot open file: {}", e),
};
}
pub fn init(&mut self, file_name: &str) -> bool {
// TODO: Need to make this look at env variable or take a path to the file.
logger().log(LogLevelFilter::Debug,
format!("config file: {}", file_name).as_str());
let path = Path::new(file_name);
let display = path.display();
// Open the path in read-only mode.
let mut file = match File::open(&path) {
Err(why) => {
logger().log(LogLevelFilter::Error,
format!("couldn't open {}: {}", display, Error::description(&why))
.as_str());
return false;
}
Ok(file) => file,
};
// Read the file contents into a string, returns `io::Result<usize>`
let mut s = String::new();
match file.read_to_string(&mut s) {
Err(why) => {
logger().log(LogLevelFilter::Error, format!("Error: {}", why).as_str());
return false;
}
Ok(_) => {
logger().log(LogLevelFilter::Debug,
format!("file contains: {}", s).as_str())
}
}
self.parse_json(s);
true
}
fn parse_json(&mut self, json_string: String) {
// It's ok to unwrap here because if something is wrong here, we want to
// know and expose the bug.
let data: Value = serde_json::from_str(&json_string).unwrap();
self.parsed_json = Some(data.as_object().unwrap().clone());
}
pub fn get(&mut self, key: &str) -> Option<Value> {
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
if val == None {
None
} else {
Some(val.unwrap().clone())
}
} else {
panic!("Data not parsed");
}
}
pub fn | (&mut self, key: &str) -> String {
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
match val {
Some(v) => {
let nv = v.clone();
match nv {
Value::String(nv) => nv.clone(),
_ => panic!("Expected a String Value"),
}
},
None => panic!("Value not found"),
}
} else {
panic!("Data not parsed");
}
}
pub fn get_u64(&mut self, key: &str) -> u64 {
println!("Getting u64 value for {}", key);
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
match val {
Some(v) => {
let nv = v.clone();
match nv {
Value::U64(nv) => nv.clone(),
_ => panic!("Expected a u64"),
}
},
None => panic!("Value not found"),
}
} else {
panic!("Data not parsed");
}
}
}
#[cfg(not(feature = "integration"))]
#[cfg(test)]
describe! config_file_found {
it "should open the config file when it exists" {
use std::fs;
let mut cfg = Config::new();
// Create sample config file
let file = "test.json";
cfg.create_and_write_json(file, "{\"cid\": \"123456\"}");
let found = cfg.init(file);
// No longer need the sample config file, delete it
match fs::remove_file(file) {
Ok(_) => println!("deleted file {}", file),
Err(e) => println!("Error deleting {}: {}", file, e)
}
assert_eq!(found, true);
}
it "should return false if config file not found" {
let mut cfg = Config::new();
let found = cfg.init("nosuchfile.json");
assert_eq!(found, false);
}
}
#[cfg(not(feature = "integration"))]
#[cfg(test)]
describe! parsing_file {
before_each {
// If the import is removed, it will not compile, but it gives a warning
// unless you have the following line. Most likely a compiler bug.
#[allow(unused_imports)]
use config::serde_json::Value;
let s = r#"{ "sendInterval": 10,
"saveInterval": 2,
"startTime": 0,
"savePath": "testSavePath",
"logPath": "/Volumes/development/metrics_controller/log" }"#.to_string();
let mut cfg = Config::new();
cfg.parse_json(s);
}
it "get_u64 should return a u64 for an existing key" {
let start_time = cfg.get_u64("startTime");
assert_eq!(start_time, 0);
}
failing "get_u64 should fail for a missing key" {
cfg.get_u64("start22Time");
}
it "get_string should return a string for an existing key" {
let save_path: String = cfg.get_string("savePath").to_string();
assert_eq!(save_path, "testSavePath");
}
failing "get_string should fail for a missing key" {
cfg.get_string("save22Path").to_string();
}
it "get should return a value for an existing key" {
match cfg.get("sendInterval") {
Some(v) => assert_eq!(v, Value::U64(10)),
None => {
assert!(false);
},
}
}
it "get should return None for a missing key" {
let val: Option<Value> = cfg.get("send22Interval");
match val {
Some(_) => assert!(false),
None => {
assert!(true);
},
}
}
}
| get_string | identifier_name |
config.rs | extern crate serde_json;
use log::LogLevelFilter;
use logger::MetricsLoggerFactory;
use logger::MetricsLogger;
use self::serde_json::Value;
use std::error::Error;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::collections::BTreeMap;
// This is the config file that reads all the json from metricsconfig.json. We can initially use
// an environment variable to locate this file or can be passed in.
// The worker thread and the app thread will both read from this file.
#[allow(non_upper_case_globals)]
const logger: fn() -> &'static MetricsLogger = MetricsLoggerFactory::get_logger;
pub struct Config {
parsed_json: Option<BTreeMap<String, Value>>,
}
impl Config {
pub fn new() -> Config {
Config { parsed_json: None }
}
pub fn create_and_write_json(&mut self, file_name: &str, json: &str) {
logger().log(LogLevelFilter::Debug,
format!("file: {}", file_name).as_str());
let f = File::create(file_name);
match f {
Ok(mut t) => |
Err(e) => panic!("cannot open file: {}", e),
};
}
pub fn init(&mut self, file_name: &str) -> bool {
// TODO: Need to make this look at env variable or take a path to the file.
logger().log(LogLevelFilter::Debug,
format!("config file: {}", file_name).as_str());
let path = Path::new(file_name);
let display = path.display();
// Open the path in read-only mode.
let mut file = match File::open(&path) {
Err(why) => {
logger().log(LogLevelFilter::Error,
format!("couldn't open {}: {}", display, Error::description(&why))
.as_str());
return false;
}
Ok(file) => file,
};
// Read the file contents into a string, returns `io::Result<usize>`
let mut s = String::new();
match file.read_to_string(&mut s) {
Err(why) => {
logger().log(LogLevelFilter::Error, format!("Error: {}", why).as_str());
return false;
}
Ok(_) => {
logger().log(LogLevelFilter::Debug,
format!("file contains: {}", s).as_str())
}
}
self.parse_json(s);
true
}
fn parse_json(&mut self, json_string: String) {
// It's ok to unwrap here because if something is wrong here, we want to
// know and expose the bug.
let data: Value = serde_json::from_str(&json_string).unwrap();
self.parsed_json = Some(data.as_object().unwrap().clone());
}
pub fn get(&mut self, key: &str) -> Option<Value> {
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
if val == None {
None
} else {
Some(val.unwrap().clone())
}
} else {
panic!("Data not parsed");
}
}
pub fn get_string(&mut self, key: &str) -> String {
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
match val {
Some(v) => {
let nv = v.clone();
match nv {
Value::String(nv) => nv.clone(),
_ => panic!("Expected a String Value"),
}
},
None => panic!("Value not found"),
}
} else {
panic!("Data not parsed");
}
}
pub fn get_u64(&mut self, key: &str) -> u64 {
println!("Getting u64 value for {}", key);
if let Some(ref mut parsed_json) = self.parsed_json {
let val = parsed_json.get(key);
match val {
Some(v) => {
let nv = v.clone();
match nv {
Value::U64(nv) => nv.clone(),
_ => panic!("Expected a u64"),
}
},
None => panic!("Value not found"),
}
} else {
panic!("Data not parsed");
}
}
}
#[cfg(not(feature = "integration"))]
#[cfg(test)]
describe! config_file_found {
it "should open the config file when it exists" {
use std::fs;
let mut cfg = Config::new();
// Create sample config file
let file = "test.json";
cfg.create_and_write_json(file, "{\"cid\": \"123456\"}");
let found = cfg.init(file);
// No longer need the sample config file, delete it
match fs::remove_file(file) {
Ok(_) => println!("deleted file {}", file),
Err(e) => println!("Error deleting {}: {}", file, e)
}
assert_eq!(found, true);
}
it "should return false if config file not found" {
let mut cfg = Config::new();
let found = cfg.init("nosuchfile.json");
assert_eq!(found, false);
}
}
#[cfg(not(feature = "integration"))]
#[cfg(test)]
describe! parsing_file {
before_each {
// If the import is removed, it will not compile, but it gives a warning
// unless you have the following line. Most likely a compiler bug.
#[allow(unused_imports)]
use config::serde_json::Value;
let s = r#"{ "sendInterval": 10,
"saveInterval": 2,
"startTime": 0,
"savePath": "testSavePath",
"logPath": "/Volumes/development/metrics_controller/log" }"#.to_string();
let mut cfg = Config::new();
cfg.parse_json(s);
}
it "get_u64 should return a u64 for an existing key" {
let start_time = cfg.get_u64("startTime");
assert_eq!(start_time, 0);
}
failing "get_u64 should fail for a missing key" {
cfg.get_u64("start22Time");
}
it "get_string should return a string for an existing key" {
let save_path: String = cfg.get_string("savePath").to_string();
assert_eq!(save_path, "testSavePath");
}
failing "get_string should fail for a missing key" {
cfg.get_string("save22Path").to_string();
}
it "get should return a value for an existing key" {
match cfg.get("sendInterval") {
Some(v) => assert_eq!(v, Value::U64(10)),
None => {
assert!(false);
},
}
}
it "get should return None for a missing key" {
let val: Option<Value> = cfg.get("send22Interval");
match val {
Some(_) => assert!(false),
None => {
assert!(true);
},
}
}
}
| {
let _ = t.write(json.as_bytes());
} | conditional_block |
__init__.py | # -*- coding: utf-8 -*-
#
##############################################################################
#
# Authors: Adrien Peiffer
# Copyright (c) 2014 Acsone SA/NV (http://www.acsone.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
| from . import account_companyweb_report_wizard
from . import partner_update_companyweb | random_line_split | |
unix.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Named pipes
This module contains the ability to communicate over named pipes with
synchronous I/O. On windows, this corresponds to talking over a Named Pipe,
while on Unix it corresponds to UNIX domain sockets.
These pipes are similar to TCP in the sense that you can have both a stream to a
server and a server itself. The server provided accepts other `UnixStream`
instances as clients.
*/
#![allow(missing_doc)]
use prelude::*;
use c_str::ToCStr;
use clone::Clone;
use io::pipe::PipeStream;
use io::{Listener, Acceptor, Reader, Writer, IoResult};
use kinds::Send;
use rt::rtio::{IoFactory, LocalIo, RtioUnixListener};
use rt::rtio::{RtioUnixAcceptor, RtioPipe};
/// A stream which communicates over a named pipe.
pub struct UnixStream {
    // Underlying runtime pipe; `read`/`write` delegate straight to it.
    obj: PipeStream,
}
impl UnixStream {
    /// Wraps an already-connected runtime pipe in a `UnixStream`.
    fn new(obj: ~RtioPipe:Send) -> UnixStream {
        UnixStream { obj: PipeStream::new(obj) }
    }

    /// Connect to a pipe named by `path`. This will attempt to open a
    /// connection to the underlying socket.
    ///
    /// The returned stream will be closed when the object falls out of scope.
    ///
    /// # Example
    ///
    /// ```rust
    /// # #![allow(unused_must_use)]
    /// use std::io::net::unix::UnixStream;
    ///
    /// let server = Path::new("path/to/my/socket");
    /// let mut stream = UnixStream::connect(&server);
    /// stream.write([1, 2, 3]);
    /// ```
    ///
    /// Any connection failure from the underlying runtime is returned as
    /// the `Err` variant of the `IoResult`.
    pub fn connect<P: ToCStr>(path: &P) -> IoResult<UnixStream> {
        // Ask the per-task I/O factory for a pipe connection and wrap it.
        LocalIo::maybe_raise(|io| {
            io.unix_connect(&path.to_c_str()).map(UnixStream::new)
        })
    }
}
impl Clone for UnixStream {
    /// Creates a second handle backed by a clone of the underlying pipe
    /// stream.
    fn clone(&self) -> UnixStream {
        let duplicate = self.obj.clone();
        UnixStream { obj: duplicate }
    }
}
impl Reader for UnixStream {
    /// Reads into `buf`, forwarding directly to the underlying pipe stream.
    fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
        self.obj.read(buf)
    }
}
impl Writer for UnixStream {
    /// Writes `buf`, forwarding directly to the underlying pipe stream.
    fn write(&mut self, buf: &[u8]) -> IoResult<()> {
        self.obj.write(buf)
    }
}
/// A value that can listen for incoming named pipe connection requests.
pub struct UnixListener {
    /// The internal, opaque runtime Unix listener.
    // Boxed runtime object; `listen()` consumes it to build a `UnixAcceptor`.
    obj: ~RtioUnixListener:Send,
}
impl UnixListener {
    /// Creates a new listener, ready to receive incoming connections on the
    /// specified socket. The server will be named by `path`.
    ///
    /// This listener will be closed when it falls out of scope.
    ///
    /// # Example
    ///
    /// ```
    /// # fn main() {}
    /// # fn foo() {
    /// # #![allow(unused_must_use)]
    /// use std::io::net::unix::UnixListener;
    /// use std::io::{Listener, Acceptor};
    ///
    /// let server = Path::new("/path/to/my/socket");
    /// let stream = UnixListener::bind(&server);
    /// for mut client in stream.listen().incoming() {
    ///     client.write([1, 2, 3, 4]);
    /// }
    /// # }
    /// ```
    ///
    /// Failures reported by the underlying runtime (e.g. a bad path or
    /// insufficient permissions) are returned as the `Err` variant.
    pub fn bind<P: ToCStr>(path: &P) -> IoResult<UnixListener> {
        // Delegate to the per-task I/O factory and wrap the raw listener.
        LocalIo::maybe_raise(|io| {
            io.unix_bind(&path.to_c_str()).map(|s| UnixListener { obj: s })
        })
    }
}
impl Listener<UnixStream, UnixAcceptor> for UnixListener {
    /// Starts listening, converting the runtime acceptor into a
    /// `UnixAcceptor` on success.
    fn listen(self) -> IoResult<UnixAcceptor> {
        match self.obj.listen() {
            Ok(raw) => Ok(UnixAcceptor { obj: raw }),
            Err(e) => Err(e),
        }
    }
}
/// A value that can accept named pipe connections, returned from `listen()`.
pub struct UnixAcceptor {
    /// The internal, opaque runtime Unix acceptor.
    // Each successful `accept()` call yields an independent `UnixStream`.
    obj: ~RtioUnixAcceptor:Send,
}
impl Acceptor<UnixStream> for UnixAcceptor {
    /// Accepts the next incoming connection, wrapping it in a `UnixStream`.
    fn accept(&mut self) -> IoResult<UnixStream> {
        match self.obj.accept() {
            Ok(pipe) => Ok(UnixStream::new(pipe)),
            Err(e) => Err(e),
        }
    }
}
#[cfg(test)]
mod tests {
use prelude::*;
use super::*;
use io::*;
use io::test::*;
pub fn smalltest(server: proc(UnixStream):Send, client: proc(UnixStream):Send) {
let path1 = next_test_unix();
let path2 = path1.clone();
let mut acceptor = UnixListener::bind(&path1).listen();
spawn(proc() {
match UnixStream::connect(&path2) {
Ok(c) => client(c),
Err(e) => fail!("failed connect: {}", e),
}
});
match acceptor.accept() {
Ok(c) => server(c),
Err(e) => fail!("failed accept: {}", e),
}
}
iotest!(fn bind_error() {
let path = "path/to/nowhere";
match UnixListener::bind(&path) {
Ok(..) => fail!(),
Err(e) => {
assert!(e.kind == PermissionDenied || e.kind == FileNotFound ||
e.kind == InvalidInput);
}
}
})
iotest!(fn connect_error() {
let path = if cfg!(windows) {
r"\\.\pipe\this_should_not_exist_ever"
} else {
"path/to/nowhere"
};
match UnixStream::connect(&path) {
Ok(..) => fail!(),
Err(e) => {
assert!(e.kind == FileNotFound || e.kind == OtherIoError);
}
}
})
iotest!(fn smoke() {
smalltest(proc(mut server) {
let mut buf = [0];
server.read(buf).unwrap();
assert!(buf[0] == 99);
}, proc(mut client) {
client.write([99]).unwrap();
})
})
iotest!(fn read_eof() {
smalltest(proc(mut server) {
let mut buf = [0];
assert!(server.read(buf).is_err());
assert!(server.read(buf).is_err());
}, proc(_client) {
// drop the client
})
} #[ignore(cfg(windows))]) // FIXME(#12516)
iotest!(fn write_begone() {
smalltest(proc(mut server) {
let buf = [0];
loop {
match server.write(buf) {
Ok(..) => {}
Err(e) => {
assert!(e.kind == BrokenPipe ||
e.kind == NotConnected ||
e.kind == ConnectionReset,
"unknown error {:?}", e);
break;
}
}
}
}, proc(_client) {
// drop the client
})
})
iotest!(fn accept_lots() {
let times = 10;
let path1 = next_test_unix();
let path2 = path1.clone();
let mut acceptor = match UnixListener::bind(&path1).listen() {
Ok(a) => a,
Err(e) => fail!("failed listen: {}", e),
};
spawn(proc() {
for _ in range(0, times) {
let mut stream = UnixStream::connect(&path2);
match stream.write([100]) {
Ok(..) => {}
Err(e) => fail!("failed write: {}", e)
}
}
});
for _ in range(0, times) {
let mut client = acceptor.accept();
let mut buf = [0];
match client.read(buf) {
Ok(..) => {}
Err(e) => fail!("failed read/accept: {}", e),
}
assert_eq!(buf[0], 100);
}
})
#[cfg(unix)]
iotest!(fn path_exists() {
let path = next_test_unix();
let _acceptor = UnixListener::bind(&path).listen();
assert!(path.exists());
})
iotest!(fn unix_clone_smoke() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
spawn(proc() {
let mut s = UnixStream::connect(&addr);
let mut buf = [0, 0];
debug!("client reading");
assert_eq!(s.read(buf), Ok(1));
assert_eq!(buf[0], 1);
debug!("client writing");
s.write([2]).unwrap();
debug!("client dropping");
});
let mut s1 = acceptor.accept().unwrap();
let s2 = s1.clone();
let (tx1, rx1) = channel();
let (tx2, rx2) = channel();
spawn(proc() {
let mut s2 = s2;
rx1.recv();
debug!("writer writing");
s2.write([1]).unwrap();
debug!("writer done");
tx2.send(());
});
tx1.send(());
let mut buf = [0, 0];
debug!("reader reading");
assert_eq!(s1.read(buf), Ok(1));
debug!("reader done");
rx2.recv();
})
iotest!(fn unix_clone_two_read() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
let (tx1, rx) = channel();
let tx2 = tx1.clone();
spawn(proc() {
let mut s = UnixStream::connect(&addr);
s.write([1]).unwrap();
rx.recv();
s.write([2]).unwrap();
rx.recv();
});
| let s2 = s1.clone();
let (done, rx) = channel();
spawn(proc() {
let mut s2 = s2;
let mut buf = [0, 0];
s2.read(buf).unwrap();
tx2.send(());
done.send(());
});
let mut buf = [0, 0];
s1.read(buf).unwrap();
tx1.send(());
rx.recv();
})
iotest!(fn unix_clone_two_write() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
spawn(proc() {
let mut s = UnixStream::connect(&addr);
let mut buf = [0, 1];
s.read(buf).unwrap();
s.read(buf).unwrap();
});
let mut s1 = acceptor.accept().unwrap();
let s2 = s1.clone();
let (tx, rx) = channel();
spawn(proc() {
let mut s2 = s2;
s2.write([1]).unwrap();
tx.send(());
});
s1.write([2]).unwrap();
rx.recv();
})
} | let mut s1 = acceptor.accept().unwrap(); | random_line_split |
unix.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Named pipes
This module contains the ability to communicate over named pipes with
synchronous I/O. On windows, this corresponds to talking over a Named Pipe,
while on Unix it corresponds to UNIX domain sockets.
These pipes are similar to TCP in the sense that you can have both a stream to a
server and a server itself. The server provided accepts other `UnixStream`
instances as clients.
*/
#![allow(missing_doc)]
use prelude::*;
use c_str::ToCStr;
use clone::Clone;
use io::pipe::PipeStream;
use io::{Listener, Acceptor, Reader, Writer, IoResult};
use kinds::Send;
use rt::rtio::{IoFactory, LocalIo, RtioUnixListener};
use rt::rtio::{RtioUnixAcceptor, RtioPipe};
/// A stream which communicates over a named pipe.
pub struct UnixStream {
obj: PipeStream,
}
impl UnixStream {
fn new(obj: ~RtioPipe:Send) -> UnixStream {
UnixStream { obj: PipeStream::new(obj) }
}
/// Connect to a pipe named by `path`. This will attempt to open a
/// connection to the underlying socket.
///
/// The returned stream will be closed when the object falls out of scope.
///
/// # Example
///
/// ```rust
/// # #![allow(unused_must_use)]
/// use std::io::net::unix::UnixStream;
///
/// let server = Path::new("path/to/my/socket");
/// let mut stream = UnixStream::connect(&server);
/// stream.write([1, 2, 3]);
/// ```
pub fn connect<P: ToCStr>(path: &P) -> IoResult<UnixStream> {
LocalIo::maybe_raise(|io| {
io.unix_connect(&path.to_c_str()).map(UnixStream::new)
})
}
}
impl Clone for UnixStream {
fn clone(&self) -> UnixStream |
}
impl Reader for UnixStream {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> { self.obj.read(buf) }
}
impl Writer for UnixStream {
fn write(&mut self, buf: &[u8]) -> IoResult<()> { self.obj.write(buf) }
}
/// A value that can listen for incoming named pipe connection requests.
pub struct UnixListener {
/// The internal, opaque runtime Unix listener.
obj: ~RtioUnixListener:Send,
}
impl UnixListener {
/// Creates a new listener, ready to receive incoming connections on the
/// specified socket. The server will be named by `path`.
///
/// This listener will be closed when it falls out of scope.
///
/// # Example
///
/// ```
/// # fn main() {}
/// # fn foo() {
/// # #![allow(unused_must_use)]
/// use std::io::net::unix::UnixListener;
/// use std::io::{Listener, Acceptor};
///
/// let server = Path::new("/path/to/my/socket");
/// let stream = UnixListener::bind(&server);
/// for mut client in stream.listen().incoming() {
/// client.write([1, 2, 3, 4]);
/// }
/// # }
/// ```
pub fn bind<P: ToCStr>(path: &P) -> IoResult<UnixListener> {
LocalIo::maybe_raise(|io| {
io.unix_bind(&path.to_c_str()).map(|s| UnixListener { obj: s })
})
}
}
impl Listener<UnixStream, UnixAcceptor> for UnixListener {
fn listen(self) -> IoResult<UnixAcceptor> {
self.obj.listen().map(|obj| UnixAcceptor { obj: obj })
}
}
/// A value that can accept named pipe connections, returned from `listen()`.
pub struct UnixAcceptor {
/// The internal, opaque runtime Unix acceptor.
obj: ~RtioUnixAcceptor:Send,
}
impl Acceptor<UnixStream> for UnixAcceptor {
fn accept(&mut self) -> IoResult<UnixStream> {
self.obj.accept().map(UnixStream::new)
}
}
#[cfg(test)]
mod tests {
use prelude::*;
use super::*;
use io::*;
use io::test::*;
pub fn smalltest(server: proc(UnixStream):Send, client: proc(UnixStream):Send) {
let path1 = next_test_unix();
let path2 = path1.clone();
let mut acceptor = UnixListener::bind(&path1).listen();
spawn(proc() {
match UnixStream::connect(&path2) {
Ok(c) => client(c),
Err(e) => fail!("failed connect: {}", e),
}
});
match acceptor.accept() {
Ok(c) => server(c),
Err(e) => fail!("failed accept: {}", e),
}
}
iotest!(fn bind_error() {
let path = "path/to/nowhere";
match UnixListener::bind(&path) {
Ok(..) => fail!(),
Err(e) => {
assert!(e.kind == PermissionDenied || e.kind == FileNotFound ||
e.kind == InvalidInput);
}
}
})
iotest!(fn connect_error() {
let path = if cfg!(windows) {
r"\\.\pipe\this_should_not_exist_ever"
} else {
"path/to/nowhere"
};
match UnixStream::connect(&path) {
Ok(..) => fail!(),
Err(e) => {
assert!(e.kind == FileNotFound || e.kind == OtherIoError);
}
}
})
iotest!(fn smoke() {
smalltest(proc(mut server) {
let mut buf = [0];
server.read(buf).unwrap();
assert!(buf[0] == 99);
}, proc(mut client) {
client.write([99]).unwrap();
})
})
iotest!(fn read_eof() {
smalltest(proc(mut server) {
let mut buf = [0];
assert!(server.read(buf).is_err());
assert!(server.read(buf).is_err());
}, proc(_client) {
// drop the client
})
} #[ignore(cfg(windows))]) // FIXME(#12516)
iotest!(fn write_begone() {
smalltest(proc(mut server) {
let buf = [0];
loop {
match server.write(buf) {
Ok(..) => {}
Err(e) => {
assert!(e.kind == BrokenPipe ||
e.kind == NotConnected ||
e.kind == ConnectionReset,
"unknown error {:?}", e);
break;
}
}
}
}, proc(_client) {
// drop the client
})
})
iotest!(fn accept_lots() {
let times = 10;
let path1 = next_test_unix();
let path2 = path1.clone();
let mut acceptor = match UnixListener::bind(&path1).listen() {
Ok(a) => a,
Err(e) => fail!("failed listen: {}", e),
};
spawn(proc() {
for _ in range(0, times) {
let mut stream = UnixStream::connect(&path2);
match stream.write([100]) {
Ok(..) => {}
Err(e) => fail!("failed write: {}", e)
}
}
});
for _ in range(0, times) {
let mut client = acceptor.accept();
let mut buf = [0];
match client.read(buf) {
Ok(..) => {}
Err(e) => fail!("failed read/accept: {}", e),
}
assert_eq!(buf[0], 100);
}
})
#[cfg(unix)]
iotest!(fn path_exists() {
let path = next_test_unix();
let _acceptor = UnixListener::bind(&path).listen();
assert!(path.exists());
})
iotest!(fn unix_clone_smoke() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
spawn(proc() {
let mut s = UnixStream::connect(&addr);
let mut buf = [0, 0];
debug!("client reading");
assert_eq!(s.read(buf), Ok(1));
assert_eq!(buf[0], 1);
debug!("client writing");
s.write([2]).unwrap();
debug!("client dropping");
});
let mut s1 = acceptor.accept().unwrap();
let s2 = s1.clone();
let (tx1, rx1) = channel();
let (tx2, rx2) = channel();
spawn(proc() {
let mut s2 = s2;
rx1.recv();
debug!("writer writing");
s2.write([1]).unwrap();
debug!("writer done");
tx2.send(());
});
tx1.send(());
let mut buf = [0, 0];
debug!("reader reading");
assert_eq!(s1.read(buf), Ok(1));
debug!("reader done");
rx2.recv();
})
iotest!(fn unix_clone_two_read() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
let (tx1, rx) = channel();
let tx2 = tx1.clone();
spawn(proc() {
let mut s = UnixStream::connect(&addr);
s.write([1]).unwrap();
rx.recv();
s.write([2]).unwrap();
rx.recv();
});
let mut s1 = acceptor.accept().unwrap();
let s2 = s1.clone();
let (done, rx) = channel();
spawn(proc() {
let mut s2 = s2;
let mut buf = [0, 0];
s2.read(buf).unwrap();
tx2.send(());
done.send(());
});
let mut buf = [0, 0];
s1.read(buf).unwrap();
tx1.send(());
rx.recv();
})
iotest!(fn unix_clone_two_write() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
spawn(proc() {
let mut s = UnixStream::connect(&addr);
let mut buf = [0, 1];
s.read(buf).unwrap();
s.read(buf).unwrap();
});
let mut s1 = acceptor.accept().unwrap();
let s2 = s1.clone();
let (tx, rx) = channel();
spawn(proc() {
let mut s2 = s2;
s2.write([1]).unwrap();
tx.send(());
});
s1.write([2]).unwrap();
rx.recv();
})
}
| {
UnixStream { obj: self.obj.clone() }
} | identifier_body |
unix.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Named pipes
This module contains the ability to communicate over named pipes with
synchronous I/O. On windows, this corresponds to talking over a Named Pipe,
while on Unix it corresponds to UNIX domain sockets.
These pipes are similar to TCP in the sense that you can have both a stream to a
server and a server itself. The server provided accepts other `UnixStream`
instances as clients.
*/
#![allow(missing_doc)]
use prelude::*;
use c_str::ToCStr;
use clone::Clone;
use io::pipe::PipeStream;
use io::{Listener, Acceptor, Reader, Writer, IoResult};
use kinds::Send;
use rt::rtio::{IoFactory, LocalIo, RtioUnixListener};
use rt::rtio::{RtioUnixAcceptor, RtioPipe};
/// A stream which communicates over a named pipe.
pub struct UnixStream {
obj: PipeStream,
}
impl UnixStream {
fn new(obj: ~RtioPipe:Send) -> UnixStream {
UnixStream { obj: PipeStream::new(obj) }
}
/// Connect to a pipe named by `path`. This will attempt to open a
/// connection to the underlying socket.
///
/// The returned stream will be closed when the object falls out of scope.
///
/// # Example
///
/// ```rust
/// # #![allow(unused_must_use)]
/// use std::io::net::unix::UnixStream;
///
/// let server = Path::new("path/to/my/socket");
/// let mut stream = UnixStream::connect(&server);
/// stream.write([1, 2, 3]);
/// ```
pub fn connect<P: ToCStr>(path: &P) -> IoResult<UnixStream> {
LocalIo::maybe_raise(|io| {
io.unix_connect(&path.to_c_str()).map(UnixStream::new)
})
}
}
impl Clone for UnixStream {
fn clone(&self) -> UnixStream {
UnixStream { obj: self.obj.clone() }
}
}
impl Reader for UnixStream {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> { self.obj.read(buf) }
}
impl Writer for UnixStream {
fn write(&mut self, buf: &[u8]) -> IoResult<()> { self.obj.write(buf) }
}
/// A value that can listen for incoming named pipe connection requests.
pub struct UnixListener {
/// The internal, opaque runtime Unix listener.
obj: ~RtioUnixListener:Send,
}
impl UnixListener {
/// Creates a new listener, ready to receive incoming connections on the
/// specified socket. The server will be named by `path`.
///
/// This listener will be closed when it falls out of scope.
///
/// # Example
///
/// ```
/// # fn main() {}
/// # fn foo() {
/// # #![allow(unused_must_use)]
/// use std::io::net::unix::UnixListener;
/// use std::io::{Listener, Acceptor};
///
/// let server = Path::new("/path/to/my/socket");
/// let stream = UnixListener::bind(&server);
/// for mut client in stream.listen().incoming() {
/// client.write([1, 2, 3, 4]);
/// }
/// # }
/// ```
pub fn | <P: ToCStr>(path: &P) -> IoResult<UnixListener> {
LocalIo::maybe_raise(|io| {
io.unix_bind(&path.to_c_str()).map(|s| UnixListener { obj: s })
})
}
}
impl Listener<UnixStream, UnixAcceptor> for UnixListener {
fn listen(self) -> IoResult<UnixAcceptor> {
self.obj.listen().map(|obj| UnixAcceptor { obj: obj })
}
}
/// A value that can accept named pipe connections, returned from `listen()`.
pub struct UnixAcceptor {
/// The internal, opaque runtime Unix acceptor.
obj: ~RtioUnixAcceptor:Send,
}
impl Acceptor<UnixStream> for UnixAcceptor {
fn accept(&mut self) -> IoResult<UnixStream> {
self.obj.accept().map(UnixStream::new)
}
}
#[cfg(test)]
mod tests {
use prelude::*;
use super::*;
use io::*;
use io::test::*;
pub fn smalltest(server: proc(UnixStream):Send, client: proc(UnixStream):Send) {
let path1 = next_test_unix();
let path2 = path1.clone();
let mut acceptor = UnixListener::bind(&path1).listen();
spawn(proc() {
match UnixStream::connect(&path2) {
Ok(c) => client(c),
Err(e) => fail!("failed connect: {}", e),
}
});
match acceptor.accept() {
Ok(c) => server(c),
Err(e) => fail!("failed accept: {}", e),
}
}
iotest!(fn bind_error() {
let path = "path/to/nowhere";
match UnixListener::bind(&path) {
Ok(..) => fail!(),
Err(e) => {
assert!(e.kind == PermissionDenied || e.kind == FileNotFound ||
e.kind == InvalidInput);
}
}
})
iotest!(fn connect_error() {
let path = if cfg!(windows) {
r"\\.\pipe\this_should_not_exist_ever"
} else {
"path/to/nowhere"
};
match UnixStream::connect(&path) {
Ok(..) => fail!(),
Err(e) => {
assert!(e.kind == FileNotFound || e.kind == OtherIoError);
}
}
})
iotest!(fn smoke() {
smalltest(proc(mut server) {
let mut buf = [0];
server.read(buf).unwrap();
assert!(buf[0] == 99);
}, proc(mut client) {
client.write([99]).unwrap();
})
})
iotest!(fn read_eof() {
smalltest(proc(mut server) {
let mut buf = [0];
assert!(server.read(buf).is_err());
assert!(server.read(buf).is_err());
}, proc(_client) {
// drop the client
})
} #[ignore(cfg(windows))]) // FIXME(#12516)
iotest!(fn write_begone() {
smalltest(proc(mut server) {
let buf = [0];
loop {
match server.write(buf) {
Ok(..) => {}
Err(e) => {
assert!(e.kind == BrokenPipe ||
e.kind == NotConnected ||
e.kind == ConnectionReset,
"unknown error {:?}", e);
break;
}
}
}
}, proc(_client) {
// drop the client
})
})
iotest!(fn accept_lots() {
let times = 10;
let path1 = next_test_unix();
let path2 = path1.clone();
let mut acceptor = match UnixListener::bind(&path1).listen() {
Ok(a) => a,
Err(e) => fail!("failed listen: {}", e),
};
spawn(proc() {
for _ in range(0, times) {
let mut stream = UnixStream::connect(&path2);
match stream.write([100]) {
Ok(..) => {}
Err(e) => fail!("failed write: {}", e)
}
}
});
for _ in range(0, times) {
let mut client = acceptor.accept();
let mut buf = [0];
match client.read(buf) {
Ok(..) => {}
Err(e) => fail!("failed read/accept: {}", e),
}
assert_eq!(buf[0], 100);
}
})
#[cfg(unix)]
iotest!(fn path_exists() {
let path = next_test_unix();
let _acceptor = UnixListener::bind(&path).listen();
assert!(path.exists());
})
iotest!(fn unix_clone_smoke() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
spawn(proc() {
let mut s = UnixStream::connect(&addr);
let mut buf = [0, 0];
debug!("client reading");
assert_eq!(s.read(buf), Ok(1));
assert_eq!(buf[0], 1);
debug!("client writing");
s.write([2]).unwrap();
debug!("client dropping");
});
let mut s1 = acceptor.accept().unwrap();
let s2 = s1.clone();
let (tx1, rx1) = channel();
let (tx2, rx2) = channel();
spawn(proc() {
let mut s2 = s2;
rx1.recv();
debug!("writer writing");
s2.write([1]).unwrap();
debug!("writer done");
tx2.send(());
});
tx1.send(());
let mut buf = [0, 0];
debug!("reader reading");
assert_eq!(s1.read(buf), Ok(1));
debug!("reader done");
rx2.recv();
})
iotest!(fn unix_clone_two_read() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
let (tx1, rx) = channel();
let tx2 = tx1.clone();
spawn(proc() {
let mut s = UnixStream::connect(&addr);
s.write([1]).unwrap();
rx.recv();
s.write([2]).unwrap();
rx.recv();
});
let mut s1 = acceptor.accept().unwrap();
let s2 = s1.clone();
let (done, rx) = channel();
spawn(proc() {
let mut s2 = s2;
let mut buf = [0, 0];
s2.read(buf).unwrap();
tx2.send(());
done.send(());
});
let mut buf = [0, 0];
s1.read(buf).unwrap();
tx1.send(());
rx.recv();
})
iotest!(fn unix_clone_two_write() {
let addr = next_test_unix();
let mut acceptor = UnixListener::bind(&addr).listen();
spawn(proc() {
let mut s = UnixStream::connect(&addr);
let mut buf = [0, 1];
s.read(buf).unwrap();
s.read(buf).unwrap();
});
let mut s1 = acceptor.accept().unwrap();
let s2 = s1.clone();
let (tx, rx) = channel();
spawn(proc() {
let mut s2 = s2;
s2.write([1]).unwrap();
tx.send(());
});
s1.write([2]).unwrap();
rx.recv();
})
}
| bind | identifier_name |
video.ts | /*{# Copyright (c) 2012 Turbulenz Limited #}*/
/*
* @title: Video playback
* @description:
* This sample shows how to play a video into a texture.
*/
/*{{ javascript("jslib/observer.js") }}*/
/*{{ javascript("jslib/requesthandler.js") }}*/
/*{{ javascript("jslib/utilities.js") }}*/
/*{{ javascript("jslib/services/turbulenzservices.js") }}*/
/*{{ javascript("jslib/services/turbulenzbridge.js") }}*/
/*{{ javascript("jslib/services/gamesession.js") }}*/
/*{{ javascript("jslib/services/mappingtable.js") }}*/
/*global TurbulenzEngine: true */
/*global TurbulenzServices: false */
/*global RequestHandler: false */
TurbulenzEngine.onload = function onloadFn()
{
var graphicsDevice = TurbulenzEngine.createGraphicsDevice({});
// IE detection while WebGL implementation is incomplete
if (graphicsDevice && graphicsDevice.renderer === "Internet Explorer")
{
window.alert("The video sample is not supported on Internet Explorer");
return;
}
var soundDevice = TurbulenzEngine.createSoundDevice({});
var mathDevice = TurbulenzEngine.createMathDevice({});
var requestHandler = RequestHandler.create({});
var video;
var videoPosition = -1;
var shader, technique;
var texture;
var clearColor = mathDevice.v4Build(0, 0, 0, 1);
var clipSpace = mathDevice.v4Build(1, -1, 0, 0);
var videoColor = mathDevice.v4Build(1, 1, 1, 1);
var primitive = graphicsDevice.PRIMITIVE_TRIANGLE_STRIP;
var semantics = graphicsDevice.createSemantics(['POSITION', 'TEXCOORD0']);
var vertexBuffer = graphicsDevice.createVertexBuffer({
numVertices: 4,
attributes: [graphicsDevice.VERTEXFORMAT_FLOAT2,
graphicsDevice.VERTEXFORMAT_FLOAT2],
dynamic: false,
data: [
-1.0, 1.0, 0.0, 1.0,
1.0, 1.0, 1.0, 1.0,
-1.0, -1.0, 0.0, 0.0,
1.0, -1.0, 1.0, 0.0
]
});
var source = soundDevice.createGlobalSource({
looping: true
});
var sound;
var assetsToLoad = 3;
function mappingTableReceived(mappingTable)
{
var videoURL;
if (graphicsDevice.isSupported("FILEFORMAT_WEBM"))
{
videoURL = mappingTable.getURL("videos/turbulenzanimation.webm");
}
else
{
videoURL = mappingTable.getURL("videos/turbulenzanimation.mp4");
}
graphicsDevice.createVideo({
src: videoURL,
looping: true,
onload: function (v)
{
if (v)
{
video = v;
assetsToLoad -= 1;
}
else
{
window.alert("Failed to load video!");
}
}
});
var soundURL;
if (soundDevice.isSupported("FILEFORMAT_OGG"))
{
soundURL = mappingTable.getURL("sounds/turbulenzanimation.ogg");
}
else
{
soundURL = mappingTable.getURL("sounds/turbulenzanimation.mp3");
}
soundDevice.createSound({
src: soundURL,
onload : function (s)
{
if (s)
{
sound = s;
assetsToLoad -= 1;
}
else
{
window.alert('Failed to load sound!');
}
}
});
function shaderLoaded(shaderText)
{
if (shaderText)
{
var shaderParameters = JSON.parse(shaderText);
shader = graphicsDevice.createShader(shaderParameters);
technique = shader.getTechnique("video");
assetsToLoad -= 1;
}
else
{
window.alert("Failed to load shader!");
}
}
requestHandler.request({
src: mappingTable.getURL("shaders/video.cgfx"),
onload: shaderLoaded
});
}
var gameSession;
function sessionCreated()
|
gameSession = TurbulenzServices.createGameSession(requestHandler, sessionCreated);
//==========================================================================
// Main loop.
//==========================================================================
var fpsElement = document.getElementById("fpscounter");
var lastFPS = "";
var nextUpdate = 0;
function displayPerformance()
{
var currentTime = TurbulenzEngine.time;
if (currentTime > nextUpdate)
{
nextUpdate = (currentTime + 0.1);
var fpsText = (graphicsDevice.fps).toFixed(2);
if (lastFPS !== fpsText)
{
lastFPS = fpsText;
fpsElement.innerHTML = fpsText + " fps";
}
}
}
function mainLoop()
{
soundDevice.update();
if (graphicsDevice.beginFrame())
{
var deviceWidth = graphicsDevice.width;
var deviceHeight = graphicsDevice.height;
var aspectRatio = (deviceWidth / deviceHeight);
var videoWidth = video.width;
var videoHeight = video.height;
var videoAspectRatio = (videoWidth / videoHeight);
var x, y;
if (aspectRatio < videoAspectRatio)
{
x = 1;
y = aspectRatio / videoAspectRatio;
}
else //if (aspectRatio >= videoAspectRatio)
{
x = videoAspectRatio / aspectRatio;
y = 1;
}
var currentVideoPosition = video.tell;
if (currentVideoPosition &&
videoPosition !== currentVideoPosition)
{
if (currentVideoPosition < videoPosition)
{
// looped, sync
source.seek(videoPosition);
}
videoPosition = currentVideoPosition;
texture.setData(video);
}
graphicsDevice.clear(clearColor);
graphicsDevice.setTechnique(technique);
technique.texture = texture;
technique.clipSpace = mathDevice.v4Build(x, -y, 0, 0, clipSpace);
technique.color = videoColor;
graphicsDevice.setStream(vertexBuffer, semantics);
graphicsDevice.draw(primitive, 4);
graphicsDevice.endFrame();
if (fpsElement)
{
displayPerformance();
}
}
}
var intervalID;
function loadingLoop()
{
if (assetsToLoad === 0)
{
TurbulenzEngine.clearInterval(intervalID);
source.play(sound);
video.play();
texture = graphicsDevice.createTexture({
width: video.width,
height: video.height,
mipmaps: false,
format: 'R8G8B8',
dynamic: true,
data: video
});
videoPosition = video.tell;
intervalID = TurbulenzEngine.setInterval(mainLoop, 1000 / 60);
}
}
intervalID = TurbulenzEngine.setInterval(loadingLoop, 100);
// Create a scene destroy callback to run when the window is closed
TurbulenzEngine.onunload = function destroyScene()
{
TurbulenzEngine.clearInterval(intervalID);
if (texture)
{
texture.destroy();
texture = null;
}
if (shader)
{
shader.destroy();
technique = null;
shader = null;
}
if (video)
{
video.destroy();
video = null;
}
if (vertexBuffer)
{
vertexBuffer.destroy();
vertexBuffer = null;
}
if (source)
{
source.destroy();
source = null;
}
if (sound)
{
sound.destroy();
sound = null;
}
fpsElement = null;
if (gameSession)
{
gameSession.destroy();
gameSession = null;
}
};
};
| {
TurbulenzServices.createMappingTable(
requestHandler,
gameSession,
mappingTableReceived
);
} | identifier_body |
video.ts | /*{# Copyright (c) 2012 Turbulenz Limited #}*/
/*
* @title: Video playback
* @description:
* This sample shows how to play a video into a texture.
*/
/*{{ javascript("jslib/observer.js") }}*/
/*{{ javascript("jslib/requesthandler.js") }}*/
/*{{ javascript("jslib/utilities.js") }}*/
/*{{ javascript("jslib/services/turbulenzservices.js") }}*/
/*{{ javascript("jslib/services/turbulenzbridge.js") }}*/
/*{{ javascript("jslib/services/gamesession.js") }}*/
/*{{ javascript("jslib/services/mappingtable.js") }}*/
/*global TurbulenzEngine: true */
/*global TurbulenzServices: false */
/*global RequestHandler: false */
TurbulenzEngine.onload = function onloadFn()
{
var graphicsDevice = TurbulenzEngine.createGraphicsDevice({});
// IE detection while WebGL implementation is incomplete
if (graphicsDevice && graphicsDevice.renderer === "Internet Explorer")
{
window.alert("The video sample is not supported on Internet Explorer");
return;
}
var soundDevice = TurbulenzEngine.createSoundDevice({});
var mathDevice = TurbulenzEngine.createMathDevice({});
var requestHandler = RequestHandler.create({});
var video;
var videoPosition = -1;
var shader, technique;
var texture;
var clearColor = mathDevice.v4Build(0, 0, 0, 1);
var clipSpace = mathDevice.v4Build(1, -1, 0, 0);
var videoColor = mathDevice.v4Build(1, 1, 1, 1);
var primitive = graphicsDevice.PRIMITIVE_TRIANGLE_STRIP;
var semantics = graphicsDevice.createSemantics(['POSITION', 'TEXCOORD0']);
var vertexBuffer = graphicsDevice.createVertexBuffer({
numVertices: 4,
attributes: [graphicsDevice.VERTEXFORMAT_FLOAT2,
graphicsDevice.VERTEXFORMAT_FLOAT2],
dynamic: false,
data: [
-1.0, 1.0, 0.0, 1.0,
1.0, 1.0, 1.0, 1.0,
-1.0, -1.0, 0.0, 0.0,
1.0, -1.0, 1.0, 0.0
]
});
var source = soundDevice.createGlobalSource({
looping: true
});
var sound;
var assetsToLoad = 3;
function mappingTableReceived(mappingTable)
{
var videoURL;
if (graphicsDevice.isSupported("FILEFORMAT_WEBM"))
{
videoURL = mappingTable.getURL("videos/turbulenzanimation.webm");
}
else
{
videoURL = mappingTable.getURL("videos/turbulenzanimation.mp4");
}
graphicsDevice.createVideo({
src: videoURL,
looping: true,
onload: function (v)
{
if (v)
{
video = v;
assetsToLoad -= 1;
}
else
{
window.alert("Failed to load video!");
}
}
});
var soundURL;
if (soundDevice.isSupported("FILEFORMAT_OGG"))
{
soundURL = mappingTable.getURL("sounds/turbulenzanimation.ogg");
}
else
{
soundURL = mappingTable.getURL("sounds/turbulenzanimation.mp3");
}
soundDevice.createSound({
src: soundURL,
onload : function (s)
{
if (s)
{
sound = s;
assetsToLoad -= 1;
}
else
{
window.alert('Failed to load sound!');
}
}
});
function shaderLoaded(shaderText)
{
if (shaderText)
{
var shaderParameters = JSON.parse(shaderText);
shader = graphicsDevice.createShader(shaderParameters);
technique = shader.getTechnique("video");
assetsToLoad -= 1;
}
else
{
window.alert("Failed to load shader!");
}
}
requestHandler.request({
src: mappingTable.getURL("shaders/video.cgfx"),
onload: shaderLoaded
});
}
var gameSession;
function sessionCreated()
{
TurbulenzServices.createMappingTable(
requestHandler,
gameSession,
mappingTableReceived
);
}
gameSession = TurbulenzServices.createGameSession(requestHandler, sessionCreated);
//==========================================================================
// Main loop.
//==========================================================================
var fpsElement = document.getElementById("fpscounter");
var lastFPS = "";
var nextUpdate = 0;
function displayPerformance()
{
var currentTime = TurbulenzEngine.time;
if (currentTime > nextUpdate)
{
nextUpdate = (currentTime + 0.1);
var fpsText = (graphicsDevice.fps).toFixed(2);
if (lastFPS !== fpsText)
{
lastFPS = fpsText;
fpsElement.innerHTML = fpsText + " fps";
}
}
}
function mainLoop()
{
soundDevice.update();
if (graphicsDevice.beginFrame())
{
var deviceWidth = graphicsDevice.width;
var deviceHeight = graphicsDevice.height;
var aspectRatio = (deviceWidth / deviceHeight);
var videoWidth = video.width;
var videoHeight = video.height;
var videoAspectRatio = (videoWidth / videoHeight); | var x, y;
if (aspectRatio < videoAspectRatio)
{
x = 1;
y = aspectRatio / videoAspectRatio;
}
else //if (aspectRatio >= videoAspectRatio)
{
x = videoAspectRatio / aspectRatio;
y = 1;
}
var currentVideoPosition = video.tell;
if (currentVideoPosition &&
videoPosition !== currentVideoPosition)
{
if (currentVideoPosition < videoPosition)
{
// looped, sync
source.seek(videoPosition);
}
videoPosition = currentVideoPosition;
texture.setData(video);
}
graphicsDevice.clear(clearColor);
graphicsDevice.setTechnique(technique);
technique.texture = texture;
technique.clipSpace = mathDevice.v4Build(x, -y, 0, 0, clipSpace);
technique.color = videoColor;
graphicsDevice.setStream(vertexBuffer, semantics);
graphicsDevice.draw(primitive, 4);
graphicsDevice.endFrame();
if (fpsElement)
{
displayPerformance();
}
}
}
var intervalID;
function loadingLoop()
{
if (assetsToLoad === 0)
{
TurbulenzEngine.clearInterval(intervalID);
source.play(sound);
video.play();
texture = graphicsDevice.createTexture({
width: video.width,
height: video.height,
mipmaps: false,
format: 'R8G8B8',
dynamic: true,
data: video
});
videoPosition = video.tell;
intervalID = TurbulenzEngine.setInterval(mainLoop, 1000 / 60);
}
}
intervalID = TurbulenzEngine.setInterval(loadingLoop, 100);
// Create a scene destroy callback to run when the window is closed
TurbulenzEngine.onunload = function destroyScene()
{
TurbulenzEngine.clearInterval(intervalID);
if (texture)
{
texture.destroy();
texture = null;
}
if (shader)
{
shader.destroy();
technique = null;
shader = null;
}
if (video)
{
video.destroy();
video = null;
}
if (vertexBuffer)
{
vertexBuffer.destroy();
vertexBuffer = null;
}
if (source)
{
source.destroy();
source = null;
}
if (sound)
{
sound.destroy();
sound = null;
}
fpsElement = null;
if (gameSession)
{
gameSession.destroy();
gameSession = null;
}
};
}; | random_line_split | |
video.ts | /*{# Copyright (c) 2012 Turbulenz Limited #}*/
/*
* @title: Video playback
* @description:
* This sample shows how to play a video into a texture.
*/
/*{{ javascript("jslib/observer.js") }}*/
/*{{ javascript("jslib/requesthandler.js") }}*/
/*{{ javascript("jslib/utilities.js") }}*/
/*{{ javascript("jslib/services/turbulenzservices.js") }}*/
/*{{ javascript("jslib/services/turbulenzbridge.js") }}*/
/*{{ javascript("jslib/services/gamesession.js") }}*/
/*{{ javascript("jslib/services/mappingtable.js") }}*/
/*global TurbulenzEngine: true */
/*global TurbulenzServices: false */
/*global RequestHandler: false */
TurbulenzEngine.onload = function onloadFn()
{
var graphicsDevice = TurbulenzEngine.createGraphicsDevice({});
// IE detection while WebGL implementation is incomplete
if (graphicsDevice && graphicsDevice.renderer === "Internet Explorer")
{
window.alert("The video sample is not supported on Internet Explorer");
return;
}
var soundDevice = TurbulenzEngine.createSoundDevice({});
var mathDevice = TurbulenzEngine.createMathDevice({});
var requestHandler = RequestHandler.create({});
var video;
var videoPosition = -1;
var shader, technique;
var texture;
var clearColor = mathDevice.v4Build(0, 0, 0, 1);
var clipSpace = mathDevice.v4Build(1, -1, 0, 0);
var videoColor = mathDevice.v4Build(1, 1, 1, 1);
var primitive = graphicsDevice.PRIMITIVE_TRIANGLE_STRIP;
var semantics = graphicsDevice.createSemantics(['POSITION', 'TEXCOORD0']);
var vertexBuffer = graphicsDevice.createVertexBuffer({
numVertices: 4,
attributes: [graphicsDevice.VERTEXFORMAT_FLOAT2,
graphicsDevice.VERTEXFORMAT_FLOAT2],
dynamic: false,
data: [
-1.0, 1.0, 0.0, 1.0,
1.0, 1.0, 1.0, 1.0,
-1.0, -1.0, 0.0, 0.0,
1.0, -1.0, 1.0, 0.0
]
});
var source = soundDevice.createGlobalSource({
looping: true
});
var sound;
var assetsToLoad = 3;
function mappingTableReceived(mappingTable)
{
var videoURL;
if (graphicsDevice.isSupported("FILEFORMAT_WEBM"))
{
videoURL = mappingTable.getURL("videos/turbulenzanimation.webm");
}
else
{
videoURL = mappingTable.getURL("videos/turbulenzanimation.mp4");
}
graphicsDevice.createVideo({
src: videoURL,
looping: true,
onload: function (v)
{
if (v)
{
video = v;
assetsToLoad -= 1;
}
else
{
window.alert("Failed to load video!");
}
}
});
var soundURL;
if (soundDevice.isSupported("FILEFORMAT_OGG"))
{
soundURL = mappingTable.getURL("sounds/turbulenzanimation.ogg");
}
else
{
soundURL = mappingTable.getURL("sounds/turbulenzanimation.mp3");
}
soundDevice.createSound({
src: soundURL,
onload : function (s)
{
if (s)
{
sound = s;
assetsToLoad -= 1;
}
else
{
window.alert('Failed to load sound!');
}
}
});
function shaderLoaded(shaderText)
{
if (shaderText)
{
var shaderParameters = JSON.parse(shaderText);
shader = graphicsDevice.createShader(shaderParameters);
technique = shader.getTechnique("video");
assetsToLoad -= 1;
}
else
{
window.alert("Failed to load shader!");
}
}
requestHandler.request({
src: mappingTable.getURL("shaders/video.cgfx"),
onload: shaderLoaded
});
}
var gameSession;
function sessionCreated()
{
TurbulenzServices.createMappingTable(
requestHandler,
gameSession,
mappingTableReceived
);
}
gameSession = TurbulenzServices.createGameSession(requestHandler, sessionCreated);
//==========================================================================
// Main loop.
//==========================================================================
var fpsElement = document.getElementById("fpscounter");
var lastFPS = "";
var nextUpdate = 0;
function displayPerformance()
{
var currentTime = TurbulenzEngine.time;
if (currentTime > nextUpdate)
{
nextUpdate = (currentTime + 0.1);
var fpsText = (graphicsDevice.fps).toFixed(2);
if (lastFPS !== fpsText)
{
lastFPS = fpsText;
fpsElement.innerHTML = fpsText + " fps";
}
}
}
function mainLoop()
{
soundDevice.update();
if (graphicsDevice.beginFrame())
{
var deviceWidth = graphicsDevice.width;
var deviceHeight = graphicsDevice.height;
var aspectRatio = (deviceWidth / deviceHeight);
var videoWidth = video.width;
var videoHeight = video.height;
var videoAspectRatio = (videoWidth / videoHeight);
var x, y;
if (aspectRatio < videoAspectRatio)
{
x = 1;
y = aspectRatio / videoAspectRatio;
}
else //if (aspectRatio >= videoAspectRatio)
{
x = videoAspectRatio / aspectRatio;
y = 1;
}
var currentVideoPosition = video.tell;
if (currentVideoPosition &&
videoPosition !== currentVideoPosition)
|
graphicsDevice.clear(clearColor);
graphicsDevice.setTechnique(technique);
technique.texture = texture;
technique.clipSpace = mathDevice.v4Build(x, -y, 0, 0, clipSpace);
technique.color = videoColor;
graphicsDevice.setStream(vertexBuffer, semantics);
graphicsDevice.draw(primitive, 4);
graphicsDevice.endFrame();
if (fpsElement)
{
displayPerformance();
}
}
}
var intervalID;
function loadingLoop()
{
if (assetsToLoad === 0)
{
TurbulenzEngine.clearInterval(intervalID);
source.play(sound);
video.play();
texture = graphicsDevice.createTexture({
width: video.width,
height: video.height,
mipmaps: false,
format: 'R8G8B8',
dynamic: true,
data: video
});
videoPosition = video.tell;
intervalID = TurbulenzEngine.setInterval(mainLoop, 1000 / 60);
}
}
intervalID = TurbulenzEngine.setInterval(loadingLoop, 100);
// Create a scene destroy callback to run when the window is closed
TurbulenzEngine.onunload = function destroyScene()
{
TurbulenzEngine.clearInterval(intervalID);
if (texture)
{
texture.destroy();
texture = null;
}
if (shader)
{
shader.destroy();
technique = null;
shader = null;
}
if (video)
{
video.destroy();
video = null;
}
if (vertexBuffer)
{
vertexBuffer.destroy();
vertexBuffer = null;
}
if (source)
{
source.destroy();
source = null;
}
if (sound)
{
sound.destroy();
sound = null;
}
fpsElement = null;
if (gameSession)
{
gameSession.destroy();
gameSession = null;
}
};
};
| {
if (currentVideoPosition < videoPosition)
{
// looped, sync
source.seek(videoPosition);
}
videoPosition = currentVideoPosition;
texture.setData(video);
} | conditional_block |
video.ts | /*{# Copyright (c) 2012 Turbulenz Limited #}*/
/*
* @title: Video playback
* @description:
* This sample shows how to play a video into a texture.
*/
/*{{ javascript("jslib/observer.js") }}*/
/*{{ javascript("jslib/requesthandler.js") }}*/
/*{{ javascript("jslib/utilities.js") }}*/
/*{{ javascript("jslib/services/turbulenzservices.js") }}*/
/*{{ javascript("jslib/services/turbulenzbridge.js") }}*/
/*{{ javascript("jslib/services/gamesession.js") }}*/
/*{{ javascript("jslib/services/mappingtable.js") }}*/
/*global TurbulenzEngine: true */
/*global TurbulenzServices: false */
/*global RequestHandler: false */
TurbulenzEngine.onload = function onloadFn()
{
var graphicsDevice = TurbulenzEngine.createGraphicsDevice({});
// IE detection while WebGL implementation is incomplete
if (graphicsDevice && graphicsDevice.renderer === "Internet Explorer")
{
window.alert("The video sample is not supported on Internet Explorer");
return;
}
var soundDevice = TurbulenzEngine.createSoundDevice({});
var mathDevice = TurbulenzEngine.createMathDevice({});
var requestHandler = RequestHandler.create({});
var video;
var videoPosition = -1;
var shader, technique;
var texture;
var clearColor = mathDevice.v4Build(0, 0, 0, 1);
var clipSpace = mathDevice.v4Build(1, -1, 0, 0);
var videoColor = mathDevice.v4Build(1, 1, 1, 1);
var primitive = graphicsDevice.PRIMITIVE_TRIANGLE_STRIP;
var semantics = graphicsDevice.createSemantics(['POSITION', 'TEXCOORD0']);
var vertexBuffer = graphicsDevice.createVertexBuffer({
numVertices: 4,
attributes: [graphicsDevice.VERTEXFORMAT_FLOAT2,
graphicsDevice.VERTEXFORMAT_FLOAT2],
dynamic: false,
data: [
-1.0, 1.0, 0.0, 1.0,
1.0, 1.0, 1.0, 1.0,
-1.0, -1.0, 0.0, 0.0,
1.0, -1.0, 1.0, 0.0
]
});
var source = soundDevice.createGlobalSource({
looping: true
});
var sound;
var assetsToLoad = 3;
function | (mappingTable)
{
var videoURL;
if (graphicsDevice.isSupported("FILEFORMAT_WEBM"))
{
videoURL = mappingTable.getURL("videos/turbulenzanimation.webm");
}
else
{
videoURL = mappingTable.getURL("videos/turbulenzanimation.mp4");
}
graphicsDevice.createVideo({
src: videoURL,
looping: true,
onload: function (v)
{
if (v)
{
video = v;
assetsToLoad -= 1;
}
else
{
window.alert("Failed to load video!");
}
}
});
var soundURL;
if (soundDevice.isSupported("FILEFORMAT_OGG"))
{
soundURL = mappingTable.getURL("sounds/turbulenzanimation.ogg");
}
else
{
soundURL = mappingTable.getURL("sounds/turbulenzanimation.mp3");
}
soundDevice.createSound({
src: soundURL,
onload : function (s)
{
if (s)
{
sound = s;
assetsToLoad -= 1;
}
else
{
window.alert('Failed to load sound!');
}
}
});
function shaderLoaded(shaderText)
{
if (shaderText)
{
var shaderParameters = JSON.parse(shaderText);
shader = graphicsDevice.createShader(shaderParameters);
technique = shader.getTechnique("video");
assetsToLoad -= 1;
}
else
{
window.alert("Failed to load shader!");
}
}
requestHandler.request({
src: mappingTable.getURL("shaders/video.cgfx"),
onload: shaderLoaded
});
}
var gameSession;
function sessionCreated()
{
TurbulenzServices.createMappingTable(
requestHandler,
gameSession,
mappingTableReceived
);
}
gameSession = TurbulenzServices.createGameSession(requestHandler, sessionCreated);
//==========================================================================
// Main loop.
//==========================================================================
var fpsElement = document.getElementById("fpscounter");
var lastFPS = "";
var nextUpdate = 0;
function displayPerformance()
{
var currentTime = TurbulenzEngine.time;
if (currentTime > nextUpdate)
{
nextUpdate = (currentTime + 0.1);
var fpsText = (graphicsDevice.fps).toFixed(2);
if (lastFPS !== fpsText)
{
lastFPS = fpsText;
fpsElement.innerHTML = fpsText + " fps";
}
}
}
function mainLoop()
{
soundDevice.update();
if (graphicsDevice.beginFrame())
{
var deviceWidth = graphicsDevice.width;
var deviceHeight = graphicsDevice.height;
var aspectRatio = (deviceWidth / deviceHeight);
var videoWidth = video.width;
var videoHeight = video.height;
var videoAspectRatio = (videoWidth / videoHeight);
var x, y;
if (aspectRatio < videoAspectRatio)
{
x = 1;
y = aspectRatio / videoAspectRatio;
}
else //if (aspectRatio >= videoAspectRatio)
{
x = videoAspectRatio / aspectRatio;
y = 1;
}
var currentVideoPosition = video.tell;
if (currentVideoPosition &&
videoPosition !== currentVideoPosition)
{
if (currentVideoPosition < videoPosition)
{
// looped, sync
source.seek(videoPosition);
}
videoPosition = currentVideoPosition;
texture.setData(video);
}
graphicsDevice.clear(clearColor);
graphicsDevice.setTechnique(technique);
technique.texture = texture;
technique.clipSpace = mathDevice.v4Build(x, -y, 0, 0, clipSpace);
technique.color = videoColor;
graphicsDevice.setStream(vertexBuffer, semantics);
graphicsDevice.draw(primitive, 4);
graphicsDevice.endFrame();
if (fpsElement)
{
displayPerformance();
}
}
}
var intervalID;
function loadingLoop()
{
if (assetsToLoad === 0)
{
TurbulenzEngine.clearInterval(intervalID);
source.play(sound);
video.play();
texture = graphicsDevice.createTexture({
width: video.width,
height: video.height,
mipmaps: false,
format: 'R8G8B8',
dynamic: true,
data: video
});
videoPosition = video.tell;
intervalID = TurbulenzEngine.setInterval(mainLoop, 1000 / 60);
}
}
intervalID = TurbulenzEngine.setInterval(loadingLoop, 100);
// Create a scene destroy callback to run when the window is closed
TurbulenzEngine.onunload = function destroyScene()
{
TurbulenzEngine.clearInterval(intervalID);
if (texture)
{
texture.destroy();
texture = null;
}
if (shader)
{
shader.destroy();
technique = null;
shader = null;
}
if (video)
{
video.destroy();
video = null;
}
if (vertexBuffer)
{
vertexBuffer.destroy();
vertexBuffer = null;
}
if (source)
{
source.destroy();
source = null;
}
if (sound)
{
sound.destroy();
sound = null;
}
fpsElement = null;
if (gameSession)
{
gameSession.destroy();
gameSession = null;
}
};
};
| mappingTableReceived | identifier_name |
numeric.rs | use backend::Backend;
use expression::{Expression, SelectableExpression, NonAggregate};
use query_builder::*;
use types;
macro_rules! numeric_operation {
($name:ident, $op:expr) => {
pub struct $name<Lhs, Rhs> {
lhs: Lhs,
rhs: Rhs,
}
impl<Lhs, Rhs> $name<Lhs, Rhs> {
pub fn new(left: Lhs, right: Rhs) -> Self {
$name {
lhs: left,
rhs: right,
}
}
}
impl<Lhs, Rhs> Expression for $name<Lhs, Rhs> where
Lhs: Expression,
Lhs::SqlType: types::ops::$name,
Rhs: Expression,
{
type SqlType = <Lhs::SqlType as types::ops::$name>::Output;
}
impl<Lhs, Rhs, DB> QueryFragment<DB> for $name<Lhs, Rhs> where
DB: Backend,
Lhs: QueryFragment<DB>,
Rhs: QueryFragment<DB>,
{
fn to_sql(&self, out: &mut DB::QueryBuilder) -> BuildQueryResult {
try!(self.lhs.to_sql(out));
out.push_sql($op);
self.rhs.to_sql(out)
}
}
impl<Lhs, Rhs, QS> SelectableExpression<QS> for $name<Lhs, Rhs> where
Lhs: SelectableExpression<QS>,
Rhs: SelectableExpression<QS>,
$name<Lhs, Rhs>: Expression,
{
}
impl<Lhs, Rhs> NonAggregate for $name<Lhs, Rhs> where
Lhs: NonAggregate,
Rhs: NonAggregate,
$name<Lhs, Rhs>: Expression,
{
}
generic_numeric_expr!($name, A, B);
}
} |
numeric_operation!(Add, " + ");
numeric_operation!(Sub, " - ");
numeric_operation!(Mul, " * ");
numeric_operation!(Div, " / "); | random_line_split | |
env.py | from __future__ import with_statement
import os
import sys
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# We need to go back a dir to get the config.
_this_dir = os.path.dirname((os.path.abspath(__file__)))
_parent_dir = os.path.join(_this_dir, '../')
for _p in (_this_dir, _parent_dir):
if _p not in sys.path:
sys.path.append(_p)
from config import API, APP
# Bind some vars for our migrations to use for environmental setup
API_URL_WITH_SLASH = API.LISTEN_URL + "/"
#
# n.b. this is only currently doing API migrations
#
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def merged_ini_py_conf():
"""Update some settings that would be fetched from the ini with those from our
application config.
this could maybe be cleaner with some clever .setdefault('key', default_value)
:return: merged settings dict
"""
conf = config.get_section(config.config_ini_section)
if hasattr(API, 'SQLALCHEMY_DATABASE_URI'):
conf['sqlalchemy.url'] = API.SQLALCHEMY_DATABASE_URI
return conf
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(
url=merged_ini_py_conf().get('sqlalchemy.url'),
target_metadata=target_metadata,
literal_binds=True
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
merged_ini_py_conf(),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
|
else:
run_migrations_online()
| run_migrations_offline() | conditional_block |
env.py | from __future__ import with_statement
import os
import sys
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# We need to go back a dir to get the config.
_this_dir = os.path.dirname((os.path.abspath(__file__)))
_parent_dir = os.path.join(_this_dir, '../')
for _p in (_this_dir, _parent_dir):
if _p not in sys.path:
sys.path.append(_p)
from config import API, APP
# Bind some vars for our migrations to use for environmental setup
API_URL_WITH_SLASH = API.LISTEN_URL + "/"
#
# n.b. this is only currently doing API migrations
#
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def merged_ini_py_conf():
"""Update some settings that would be fetched from the ini with those from our
application config.
this could maybe be cleaner with some clever .setdefault('key', default_value)
:return: merged settings dict
"""
conf = config.get_section(config.config_ini_section)
if hasattr(API, 'SQLALCHEMY_DATABASE_URI'):
conf['sqlalchemy.url'] = API.SQLALCHEMY_DATABASE_URI
return conf
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(
url=merged_ini_py_conf().get('sqlalchemy.url'),
target_metadata=target_metadata,
literal_binds=True
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context. |
connectable = engine_from_config(
merged_ini_py_conf(),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online() |
""" | random_line_split |
env.py | from __future__ import with_statement
import os
import sys
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# We need to go back a dir to get the config.
_this_dir = os.path.dirname((os.path.abspath(__file__)))
_parent_dir = os.path.join(_this_dir, '../')
for _p in (_this_dir, _parent_dir):
if _p not in sys.path:
sys.path.append(_p)
from config import API, APP
# Bind some vars for our migrations to use for environmental setup
API_URL_WITH_SLASH = API.LISTEN_URL + "/"
#
# n.b. this is only currently doing API migrations
#
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def merged_ini_py_conf():
"""Update some settings that would be fetched from the ini with those from our
application config.
this could maybe be cleaner with some clever .setdefault('key', default_value)
:return: merged settings dict
"""
conf = config.get_section(config.config_ini_section)
if hasattr(API, 'SQLALCHEMY_DATABASE_URI'):
conf['sqlalchemy.url'] = API.SQLALCHEMY_DATABASE_URI
return conf
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(
url=merged_ini_py_conf().get('sqlalchemy.url'),
target_metadata=target_metadata,
literal_binds=True
)
with context.begin_transaction():
context.run_migrations()
def | ():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
merged_ini_py_conf(),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| run_migrations_online | identifier_name |
env.py | from __future__ import with_statement
import os
import sys
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# We need to go back a dir to get the config.
_this_dir = os.path.dirname((os.path.abspath(__file__)))
_parent_dir = os.path.join(_this_dir, '../')
for _p in (_this_dir, _parent_dir):
if _p not in sys.path:
sys.path.append(_p)
from config import API, APP
# Bind some vars for our migrations to use for environmental setup
API_URL_WITH_SLASH = API.LISTEN_URL + "/"
#
# n.b. this is only currently doing API migrations
#
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def merged_ini_py_conf():
"""Update some settings that would be fetched from the ini with those from our
application config.
this could maybe be cleaner with some clever .setdefault('key', default_value)
:return: merged settings dict
"""
conf = config.get_section(config.config_ini_section)
if hasattr(API, 'SQLALCHEMY_DATABASE_URI'):
conf['sqlalchemy.url'] = API.SQLALCHEMY_DATABASE_URI
return conf
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(
url=merged_ini_py_conf().get('sqlalchemy.url'),
target_metadata=target_metadata,
literal_binds=True
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
|
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| """Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
merged_ini_py_conf(),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations() | identifier_body |
auth.rs | // Copyleft (ↄ) meh. <meh@schizofreni.co> | http://meh.schizofreni.co
//
// This file is part of screenruster.
//
// screenruster is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// screenruster is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with screenruster. If not, see <http://www.gnu.org/licenses/>.
use std::thread;
use std::ops::Deref;
use channel::{self, Receiver, Sender, SendError};
use users;
use log::warn;
use crate::error;
use crate::config;
use super::Authenticate;
pub struct Auth {
receiver: Receiver<Response>,
sender: Sender<Request>,
}
#[derive(Clone, Debug)]
pub enum Request {
Authenticate(String),
}
#[derive(Clone, Debug)]
pub enum Response {
Success,
Failure,
}
impl Auth {
pub fn spawn(config: config::Auth) -> error::Result<Auth> {
let user = users::get_current_username().ok_or(error::Auth::UnknownUser)?;
let mut methods = Vec::<Box<dyn Authenticate>>::new();
#[cfg(feature = "auth-internal")]
methods.push(box super::internal::new(config.get("internal"))?);
#[cfg(feature = "auth-pam")]
methods.push(Box::new(super::pam::new(config.get("pam"))?));
let (sender, i_receiver) = channel::unbounded();
let (i_sender, receiver) = channel::unbounded();
thread::spawn(move || {
'main: while let Ok(request) = receiver.recv() {
match request {
Request::Authenticate(password) => {
| }
}
});
Ok(Auth {
receiver: i_receiver,
sender: i_sender,
})
}
pub fn authenticate<S: Into<String>>(&self, password: S) -> Result<(), SendError<Request>> {
self.sender.send(Request::Authenticate(password.into()))
}
}
impl Deref for Auth {
type Target = Receiver<Response>;
fn deref(&self) -> &Receiver<Response> {
&self.receiver
}
}
| if methods.is_empty() {
warn!("no authentication method");
sender.send(Response::Success).unwrap();
continue 'main;
}
for method in &mut methods {
if let Ok(true) = method.authenticate(user.to_str().unwrap(), &password) {
sender.send(Response::Success).unwrap();
continue 'main;
}
}
sender.send(Response::Failure).unwrap();
}
| conditional_block |
auth.rs | // Copyleft (ↄ) meh. <meh@schizofreni.co> | http://meh.schizofreni.co
//
// This file is part of screenruster.
//
// screenruster is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. | // screenruster is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with screenruster. If not, see <http://www.gnu.org/licenses/>.
use std::thread;
use std::ops::Deref;
use channel::{self, Receiver, Sender, SendError};
use users;
use log::warn;
use crate::error;
use crate::config;
use super::Authenticate;
pub struct Auth {
receiver: Receiver<Response>,
sender: Sender<Request>,
}
#[derive(Clone, Debug)]
pub enum Request {
Authenticate(String),
}
#[derive(Clone, Debug)]
pub enum Response {
Success,
Failure,
}
impl Auth {
pub fn spawn(config: config::Auth) -> error::Result<Auth> {
let user = users::get_current_username().ok_or(error::Auth::UnknownUser)?;
let mut methods = Vec::<Box<dyn Authenticate>>::new();
#[cfg(feature = "auth-internal")]
methods.push(box super::internal::new(config.get("internal"))?);
#[cfg(feature = "auth-pam")]
methods.push(Box::new(super::pam::new(config.get("pam"))?));
let (sender, i_receiver) = channel::unbounded();
let (i_sender, receiver) = channel::unbounded();
thread::spawn(move || {
'main: while let Ok(request) = receiver.recv() {
match request {
Request::Authenticate(password) => {
if methods.is_empty() {
warn!("no authentication method");
sender.send(Response::Success).unwrap();
continue 'main;
}
for method in &mut methods {
if let Ok(true) = method.authenticate(user.to_str().unwrap(), &password) {
sender.send(Response::Success).unwrap();
continue 'main;
}
}
sender.send(Response::Failure).unwrap();
}
}
}
});
Ok(Auth {
receiver: i_receiver,
sender: i_sender,
})
}
pub fn authenticate<S: Into<String>>(&self, password: S) -> Result<(), SendError<Request>> {
self.sender.send(Request::Authenticate(password.into()))
}
}
impl Deref for Auth {
type Target = Receiver<Response>;
fn deref(&self) -> &Receiver<Response> {
&self.receiver
}
} | // | random_line_split |
auth.rs | // Copyleft (ↄ) meh. <meh@schizofreni.co> | http://meh.schizofreni.co
//
// This file is part of screenruster.
//
// screenruster is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// screenruster is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with screenruster. If not, see <http://www.gnu.org/licenses/>.
use std::thread;
use std::ops::Deref;
use channel::{self, Receiver, Sender, SendError};
use users;
use log::warn;
use crate::error;
use crate::config;
use super::Authenticate;
pub struct Auth {
receiver: Receiver<Response>,
sender: Sender<Request>,
}
#[derive(Clone, Debug)]
pub enum Request {
Authenticate(String),
}
#[derive(Clone, Debug)]
pub enum Response {
Success,
Failure,
}
impl Auth {
pub fn spawn(config: config::Auth) -> error::Result<Auth> {
let user = users::get_current_username().ok_or(error::Auth::UnknownUser)?;
let mut methods = Vec::<Box<dyn Authenticate>>::new();
#[cfg(feature = "auth-internal")]
methods.push(box super::internal::new(config.get("internal"))?);
#[cfg(feature = "auth-pam")]
methods.push(Box::new(super::pam::new(config.get("pam"))?));
let (sender, i_receiver) = channel::unbounded();
let (i_sender, receiver) = channel::unbounded();
thread::spawn(move || {
'main: while let Ok(request) = receiver.recv() {
match request {
Request::Authenticate(password) => {
if methods.is_empty() {
warn!("no authentication method");
sender.send(Response::Success).unwrap();
continue 'main;
}
for method in &mut methods {
if let Ok(true) = method.authenticate(user.to_str().unwrap(), &password) {
sender.send(Response::Success).unwrap();
continue 'main;
}
}
sender.send(Response::Failure).unwrap();
}
}
}
});
Ok(Auth {
receiver: i_receiver,
sender: i_sender,
})
}
pub fn au | : Into<String>>(&self, password: S) -> Result<(), SendError<Request>> {
self.sender.send(Request::Authenticate(password.into()))
}
}
impl Deref for Auth {
type Target = Receiver<Response>;
fn deref(&self) -> &Receiver<Response> {
&self.receiver
}
}
| thenticate<S | identifier_name |
plot_weighted_samples.py | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
""" |
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the last 10 samples
sample_weight[:10] *= 10
# # fit the model
clf = svm.SVC()
clf.fit(X, Y, sample_weight=sample_weight)
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
pl.contourf(xx, yy, Z, alpha=0.75, cmap=pl.cm.bone)
pl.scatter(X[:, 0], X[:, 1], c=Y, s=sample_weight, alpha=0.9, cmap=pl.cm.bone)
pl.axis('off')
pl.show() | print __doc__
import numpy as np
import pylab as pl
from sklearn import svm | random_line_split |
test_releasehook.py | """
sentry.plugins.base.structs
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
__all__ = ['ReleaseHook']
from sentry.models import Release
from sentry.plugins import ReleaseHook
from sentry.testutils import TestCase
class StartReleaseTest(TestCase):
def test_minimal(self):
project = self.create_project()
version = 'bbee5b51f84611e4b14834363b8514c2'
hook = ReleaseHook(project)
hook.start_release(version)
release = Release.objects.get(
project=project,
version=version,
)
assert release.date_started
class FinishReleaseTest(TestCase):
def test_minimal(self):
project = self.create_project()
version = 'bbee5b51f84611e4b14834363b8514c2'
hook = ReleaseHook(project)
hook.finish_release(version)
release = Release.objects.get( | assert release.date_released | project=project,
version=version,
) | random_line_split |
test_releasehook.py | """
sentry.plugins.base.structs
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
__all__ = ['ReleaseHook']
from sentry.models import Release
from sentry.plugins import ReleaseHook
from sentry.testutils import TestCase
class | (TestCase):
def test_minimal(self):
project = self.create_project()
version = 'bbee5b51f84611e4b14834363b8514c2'
hook = ReleaseHook(project)
hook.start_release(version)
release = Release.objects.get(
project=project,
version=version,
)
assert release.date_started
class FinishReleaseTest(TestCase):
def test_minimal(self):
project = self.create_project()
version = 'bbee5b51f84611e4b14834363b8514c2'
hook = ReleaseHook(project)
hook.finish_release(version)
release = Release.objects.get(
project=project,
version=version,
)
assert release.date_released
| StartReleaseTest | identifier_name |
test_releasehook.py | """
sentry.plugins.base.structs
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
__all__ = ['ReleaseHook']
from sentry.models import Release
from sentry.plugins import ReleaseHook
from sentry.testutils import TestCase
class StartReleaseTest(TestCase):
|
class FinishReleaseTest(TestCase):
def test_minimal(self):
project = self.create_project()
version = 'bbee5b51f84611e4b14834363b8514c2'
hook = ReleaseHook(project)
hook.finish_release(version)
release = Release.objects.get(
project=project,
version=version,
)
assert release.date_released
| def test_minimal(self):
project = self.create_project()
version = 'bbee5b51f84611e4b14834363b8514c2'
hook = ReleaseHook(project)
hook.start_release(version)
release = Release.objects.get(
project=project,
version=version,
)
assert release.date_started | identifier_body |
util.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use common::config;
#[cfg(target_os = "win32")]
use std::os::getenv;
/// Conversion table from triple OS name to Rust SYSNAME
static OS_TABLE: &'static [(&'static str, &'static str)] = &[
("mingw32", "win32"),
("win32", "win32"),
("darwin", "macos"),
("android", "android"),
("linux", "linux"),
("freebsd", "freebsd"),
];
pub fn get_os(triple: &str) -> &'static str {
for &(triple_os, os) in OS_TABLE.iter() {
if triple.contains(triple_os) {
return os
}
}
fail!("Cannot determine OS from triple");
}
#[cfg(target_os = "win32")]
pub fn make_new_path(path: &str) -> ~str {
// Windows just uses PATH as the library search path, so we have to
// maintain the current value while adding our own
match getenv(lib_path_env_var()) {
Some(curr) => {
format!("{}{}{}", path, path_div(), curr)
}
None => path.to_str()
}
}
#[cfg(target_os = "win32")]
pub fn lib_path_env_var() -> ~str { ~"PATH" }
#[cfg(target_os = "win32")]
pub fn path_div() -> ~str |
}
| { ~";" }
pub fn logv(config: &config, s: ~str) {
debug!("{}", s);
if config.verbose { println!("{}", s); } | identifier_body |
util.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use common::config;
#[cfg(target_os = "win32")]
use std::os::getenv;
/// Conversion table from triple OS name to Rust SYSNAME
static OS_TABLE: &'static [(&'static str, &'static str)] = &[
("mingw32", "win32"),
("win32", "win32"),
("darwin", "macos"),
("android", "android"),
("linux", "linux"),
("freebsd", "freebsd"),
];
pub fn get_os(triple: &str) -> &'static str {
for &(triple_os, os) in OS_TABLE.iter() {
if triple.contains(triple_os) {
return os
}
}
fail!("Cannot determine OS from triple");
}
#[cfg(target_os = "win32")]
pub fn make_new_path(path: &str) -> ~str {
// Windows just uses PATH as the library search path, so we have to
// maintain the current value while adding our own
match getenv(lib_path_env_var()) {
Some(curr) => {
format!("{}{}{}", path, path_div(), curr)
}
None => path.to_str()
}
}
#[cfg(target_os = "win32")]
pub fn | () -> ~str { ~"PATH" }
#[cfg(target_os = "win32")]
pub fn path_div() -> ~str { ~";" }
pub fn logv(config: &config, s: ~str) {
debug!("{}", s);
if config.verbose { println!("{}", s); }
}
| lib_path_env_var | identifier_name |
util.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use common::config;
#[cfg(target_os = "win32")]
use std::os::getenv; |
/// Conversion table from triple OS name to Rust SYSNAME
static OS_TABLE: &'static [(&'static str, &'static str)] = &[
("mingw32", "win32"),
("win32", "win32"),
("darwin", "macos"),
("android", "android"),
("linux", "linux"),
("freebsd", "freebsd"),
];
pub fn get_os(triple: &str) -> &'static str {
for &(triple_os, os) in OS_TABLE.iter() {
if triple.contains(triple_os) {
return os
}
}
fail!("Cannot determine OS from triple");
}
#[cfg(target_os = "win32")]
pub fn make_new_path(path: &str) -> ~str {
// Windows just uses PATH as the library search path, so we have to
// maintain the current value while adding our own
match getenv(lib_path_env_var()) {
Some(curr) => {
format!("{}{}{}", path, path_div(), curr)
}
None => path.to_str()
}
}
#[cfg(target_os = "win32")]
pub fn lib_path_env_var() -> ~str { ~"PATH" }
#[cfg(target_os = "win32")]
pub fn path_div() -> ~str { ~";" }
pub fn logv(config: &config, s: ~str) {
debug!("{}", s);
if config.verbose { println!("{}", s); }
} | random_line_split | |
no-landing-pads.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z no-landing-pads
use std::thread;
static mut HIT: bool = false;
struct A;
impl Drop for A {
fn drop(&mut self) {
unsafe { HIT = true; }
}
}
fn main() | {
thread::spawn(move|| -> () {
let _a = A;
panic!();
}).join().err().unwrap();
assert!(unsafe { !HIT });
} | identifier_body | |
no-landing-pads.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z no-landing-pads
use std::thread;
static mut HIT: bool = false;
struct A;
impl Drop for A {
fn drop(&mut self) {
unsafe { HIT = true; }
}
}
fn | () {
thread::spawn(move|| -> () {
let _a = A;
panic!();
}).join().err().unwrap();
assert!(unsafe { !HIT });
}
| main | identifier_name |
no-landing-pads.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your | // option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z no-landing-pads
use std::thread;
static mut HIT: bool = false;
struct A;
impl Drop for A {
fn drop(&mut self) {
unsafe { HIT = true; }
}
}
fn main() {
thread::spawn(move|| -> () {
let _a = A;
panic!();
}).join().err().unwrap();
assert!(unsafe { !HIT });
} | random_line_split | |
cookiesetter.rs | /*
* This Source Code Form is subject to the
* terms of the Mozilla Public License, v. 2.0
*
* © Gregor Reitzenstein
*/
use iron::prelude::*;
use iron::AfterMiddleware;
use iron::headers::SetCookie;
use iron::typemap::Key;
use cookie::Cookie;
use api::API;
/// This Struct sets Cookies on outgoing Responses as necessary.
/// (i.e. For auth-tokens)
pub struct CookieSetter;
impl CookieSetter
{
pub fn new(_: &API) -> CookieSetter
{ |
impl AfterMiddleware for CookieSetter
{
fn after(&self, req: &mut Request, mut res: Response) -> IronResult<Response>
{
// If the Request contains a CookieReq struct, set the specified Cookie
if req.extensions.contains::<CookieReq>()
{
let cookievalvec: Vec<[String; 2]> = req.extensions.remove::<CookieReq>().unwrap();
// A Cookie is a slice of two Strings: The key and the associated value
let cookies: Vec<Cookie> = cookievalvec.into_iter().map(|x| Cookie::new(x[1].clone(),x[2].clone())).collect();
res.headers.set(SetCookie(cookies));
}
Ok(res)
}
}
// This Struct notifies CookieSetter to set a cookie.
pub struct CookieReq;
// Key needs to be implented so this Struct can be inserted to req.extensions
impl Key for CookieReq { type Value = Vec<[String; 2]>; } | CookieSetter
}
} | random_line_split |
cookiesetter.rs | /*
* This Source Code Form is subject to the
* terms of the Mozilla Public License, v. 2.0
*
* © Gregor Reitzenstein
*/
use iron::prelude::*;
use iron::AfterMiddleware;
use iron::headers::SetCookie;
use iron::typemap::Key;
use cookie::Cookie;
use api::API;
/// This Struct sets Cookies on outgoing Responses as necessary.
/// (i.e. For auth-tokens)
pub struct CookieSetter;
impl CookieSetter
{
pub fn new(_: &API) -> CookieSetter
{
CookieSetter
}
}
impl AfterMiddleware for CookieSetter
{
fn after(&self, req: &mut Request, mut res: Response) -> IronResult<Response>
{
// If the Request contains a CookieReq struct, set the specified Cookie
if req.extensions.contains::<CookieReq>()
{ | Ok(res)
}
}
// This Struct notifies CookieSetter to set a cookie.
pub struct CookieReq;
// Key needs to be implented so this Struct can be inserted to req.extensions
impl Key for CookieReq { type Value = Vec<[String; 2]>; }
|
let cookievalvec: Vec<[String; 2]> = req.extensions.remove::<CookieReq>().unwrap();
// A Cookie is a slice of two Strings: The key and the associated value
let cookies: Vec<Cookie> = cookievalvec.into_iter().map(|x| Cookie::new(x[1].clone(),x[2].clone())).collect();
res.headers.set(SetCookie(cookies));
}
| conditional_block |
cookiesetter.rs | /*
* This Source Code Form is subject to the
* terms of the Mozilla Public License, v. 2.0
*
* © Gregor Reitzenstein
*/
use iron::prelude::*;
use iron::AfterMiddleware;
use iron::headers::SetCookie;
use iron::typemap::Key;
use cookie::Cookie;
use api::API;
/// This Struct sets Cookies on outgoing Responses as necessary.
/// (i.e. For auth-tokens)
pub struct CookieSetter;
impl CookieSetter
{
pub fn n | _: &API) -> CookieSetter
{
CookieSetter
}
}
impl AfterMiddleware for CookieSetter
{
fn after(&self, req: &mut Request, mut res: Response) -> IronResult<Response>
{
// If the Request contains a CookieReq struct, set the specified Cookie
if req.extensions.contains::<CookieReq>()
{
let cookievalvec: Vec<[String; 2]> = req.extensions.remove::<CookieReq>().unwrap();
// A Cookie is a slice of two Strings: The key and the associated value
let cookies: Vec<Cookie> = cookievalvec.into_iter().map(|x| Cookie::new(x[1].clone(),x[2].clone())).collect();
res.headers.set(SetCookie(cookies));
}
Ok(res)
}
}
// This Struct notifies CookieSetter to set a cookie.
pub struct CookieReq;
// Key needs to be implented so this Struct can be inserted to req.extensions
impl Key for CookieReq { type Value = Vec<[String; 2]>; }
| ew( | identifier_name |
components.tsx | /*
Copyright 2019 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import { Map, Set } from 'immutable';
import React from 'react';
import { Link } from 'react-router-dom';
import { OpenInNew, PersonAdd } from '@material-ui/icons/';
import { IArticleModel, ICategoryModel, IUserModel, ModelId } from '../../../models';
import { Avatar, MagicTimestamp, PseudoAvatar } from '../../components';
import { COMMON_STYLES, IMAGE_BASE } from '../../stylesx';
import { css, stylesheet } from '../../utilx';
interface IModeratorsWidgetProps {
users: Map<string, IUserModel>;
moderatorIds: Array<ModelId>;
superModeratorIds: Array<ModelId>;
openSetModerators(): void;
}
export const MODERATOR_WIDGET_STYLES = stylesheet({
widget: {
display: 'flex',
flexWrap: 'wrap',
justifyContent: 'center',
},
});
export function ModeratorsWidget(props: IModeratorsWidgetProps) {
const { users, moderatorIds, superModeratorIds } = props;
let s = Set(moderatorIds);
if (superModeratorIds) {
s = s.merge(superModeratorIds);
}
const moderators = s.toArray().map((uid: string) => users.get(uid));
if (moderators.length === 0) {
return (
<div onClick={props.openSetModerators} {...css(MODERATOR_WIDGET_STYLES.widget)}>
<PseudoAvatar size={IMAGE_BASE}>
<PersonAdd/>
</PseudoAvatar>
</div>
);
}
if (moderators.length === 1) {
const u = moderators[0];
return (
<div onClick={props.openSetModerators} {...css(MODERATOR_WIDGET_STYLES.widget)}>
<Avatar target={u} size={IMAGE_BASE}/>
</div>
);
}
const ret = [];
let limit = moderators.length;
let extra = false;
if (limit > 4) {
limit = 3;
extra = true;
} else if (limit === 4) {
limit = 4;
}
for (let i = 0; i < limit; i++) {
ret.push(<Avatar target={moderators[i]} size={IMAGE_BASE / 2}/>);
}
if (extra) {
ret.push(<PseudoAvatar size={IMAGE_BASE / 2}>+{moderators.length - 3}</PseudoAvatar>);
}
return (
<div onClick={props.openSetModerators} {...css(MODERATOR_WIDGET_STYLES.widget)}>
{ret}
</div>
);
}
export const TITLE_CELL_STYLES = stylesheet({
superText: {
fontSize: '10px',
fontWeight: '600',
color: 'rgba(0,0,0,0.54)',
},
categoryLabel: {
textTransform: 'uppercase',
marginRight: '12px',
},
mainText: {
display: 'flex',
},
mainTextText: {
lineHeight: '20px',
},
mainTextLink: {
padding: '0 10px',
color: 'rgba(0,0,0,0.54)',
},
});
interface ITitleCellProps {
category?: ICategoryModel;
article: IArticleModel;
link: string;
}
export function TitleCell(props: ITitleCellProps) {
const {
category, | article,
link,
} = props;
const supertext = [];
if (category) {
supertext.push(<span key="label" {...css(TITLE_CELL_STYLES.categoryLabel)}>{category.label}</span>);
}
if (article.sourceCreatedAt) {
supertext.push((
<span key="timestamp">
<MagicTimestamp timestamp={article.sourceCreatedAt} inFuture={false}/>
</span>
));
}
return (
<>
{supertext.length > 0 && <div {...css(TITLE_CELL_STYLES.superText)}>{supertext}</div>}
<div {...css(TITLE_CELL_STYLES.mainText)}>
<div>
<Link to={link} {...css(COMMON_STYLES.cellLink, TITLE_CELL_STYLES.mainTextText)}>
{article.title}
</Link>
</div>
{article.url && (
<div {...css(TITLE_CELL_STYLES.mainTextLink)}>
<a key="link" href={article.url} target="_blank" {...css(COMMON_STYLES.cellLink)}>
<OpenInNew fontSize="small" />
</a>
</div>
)}
</div>
</>
);
} | random_line_split | |
components.tsx | /*
Copyright 2019 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import { Map, Set } from 'immutable';
import React from 'react';
import { Link } from 'react-router-dom';
import { OpenInNew, PersonAdd } from '@material-ui/icons/';
import { IArticleModel, ICategoryModel, IUserModel, ModelId } from '../../../models';
import { Avatar, MagicTimestamp, PseudoAvatar } from '../../components';
import { COMMON_STYLES, IMAGE_BASE } from '../../stylesx';
import { css, stylesheet } from '../../utilx';
interface IModeratorsWidgetProps {
users: Map<string, IUserModel>;
moderatorIds: Array<ModelId>;
superModeratorIds: Array<ModelId>;
openSetModerators(): void;
}
export const MODERATOR_WIDGET_STYLES = stylesheet({
widget: {
display: 'flex',
flexWrap: 'wrap',
justifyContent: 'center',
},
});
export function ModeratorsWidget(props: IModeratorsWidgetProps) {
const { users, moderatorIds, superModeratorIds } = props;
let s = Set(moderatorIds);
if (superModeratorIds) |
const moderators = s.toArray().map((uid: string) => users.get(uid));
if (moderators.length === 0) {
return (
<div onClick={props.openSetModerators} {...css(MODERATOR_WIDGET_STYLES.widget)}>
<PseudoAvatar size={IMAGE_BASE}>
<PersonAdd/>
</PseudoAvatar>
</div>
);
}
if (moderators.length === 1) {
const u = moderators[0];
return (
<div onClick={props.openSetModerators} {...css(MODERATOR_WIDGET_STYLES.widget)}>
<Avatar target={u} size={IMAGE_BASE}/>
</div>
);
}
const ret = [];
let limit = moderators.length;
let extra = false;
if (limit > 4) {
limit = 3;
extra = true;
} else if (limit === 4) {
limit = 4;
}
for (let i = 0; i < limit; i++) {
ret.push(<Avatar target={moderators[i]} size={IMAGE_BASE / 2}/>);
}
if (extra) {
ret.push(<PseudoAvatar size={IMAGE_BASE / 2}>+{moderators.length - 3}</PseudoAvatar>);
}
return (
<div onClick={props.openSetModerators} {...css(MODERATOR_WIDGET_STYLES.widget)}>
{ret}
</div>
);
}
export const TITLE_CELL_STYLES = stylesheet({
superText: {
fontSize: '10px',
fontWeight: '600',
color: 'rgba(0,0,0,0.54)',
},
categoryLabel: {
textTransform: 'uppercase',
marginRight: '12px',
},
mainText: {
display: 'flex',
},
mainTextText: {
lineHeight: '20px',
},
mainTextLink: {
padding: '0 10px',
color: 'rgba(0,0,0,0.54)',
},
});
interface ITitleCellProps {
category?: ICategoryModel;
article: IArticleModel;
link: string;
}
export function TitleCell(props: ITitleCellProps) {
const {
category,
article,
link,
} = props;
const supertext = [];
if (category) {
supertext.push(<span key="label" {...css(TITLE_CELL_STYLES.categoryLabel)}>{category.label}</span>);
}
if (article.sourceCreatedAt) {
supertext.push((
<span key="timestamp">
<MagicTimestamp timestamp={article.sourceCreatedAt} inFuture={false}/>
</span>
));
}
return (
<>
{supertext.length > 0 && <div {...css(TITLE_CELL_STYLES.superText)}>{supertext}</div>}
<div {...css(TITLE_CELL_STYLES.mainText)}>
<div>
<Link to={link} {...css(COMMON_STYLES.cellLink, TITLE_CELL_STYLES.mainTextText)}>
{article.title}
</Link>
</div>
{article.url && (
<div {...css(TITLE_CELL_STYLES.mainTextLink)}>
<a key="link" href={article.url} target="_blank" {...css(COMMON_STYLES.cellLink)}>
<OpenInNew fontSize="small" />
</a>
</div>
)}
</div>
</>
);
}
| {
s = s.merge(superModeratorIds);
} | conditional_block |
check-kibana-settings.service.ts | /*
* Wazuh app - Check Kibana settings service
*
* Copyright (C) 2015-2021 Wazuh, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Find more information about this on the LICENSE file.
*
*/
import { CheckLogger } from '../types/check_logger';
import _ from 'lodash';
import { getUiSettings } from '../../../kibana-services';
export const checkKibanaSettings = (kibanaSettingName: string, defaultAppValue: any, callback?: (checkLogger: CheckLogger, options: {defaultAppValue: any}) => void) => (appConfig: any) => async (checkLogger: CheckLogger) => {
checkLogger.info('Getting settings...');
const valueKibanaSetting = getUiSettings().get(kibanaSettingName);
const settingsAreDifferent = !_.isEqual(
typeof defaultAppValue === 'string' ? stringifySetting(valueKibanaSetting) : valueKibanaSetting,
defaultAppValue
);
checkLogger.info(`Check Kibana setting [${kibanaSettingName}]: ${stringifySetting(valueKibanaSetting)}`);
checkLogger.info(`App setting [${kibanaSettingName}]: ${stringifySetting(defaultAppValue)}`);
checkLogger.info(`Settings mismatch [${kibanaSettingName}]: ${settingsAreDifferent ? 'yes' : 'no'}`);
if ( !valueKibanaSetting || settingsAreDifferent ){
checkLogger.info(`Updating [${kibanaSettingName}] setting...`);
await updateSetting(kibanaSettingName, defaultAppValue);
checkLogger.action(`Updated [${kibanaSettingName}] setting to: ${stringifySetting(defaultAppValue)}`);
callback && callback(checkLogger,{ defaultAppValue });
}
}
async function | (kibanaSettingName, defaultAppValue, retries = 3) {
return await getUiSettings()
.set(kibanaSettingName, null)
.catch(async (error) => {
if (retries > 0) {
return await updateSetting(kibanaSettingName, defaultAppValue, --retries);
}
throw error;
});
}
function stringifySetting(setting: any){
try{
return JSON.stringify(setting);
}catch(error){
return setting;
};
};
| updateSetting | identifier_name |
check-kibana-settings.service.ts | /*
* Wazuh app - Check Kibana settings service
*
* Copyright (C) 2015-2021 Wazuh, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Find more information about this on the LICENSE file.
*
*/
import { CheckLogger } from '../types/check_logger';
import _ from 'lodash';
import { getUiSettings } from '../../../kibana-services';
export const checkKibanaSettings = (kibanaSettingName: string, defaultAppValue: any, callback?: (checkLogger: CheckLogger, options: {defaultAppValue: any}) => void) => (appConfig: any) => async (checkLogger: CheckLogger) => {
checkLogger.info('Getting settings...');
const valueKibanaSetting = getUiSettings().get(kibanaSettingName);
const settingsAreDifferent = !_.isEqual(
typeof defaultAppValue === 'string' ? stringifySetting(valueKibanaSetting) : valueKibanaSetting,
defaultAppValue
);
checkLogger.info(`Check Kibana setting [${kibanaSettingName}]: ${stringifySetting(valueKibanaSetting)}`);
checkLogger.info(`App setting [${kibanaSettingName}]: ${stringifySetting(defaultAppValue)}`);
checkLogger.info(`Settings mismatch [${kibanaSettingName}]: ${settingsAreDifferent ? 'yes' : 'no'}`);
if ( !valueKibanaSetting || settingsAreDifferent ){
checkLogger.info(`Updating [${kibanaSettingName}] setting...`);
await updateSetting(kibanaSettingName, defaultAppValue);
checkLogger.action(`Updated [${kibanaSettingName}] setting to: ${stringifySetting(defaultAppValue)}`);
callback && callback(checkLogger,{ defaultAppValue });
}
}
async function updateSetting(kibanaSettingName, defaultAppValue, retries = 3) {
return await getUiSettings()
.set(kibanaSettingName, null)
.catch(async (error) => {
if (retries > 0) |
throw error;
});
}
function stringifySetting(setting: any){
try{
return JSON.stringify(setting);
}catch(error){
return setting;
};
};
| {
return await updateSetting(kibanaSettingName, defaultAppValue, --retries);
} | conditional_block |
check-kibana-settings.service.ts | /*
* Wazuh app - Check Kibana settings service
*
* Copyright (C) 2015-2021 Wazuh, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Find more information about this on the LICENSE file.
*
*/
import { CheckLogger } from '../types/check_logger';
import _ from 'lodash';
import { getUiSettings } from '../../../kibana-services';
export const checkKibanaSettings = (kibanaSettingName: string, defaultAppValue: any, callback?: (checkLogger: CheckLogger, options: {defaultAppValue: any}) => void) => (appConfig: any) => async (checkLogger: CheckLogger) => {
checkLogger.info('Getting settings...');
const valueKibanaSetting = getUiSettings().get(kibanaSettingName);
const settingsAreDifferent = !_.isEqual(
typeof defaultAppValue === 'string' ? stringifySetting(valueKibanaSetting) : valueKibanaSetting,
defaultAppValue
);
checkLogger.info(`Check Kibana setting [${kibanaSettingName}]: ${stringifySetting(valueKibanaSetting)}`);
checkLogger.info(`App setting [${kibanaSettingName}]: ${stringifySetting(defaultAppValue)}`);
checkLogger.info(`Settings mismatch [${kibanaSettingName}]: ${settingsAreDifferent ? 'yes' : 'no'}`);
if ( !valueKibanaSetting || settingsAreDifferent ){
checkLogger.info(`Updating [${kibanaSettingName}] setting...`);
await updateSetting(kibanaSettingName, defaultAppValue);
checkLogger.action(`Updated [${kibanaSettingName}] setting to: ${stringifySetting(defaultAppValue)}`);
callback && callback(checkLogger,{ defaultAppValue }); |
async function updateSetting(kibanaSettingName, defaultAppValue, retries = 3) {
return await getUiSettings()
.set(kibanaSettingName, null)
.catch(async (error) => {
if (retries > 0) {
return await updateSetting(kibanaSettingName, defaultAppValue, --retries);
}
throw error;
});
}
function stringifySetting(setting: any){
try{
return JSON.stringify(setting);
}catch(error){
return setting;
};
}; | }
} | random_line_split |
check-kibana-settings.service.ts | /*
* Wazuh app - Check Kibana settings service
*
* Copyright (C) 2015-2021 Wazuh, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Find more information about this on the LICENSE file.
*
*/
import { CheckLogger } from '../types/check_logger';
import _ from 'lodash';
import { getUiSettings } from '../../../kibana-services';
export const checkKibanaSettings = (kibanaSettingName: string, defaultAppValue: any, callback?: (checkLogger: CheckLogger, options: {defaultAppValue: any}) => void) => (appConfig: any) => async (checkLogger: CheckLogger) => {
checkLogger.info('Getting settings...');
const valueKibanaSetting = getUiSettings().get(kibanaSettingName);
const settingsAreDifferent = !_.isEqual(
typeof defaultAppValue === 'string' ? stringifySetting(valueKibanaSetting) : valueKibanaSetting,
defaultAppValue
);
checkLogger.info(`Check Kibana setting [${kibanaSettingName}]: ${stringifySetting(valueKibanaSetting)}`);
checkLogger.info(`App setting [${kibanaSettingName}]: ${stringifySetting(defaultAppValue)}`);
checkLogger.info(`Settings mismatch [${kibanaSettingName}]: ${settingsAreDifferent ? 'yes' : 'no'}`);
if ( !valueKibanaSetting || settingsAreDifferent ){
checkLogger.info(`Updating [${kibanaSettingName}] setting...`);
await updateSetting(kibanaSettingName, defaultAppValue);
checkLogger.action(`Updated [${kibanaSettingName}] setting to: ${stringifySetting(defaultAppValue)}`);
callback && callback(checkLogger,{ defaultAppValue });
}
}
async function updateSetting(kibanaSettingName, defaultAppValue, retries = 3) {
return await getUiSettings()
.set(kibanaSettingName, null)
.catch(async (error) => {
if (retries > 0) {
return await updateSetting(kibanaSettingName, defaultAppValue, --retries);
}
throw error;
});
}
function stringifySetting(setting: any) | ;
| {
try{
return JSON.stringify(setting);
}catch(error){
return setting;
};
} | identifier_body |
bitstream.rs | //! This module provides bit readers and writers
use std::io::{self, Write};
/// Containes either the consumed bytes and reconstructed bits or
/// only the consumed bytes if the supplied buffer was not bit enough
pub enum Bits {
/// Consumed bytes, reconstructed bits
Some(usize, u16),
/// Consumed bytes
None(usize),
}
/// A bit reader.
pub trait BitReader {
/// Returns the next `n` bits.
fn read_bits(&mut self, buf: &[u8], n: u8) -> Bits;
}
/// A bit writer.
pub trait BitWriter: Write {
/// Writes the next `n` bits.
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()>;
}
macro_rules! define_bit_readers {
{$(
$name:ident, #[$doc:meta];
)*} => {
$( // START Structure definitions
#[$doc]
#[derive(Debug)]
pub struct $name {
bits: u8,
acc: u32,
}
impl $name {
/// Creates a new bit reader
pub fn new() -> $name {
$name {
bits: 0,
acc: 0,
}
}
}
)* // END Structure definitions
}
}
define_bit_readers!{
LsbReader, #[doc = "Reads bits from a byte stream, LSB first."];
MsbReader, #[doc = "Reads bits from a byte stream, MSB first."];
}
impl BitReader for LsbReader {
fn read_bits(&mut self, mut buf: &[u8], n: u8) -> Bits {
if n > 16 |
let mut consumed = 0;
while self.bits < n {
let byte = if buf.len() > 0 {
let byte = buf[0];
buf = &buf[1..];
byte
} else {
return Bits::None(consumed)
};
self.acc |= (byte as u32) << self.bits;
self.bits += 8;
consumed += 1;
}
let res = self.acc & ((1 << n) - 1);
self.acc >>= n;
self.bits -= n;
Bits::Some(consumed, res as u16)
}
}
impl BitReader for MsbReader {
fn read_bits(&mut self, mut buf: &[u8], n: u8) -> Bits {
if n > 16 {
// This is a logic error the program should have prevented this
// Ideally we would used bounded a integer value instead of u8
panic!("Cannot read more than 16 bits")
}
let mut consumed = 0;
while self.bits < n {
let byte = if buf.len() > 0 {
let byte = buf[0];
buf = &buf[1..];
byte
} else {
return Bits::None(consumed)
};
self.acc |= (byte as u32) << (24 - self.bits);
self.bits += 8;
consumed += 1;
}
let res = self.acc >> (32 - n);
self.acc <<= n;
self.bits -= n;
Bits::Some(consumed, res as u16)
}
}
macro_rules! define_bit_writers {
{$(
$name:ident, #[$doc:meta];
)*} => {
$( // START Structure definitions
#[$doc]
#[allow(dead_code)]
pub struct $name<W: Write> {
w: W,
bits: u8,
acc: u32,
}
impl<W: Write> $name<W> {
/// Creates a new bit reader
#[allow(dead_code)]
pub fn new(writer: W) -> $name<W> {
$name {
w: writer,
bits: 0,
acc: 0,
}
}
}
impl<W: Write> Write for $name<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.acc == 0 {
self.w.write(buf)
} else {
for &byte in buf.iter() {
try!(self.write_bits(byte as u16, 8))
}
Ok(buf.len())
}
}
fn flush(&mut self) -> io::Result<()> {
let missing = 8 - self.bits;
if missing > 0 {
try!(self.write_bits(0, missing));
}
self.w.flush()
}
}
)* // END Structure definitions
}
}
define_bit_writers!{
LsbWriter, #[doc = "Writes bits to a byte stream, LSB first."];
MsbWriter, #[doc = "Writes bits to a byte stream, MSB first."];
}
impl<W: Write> BitWriter for LsbWriter<W> {
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()> {
self.acc |= (v as u32) << self.bits;
self.bits += n;
while self.bits >= 8 {
try!(self.w.write_all(&[self.acc as u8]));
self.acc >>= 8;
self.bits -= 8
}
Ok(())
}
}
impl<W: Write> BitWriter for MsbWriter<W> {
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()> {
self.acc |= (v as u32) << (32 - n - self.bits);
self.bits += n;
while self.bits >= 8 {
try!(self.w.write_all(&[(self.acc >> 24) as u8]));
self.acc <<= 8;
self.bits -= 8
}
Ok(())
}
}
#[cfg(test)]
mod test {
use super::{BitReader, BitWriter, Bits};
#[test]
fn reader_writer() {
let data = [255, 20, 40, 120, 128];
let mut offset = 0;
let mut expanded_data = Vec::new();
let mut reader = super::LsbReader::new();
while let Bits::Some(consumed, b) = reader.read_bits(&data[offset..], 10) {
offset += consumed;
expanded_data.push(b)
}
let mut compressed_data = Vec::new();
{
let mut writer = super::LsbWriter::new(&mut compressed_data);
for &datum in expanded_data.iter() {
let _ = writer.write_bits(datum, 10);
}
}
assert_eq!(&data[..], &compressed_data[..])
}
}
| {
// This is a logic error the program should have prevented this
// Ideally we would used bounded a integer value instead of u8
panic!("Cannot read more than 16 bits")
} | conditional_block |
bitstream.rs | //! This module provides bit readers and writers
use std::io::{self, Write};
/// Containes either the consumed bytes and reconstructed bits or
/// only the consumed bytes if the supplied buffer was not bit enough
pub enum Bits {
/// Consumed bytes, reconstructed bits
Some(usize, u16),
/// Consumed bytes
None(usize),
}
/// A bit reader.
pub trait BitReader {
/// Returns the next `n` bits.
fn read_bits(&mut self, buf: &[u8], n: u8) -> Bits;
}
/// A bit writer.
pub trait BitWriter: Write {
/// Writes the next `n` bits.
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()>;
}
macro_rules! define_bit_readers {
{$(
$name:ident, #[$doc:meta];
)*} => {
$( // START Structure definitions
#[$doc]
#[derive(Debug)]
pub struct $name {
bits: u8,
acc: u32,
}
impl $name {
/// Creates a new bit reader
pub fn new() -> $name {
$name {
bits: 0,
acc: 0,
}
}
}
)* // END Structure definitions
}
}
define_bit_readers!{
LsbReader, #[doc = "Reads bits from a byte stream, LSB first."];
MsbReader, #[doc = "Reads bits from a byte stream, MSB first."];
}
impl BitReader for LsbReader {
fn read_bits(&mut self, mut buf: &[u8], n: u8) -> Bits {
if n > 16 {
// This is a logic error the program should have prevented this
// Ideally we would used bounded a integer value instead of u8
panic!("Cannot read more than 16 bits")
}
let mut consumed = 0;
while self.bits < n {
let byte = if buf.len() > 0 {
let byte = buf[0];
buf = &buf[1..];
byte
} else {
return Bits::None(consumed)
};
self.acc |= (byte as u32) << self.bits;
self.bits += 8;
consumed += 1;
}
let res = self.acc & ((1 << n) - 1);
self.acc >>= n;
self.bits -= n;
Bits::Some(consumed, res as u16)
}
}
impl BitReader for MsbReader {
fn read_bits(&mut self, mut buf: &[u8], n: u8) -> Bits {
if n > 16 {
// This is a logic error the program should have prevented this
// Ideally we would used bounded a integer value instead of u8
panic!("Cannot read more than 16 bits")
}
let mut consumed = 0;
while self.bits < n {
let byte = if buf.len() > 0 {
let byte = buf[0];
buf = &buf[1..];
byte
} else {
return Bits::None(consumed)
};
self.acc |= (byte as u32) << (24 - self.bits);
self.bits += 8;
consumed += 1;
}
let res = self.acc >> (32 - n);
self.acc <<= n;
self.bits -= n;
Bits::Some(consumed, res as u16)
}
}
macro_rules! define_bit_writers {
{$(
$name:ident, #[$doc:meta];
)*} => {
$( // START Structure definitions
#[$doc]
#[allow(dead_code)]
pub struct $name<W: Write> {
w: W,
bits: u8,
acc: u32,
}
impl<W: Write> $name<W> {
/// Creates a new bit reader
#[allow(dead_code)]
pub fn new(writer: W) -> $name<W> {
$name {
w: writer,
bits: 0,
acc: 0,
}
}
}
impl<W: Write> Write for $name<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.acc == 0 {
self.w.write(buf)
} else {
for &byte in buf.iter() {
try!(self.write_bits(byte as u16, 8))
}
Ok(buf.len())
}
}
fn flush(&mut self) -> io::Result<()> {
let missing = 8 - self.bits;
if missing > 0 {
try!(self.write_bits(0, missing));
}
self.w.flush()
}
}
)* // END Structure definitions
}
}
define_bit_writers!{
LsbWriter, #[doc = "Writes bits to a byte stream, LSB first."];
MsbWriter, #[doc = "Writes bits to a byte stream, MSB first."];
}
impl<W: Write> BitWriter for LsbWriter<W> {
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()> {
self.acc |= (v as u32) << self.bits;
self.bits += n;
while self.bits >= 8 {
try!(self.w.write_all(&[self.acc as u8]));
self.acc >>= 8;
self.bits -= 8
}
Ok(())
}
}
impl<W: Write> BitWriter for MsbWriter<W> {
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()> {
self.acc |= (v as u32) << (32 - n - self.bits);
self.bits += n;
while self.bits >= 8 {
try!(self.w.write_all(&[(self.acc >> 24) as u8]));
self.acc <<= 8;
self.bits -= 8
}
Ok(())
}
}
#[cfg(test)]
mod test {
use super::{BitReader, BitWriter, Bits};
#[test]
fn | () {
let data = [255, 20, 40, 120, 128];
let mut offset = 0;
let mut expanded_data = Vec::new();
let mut reader = super::LsbReader::new();
while let Bits::Some(consumed, b) = reader.read_bits(&data[offset..], 10) {
offset += consumed;
expanded_data.push(b)
}
let mut compressed_data = Vec::new();
{
let mut writer = super::LsbWriter::new(&mut compressed_data);
for &datum in expanded_data.iter() {
let _ = writer.write_bits(datum, 10);
}
}
assert_eq!(&data[..], &compressed_data[..])
}
}
| reader_writer | identifier_name |
bitstream.rs | //! This module provides bit readers and writers
use std::io::{self, Write};
/// Containes either the consumed bytes and reconstructed bits or
/// only the consumed bytes if the supplied buffer was not bit enough
pub enum Bits {
/// Consumed bytes, reconstructed bits
Some(usize, u16),
/// Consumed bytes
None(usize),
}
/// A bit reader.
pub trait BitReader {
/// Returns the next `n` bits.
fn read_bits(&mut self, buf: &[u8], n: u8) -> Bits;
}
/// A bit writer.
pub trait BitWriter: Write {
/// Writes the next `n` bits.
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()>;
}
macro_rules! define_bit_readers {
{$(
$name:ident, #[$doc:meta];
)*} => {
$( // START Structure definitions
#[$doc]
#[derive(Debug)]
pub struct $name {
bits: u8,
acc: u32,
}
impl $name {
/// Creates a new bit reader
pub fn new() -> $name {
$name {
bits: 0,
acc: 0,
}
}
}
)* // END Structure definitions
}
}
define_bit_readers!{
LsbReader, #[doc = "Reads bits from a byte stream, LSB first."];
MsbReader, #[doc = "Reads bits from a byte stream, MSB first."];
}
impl BitReader for LsbReader {
fn read_bits(&mut self, mut buf: &[u8], n: u8) -> Bits {
if n > 16 {
// This is a logic error the program should have prevented this
// Ideally we would used bounded a integer value instead of u8
panic!("Cannot read more than 16 bits")
}
let mut consumed = 0;
while self.bits < n {
let byte = if buf.len() > 0 {
let byte = buf[0];
buf = &buf[1..];
byte
} else {
return Bits::None(consumed)
};
self.acc |= (byte as u32) << self.bits;
self.bits += 8;
consumed += 1;
}
let res = self.acc & ((1 << n) - 1);
self.acc >>= n;
self.bits -= n;
Bits::Some(consumed, res as u16)
}
}
impl BitReader for MsbReader {
fn read_bits(&mut self, mut buf: &[u8], n: u8) -> Bits {
if n > 16 {
// This is a logic error the program should have prevented this
// Ideally we would used bounded a integer value instead of u8
panic!("Cannot read more than 16 bits")
}
let mut consumed = 0;
while self.bits < n {
let byte = if buf.len() > 0 {
let byte = buf[0];
buf = &buf[1..];
byte
} else {
return Bits::None(consumed)
};
self.acc |= (byte as u32) << (24 - self.bits);
self.bits += 8;
consumed += 1;
}
let res = self.acc >> (32 - n);
self.acc <<= n;
self.bits -= n;
Bits::Some(consumed, res as u16)
}
}
macro_rules! define_bit_writers {
{$(
$name:ident, #[$doc:meta];
)*} => {
$( // START Structure definitions
#[$doc]
#[allow(dead_code)]
pub struct $name<W: Write> {
w: W,
bits: u8,
acc: u32,
}
impl<W: Write> $name<W> {
/// Creates a new bit reader
#[allow(dead_code)]
pub fn new(writer: W) -> $name<W> {
$name {
w: writer,
bits: 0,
acc: 0,
}
}
}
impl<W: Write> Write for $name<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.acc == 0 {
self.w.write(buf)
} else {
for &byte in buf.iter() {
try!(self.write_bits(byte as u16, 8))
}
Ok(buf.len())
}
}
fn flush(&mut self) -> io::Result<()> {
let missing = 8 - self.bits;
if missing > 0 {
try!(self.write_bits(0, missing));
}
self.w.flush()
}
}
)* // END Structure definitions
}
}
define_bit_writers!{
LsbWriter, #[doc = "Writes bits to a byte stream, LSB first."];
MsbWriter, #[doc = "Writes bits to a byte stream, MSB first."];
}
impl<W: Write> BitWriter for LsbWriter<W> {
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()> {
self.acc |= (v as u32) << self.bits;
self.bits += n;
while self.bits >= 8 {
try!(self.w.write_all(&[self.acc as u8]));
self.acc >>= 8;
self.bits -= 8
}
Ok(())
}
}
impl<W: Write> BitWriter for MsbWriter<W> {
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()> {
self.acc |= (v as u32) << (32 - n - self.bits);
self.bits += n;
while self.bits >= 8 {
try!(self.w.write_all(&[(self.acc >> 24) as u8]));
self.acc <<= 8;
self.bits -= 8
}
Ok(())
}
}
#[cfg(test)]
mod test {
use super::{BitReader, BitWriter, Bits};
#[test]
fn reader_writer() {
let data = [255, 20, 40, 120, 128];
let mut offset = 0;
let mut expanded_data = Vec::new();
let mut reader = super::LsbReader::new();
while let Bits::Some(consumed, b) = reader.read_bits(&data[offset..], 10) {
offset += consumed;
expanded_data.push(b)
}
let mut compressed_data = Vec::new();
{
let mut writer = super::LsbWriter::new(&mut compressed_data); | }
assert_eq!(&data[..], &compressed_data[..])
}
} | for &datum in expanded_data.iter() {
let _ = writer.write_bits(datum, 10);
} | random_line_split |
bitstream.rs | //! This module provides bit readers and writers
use std::io::{self, Write};
/// Containes either the consumed bytes and reconstructed bits or
/// only the consumed bytes if the supplied buffer was not bit enough
pub enum Bits {
/// Consumed bytes, reconstructed bits
Some(usize, u16),
/// Consumed bytes
None(usize),
}
/// A bit reader.
pub trait BitReader {
/// Returns the next `n` bits.
fn read_bits(&mut self, buf: &[u8], n: u8) -> Bits;
}
/// A bit writer.
pub trait BitWriter: Write {
/// Writes the next `n` bits.
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()>;
}
macro_rules! define_bit_readers {
{$(
$name:ident, #[$doc:meta];
)*} => {
$( // START Structure definitions
#[$doc]
#[derive(Debug)]
pub struct $name {
bits: u8,
acc: u32,
}
impl $name {
/// Creates a new bit reader
pub fn new() -> $name {
$name {
bits: 0,
acc: 0,
}
}
}
)* // END Structure definitions
}
}
define_bit_readers!{
LsbReader, #[doc = "Reads bits from a byte stream, LSB first."];
MsbReader, #[doc = "Reads bits from a byte stream, MSB first."];
}
impl BitReader for LsbReader {
fn read_bits(&mut self, mut buf: &[u8], n: u8) -> Bits {
if n > 16 {
// This is a logic error the program should have prevented this
// Ideally we would used bounded a integer value instead of u8
panic!("Cannot read more than 16 bits")
}
let mut consumed = 0;
while self.bits < n {
let byte = if buf.len() > 0 {
let byte = buf[0];
buf = &buf[1..];
byte
} else {
return Bits::None(consumed)
};
self.acc |= (byte as u32) << self.bits;
self.bits += 8;
consumed += 1;
}
let res = self.acc & ((1 << n) - 1);
self.acc >>= n;
self.bits -= n;
Bits::Some(consumed, res as u16)
}
}
impl BitReader for MsbReader {
fn read_bits(&mut self, mut buf: &[u8], n: u8) -> Bits {
if n > 16 {
// This is a logic error the program should have prevented this
// Ideally we would used bounded a integer value instead of u8
panic!("Cannot read more than 16 bits")
}
let mut consumed = 0;
while self.bits < n {
let byte = if buf.len() > 0 {
let byte = buf[0];
buf = &buf[1..];
byte
} else {
return Bits::None(consumed)
};
self.acc |= (byte as u32) << (24 - self.bits);
self.bits += 8;
consumed += 1;
}
let res = self.acc >> (32 - n);
self.acc <<= n;
self.bits -= n;
Bits::Some(consumed, res as u16)
}
}
macro_rules! define_bit_writers {
{$(
$name:ident, #[$doc:meta];
)*} => {
$( // START Structure definitions
#[$doc]
#[allow(dead_code)]
pub struct $name<W: Write> {
w: W,
bits: u8,
acc: u32,
}
impl<W: Write> $name<W> {
/// Creates a new bit reader
#[allow(dead_code)]
pub fn new(writer: W) -> $name<W> {
$name {
w: writer,
bits: 0,
acc: 0,
}
}
}
impl<W: Write> Write for $name<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
if self.acc == 0 {
self.w.write(buf)
} else {
for &byte in buf.iter() {
try!(self.write_bits(byte as u16, 8))
}
Ok(buf.len())
}
}
fn flush(&mut self) -> io::Result<()> {
let missing = 8 - self.bits;
if missing > 0 {
try!(self.write_bits(0, missing));
}
self.w.flush()
}
}
)* // END Structure definitions
}
}
define_bit_writers!{
LsbWriter, #[doc = "Writes bits to a byte stream, LSB first."];
MsbWriter, #[doc = "Writes bits to a byte stream, MSB first."];
}
impl<W: Write> BitWriter for LsbWriter<W> {
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()> |
}
impl<W: Write> BitWriter for MsbWriter<W> {
fn write_bits(&mut self, v: u16, n: u8) -> io::Result<()> {
self.acc |= (v as u32) << (32 - n - self.bits);
self.bits += n;
while self.bits >= 8 {
try!(self.w.write_all(&[(self.acc >> 24) as u8]));
self.acc <<= 8;
self.bits -= 8
}
Ok(())
}
}
#[cfg(test)]
mod test {
use super::{BitReader, BitWriter, Bits};
#[test]
fn reader_writer() {
let data = [255, 20, 40, 120, 128];
let mut offset = 0;
let mut expanded_data = Vec::new();
let mut reader = super::LsbReader::new();
while let Bits::Some(consumed, b) = reader.read_bits(&data[offset..], 10) {
offset += consumed;
expanded_data.push(b)
}
let mut compressed_data = Vec::new();
{
let mut writer = super::LsbWriter::new(&mut compressed_data);
for &datum in expanded_data.iter() {
let _ = writer.write_bits(datum, 10);
}
}
assert_eq!(&data[..], &compressed_data[..])
}
}
| {
self.acc |= (v as u32) << self.bits;
self.bits += n;
while self.bits >= 8 {
try!(self.w.write_all(&[self.acc as u8]));
self.acc >>= 8;
self.bits -= 8
}
Ok(())
} | identifier_body |
api.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
extern crate hyper;
extern crate time;
extern crate url;
use foxbox_taxonomy::api::{ Error, InternalError };
use foxbox_taxonomy::services::*;
use self::hyper::header::{ Authorization, Basic, Connection };
use std::fs;
use std::os::unix::fs::MetadataExt;
use std::io::{ BufWriter, ErrorKind };
use std::io::prelude::*;
use std::path::Path;
// TODO: The camera username and password need to be persisted per-camera
static CAMERA_USERNAME: &'static str = "admin";
static CAMERA_PASSWORD: &'static str = "password";
pub fn create_service_id(service_id: &str) -> Id<ServiceId> {
Id::new(&format!("service:{}@link.mozilla.org", service_id))
}
pub fn create_setter_id(operation: &str, service_id: &str) -> Id<Setter> {
create_io_mechanism_id("setter", operation, service_id)
}
pub fn create_getter_id(operation: &str, service_id: &str) -> Id<Getter> {
create_io_mechanism_id("getter", operation, service_id)
}
pub fn create_io_mechanism_id<IO>(prefix: &str, operation: &str, service_id: &str) -> Id<IO>
where IO: IOMechanism
{
Id::new(&format!("{}:{}.{}@link.mozilla.org", prefix, operation, service_id))
}
fn get_bytes(url: String) -> Result<Vec<u8>, Error> {
let client = hyper::Client::new();
let get_result = client.get(&url)
.header(
Authorization(
Basic {
username: CAMERA_USERNAME.to_owned(),
password: Some(CAMERA_PASSWORD.to_owned())
}
)
)
.header(Connection::close())
.send();
let mut res = match get_result {
Ok(res) => res,
Err(err) => {
warn!("GET on {} failed: {}", url, err);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
if res.status != self::hyper::status::StatusCode::Ok {
warn!("GET on {} failed: {}", url, res.status);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
let mut image = Vec::new();
match res.read_to_end(&mut image) {
Ok(_) => Ok(image),
Err(err) => {
warn!("read of image data from {} failed: {}", url, err);
Err(Error::InternalError(InternalError::InvalidInitialService))
}
}
}
#[derive(Clone)]
pub struct IpCamera {
pub udn: String,
url: String,
snapshot_dir: String,
pub image_list_id: Id<Getter>,
pub image_newest_id: Id<Getter>,
pub snapshot_id: Id<Setter>,
}
impl IpCamera {
pub fn new(udn: &str, url: &str, root_snapshot_dir: &str) -> Result<Self, Error> {
let camera = IpCamera {
udn: udn.to_owned(),
url: url.to_owned(),
image_list_id: create_getter_id("image_list", &udn),
image_newest_id: create_getter_id("image_newest", &udn),
snapshot_id: create_setter_id("snapshot", &udn),
snapshot_dir: format!("{}/{}", root_snapshot_dir, udn)
};
// Create a directory to store snapshots for this camera.
if let Err(err) = fs::create_dir_all(&camera.snapshot_dir) {
if err.kind() != ErrorKind::AlreadyExists {
error!("Unable to create directory {}: {}", camera.snapshot_dir, err);
return Err(Error::InternalError(InternalError::GenericError(format!("cannot create {}", camera.snapshot_dir))));
}
}
Ok(camera)
}
pub fn get_image_list(&self) -> Vec<String> {
let mut array: Vec<String> = vec!();
if let Ok(iter) = fs::read_dir(Path::new(&self.snapshot_dir)) {
for entry in iter {
if let Ok(entry) = entry {
if let Ok(metadata) = entry.metadata() {
if metadata.is_file() {
array.push(String::from(entry.file_name().to_str().unwrap()));
}
}
}
}
}
array
}
pub fn get_image(&self, filename: &str) -> Result<Vec<u8>, Error> {
let full_filename = format!("{}/{}", self.snapshot_dir, filename);
debug!("get_image: filename = {}", full_filename.clone());
let mut options = fs::OpenOptions::new();
options.read(true);
if let Ok(mut image_file) = options.open(full_filename.clone()) {
let mut image = Vec::new();
if let Ok(_) = image_file.read_to_end(&mut image) {
return Ok(image);
}
warn!("Error reading {}", full_filename);
} else {
warn!("Image {} not found", full_filename);
}
Err(Error::InternalError(InternalError::InvalidInitialService))
}
pub fn get_newest_image(&self) -> Result<Vec<u8>, Error> {
let mut newest_image_time = 0;
let mut newest_image = None;
if let Ok(iter) = fs::read_dir(Path::new(&self.snapshot_dir)) {
for entry in iter {
if let Ok(entry) = entry {
if let Ok(metadata) = entry.metadata() {
if metadata.is_file() {
let time = metadata.ctime();
if newest_image_time <= time {
newest_image_time = time;
newest_image = Some(String::from(entry.file_name().to_str().unwrap()));
}
}
}
}
}
}
if newest_image.is_none() {
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
self.get_image(&newest_image.unwrap())
}
pub fn take_snapshot(&self) -> Result<String, Error> {
let image_url = "image/jpeg.cgi";
let url = format!("{}/{}", self.url, image_url);
let image = match get_bytes(url) {
Ok(image) => image,
Err(err) => {
warn!("Error '{:?}' retrieving image from camera {}", err, self.url);
return Err(Error::InternalError(InternalError::InvalidInitialService));
} | };
let mut options = fs::OpenOptions::new();
options.write(true);
options.create(true);
options.truncate(true);
let filename_base = time::strftime("%Y-%m-%d-%H%M%S", &time::now()).unwrap();
let mut full_filename;
let image_file;
let mut loop_count = 0;
let mut filename;
loop {
if loop_count == 0 {
filename = filename_base.clone();
} else {
filename = format!("{}-{}", filename_base, loop_count);
}
full_filename = format!("{}/{}.jpg", self.snapshot_dir, filename);
if fs::metadata(full_filename.clone()).is_ok() {
// File exists
loop_count += 1;
continue;
}
image_file = match options.open(full_filename.clone()) {
Ok(file) => file,
Err(err) => {
warn!("Unable to open {}: {:?}", full_filename, err.kind());
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
break;
}
let mut writer = BufWriter::new(&image_file);
match writer.write_all(&image) {
Ok(_) => {}
Err(err) => {
warn!("Error '{:?}' writing snapshot.jpg for camera {}", err, self.udn);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
}
debug!("Took a snapshot from {}: {}", self.udn, full_filename);
Ok(format!("{}.jpg", filename))
}
} | random_line_split | |
api.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
extern crate hyper;
extern crate time;
extern crate url;
use foxbox_taxonomy::api::{ Error, InternalError };
use foxbox_taxonomy::services::*;
use self::hyper::header::{ Authorization, Basic, Connection };
use std::fs;
use std::os::unix::fs::MetadataExt;
use std::io::{ BufWriter, ErrorKind };
use std::io::prelude::*;
use std::path::Path;
// TODO: The camera username and password need to be persisted per-camera
static CAMERA_USERNAME: &'static str = "admin";
static CAMERA_PASSWORD: &'static str = "password";
pub fn create_service_id(service_id: &str) -> Id<ServiceId> {
Id::new(&format!("service:{}@link.mozilla.org", service_id))
}
pub fn create_setter_id(operation: &str, service_id: &str) -> Id<Setter> {
create_io_mechanism_id("setter", operation, service_id)
}
pub fn create_getter_id(operation: &str, service_id: &str) -> Id<Getter> {
create_io_mechanism_id("getter", operation, service_id)
}
pub fn create_io_mechanism_id<IO>(prefix: &str, operation: &str, service_id: &str) -> Id<IO>
where IO: IOMechanism
{
Id::new(&format!("{}:{}.{}@link.mozilla.org", prefix, operation, service_id))
}
fn get_bytes(url: String) -> Result<Vec<u8>, Error> {
let client = hyper::Client::new();
let get_result = client.get(&url)
.header(
Authorization(
Basic {
username: CAMERA_USERNAME.to_owned(),
password: Some(CAMERA_PASSWORD.to_owned())
}
)
)
.header(Connection::close())
.send();
let mut res = match get_result {
Ok(res) => res,
Err(err) => {
warn!("GET on {} failed: {}", url, err);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
if res.status != self::hyper::status::StatusCode::Ok {
warn!("GET on {} failed: {}", url, res.status);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
let mut image = Vec::new();
match res.read_to_end(&mut image) {
Ok(_) => Ok(image),
Err(err) => |
}
}
#[derive(Clone)]
pub struct IpCamera {
pub udn: String,
url: String,
snapshot_dir: String,
pub image_list_id: Id<Getter>,
pub image_newest_id: Id<Getter>,
pub snapshot_id: Id<Setter>,
}
impl IpCamera {
pub fn new(udn: &str, url: &str, root_snapshot_dir: &str) -> Result<Self, Error> {
let camera = IpCamera {
udn: udn.to_owned(),
url: url.to_owned(),
image_list_id: create_getter_id("image_list", &udn),
image_newest_id: create_getter_id("image_newest", &udn),
snapshot_id: create_setter_id("snapshot", &udn),
snapshot_dir: format!("{}/{}", root_snapshot_dir, udn)
};
// Create a directory to store snapshots for this camera.
if let Err(err) = fs::create_dir_all(&camera.snapshot_dir) {
if err.kind() != ErrorKind::AlreadyExists {
error!("Unable to create directory {}: {}", camera.snapshot_dir, err);
return Err(Error::InternalError(InternalError::GenericError(format!("cannot create {}", camera.snapshot_dir))));
}
}
Ok(camera)
}
pub fn get_image_list(&self) -> Vec<String> {
let mut array: Vec<String> = vec!();
if let Ok(iter) = fs::read_dir(Path::new(&self.snapshot_dir)) {
for entry in iter {
if let Ok(entry) = entry {
if let Ok(metadata) = entry.metadata() {
if metadata.is_file() {
array.push(String::from(entry.file_name().to_str().unwrap()));
}
}
}
}
}
array
}
pub fn get_image(&self, filename: &str) -> Result<Vec<u8>, Error> {
let full_filename = format!("{}/{}", self.snapshot_dir, filename);
debug!("get_image: filename = {}", full_filename.clone());
let mut options = fs::OpenOptions::new();
options.read(true);
if let Ok(mut image_file) = options.open(full_filename.clone()) {
let mut image = Vec::new();
if let Ok(_) = image_file.read_to_end(&mut image) {
return Ok(image);
}
warn!("Error reading {}", full_filename);
} else {
warn!("Image {} not found", full_filename);
}
Err(Error::InternalError(InternalError::InvalidInitialService))
}
pub fn get_newest_image(&self) -> Result<Vec<u8>, Error> {
let mut newest_image_time = 0;
let mut newest_image = None;
if let Ok(iter) = fs::read_dir(Path::new(&self.snapshot_dir)) {
for entry in iter {
if let Ok(entry) = entry {
if let Ok(metadata) = entry.metadata() {
if metadata.is_file() {
let time = metadata.ctime();
if newest_image_time <= time {
newest_image_time = time;
newest_image = Some(String::from(entry.file_name().to_str().unwrap()));
}
}
}
}
}
}
if newest_image.is_none() {
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
self.get_image(&newest_image.unwrap())
}
pub fn take_snapshot(&self) -> Result<String, Error> {
let image_url = "image/jpeg.cgi";
let url = format!("{}/{}", self.url, image_url);
let image = match get_bytes(url) {
Ok(image) => image,
Err(err) => {
warn!("Error '{:?}' retrieving image from camera {}", err, self.url);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
let mut options = fs::OpenOptions::new();
options.write(true);
options.create(true);
options.truncate(true);
let filename_base = time::strftime("%Y-%m-%d-%H%M%S", &time::now()).unwrap();
let mut full_filename;
let image_file;
let mut loop_count = 0;
let mut filename;
loop {
if loop_count == 0 {
filename = filename_base.clone();
} else {
filename = format!("{}-{}", filename_base, loop_count);
}
full_filename = format!("{}/{}.jpg", self.snapshot_dir, filename);
if fs::metadata(full_filename.clone()).is_ok() {
// File exists
loop_count += 1;
continue;
}
image_file = match options.open(full_filename.clone()) {
Ok(file) => file,
Err(err) => {
warn!("Unable to open {}: {:?}", full_filename, err.kind());
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
break;
}
let mut writer = BufWriter::new(&image_file);
match writer.write_all(&image) {
Ok(_) => {}
Err(err) => {
warn!("Error '{:?}' writing snapshot.jpg for camera {}", err, self.udn);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
}
debug!("Took a snapshot from {}: {}", self.udn, full_filename);
Ok(format!("{}.jpg", filename))
}
}
| {
warn!("read of image data from {} failed: {}", url, err);
Err(Error::InternalError(InternalError::InvalidInitialService))
} | conditional_block |
api.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
extern crate hyper;
extern crate time;
extern crate url;
use foxbox_taxonomy::api::{ Error, InternalError };
use foxbox_taxonomy::services::*;
use self::hyper::header::{ Authorization, Basic, Connection };
use std::fs;
use std::os::unix::fs::MetadataExt;
use std::io::{ BufWriter, ErrorKind };
use std::io::prelude::*;
use std::path::Path;
// TODO: The camera username and password need to be persisted per-camera
static CAMERA_USERNAME: &'static str = "admin";
static CAMERA_PASSWORD: &'static str = "password";
pub fn create_service_id(service_id: &str) -> Id<ServiceId> {
Id::new(&format!("service:{}@link.mozilla.org", service_id))
}
pub fn create_setter_id(operation: &str, service_id: &str) -> Id<Setter> {
create_io_mechanism_id("setter", operation, service_id)
}
pub fn create_getter_id(operation: &str, service_id: &str) -> Id<Getter> {
create_io_mechanism_id("getter", operation, service_id)
}
pub fn | <IO>(prefix: &str, operation: &str, service_id: &str) -> Id<IO>
where IO: IOMechanism
{
Id::new(&format!("{}:{}.{}@link.mozilla.org", prefix, operation, service_id))
}
fn get_bytes(url: String) -> Result<Vec<u8>, Error> {
let client = hyper::Client::new();
let get_result = client.get(&url)
.header(
Authorization(
Basic {
username: CAMERA_USERNAME.to_owned(),
password: Some(CAMERA_PASSWORD.to_owned())
}
)
)
.header(Connection::close())
.send();
let mut res = match get_result {
Ok(res) => res,
Err(err) => {
warn!("GET on {} failed: {}", url, err);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
if res.status != self::hyper::status::StatusCode::Ok {
warn!("GET on {} failed: {}", url, res.status);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
let mut image = Vec::new();
match res.read_to_end(&mut image) {
Ok(_) => Ok(image),
Err(err) => {
warn!("read of image data from {} failed: {}", url, err);
Err(Error::InternalError(InternalError::InvalidInitialService))
}
}
}
#[derive(Clone)]
pub struct IpCamera {
pub udn: String,
url: String,
snapshot_dir: String,
pub image_list_id: Id<Getter>,
pub image_newest_id: Id<Getter>,
pub snapshot_id: Id<Setter>,
}
impl IpCamera {
pub fn new(udn: &str, url: &str, root_snapshot_dir: &str) -> Result<Self, Error> {
let camera = IpCamera {
udn: udn.to_owned(),
url: url.to_owned(),
image_list_id: create_getter_id("image_list", &udn),
image_newest_id: create_getter_id("image_newest", &udn),
snapshot_id: create_setter_id("snapshot", &udn),
snapshot_dir: format!("{}/{}", root_snapshot_dir, udn)
};
// Create a directory to store snapshots for this camera.
if let Err(err) = fs::create_dir_all(&camera.snapshot_dir) {
if err.kind() != ErrorKind::AlreadyExists {
error!("Unable to create directory {}: {}", camera.snapshot_dir, err);
return Err(Error::InternalError(InternalError::GenericError(format!("cannot create {}", camera.snapshot_dir))));
}
}
Ok(camera)
}
pub fn get_image_list(&self) -> Vec<String> {
let mut array: Vec<String> = vec!();
if let Ok(iter) = fs::read_dir(Path::new(&self.snapshot_dir)) {
for entry in iter {
if let Ok(entry) = entry {
if let Ok(metadata) = entry.metadata() {
if metadata.is_file() {
array.push(String::from(entry.file_name().to_str().unwrap()));
}
}
}
}
}
array
}
pub fn get_image(&self, filename: &str) -> Result<Vec<u8>, Error> {
let full_filename = format!("{}/{}", self.snapshot_dir, filename);
debug!("get_image: filename = {}", full_filename.clone());
let mut options = fs::OpenOptions::new();
options.read(true);
if let Ok(mut image_file) = options.open(full_filename.clone()) {
let mut image = Vec::new();
if let Ok(_) = image_file.read_to_end(&mut image) {
return Ok(image);
}
warn!("Error reading {}", full_filename);
} else {
warn!("Image {} not found", full_filename);
}
Err(Error::InternalError(InternalError::InvalidInitialService))
}
pub fn get_newest_image(&self) -> Result<Vec<u8>, Error> {
let mut newest_image_time = 0;
let mut newest_image = None;
if let Ok(iter) = fs::read_dir(Path::new(&self.snapshot_dir)) {
for entry in iter {
if let Ok(entry) = entry {
if let Ok(metadata) = entry.metadata() {
if metadata.is_file() {
let time = metadata.ctime();
if newest_image_time <= time {
newest_image_time = time;
newest_image = Some(String::from(entry.file_name().to_str().unwrap()));
}
}
}
}
}
}
if newest_image.is_none() {
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
self.get_image(&newest_image.unwrap())
}
pub fn take_snapshot(&self) -> Result<String, Error> {
let image_url = "image/jpeg.cgi";
let url = format!("{}/{}", self.url, image_url);
let image = match get_bytes(url) {
Ok(image) => image,
Err(err) => {
warn!("Error '{:?}' retrieving image from camera {}", err, self.url);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
let mut options = fs::OpenOptions::new();
options.write(true);
options.create(true);
options.truncate(true);
let filename_base = time::strftime("%Y-%m-%d-%H%M%S", &time::now()).unwrap();
let mut full_filename;
let image_file;
let mut loop_count = 0;
let mut filename;
loop {
if loop_count == 0 {
filename = filename_base.clone();
} else {
filename = format!("{}-{}", filename_base, loop_count);
}
full_filename = format!("{}/{}.jpg", self.snapshot_dir, filename);
if fs::metadata(full_filename.clone()).is_ok() {
// File exists
loop_count += 1;
continue;
}
image_file = match options.open(full_filename.clone()) {
Ok(file) => file,
Err(err) => {
warn!("Unable to open {}: {:?}", full_filename, err.kind());
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
break;
}
let mut writer = BufWriter::new(&image_file);
match writer.write_all(&image) {
Ok(_) => {}
Err(err) => {
warn!("Error '{:?}' writing snapshot.jpg for camera {}", err, self.udn);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
}
debug!("Took a snapshot from {}: {}", self.udn, full_filename);
Ok(format!("{}.jpg", filename))
}
}
| create_io_mechanism_id | identifier_name |
api.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
extern crate hyper;
extern crate time;
extern crate url;
use foxbox_taxonomy::api::{ Error, InternalError };
use foxbox_taxonomy::services::*;
use self::hyper::header::{ Authorization, Basic, Connection };
use std::fs;
use std::os::unix::fs::MetadataExt;
use std::io::{ BufWriter, ErrorKind };
use std::io::prelude::*;
use std::path::Path;
// TODO: The camera username and password need to be persisted per-camera
static CAMERA_USERNAME: &'static str = "admin";
static CAMERA_PASSWORD: &'static str = "password";
pub fn create_service_id(service_id: &str) -> Id<ServiceId> |
pub fn create_setter_id(operation: &str, service_id: &str) -> Id<Setter> {
create_io_mechanism_id("setter", operation, service_id)
}
pub fn create_getter_id(operation: &str, service_id: &str) -> Id<Getter> {
create_io_mechanism_id("getter", operation, service_id)
}
pub fn create_io_mechanism_id<IO>(prefix: &str, operation: &str, service_id: &str) -> Id<IO>
where IO: IOMechanism
{
Id::new(&format!("{}:{}.{}@link.mozilla.org", prefix, operation, service_id))
}
fn get_bytes(url: String) -> Result<Vec<u8>, Error> {
let client = hyper::Client::new();
let get_result = client.get(&url)
.header(
Authorization(
Basic {
username: CAMERA_USERNAME.to_owned(),
password: Some(CAMERA_PASSWORD.to_owned())
}
)
)
.header(Connection::close())
.send();
let mut res = match get_result {
Ok(res) => res,
Err(err) => {
warn!("GET on {} failed: {}", url, err);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
if res.status != self::hyper::status::StatusCode::Ok {
warn!("GET on {} failed: {}", url, res.status);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
let mut image = Vec::new();
match res.read_to_end(&mut image) {
Ok(_) => Ok(image),
Err(err) => {
warn!("read of image data from {} failed: {}", url, err);
Err(Error::InternalError(InternalError::InvalidInitialService))
}
}
}
#[derive(Clone)]
pub struct IpCamera {
pub udn: String,
url: String,
snapshot_dir: String,
pub image_list_id: Id<Getter>,
pub image_newest_id: Id<Getter>,
pub snapshot_id: Id<Setter>,
}
impl IpCamera {
pub fn new(udn: &str, url: &str, root_snapshot_dir: &str) -> Result<Self, Error> {
let camera = IpCamera {
udn: udn.to_owned(),
url: url.to_owned(),
image_list_id: create_getter_id("image_list", &udn),
image_newest_id: create_getter_id("image_newest", &udn),
snapshot_id: create_setter_id("snapshot", &udn),
snapshot_dir: format!("{}/{}", root_snapshot_dir, udn)
};
// Create a directory to store snapshots for this camera.
if let Err(err) = fs::create_dir_all(&camera.snapshot_dir) {
if err.kind() != ErrorKind::AlreadyExists {
error!("Unable to create directory {}: {}", camera.snapshot_dir, err);
return Err(Error::InternalError(InternalError::GenericError(format!("cannot create {}", camera.snapshot_dir))));
}
}
Ok(camera)
}
pub fn get_image_list(&self) -> Vec<String> {
let mut array: Vec<String> = vec!();
if let Ok(iter) = fs::read_dir(Path::new(&self.snapshot_dir)) {
for entry in iter {
if let Ok(entry) = entry {
if let Ok(metadata) = entry.metadata() {
if metadata.is_file() {
array.push(String::from(entry.file_name().to_str().unwrap()));
}
}
}
}
}
array
}
pub fn get_image(&self, filename: &str) -> Result<Vec<u8>, Error> {
let full_filename = format!("{}/{}", self.snapshot_dir, filename);
debug!("get_image: filename = {}", full_filename.clone());
let mut options = fs::OpenOptions::new();
options.read(true);
if let Ok(mut image_file) = options.open(full_filename.clone()) {
let mut image = Vec::new();
if let Ok(_) = image_file.read_to_end(&mut image) {
return Ok(image);
}
warn!("Error reading {}", full_filename);
} else {
warn!("Image {} not found", full_filename);
}
Err(Error::InternalError(InternalError::InvalidInitialService))
}
pub fn get_newest_image(&self) -> Result<Vec<u8>, Error> {
let mut newest_image_time = 0;
let mut newest_image = None;
if let Ok(iter) = fs::read_dir(Path::new(&self.snapshot_dir)) {
for entry in iter {
if let Ok(entry) = entry {
if let Ok(metadata) = entry.metadata() {
if metadata.is_file() {
let time = metadata.ctime();
if newest_image_time <= time {
newest_image_time = time;
newest_image = Some(String::from(entry.file_name().to_str().unwrap()));
}
}
}
}
}
}
if newest_image.is_none() {
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
self.get_image(&newest_image.unwrap())
}
pub fn take_snapshot(&self) -> Result<String, Error> {
let image_url = "image/jpeg.cgi";
let url = format!("{}/{}", self.url, image_url);
let image = match get_bytes(url) {
Ok(image) => image,
Err(err) => {
warn!("Error '{:?}' retrieving image from camera {}", err, self.url);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
let mut options = fs::OpenOptions::new();
options.write(true);
options.create(true);
options.truncate(true);
let filename_base = time::strftime("%Y-%m-%d-%H%M%S", &time::now()).unwrap();
let mut full_filename;
let image_file;
let mut loop_count = 0;
let mut filename;
loop {
if loop_count == 0 {
filename = filename_base.clone();
} else {
filename = format!("{}-{}", filename_base, loop_count);
}
full_filename = format!("{}/{}.jpg", self.snapshot_dir, filename);
if fs::metadata(full_filename.clone()).is_ok() {
// File exists
loop_count += 1;
continue;
}
image_file = match options.open(full_filename.clone()) {
Ok(file) => file,
Err(err) => {
warn!("Unable to open {}: {:?}", full_filename, err.kind());
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
};
break;
}
let mut writer = BufWriter::new(&image_file);
match writer.write_all(&image) {
Ok(_) => {}
Err(err) => {
warn!("Error '{:?}' writing snapshot.jpg for camera {}", err, self.udn);
return Err(Error::InternalError(InternalError::InvalidInitialService));
}
}
debug!("Took a snapshot from {}: {}", self.udn, full_filename);
Ok(format!("{}.jpg", filename))
}
}
| {
Id::new(&format!("service:{}@link.mozilla.org", service_id))
} | identifier_body |
lib.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A lightweight logging facade.
//!
//! A logging facade provides a single logging API that abstracts over the
//! actual logging implementation. Libraries can use the logging API provided
//! by this crate, and the consumer of those libraries can choose the logging
//! framework that is most suitable for its use case.
//!
//! If no logging implementation is selected, the facade falls back to a "noop"
//! implementation that ignores all log messages. The overhead in this case
//! is very small - just an integer load, comparison and jump.
//!
//! A log request consists of a target, a level, and a body. A target is a
//! string which defaults to the module path of the location of the log
//! request, though that default may be overridden. Logger implementations
//! typically use the target to filter requests based on some user
//! configuration.
//!
//! # Use
//!
//! ## In libraries
//!
//! Libraries should link only to the `log` crate, and use the provided
//! macros to log whatever information will be useful to downstream consumers.
//!
//! ### Examples
//!
//! ```rust
//! # #![allow(unstable)]
//! #[macro_use]
//! extern crate log;
//!
//! # #[derive(Debug)] pub struct Yak(String);
//! # impl Yak { fn shave(&self, _: u32) {} }
//! # fn find_a_razor() -> Result<u32, u32> { Ok(1) }
//! pub fn shave_the_yak(yak: &Yak) {
//! info!(target: "yak_events", "Commencing yak shaving for {:?}", yak);
//!
//! loop {
//! match find_a_razor() {
//! Ok(razor) => {
//! info!("Razor located: {}", razor);
//! yak.shave(razor);
//! break;
//! }
//! Err(err) => {
//! warn!("Unable to locate a razor: {}, retrying", err);
//! }
//! }
//! }
//! }
//! # fn main() {}
//! ```
//!
//! ## In executables
//!
//! Executables should chose a logging framework and initialize it early in the
//! runtime of the program. Logging frameworks will typically include a
//! function to do this. Any log messages generated before the framework is
//! initialized will be ignored.
//!
//! The executable itself may use the `log` crate to log as well.
//!
//! ### Warning
//!
//! The logging system may only be initialized once.
//!
//! ### Examples
//!
//! ```rust,ignore
//! #[macro_use]
//! extern crate log;
//! extern crate my_logger;
//!
//! fn main() {
//! my_logger::init();
//!
//! info!("starting up");
//!
//! // ...
//! }
//! ```
//!
//! # Logger implementations
//!
//! Loggers implement the `Log` trait. Here's a very basic example that simply
//! logs all messages at the `Error`, `Warn` or `Info` levels to stdout:
//!
//! ```rust
//! extern crate log;
//!
//! use log::{LogRecord, LogLevel, LogMetadata};
//!
//! struct SimpleLogger;
//!
//! impl log::Log for SimpleLogger {
//! fn enabled(&self, metadata: &LogMetadata) -> bool {
//! metadata.level() <= LogLevel::Info
//! }
//!
//! fn log(&self, record: &LogRecord) {
//! if self.enabled(record.metadata()) {
//! println!("{} - {}", record.level(), record.args());
//! }
//! }
//! }
//!
//! # fn main() {}
//! ```
//!
//! Loggers are installed by calling the `set_logger` function. It takes a
//! closure which is provided a `MaxLogLevel` token and returns a `Log` trait
//! object. The `MaxLogLevel` token controls the global maximum log level. The
//! logging facade uses this as an optimization to improve performance of log
//! messages at levels that are disabled. In the case of our example logger,
//! we'll want to set the maximum log level to `Info`, since we ignore any
//! `Debug` or `Trace` level log messages. A logging framework should provide a
//! function that wraps a call to `set_logger`, handling initialization of the
//! logger:
//!
//! ```rust
//! # extern crate log;
//! # use log::{LogLevel, LogLevelFilter, SetLoggerError, LogMetadata};
//! # struct SimpleLogger;
//! # impl log::Log for SimpleLogger {
//! # fn enabled(&self, _: &LogMetadata) -> bool { false }
//! # fn log(&self, _: &log::LogRecord) {}
//! # }
//! # fn main() {}
//! pub fn init() -> Result<(), SetLoggerError> {
//! log::set_logger(|max_log_level| {
//! max_log_level.set(LogLevelFilter::Info);
//! Box::new(SimpleLogger)
//! })
//! }
//! ```
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/log/")]
#![warn(missing_docs)]
extern crate libc;
use std::ascii::AsciiExt;
use std::cmp;
use std::error;
use std::fmt;
use std::mem;
use std::ops::Deref;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
mod macros;
// The setup here is a bit weird to make at_exit work.
//
// There are four different states that we care about: the logger's
// uninitialized, the logger's initializing (set_logger's been called but
// LOGGER hasn't actually been set yet), the logger's active, or the logger's
// shutting down inside of at_exit.
//
// The LOGGER static is normally a Box<Box<Log>> with some special possible
// values as well. The uninitialized and initializing states are represented by
// the values 0 and 1 respectively. The shutting down state is also represented
// by 1. Any other value is a valid pointer to the logger.
//
// The at_exit routine needs to make sure that no threads are actively logging
// when it deallocates the logger. The number of actively logging threads is
// tracked in the REFCOUNT static. The routine first sets LOGGER back to 1.
// All logging calls past that point will immediately return without accessing
// the logger. At that point, the at_exit routine just waits for the refcount
// to reach 0 before deallocating the logger. Note that the refcount does not
// necessarily monotonically decrease at this point, as new log calls still
// increment and decrement it, but the interval in between is small enough that
// the wait is really just for the active log calls to finish.
static LOGGER: AtomicUsize = ATOMIC_USIZE_INIT;
static REFCOUNT: AtomicUsize = ATOMIC_USIZE_INIT;
const UNINITIALIZED: usize = 0;
const INITIALIZING: usize = 1;
static MAX_LOG_LEVEL_FILTER: AtomicUsize = ATOMIC_USIZE_INIT;
static LOG_LEVEL_NAMES: [&'static str; 6] = ["OFF", "ERROR", "WARN", "INFO",
"DEBUG", "TRACE"];
/// An enum representing the available verbosity levels of the logging framework
///
/// A `LogLevel` may be compared directly to a `LogLevelFilter`.
#[repr(usize)]
#[derive(Copy, Eq, Debug)]
pub enum LogLevel {
/// The "error" level.
///
/// Designates very serious errors.
Error = 1, // This way these line up with the discriminants for LogLevelFilter below
/// The "warn" level.
///
/// Designates hazardous situations.
Warn,
/// The "info" level.
///
/// Designates useful information.
Info,
/// The "debug" level.
///
/// Designates lower priority information.
Debug,
/// The "trace" level.
///
/// Designates very low priority, often extremely verbose, information.
Trace,
}
impl Clone for LogLevel {
#[inline]
fn clone(&self) -> LogLevel {
*self
}
}
impl PartialEq for LogLevel {
#[inline]
fn eq(&self, other: &LogLevel) -> bool {
*self as usize == *other as usize
}
}
impl PartialEq<LogLevelFilter> for LogLevel {
#[inline]
fn eq(&self, other: &LogLevelFilter) -> bool {
*self as usize == *other as usize
}
}
impl PartialOrd for LogLevel {
#[inline]
fn partial_cmp(&self, other: &LogLevel) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl PartialOrd<LogLevelFilter> for LogLevel {
#[inline]
fn partial_cmp(&self, other: &LogLevelFilter) -> Option<cmp::Ordering> {
Some((*self as usize).cmp(&(*other as usize)))
}
}
impl Ord for LogLevel {
#[inline]
fn cmp(&self, other: &LogLevel) -> cmp::Ordering {
(*self as usize).cmp(&(*other as usize))
}
}
fn ok_or<T, E>(t: Option<T>, e: E) -> Result<T, E> {
match t {
Some(t) => Ok(t),
None => Err(e),
}
}
impl FromStr for LogLevel {
type Err = ();
fn from_str(level: &str) -> Result<LogLevel, ()> {
ok_or(LOG_LEVEL_NAMES.iter()
.position(|&name| name.eq_ignore_ascii_case(level))
.into_iter()
.filter(|&idx| idx != 0)
.map(|idx| LogLevel::from_usize(idx).unwrap())
.next(), ())
}
}
impl fmt::Display for LogLevel {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.pad(LOG_LEVEL_NAMES[*self as usize])
}
}
impl LogLevel {
fn from_usize(u: usize) -> Option<LogLevel> {
match u {
1 => Some(LogLevel::Error),
2 => Some(LogLevel::Warn),
3 => Some(LogLevel::Info),
4 => Some(LogLevel::Debug),
5 => Some(LogLevel::Trace),
_ => None
}
}
/// Returns the most verbose logging level.
#[inline]
pub fn max() -> LogLevel {
LogLevel::Trace
}
/// Converts the `LogLevel` to the equivalent `LogLevelFilter`.
#[inline]
pub fn to_log_level_filter(&self) -> LogLevelFilter {
LogLevelFilter::from_usize(*self as usize).unwrap()
}
}
/// An enum representing the available verbosity level filters of the logging
/// framework.
///
/// A `LogLevelFilter` may be compared directly to a `LogLevel`.
#[repr(usize)]
#[derive(Copy, Eq, Debug)]
pub enum LogLevelFilter {
/// A level lower than all log levels.
Off,
/// Corresponds to the `Error` log level.
Error,
/// Corresponds to the `Warn` log level.
Warn,
/// Corresponds to the `Info` log level.
Info,
/// Corresponds to the `Debug` log level.
Debug,
/// Corresponds to the `Trace` log level.
Trace,
}
// Deriving generates terrible impls of these traits
impl Clone for LogLevelFilter {
#[inline]
fn clone(&self) -> LogLevelFilter {
*self
}
}
impl PartialEq for LogLevelFilter {
#[inline]
fn eq(&self, other: &LogLevelFilter) -> bool {
*self as usize == *other as usize
}
}
impl PartialEq<LogLevel> for LogLevelFilter {
#[inline]
fn eq(&self, other: &LogLevel) -> bool {
other.eq(self)
}
}
impl PartialOrd for LogLevelFilter { | }
}
impl PartialOrd<LogLevel> for LogLevelFilter {
#[inline]
fn partial_cmp(&self, other: &LogLevel) -> Option<cmp::Ordering> {
other.partial_cmp(self).map(|x| x.reverse())
}
}
impl Ord for LogLevelFilter {
#[inline]
fn cmp(&self, other: &LogLevelFilter) -> cmp::Ordering {
(*self as usize).cmp(&(*other as usize))
}
}
impl FromStr for LogLevelFilter {
type Err = ();
fn from_str(level: &str) -> Result<LogLevelFilter, ()> {
ok_or(LOG_LEVEL_NAMES.iter()
.position(|&name| name.eq_ignore_ascii_case(level))
.map(|p| LogLevelFilter::from_usize(p).unwrap()), ())
}
}
impl fmt::Display for LogLevelFilter {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", LOG_LEVEL_NAMES[*self as usize])
}
}
impl LogLevelFilter {
fn from_usize(u: usize) -> Option<LogLevelFilter> {
match u {
0 => Some(LogLevelFilter::Off),
1 => Some(LogLevelFilter::Error),
2 => Some(LogLevelFilter::Warn),
3 => Some(LogLevelFilter::Info),
4 => Some(LogLevelFilter::Debug),
5 => Some(LogLevelFilter::Trace),
_ => None
}
}
/// Returns the most verbose logging level filter.
#[inline]
pub fn max() -> LogLevelFilter {
LogLevelFilter::Trace
}
/// Converts `self` to the equivalent `LogLevel`.
///
/// Returns `None` if `self` is `LogLevelFilter::Off`.
#[inline]
pub fn to_log_level(&self) -> Option<LogLevel> {
LogLevel::from_usize(*self as usize)
}
}
/// The "payload" of a log message.
pub struct LogRecord<'a> {
metadata: LogMetadata<'a>,
location: &'a LogLocation,
args: fmt::Arguments<'a>,
}
impl<'a> LogRecord<'a> {
/// The message body.
pub fn args(&self) -> &fmt::Arguments<'a> {
&self.args
}
/// Metadata about the log directive.
pub fn metadata(&self) -> &LogMetadata {
&self.metadata
}
/// The location of the log directive.
pub fn location(&self) -> &LogLocation {
self.location
}
/// The verbosity level of the message.
pub fn level(&self) -> LogLevel {
self.metadata.level()
}
/// The name of the target of the directive.
pub fn target(&self) -> &str {
self.metadata.target()
}
}
/// Metadata about a log message.
pub struct LogMetadata<'a> {
level: LogLevel,
target: &'a str,
}
impl<'a> LogMetadata<'a> {
/// The verbosity level of the message.
pub fn level(&self) -> LogLevel {
self.level
}
/// The name of the target of the directive.
pub fn target(&self) -> &str {
self.target
}
}
/// A trait encapsulating the operations required of a logger
pub trait Log: Sync+Send {
/// Determines if a log message with the specified metadata would be
/// logged.
///
/// This is used by the `log_enabled!` macro to allow callers to avoid
/// expensive computation of log message arguments if the message would be
/// discarded anyway.
fn enabled(&self, metadata: &LogMetadata) -> bool;
/// Logs the `LogRecord`.
///
/// Note that `enabled` is *not* necessarily called before this method.
/// Implementations of `log` should perform all necessary filtering
/// internally.
fn log(&self, record: &LogRecord);
}
/// The location of a log message.
///
/// # Warning
///
/// The fields of this struct are public so that they may be initialized by the
/// `log!` macro. They are subject to change at any time and should never be
/// accessed directly.
#[derive(Copy, Clone, Debug)]
pub struct LogLocation {
#[doc(hidden)]
pub __module_path: &'static str,
#[doc(hidden)]
pub __file: &'static str,
#[doc(hidden)]
pub __line: u32,
}
impl LogLocation {
/// The module path of the message.
pub fn module_path(&self) -> &str {
self.__module_path
}
/// The source file containing the message.
pub fn file(&self) -> &str {
self.__file
}
/// The line containing the message.
pub fn line(&self) -> u32 {
self.__line
}
}
/// A token providing read and write access to the global maximum log level
/// filter.
///
/// The maximum log level is used as an optimization to avoid evaluating log
/// messages that will be ignored by the logger. Any message with a level
/// higher than the maximum log level filter will be ignored. A logger should
/// make sure to keep the maximum log level filter in sync with its current
/// configuration.
#[allow(missing_copy_implementations)]
pub struct MaxLogLevelFilter(());
impl fmt::Debug for MaxLogLevelFilter {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "MaxLogLevelFilter")
}
}
impl MaxLogLevelFilter {
/// Gets the current maximum log level filter.
pub fn get(&self) -> LogLevelFilter {
max_log_level()
}
/// Sets the maximum log level.
pub fn set(&self, level: LogLevelFilter) {
MAX_LOG_LEVEL_FILTER.store(level as usize, Ordering::SeqCst)
}
}
/// Returns the current maximum log level.
///
/// The `log!`, `error!`, `warn!`, `info!`, `debug!`, and `trace!` macros check
/// this value and discard any message logged at a higher level. The maximum
/// log level is set by the `MaxLogLevel` token passed to loggers.
#[inline(always)]
pub fn max_log_level() -> LogLevelFilter {
unsafe { mem::transmute(MAX_LOG_LEVEL_FILTER.load(Ordering::Relaxed)) }
}
/// Sets the global logger.
///
/// The `make_logger` closure is passed a `MaxLogLevel` object, which the
/// logger should use to keep the global maximum log level in sync with the
/// highest log level that the logger will not ignore.
///
/// This function may only be called once in the lifetime of a program. Any log
/// events that occur before the call to `set_logger` completes will be
/// ignored.
///
/// This function does not typically need to be called manually. Logger
/// implementations should provide an initialization method that calls
/// `set_logger` internally.
pub fn set_logger<M>(make_logger: M) -> Result<(), SetLoggerError>
where M: FnOnce(MaxLogLevelFilter) -> Box<Log> {
if LOGGER.compare_and_swap(UNINITIALIZED, INITIALIZING,
Ordering::SeqCst) != UNINITIALIZED {
return Err(SetLoggerError(()));
}
let logger = Box::new(make_logger(MaxLogLevelFilter(())));
let logger = unsafe { mem::transmute::<Box<Box<Log>>, usize>(logger) };
LOGGER.store(logger, Ordering::SeqCst);
unsafe {
assert_eq!(libc::atexit(shutdown), 0);
}
return Ok(());
extern fn shutdown() {
// Set to INITIALIZING to prevent re-initialization after
let logger = LOGGER.swap(INITIALIZING, Ordering::SeqCst);
while REFCOUNT.load(Ordering::SeqCst) != 0 {
// FIXME add a sleep here when it doesn't involve timers
}
unsafe { mem::transmute::<usize, Box<Box<Log>>>(logger); }
}
}
/// The type returned by `set_logger` if `set_logger` has already been called.
#[allow(missing_copy_implementations)]
#[derive(Debug)]
pub struct SetLoggerError(());
impl fmt::Display for SetLoggerError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "attempted to set a logger after the logging system \
was already initialized")
}
}
impl error::Error for SetLoggerError {
fn description(&self) -> &str { "set_logger() called multiple times" }
}
struct LoggerGuard(usize);
impl Drop for LoggerGuard {
fn drop(&mut self) {
REFCOUNT.fetch_sub(1, Ordering::SeqCst);
}
}
impl Deref for LoggerGuard {
type Target = Box<Log>;
fn deref(&self) -> &Box<Log+'static> {
unsafe { mem::transmute(self.0) }
}
}
fn logger() -> Option<LoggerGuard> {
REFCOUNT.fetch_add(1, Ordering::SeqCst);
let logger = LOGGER.load(Ordering::SeqCst);
if logger == UNINITIALIZED || logger == INITIALIZING {
REFCOUNT.fetch_sub(1, Ordering::SeqCst);
None
} else {
Some(LoggerGuard(logger))
}
}
// WARNING
// This is not considered part of the crate's public API. It is subject to
// change at any time.
#[doc(hidden)]
pub fn __enabled(level: LogLevel, target: &str) -> bool {
if let Some(logger) = logger() {
logger.enabled(&LogMetadata { level: level, target: target })
} else {
false
}
}
// WARNING
// This is not considered part of the crate's public API. It is subject to
// change at any time.
#[doc(hidden)]
pub fn __log(level: LogLevel, target: &str, loc: &LogLocation,
args: fmt::Arguments) {
if let Some(logger) = logger() {
let record = LogRecord {
metadata: LogMetadata {
level: level,
target: target,
},
location: loc,
args: args
};
logger.log(&record)
}
}
#[cfg(test)]
mod tests {
use std::error::Error;
use super::{LogLevel, LogLevelFilter, SetLoggerError};
#[test]
fn test_loglevelfilter_from_str() {
let tests = [
("off", Ok(LogLevelFilter::Off)),
("error", Ok(LogLevelFilter::Error)),
("warn", Ok(LogLevelFilter::Warn)),
("info", Ok(LogLevelFilter::Info)),
("debug", Ok(LogLevelFilter::Debug)),
("trace", Ok(LogLevelFilter::Trace)),
("OFF", Ok(LogLevelFilter::Off)),
("ERROR", Ok(LogLevelFilter::Error)),
("WARN", Ok(LogLevelFilter::Warn)),
("INFO", Ok(LogLevelFilter::Info)),
("DEBUG", Ok(LogLevelFilter::Debug)),
("TRACE", Ok(LogLevelFilter::Trace)),
("asdf", Err(())),
];
for &(s, ref expected) in &tests {
assert_eq!(expected, &s.parse());
}
}
#[test]
fn test_loglevel_from_str() {
let tests = [
("OFF", Err(())),
("error", Ok(LogLevel::Error)),
("warn", Ok(LogLevel::Warn)),
("info", Ok(LogLevel::Info)),
("debug", Ok(LogLevel::Debug)),
("trace", Ok(LogLevel::Trace)),
("ERROR", Ok(LogLevel::Error)),
("WARN", Ok(LogLevel::Warn)),
("INFO", Ok(LogLevel::Info)),
("DEBUG", Ok(LogLevel::Debug)),
("TRACE", Ok(LogLevel::Trace)),
("asdf", Err(())),
];
for &(s, ref expected) in &tests {
assert_eq!(expected, &s.parse());
}
}
#[test]
fn test_loglevel_show() {
assert_eq!("INFO", LogLevel::Info.to_string());
assert_eq!("ERROR", LogLevel::Error.to_string());
}
#[test]
fn test_loglevelfilter_show() {
assert_eq!("OFF", LogLevelFilter::Off.to_string());
assert_eq!("ERROR", LogLevelFilter::Error.to_string());
}
#[test]
fn test_cross_cmp() {
assert!(LogLevel::Debug > LogLevelFilter::Error);
assert!(LogLevelFilter::Warn < LogLevel::Trace);
assert!(LogLevelFilter::Off < LogLevel::Error);
}
#[test]
fn test_cross_eq() {
assert!(LogLevel::Error == LogLevelFilter::Error);
assert!(LogLevelFilter::Off != LogLevel::Error);
assert!(LogLevel::Trace == LogLevelFilter::Trace);
}
#[test]
fn test_to_log_level() {
assert_eq!(Some(LogLevel::Error), LogLevelFilter::Error.to_log_level());
assert_eq!(None, LogLevelFilter::Off.to_log_level());
assert_eq!(Some(LogLevel::Debug), LogLevelFilter::Debug.to_log_level());
}
#[test]
fn test_to_log_level_filter() {
assert_eq!(LogLevelFilter::Error, LogLevel::Error.to_log_level_filter());
assert_eq!(LogLevelFilter::Trace, LogLevel::Trace.to_log_level_filter());
}
#[test]
fn test_error_trait() {
let e = SetLoggerError(());
assert_eq!(e.description(), "set_logger() called multiple times");
}
} | #[inline]
fn partial_cmp(&self, other: &LogLevelFilter) -> Option<cmp::Ordering> {
Some(self.cmp(other)) | random_line_split |
lib.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A lightweight logging facade.
//!
//! A logging facade provides a single logging API that abstracts over the
//! actual logging implementation. Libraries can use the logging API provided
//! by this crate, and the consumer of those libraries can choose the logging
//! framework that is most suitable for its use case.
//!
//! If no logging implementation is selected, the facade falls back to a "noop"
//! implementation that ignores all log messages. The overhead in this case
//! is very small - just an integer load, comparison and jump.
//!
//! A log request consists of a target, a level, and a body. A target is a
//! string which defaults to the module path of the location of the log
//! request, though that default may be overridden. Logger implementations
//! typically use the target to filter requests based on some user
//! configuration.
//!
//! # Use
//!
//! ## In libraries
//!
//! Libraries should link only to the `log` crate, and use the provided
//! macros to log whatever information will be useful to downstream consumers.
//!
//! ### Examples
//!
//! ```rust
//! # #![allow(unstable)]
//! #[macro_use]
//! extern crate log;
//!
//! # #[derive(Debug)] pub struct Yak(String);
//! # impl Yak { fn shave(&self, _: u32) {} }
//! # fn find_a_razor() -> Result<u32, u32> { Ok(1) }
//! pub fn shave_the_yak(yak: &Yak) {
//! info!(target: "yak_events", "Commencing yak shaving for {:?}", yak);
//!
//! loop {
//! match find_a_razor() {
//! Ok(razor) => {
//! info!("Razor located: {}", razor);
//! yak.shave(razor);
//! break;
//! }
//! Err(err) => {
//! warn!("Unable to locate a razor: {}, retrying", err);
//! }
//! }
//! }
//! }
//! # fn main() {}
//! ```
//!
//! ## In executables
//!
//! Executables should chose a logging framework and initialize it early in the
//! runtime of the program. Logging frameworks will typically include a
//! function to do this. Any log messages generated before the framework is
//! initialized will be ignored.
//!
//! The executable itself may use the `log` crate to log as well.
//!
//! ### Warning
//!
//! The logging system may only be initialized once.
//!
//! ### Examples
//!
//! ```rust,ignore
//! #[macro_use]
//! extern crate log;
//! extern crate my_logger;
//!
//! fn main() {
//! my_logger::init();
//!
//! info!("starting up");
//!
//! // ...
//! }
//! ```
//!
//! # Logger implementations
//!
//! Loggers implement the `Log` trait. Here's a very basic example that simply
//! logs all messages at the `Error`, `Warn` or `Info` levels to stdout:
//!
//! ```rust
//! extern crate log;
//!
//! use log::{LogRecord, LogLevel, LogMetadata};
//!
//! struct SimpleLogger;
//!
//! impl log::Log for SimpleLogger {
//! fn enabled(&self, metadata: &LogMetadata) -> bool {
//! metadata.level() <= LogLevel::Info
//! }
//!
//! fn log(&self, record: &LogRecord) {
//! if self.enabled(record.metadata()) {
//! println!("{} - {}", record.level(), record.args());
//! }
//! }
//! }
//!
//! # fn main() {}
//! ```
//!
//! Loggers are installed by calling the `set_logger` function. It takes a
//! closure which is provided a `MaxLogLevel` token and returns a `Log` trait
//! object. The `MaxLogLevel` token controls the global maximum log level. The
//! logging facade uses this as an optimization to improve performance of log
//! messages at levels that are disabled. In the case of our example logger,
//! we'll want to set the maximum log level to `Info`, since we ignore any
//! `Debug` or `Trace` level log messages. A logging framework should provide a
//! function that wraps a call to `set_logger`, handling initialization of the
//! logger:
//!
//! ```rust
//! # extern crate log;
//! # use log::{LogLevel, LogLevelFilter, SetLoggerError, LogMetadata};
//! # struct SimpleLogger;
//! # impl log::Log for SimpleLogger {
//! # fn enabled(&self, _: &LogMetadata) -> bool { false }
//! # fn log(&self, _: &log::LogRecord) {}
//! # }
//! # fn main() {}
//! pub fn init() -> Result<(), SetLoggerError> {
//! log::set_logger(|max_log_level| {
//! max_log_level.set(LogLevelFilter::Info);
//! Box::new(SimpleLogger)
//! })
//! }
//! ```
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/log/")]
#![warn(missing_docs)]
extern crate libc;
use std::ascii::AsciiExt;
use std::cmp;
use std::error;
use std::fmt;
use std::mem;
use std::ops::Deref;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
mod macros;
// The setup here is a bit weird to make at_exit work.
//
// There are four different states that we care about: the logger's
// uninitialized, the logger's initializing (set_logger's been called but
// LOGGER hasn't actually been set yet), the logger's active, or the logger's
// shutting down inside of at_exit.
//
// The LOGGER static is normally a Box<Box<Log>> with some special possible
// values as well. The uninitialized and initializing states are represented by
// the values 0 and 1 respectively. The shutting down state is also represented
// by 1. Any other value is a valid pointer to the logger.
//
// The at_exit routine needs to make sure that no threads are actively logging
// when it deallocates the logger. The number of actively logging threads is
// tracked in the REFCOUNT static. The routine first sets LOGGER back to 1.
// All logging calls past that point will immediately return without accessing
// the logger. At that point, the at_exit routine just waits for the refcount
// to reach 0 before deallocating the logger. Note that the refcount does not
// necessarily monotonically decrease at this point, as new log calls still
// increment and decrement it, but the interval in between is small enough that
// the wait is really just for the active log calls to finish.
static LOGGER: AtomicUsize = ATOMIC_USIZE_INIT;
static REFCOUNT: AtomicUsize = ATOMIC_USIZE_INIT;
const UNINITIALIZED: usize = 0;
const INITIALIZING: usize = 1;
static MAX_LOG_LEVEL_FILTER: AtomicUsize = ATOMIC_USIZE_INIT;
static LOG_LEVEL_NAMES: [&'static str; 6] = ["OFF", "ERROR", "WARN", "INFO",
"DEBUG", "TRACE"];
/// An enum representing the available verbosity levels of the logging framework
///
/// A `LogLevel` may be compared directly to a `LogLevelFilter`.
#[repr(usize)]
#[derive(Copy, Eq, Debug)]
pub enum LogLevel {
/// The "error" level.
///
/// Designates very serious errors.
Error = 1, // This way these line up with the discriminants for LogLevelFilter below
/// The "warn" level.
///
/// Designates hazardous situations.
Warn,
/// The "info" level.
///
/// Designates useful information.
Info,
/// The "debug" level.
///
/// Designates lower priority information.
Debug,
/// The "trace" level.
///
/// Designates very low priority, often extremely verbose, information.
Trace,
}
impl Clone for LogLevel {
#[inline]
fn clone(&self) -> LogLevel {
*self
}
}
impl PartialEq for LogLevel {
#[inline]
fn eq(&self, other: &LogLevel) -> bool {
*self as usize == *other as usize
}
}
impl PartialEq<LogLevelFilter> for LogLevel {
#[inline]
fn eq(&self, other: &LogLevelFilter) -> bool {
*self as usize == *other as usize
}
}
impl PartialOrd for LogLevel {
#[inline]
fn partial_cmp(&self, other: &LogLevel) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl PartialOrd<LogLevelFilter> for LogLevel {
#[inline]
fn partial_cmp(&self, other: &LogLevelFilter) -> Option<cmp::Ordering> {
Some((*self as usize).cmp(&(*other as usize)))
}
}
impl Ord for LogLevel {
#[inline]
fn cmp(&self, other: &LogLevel) -> cmp::Ordering {
(*self as usize).cmp(&(*other as usize))
}
}
fn ok_or<T, E>(t: Option<T>, e: E) -> Result<T, E> {
match t {
Some(t) => Ok(t),
None => Err(e),
}
}
impl FromStr for LogLevel {
type Err = ();
fn from_str(level: &str) -> Result<LogLevel, ()> {
ok_or(LOG_LEVEL_NAMES.iter()
.position(|&name| name.eq_ignore_ascii_case(level))
.into_iter()
.filter(|&idx| idx != 0)
.map(|idx| LogLevel::from_usize(idx).unwrap())
.next(), ())
}
}
impl fmt::Display for LogLevel {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.pad(LOG_LEVEL_NAMES[*self as usize])
}
}
impl LogLevel {
fn from_usize(u: usize) -> Option<LogLevel> {
match u {
1 => Some(LogLevel::Error),
2 => Some(LogLevel::Warn),
3 => Some(LogLevel::Info),
4 => Some(LogLevel::Debug),
5 => Some(LogLevel::Trace),
_ => None
}
}
/// Returns the most verbose logging level.
#[inline]
pub fn max() -> LogLevel {
LogLevel::Trace
}
/// Converts the `LogLevel` to the equivalent `LogLevelFilter`.
#[inline]
pub fn to_log_level_filter(&self) -> LogLevelFilter {
LogLevelFilter::from_usize(*self as usize).unwrap()
}
}
/// An enum representing the available verbosity level filters of the logging
/// framework.
///
/// A `LogLevelFilter` may be compared directly to a `LogLevel`.
#[repr(usize)]
#[derive(Copy, Eq, Debug)]
pub enum LogLevelFilter {
/// A level lower than all log levels.
Off,
/// Corresponds to the `Error` log level.
Error,
/// Corresponds to the `Warn` log level.
Warn,
/// Corresponds to the `Info` log level.
Info,
/// Corresponds to the `Debug` log level.
Debug,
/// Corresponds to the `Trace` log level.
Trace,
}
// Deriving generates terrible impls of these traits
impl Clone for LogLevelFilter {
#[inline]
fn clone(&self) -> LogLevelFilter {
*self
}
}
impl PartialEq for LogLevelFilter {
#[inline]
fn eq(&self, other: &LogLevelFilter) -> bool {
*self as usize == *other as usize
}
}
impl PartialEq<LogLevel> for LogLevelFilter {
#[inline]
fn eq(&self, other: &LogLevel) -> bool {
other.eq(self)
}
}
impl PartialOrd for LogLevelFilter {
#[inline]
fn partial_cmp(&self, other: &LogLevelFilter) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl PartialOrd<LogLevel> for LogLevelFilter {
#[inline]
fn partial_cmp(&self, other: &LogLevel) -> Option<cmp::Ordering> {
other.partial_cmp(self).map(|x| x.reverse())
}
}
impl Ord for LogLevelFilter {
#[inline]
fn cmp(&self, other: &LogLevelFilter) -> cmp::Ordering {
(*self as usize).cmp(&(*other as usize))
}
}
impl FromStr for LogLevelFilter {
type Err = ();
fn from_str(level: &str) -> Result<LogLevelFilter, ()> {
ok_or(LOG_LEVEL_NAMES.iter()
.position(|&name| name.eq_ignore_ascii_case(level))
.map(|p| LogLevelFilter::from_usize(p).unwrap()), ())
}
}
impl fmt::Display for LogLevelFilter {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", LOG_LEVEL_NAMES[*self as usize])
}
}
impl LogLevelFilter {
fn from_usize(u: usize) -> Option<LogLevelFilter> {
match u {
0 => Some(LogLevelFilter::Off),
1 => Some(LogLevelFilter::Error),
2 => Some(LogLevelFilter::Warn),
3 => Some(LogLevelFilter::Info),
4 => Some(LogLevelFilter::Debug),
5 => Some(LogLevelFilter::Trace),
_ => None
}
}
/// Returns the most verbose logging level filter.
#[inline]
pub fn max() -> LogLevelFilter {
LogLevelFilter::Trace
}
/// Converts `self` to the equivalent `LogLevel`.
///
/// Returns `None` if `self` is `LogLevelFilter::Off`.
#[inline]
pub fn to_log_level(&self) -> Option<LogLevel> {
LogLevel::from_usize(*self as usize)
}
}
/// The "payload" of a log message.
pub struct LogRecord<'a> {
metadata: LogMetadata<'a>,
location: &'a LogLocation,
args: fmt::Arguments<'a>,
}
impl<'a> LogRecord<'a> {
/// The message body.
pub fn args(&self) -> &fmt::Arguments<'a> {
&self.args
}
/// Metadata about the log directive.
pub fn metadata(&self) -> &LogMetadata {
&self.metadata
}
/// The location of the log directive.
pub fn location(&self) -> &LogLocation {
self.location
}
/// The verbosity level of the message.
pub fn level(&self) -> LogLevel |
/// The name of the target of the directive.
pub fn target(&self) -> &str {
self.metadata.target()
}
}
/// Metadata about a log message.
pub struct LogMetadata<'a> {
level: LogLevel,
target: &'a str,
}
impl<'a> LogMetadata<'a> {
/// The verbosity level of the message.
pub fn level(&self) -> LogLevel {
self.level
}
/// The name of the target of the directive.
pub fn target(&self) -> &str {
self.target
}
}
/// A trait encapsulating the operations required of a logger
pub trait Log: Sync+Send {
/// Determines if a log message with the specified metadata would be
/// logged.
///
/// This is used by the `log_enabled!` macro to allow callers to avoid
/// expensive computation of log message arguments if the message would be
/// discarded anyway.
fn enabled(&self, metadata: &LogMetadata) -> bool;
/// Logs the `LogRecord`.
///
/// Note that `enabled` is *not* necessarily called before this method.
/// Implementations of `log` should perform all necessary filtering
/// internally.
fn log(&self, record: &LogRecord);
}
/// The location of a log message.
///
/// # Warning
///
/// The fields of this struct are public so that they may be initialized by the
/// `log!` macro. They are subject to change at any time and should never be
/// accessed directly.
#[derive(Copy, Clone, Debug)]
pub struct LogLocation {
#[doc(hidden)]
pub __module_path: &'static str,
#[doc(hidden)]
pub __file: &'static str,
#[doc(hidden)]
pub __line: u32,
}
impl LogLocation {
/// The module path of the message.
pub fn module_path(&self) -> &str {
self.__module_path
}
/// The source file containing the message.
pub fn file(&self) -> &str {
self.__file
}
/// The line containing the message.
pub fn line(&self) -> u32 {
self.__line
}
}
/// A token providing read and write access to the global maximum log level
/// filter.
///
/// The maximum log level is used as an optimization to avoid evaluating log
/// messages that will be ignored by the logger. Any message with a level
/// higher than the maximum log level filter will be ignored. A logger should
/// make sure to keep the maximum log level filter in sync with its current
/// configuration.
#[allow(missing_copy_implementations)]
pub struct MaxLogLevelFilter(());
impl fmt::Debug for MaxLogLevelFilter {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "MaxLogLevelFilter")
}
}
impl MaxLogLevelFilter {
/// Gets the current maximum log level filter.
pub fn get(&self) -> LogLevelFilter {
max_log_level()
}
/// Sets the maximum log level.
pub fn set(&self, level: LogLevelFilter) {
MAX_LOG_LEVEL_FILTER.store(level as usize, Ordering::SeqCst)
}
}
/// Returns the current maximum log level.
///
/// The `log!`, `error!`, `warn!`, `info!`, `debug!`, and `trace!` macros check
/// this value and discard any message logged at a higher level. The maximum
/// log level is set by the `MaxLogLevel` token passed to loggers.
#[inline(always)]
pub fn max_log_level() -> LogLevelFilter {
unsafe { mem::transmute(MAX_LOG_LEVEL_FILTER.load(Ordering::Relaxed)) }
}
/// Sets the global logger.
///
/// The `make_logger` closure is passed a `MaxLogLevel` object, which the
/// logger should use to keep the global maximum log level in sync with the
/// highest log level that the logger will not ignore.
///
/// This function may only be called once in the lifetime of a program. Any log
/// events that occur before the call to `set_logger` completes will be
/// ignored.
///
/// This function does not typically need to be called manually. Logger
/// implementations should provide an initialization method that calls
/// `set_logger` internally.
pub fn set_logger<M>(make_logger: M) -> Result<(), SetLoggerError>
where M: FnOnce(MaxLogLevelFilter) -> Box<Log> {
if LOGGER.compare_and_swap(UNINITIALIZED, INITIALIZING,
Ordering::SeqCst) != UNINITIALIZED {
return Err(SetLoggerError(()));
}
let logger = Box::new(make_logger(MaxLogLevelFilter(())));
let logger = unsafe { mem::transmute::<Box<Box<Log>>, usize>(logger) };
LOGGER.store(logger, Ordering::SeqCst);
unsafe {
assert_eq!(libc::atexit(shutdown), 0);
}
return Ok(());
extern fn shutdown() {
// Set to INITIALIZING to prevent re-initialization after
let logger = LOGGER.swap(INITIALIZING, Ordering::SeqCst);
while REFCOUNT.load(Ordering::SeqCst) != 0 {
// FIXME add a sleep here when it doesn't involve timers
}
unsafe { mem::transmute::<usize, Box<Box<Log>>>(logger); }
}
}
/// The type returned by `set_logger` if `set_logger` has already been called.
#[allow(missing_copy_implementations)]
#[derive(Debug)]
pub struct SetLoggerError(());
impl fmt::Display for SetLoggerError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "attempted to set a logger after the logging system \
was already initialized")
}
}
impl error::Error for SetLoggerError {
fn description(&self) -> &str { "set_logger() called multiple times" }
}
struct LoggerGuard(usize);
impl Drop for LoggerGuard {
fn drop(&mut self) {
REFCOUNT.fetch_sub(1, Ordering::SeqCst);
}
}
impl Deref for LoggerGuard {
type Target = Box<Log>;
fn deref(&self) -> &Box<Log+'static> {
unsafe { mem::transmute(self.0) }
}
}
fn logger() -> Option<LoggerGuard> {
REFCOUNT.fetch_add(1, Ordering::SeqCst);
let logger = LOGGER.load(Ordering::SeqCst);
if logger == UNINITIALIZED || logger == INITIALIZING {
REFCOUNT.fetch_sub(1, Ordering::SeqCst);
None
} else {
Some(LoggerGuard(logger))
}
}
// WARNING
// This is not considered part of the crate's public API. It is subject to
// change at any time.
#[doc(hidden)]
pub fn __enabled(level: LogLevel, target: &str) -> bool {
if let Some(logger) = logger() {
logger.enabled(&LogMetadata { level: level, target: target })
} else {
false
}
}
// WARNING
// This is not considered part of the crate's public API. It is subject to
// change at any time.
#[doc(hidden)]
pub fn __log(level: LogLevel, target: &str, loc: &LogLocation,
args: fmt::Arguments) {
if let Some(logger) = logger() {
let record = LogRecord {
metadata: LogMetadata {
level: level,
target: target,
},
location: loc,
args: args
};
logger.log(&record)
}
}
#[cfg(test)]
mod tests {
use std::error::Error;
use super::{LogLevel, LogLevelFilter, SetLoggerError};
#[test]
fn test_loglevelfilter_from_str() {
let tests = [
("off", Ok(LogLevelFilter::Off)),
("error", Ok(LogLevelFilter::Error)),
("warn", Ok(LogLevelFilter::Warn)),
("info", Ok(LogLevelFilter::Info)),
("debug", Ok(LogLevelFilter::Debug)),
("trace", Ok(LogLevelFilter::Trace)),
("OFF", Ok(LogLevelFilter::Off)),
("ERROR", Ok(LogLevelFilter::Error)),
("WARN", Ok(LogLevelFilter::Warn)),
("INFO", Ok(LogLevelFilter::Info)),
("DEBUG", Ok(LogLevelFilter::Debug)),
("TRACE", Ok(LogLevelFilter::Trace)),
("asdf", Err(())),
];
for &(s, ref expected) in &tests {
assert_eq!(expected, &s.parse());
}
}
#[test]
fn test_loglevel_from_str() {
let tests = [
("OFF", Err(())),
("error", Ok(LogLevel::Error)),
("warn", Ok(LogLevel::Warn)),
("info", Ok(LogLevel::Info)),
("debug", Ok(LogLevel::Debug)),
("trace", Ok(LogLevel::Trace)),
("ERROR", Ok(LogLevel::Error)),
("WARN", Ok(LogLevel::Warn)),
("INFO", Ok(LogLevel::Info)),
("DEBUG", Ok(LogLevel::Debug)),
("TRACE", Ok(LogLevel::Trace)),
("asdf", Err(())),
];
for &(s, ref expected) in &tests {
assert_eq!(expected, &s.parse());
}
}
#[test]
fn test_loglevel_show() {
assert_eq!("INFO", LogLevel::Info.to_string());
assert_eq!("ERROR", LogLevel::Error.to_string());
}
#[test]
fn test_loglevelfilter_show() {
assert_eq!("OFF", LogLevelFilter::Off.to_string());
assert_eq!("ERROR", LogLevelFilter::Error.to_string());
}
#[test]
fn test_cross_cmp() {
assert!(LogLevel::Debug > LogLevelFilter::Error);
assert!(LogLevelFilter::Warn < LogLevel::Trace);
assert!(LogLevelFilter::Off < LogLevel::Error);
}
#[test]
fn test_cross_eq() {
assert!(LogLevel::Error == LogLevelFilter::Error);
assert!(LogLevelFilter::Off != LogLevel::Error);
assert!(LogLevel::Trace == LogLevelFilter::Trace);
}
#[test]
fn test_to_log_level() {
assert_eq!(Some(LogLevel::Error), LogLevelFilter::Error.to_log_level());
assert_eq!(None, LogLevelFilter::Off.to_log_level());
assert_eq!(Some(LogLevel::Debug), LogLevelFilter::Debug.to_log_level());
}
#[test]
fn test_to_log_level_filter() {
assert_eq!(LogLevelFilter::Error, LogLevel::Error.to_log_level_filter());
assert_eq!(LogLevelFilter::Trace, LogLevel::Trace.to_log_level_filter());
}
#[test]
fn test_error_trait() {
let e = SetLoggerError(());
assert_eq!(e.description(), "set_logger() called multiple times");
}
}
| {
self.metadata.level()
} | identifier_body |
lib.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A lightweight logging facade.
//!
//! A logging facade provides a single logging API that abstracts over the
//! actual logging implementation. Libraries can use the logging API provided
//! by this crate, and the consumer of those libraries can choose the logging
//! framework that is most suitable for its use case.
//!
//! If no logging implementation is selected, the facade falls back to a "noop"
//! implementation that ignores all log messages. The overhead in this case
//! is very small - just an integer load, comparison and jump.
//!
//! A log request consists of a target, a level, and a body. A target is a
//! string which defaults to the module path of the location of the log
//! request, though that default may be overridden. Logger implementations
//! typically use the target to filter requests based on some user
//! configuration.
//!
//! # Use
//!
//! ## In libraries
//!
//! Libraries should link only to the `log` crate, and use the provided
//! macros to log whatever information will be useful to downstream consumers.
//!
//! ### Examples
//!
//! ```rust
//! # #![allow(unstable)]
//! #[macro_use]
//! extern crate log;
//!
//! # #[derive(Debug)] pub struct Yak(String);
//! # impl Yak { fn shave(&self, _: u32) {} }
//! # fn find_a_razor() -> Result<u32, u32> { Ok(1) }
//! pub fn shave_the_yak(yak: &Yak) {
//! info!(target: "yak_events", "Commencing yak shaving for {:?}", yak);
//!
//! loop {
//! match find_a_razor() {
//! Ok(razor) => {
//! info!("Razor located: {}", razor);
//! yak.shave(razor);
//! break;
//! }
//! Err(err) => {
//! warn!("Unable to locate a razor: {}, retrying", err);
//! }
//! }
//! }
//! }
//! # fn main() {}
//! ```
//!
//! ## In executables
//!
//! Executables should chose a logging framework and initialize it early in the
//! runtime of the program. Logging frameworks will typically include a
//! function to do this. Any log messages generated before the framework is
//! initialized will be ignored.
//!
//! The executable itself may use the `log` crate to log as well.
//!
//! ### Warning
//!
//! The logging system may only be initialized once.
//!
//! ### Examples
//!
//! ```rust,ignore
//! #[macro_use]
//! extern crate log;
//! extern crate my_logger;
//!
//! fn main() {
//! my_logger::init();
//!
//! info!("starting up");
//!
//! // ...
//! }
//! ```
//!
//! # Logger implementations
//!
//! Loggers implement the `Log` trait. Here's a very basic example that simply
//! logs all messages at the `Error`, `Warn` or `Info` levels to stdout:
//!
//! ```rust
//! extern crate log;
//!
//! use log::{LogRecord, LogLevel, LogMetadata};
//!
//! struct SimpleLogger;
//!
//! impl log::Log for SimpleLogger {
//! fn enabled(&self, metadata: &LogMetadata) -> bool {
//! metadata.level() <= LogLevel::Info
//! }
//!
//! fn log(&self, record: &LogRecord) {
//! if self.enabled(record.metadata()) {
//! println!("{} - {}", record.level(), record.args());
//! }
//! }
//! }
//!
//! # fn main() {}
//! ```
//!
//! Loggers are installed by calling the `set_logger` function. It takes a
//! closure which is provided a `MaxLogLevel` token and returns a `Log` trait
//! object. The `MaxLogLevel` token controls the global maximum log level. The
//! logging facade uses this as an optimization to improve performance of log
//! messages at levels that are disabled. In the case of our example logger,
//! we'll want to set the maximum log level to `Info`, since we ignore any
//! `Debug` or `Trace` level log messages. A logging framework should provide a
//! function that wraps a call to `set_logger`, handling initialization of the
//! logger:
//!
//! ```rust
//! # extern crate log;
//! # use log::{LogLevel, LogLevelFilter, SetLoggerError, LogMetadata};
//! # struct SimpleLogger;
//! # impl log::Log for SimpleLogger {
//! # fn enabled(&self, _: &LogMetadata) -> bool { false }
//! # fn log(&self, _: &log::LogRecord) {}
//! # }
//! # fn main() {}
//! pub fn init() -> Result<(), SetLoggerError> {
//! log::set_logger(|max_log_level| {
//! max_log_level.set(LogLevelFilter::Info);
//! Box::new(SimpleLogger)
//! })
//! }
//! ```
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/log/")]
#![warn(missing_docs)]
extern crate libc;
use std::ascii::AsciiExt;
use std::cmp;
use std::error;
use std::fmt;
use std::mem;
use std::ops::Deref;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
mod macros;
// The setup here is a bit weird to make at_exit work.
//
// There are four different states that we care about: the logger's
// uninitialized, the logger's initializing (set_logger's been called but
// LOGGER hasn't actually been set yet), the logger's active, or the logger's
// shutting down inside of at_exit.
//
// The LOGGER static is normally a Box<Box<Log>> with some special possible
// values as well. The uninitialized and initializing states are represented by
// the values 0 and 1 respectively. The shutting down state is also represented
// by 1. Any other value is a valid pointer to the logger.
//
// The at_exit routine needs to make sure that no threads are actively logging
// when it deallocates the logger. The number of actively logging threads is
// tracked in the REFCOUNT static. The routine first sets LOGGER back to 1.
// All logging calls past that point will immediately return without accessing
// the logger. At that point, the at_exit routine just waits for the refcount
// to reach 0 before deallocating the logger. Note that the refcount does not
// necessarily monotonically decrease at this point, as new log calls still
// increment and decrement it, but the interval in between is small enough that
// the wait is really just for the active log calls to finish.
static LOGGER: AtomicUsize = ATOMIC_USIZE_INIT;
static REFCOUNT: AtomicUsize = ATOMIC_USIZE_INIT;
const UNINITIALIZED: usize = 0;
const INITIALIZING: usize = 1;
static MAX_LOG_LEVEL_FILTER: AtomicUsize = ATOMIC_USIZE_INIT;
static LOG_LEVEL_NAMES: [&'static str; 6] = ["OFF", "ERROR", "WARN", "INFO",
"DEBUG", "TRACE"];
/// An enum representing the available verbosity levels of the logging framework
///
/// A `LogLevel` may be compared directly to a `LogLevelFilter`.
#[repr(usize)]
#[derive(Copy, Eq, Debug)]
pub enum LogLevel {
/// The "error" level.
///
/// Designates very serious errors.
Error = 1, // This way these line up with the discriminants for LogLevelFilter below
/// The "warn" level.
///
/// Designates hazardous situations.
Warn,
/// The "info" level.
///
/// Designates useful information.
Info,
/// The "debug" level.
///
/// Designates lower priority information.
Debug,
/// The "trace" level.
///
/// Designates very low priority, often extremely verbose, information.
Trace,
}
impl Clone for LogLevel {
#[inline]
fn clone(&self) -> LogLevel {
*self
}
}
impl PartialEq for LogLevel {
#[inline]
fn eq(&self, other: &LogLevel) -> bool {
*self as usize == *other as usize
}
}
impl PartialEq<LogLevelFilter> for LogLevel {
#[inline]
fn eq(&self, other: &LogLevelFilter) -> bool {
*self as usize == *other as usize
}
}
impl PartialOrd for LogLevel {
#[inline]
fn partial_cmp(&self, other: &LogLevel) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl PartialOrd<LogLevelFilter> for LogLevel {
#[inline]
fn partial_cmp(&self, other: &LogLevelFilter) -> Option<cmp::Ordering> {
Some((*self as usize).cmp(&(*other as usize)))
}
}
impl Ord for LogLevel {
#[inline]
fn cmp(&self, other: &LogLevel) -> cmp::Ordering {
(*self as usize).cmp(&(*other as usize))
}
}
fn ok_or<T, E>(t: Option<T>, e: E) -> Result<T, E> {
match t {
Some(t) => Ok(t),
None => Err(e),
}
}
impl FromStr for LogLevel {
type Err = ();
fn from_str(level: &str) -> Result<LogLevel, ()> {
ok_or(LOG_LEVEL_NAMES.iter()
.position(|&name| name.eq_ignore_ascii_case(level))
.into_iter()
.filter(|&idx| idx != 0)
.map(|idx| LogLevel::from_usize(idx).unwrap())
.next(), ())
}
}
impl fmt::Display for LogLevel {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.pad(LOG_LEVEL_NAMES[*self as usize])
}
}
impl LogLevel {
fn from_usize(u: usize) -> Option<LogLevel> {
match u {
1 => Some(LogLevel::Error),
2 => Some(LogLevel::Warn),
3 => Some(LogLevel::Info),
4 => Some(LogLevel::Debug),
5 => Some(LogLevel::Trace),
_ => None
}
}
/// Returns the most verbose logging level.
#[inline]
pub fn max() -> LogLevel {
LogLevel::Trace
}
/// Converts the `LogLevel` to the equivalent `LogLevelFilter`.
#[inline]
pub fn to_log_level_filter(&self) -> LogLevelFilter {
LogLevelFilter::from_usize(*self as usize).unwrap()
}
}
/// An enum representing the available verbosity level filters of the logging
/// framework.
///
/// A `LogLevelFilter` may be compared directly to a `LogLevel`.
#[repr(usize)]
#[derive(Copy, Eq, Debug)]
pub enum LogLevelFilter {
/// A level lower than all log levels.
Off,
/// Corresponds to the `Error` log level.
Error,
/// Corresponds to the `Warn` log level.
Warn,
/// Corresponds to the `Info` log level.
Info,
/// Corresponds to the `Debug` log level.
Debug,
/// Corresponds to the `Trace` log level.
Trace,
}
// Deriving generates terrible impls of these traits
impl Clone for LogLevelFilter {
#[inline]
fn clone(&self) -> LogLevelFilter {
*self
}
}
impl PartialEq for LogLevelFilter {
#[inline]
fn | (&self, other: &LogLevelFilter) -> bool {
*self as usize == *other as usize
}
}
impl PartialEq<LogLevel> for LogLevelFilter {
#[inline]
fn eq(&self, other: &LogLevel) -> bool {
other.eq(self)
}
}
impl PartialOrd for LogLevelFilter {
#[inline]
fn partial_cmp(&self, other: &LogLevelFilter) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl PartialOrd<LogLevel> for LogLevelFilter {
#[inline]
fn partial_cmp(&self, other: &LogLevel) -> Option<cmp::Ordering> {
other.partial_cmp(self).map(|x| x.reverse())
}
}
impl Ord for LogLevelFilter {
#[inline]
fn cmp(&self, other: &LogLevelFilter) -> cmp::Ordering {
(*self as usize).cmp(&(*other as usize))
}
}
impl FromStr for LogLevelFilter {
type Err = ();
fn from_str(level: &str) -> Result<LogLevelFilter, ()> {
ok_or(LOG_LEVEL_NAMES.iter()
.position(|&name| name.eq_ignore_ascii_case(level))
.map(|p| LogLevelFilter::from_usize(p).unwrap()), ())
}
}
impl fmt::Display for LogLevelFilter {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", LOG_LEVEL_NAMES[*self as usize])
}
}
impl LogLevelFilter {
fn from_usize(u: usize) -> Option<LogLevelFilter> {
match u {
0 => Some(LogLevelFilter::Off),
1 => Some(LogLevelFilter::Error),
2 => Some(LogLevelFilter::Warn),
3 => Some(LogLevelFilter::Info),
4 => Some(LogLevelFilter::Debug),
5 => Some(LogLevelFilter::Trace),
_ => None
}
}
/// Returns the most verbose logging level filter.
#[inline]
pub fn max() -> LogLevelFilter {
LogLevelFilter::Trace
}
/// Converts `self` to the equivalent `LogLevel`.
///
/// Returns `None` if `self` is `LogLevelFilter::Off`.
#[inline]
pub fn to_log_level(&self) -> Option<LogLevel> {
LogLevel::from_usize(*self as usize)
}
}
/// The "payload" of a log message.
pub struct LogRecord<'a> {
metadata: LogMetadata<'a>,
location: &'a LogLocation,
args: fmt::Arguments<'a>,
}
impl<'a> LogRecord<'a> {
/// The message body.
pub fn args(&self) -> &fmt::Arguments<'a> {
&self.args
}
/// Metadata about the log directive.
pub fn metadata(&self) -> &LogMetadata {
&self.metadata
}
/// The location of the log directive.
pub fn location(&self) -> &LogLocation {
self.location
}
/// The verbosity level of the message.
pub fn level(&self) -> LogLevel {
self.metadata.level()
}
/// The name of the target of the directive.
pub fn target(&self) -> &str {
self.metadata.target()
}
}
/// Metadata about a log message.
pub struct LogMetadata<'a> {
level: LogLevel,
target: &'a str,
}
impl<'a> LogMetadata<'a> {
/// The verbosity level of the message.
pub fn level(&self) -> LogLevel {
self.level
}
/// The name of the target of the directive.
pub fn target(&self) -> &str {
self.target
}
}
/// A trait encapsulating the operations required of a logger
pub trait Log: Sync+Send {
/// Determines if a log message with the specified metadata would be
/// logged.
///
/// This is used by the `log_enabled!` macro to allow callers to avoid
/// expensive computation of log message arguments if the message would be
/// discarded anyway.
fn enabled(&self, metadata: &LogMetadata) -> bool;
/// Logs the `LogRecord`.
///
/// Note that `enabled` is *not* necessarily called before this method.
/// Implementations of `log` should perform all necessary filtering
/// internally.
fn log(&self, record: &LogRecord);
}
/// The location of a log message.
///
/// # Warning
///
/// The fields of this struct are public so that they may be initialized by the
/// `log!` macro. They are subject to change at any time and should never be
/// accessed directly.
#[derive(Copy, Clone, Debug)]
pub struct LogLocation {
#[doc(hidden)]
pub __module_path: &'static str,
#[doc(hidden)]
pub __file: &'static str,
#[doc(hidden)]
pub __line: u32,
}
impl LogLocation {
/// The module path of the message.
pub fn module_path(&self) -> &str {
self.__module_path
}
/// The source file containing the message.
pub fn file(&self) -> &str {
self.__file
}
/// The line containing the message.
pub fn line(&self) -> u32 {
self.__line
}
}
/// A token providing read and write access to the global maximum log level
/// filter.
///
/// The maximum log level is used as an optimization to avoid evaluating log
/// messages that will be ignored by the logger. Any message with a level
/// higher than the maximum log level filter will be ignored. A logger should
/// make sure to keep the maximum log level filter in sync with its current
/// configuration.
#[allow(missing_copy_implementations)]
pub struct MaxLogLevelFilter(());
impl fmt::Debug for MaxLogLevelFilter {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "MaxLogLevelFilter")
}
}
impl MaxLogLevelFilter {
/// Gets the current maximum log level filter.
pub fn get(&self) -> LogLevelFilter {
max_log_level()
}
/// Sets the maximum log level.
pub fn set(&self, level: LogLevelFilter) {
MAX_LOG_LEVEL_FILTER.store(level as usize, Ordering::SeqCst)
}
}
/// Returns the current maximum log level.
///
/// The `log!`, `error!`, `warn!`, `info!`, `debug!`, and `trace!` macros check
/// this value and discard any message logged at a higher level. The maximum
/// log level is set by the `MaxLogLevel` token passed to loggers.
#[inline(always)]
pub fn max_log_level() -> LogLevelFilter {
unsafe { mem::transmute(MAX_LOG_LEVEL_FILTER.load(Ordering::Relaxed)) }
}
/// Sets the global logger.
///
/// The `make_logger` closure is passed a `MaxLogLevel` object, which the
/// logger should use to keep the global maximum log level in sync with the
/// highest log level that the logger will not ignore.
///
/// This function may only be called once in the lifetime of a program. Any log
/// events that occur before the call to `set_logger` completes will be
/// ignored.
///
/// This function does not typically need to be called manually. Logger
/// implementations should provide an initialization method that calls
/// `set_logger` internally.
pub fn set_logger<M>(make_logger: M) -> Result<(), SetLoggerError>
where M: FnOnce(MaxLogLevelFilter) -> Box<Log> {
if LOGGER.compare_and_swap(UNINITIALIZED, INITIALIZING,
Ordering::SeqCst) != UNINITIALIZED {
return Err(SetLoggerError(()));
}
let logger = Box::new(make_logger(MaxLogLevelFilter(())));
let logger = unsafe { mem::transmute::<Box<Box<Log>>, usize>(logger) };
LOGGER.store(logger, Ordering::SeqCst);
unsafe {
assert_eq!(libc::atexit(shutdown), 0);
}
return Ok(());
extern fn shutdown() {
// Set to INITIALIZING to prevent re-initialization after
let logger = LOGGER.swap(INITIALIZING, Ordering::SeqCst);
while REFCOUNT.load(Ordering::SeqCst) != 0 {
// FIXME add a sleep here when it doesn't involve timers
}
unsafe { mem::transmute::<usize, Box<Box<Log>>>(logger); }
}
}
/// The type returned by `set_logger` if `set_logger` has already been called.
#[allow(missing_copy_implementations)]
#[derive(Debug)]
pub struct SetLoggerError(());
impl fmt::Display for SetLoggerError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "attempted to set a logger after the logging system \
was already initialized")
}
}
impl error::Error for SetLoggerError {
fn description(&self) -> &str { "set_logger() called multiple times" }
}
struct LoggerGuard(usize);
impl Drop for LoggerGuard {
fn drop(&mut self) {
REFCOUNT.fetch_sub(1, Ordering::SeqCst);
}
}
impl Deref for LoggerGuard {
type Target = Box<Log>;
fn deref(&self) -> &Box<Log+'static> {
unsafe { mem::transmute(self.0) }
}
}
fn logger() -> Option<LoggerGuard> {
REFCOUNT.fetch_add(1, Ordering::SeqCst);
let logger = LOGGER.load(Ordering::SeqCst);
if logger == UNINITIALIZED || logger == INITIALIZING {
REFCOUNT.fetch_sub(1, Ordering::SeqCst);
None
} else {
Some(LoggerGuard(logger))
}
}
// WARNING
// This is not considered part of the crate's public API. It is subject to
// change at any time.
#[doc(hidden)]
pub fn __enabled(level: LogLevel, target: &str) -> bool {
if let Some(logger) = logger() {
logger.enabled(&LogMetadata { level: level, target: target })
} else {
false
}
}
// WARNING
// This is not considered part of the crate's public API. It is subject to
// change at any time.
#[doc(hidden)]
pub fn __log(level: LogLevel, target: &str, loc: &LogLocation,
args: fmt::Arguments) {
if let Some(logger) = logger() {
let record = LogRecord {
metadata: LogMetadata {
level: level,
target: target,
},
location: loc,
args: args
};
logger.log(&record)
}
}
#[cfg(test)]
mod tests {
use std::error::Error;
use super::{LogLevel, LogLevelFilter, SetLoggerError};
#[test]
fn test_loglevelfilter_from_str() {
let tests = [
("off", Ok(LogLevelFilter::Off)),
("error", Ok(LogLevelFilter::Error)),
("warn", Ok(LogLevelFilter::Warn)),
("info", Ok(LogLevelFilter::Info)),
("debug", Ok(LogLevelFilter::Debug)),
("trace", Ok(LogLevelFilter::Trace)),
("OFF", Ok(LogLevelFilter::Off)),
("ERROR", Ok(LogLevelFilter::Error)),
("WARN", Ok(LogLevelFilter::Warn)),
("INFO", Ok(LogLevelFilter::Info)),
("DEBUG", Ok(LogLevelFilter::Debug)),
("TRACE", Ok(LogLevelFilter::Trace)),
("asdf", Err(())),
];
for &(s, ref expected) in &tests {
assert_eq!(expected, &s.parse());
}
}
#[test]
fn test_loglevel_from_str() {
let tests = [
("OFF", Err(())),
("error", Ok(LogLevel::Error)),
("warn", Ok(LogLevel::Warn)),
("info", Ok(LogLevel::Info)),
("debug", Ok(LogLevel::Debug)),
("trace", Ok(LogLevel::Trace)),
("ERROR", Ok(LogLevel::Error)),
("WARN", Ok(LogLevel::Warn)),
("INFO", Ok(LogLevel::Info)),
("DEBUG", Ok(LogLevel::Debug)),
("TRACE", Ok(LogLevel::Trace)),
("asdf", Err(())),
];
for &(s, ref expected) in &tests {
assert_eq!(expected, &s.parse());
}
}
#[test]
fn test_loglevel_show() {
assert_eq!("INFO", LogLevel::Info.to_string());
assert_eq!("ERROR", LogLevel::Error.to_string());
}
#[test]
fn test_loglevelfilter_show() {
assert_eq!("OFF", LogLevelFilter::Off.to_string());
assert_eq!("ERROR", LogLevelFilter::Error.to_string());
}
#[test]
fn test_cross_cmp() {
assert!(LogLevel::Debug > LogLevelFilter::Error);
assert!(LogLevelFilter::Warn < LogLevel::Trace);
assert!(LogLevelFilter::Off < LogLevel::Error);
}
#[test]
fn test_cross_eq() {
assert!(LogLevel::Error == LogLevelFilter::Error);
assert!(LogLevelFilter::Off != LogLevel::Error);
assert!(LogLevel::Trace == LogLevelFilter::Trace);
}
#[test]
fn test_to_log_level() {
assert_eq!(Some(LogLevel::Error), LogLevelFilter::Error.to_log_level());
assert_eq!(None, LogLevelFilter::Off.to_log_level());
assert_eq!(Some(LogLevel::Debug), LogLevelFilter::Debug.to_log_level());
}
#[test]
fn test_to_log_level_filter() {
assert_eq!(LogLevelFilter::Error, LogLevel::Error.to_log_level_filter());
assert_eq!(LogLevelFilter::Trace, LogLevel::Trace.to_log_level_filter());
}
#[test]
fn test_error_trait() {
let e = SetLoggerError(());
assert_eq!(e.description(), "set_logger() called multiple times");
}
}
| eq | identifier_name |
lib.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A lightweight logging facade.
//!
//! A logging facade provides a single logging API that abstracts over the
//! actual logging implementation. Libraries can use the logging API provided
//! by this crate, and the consumer of those libraries can choose the logging
//! framework that is most suitable for its use case.
//!
//! If no logging implementation is selected, the facade falls back to a "noop"
//! implementation that ignores all log messages. The overhead in this case
//! is very small - just an integer load, comparison and jump.
//!
//! A log request consists of a target, a level, and a body. A target is a
//! string which defaults to the module path of the location of the log
//! request, though that default may be overridden. Logger implementations
//! typically use the target to filter requests based on some user
//! configuration.
//!
//! # Use
//!
//! ## In libraries
//!
//! Libraries should link only to the `log` crate, and use the provided
//! macros to log whatever information will be useful to downstream consumers.
//!
//! ### Examples
//!
//! ```rust
//! # #![allow(unstable)]
//! #[macro_use]
//! extern crate log;
//!
//! # #[derive(Debug)] pub struct Yak(String);
//! # impl Yak { fn shave(&self, _: u32) {} }
//! # fn find_a_razor() -> Result<u32, u32> { Ok(1) }
//! pub fn shave_the_yak(yak: &Yak) {
//! info!(target: "yak_events", "Commencing yak shaving for {:?}", yak);
//!
//! loop {
//! match find_a_razor() {
//! Ok(razor) => {
//! info!("Razor located: {}", razor);
//! yak.shave(razor);
//! break;
//! }
//! Err(err) => {
//! warn!("Unable to locate a razor: {}, retrying", err);
//! }
//! }
//! }
//! }
//! # fn main() {}
//! ```
//!
//! ## In executables
//!
//! Executables should chose a logging framework and initialize it early in the
//! runtime of the program. Logging frameworks will typically include a
//! function to do this. Any log messages generated before the framework is
//! initialized will be ignored.
//!
//! The executable itself may use the `log` crate to log as well.
//!
//! ### Warning
//!
//! The logging system may only be initialized once.
//!
//! ### Examples
//!
//! ```rust,ignore
//! #[macro_use]
//! extern crate log;
//! extern crate my_logger;
//!
//! fn main() {
//! my_logger::init();
//!
//! info!("starting up");
//!
//! // ...
//! }
//! ```
//!
//! # Logger implementations
//!
//! Loggers implement the `Log` trait. Here's a very basic example that simply
//! logs all messages at the `Error`, `Warn` or `Info` levels to stdout:
//!
//! ```rust
//! extern crate log;
//!
//! use log::{LogRecord, LogLevel, LogMetadata};
//!
//! struct SimpleLogger;
//!
//! impl log::Log for SimpleLogger {
//! fn enabled(&self, metadata: &LogMetadata) -> bool {
//! metadata.level() <= LogLevel::Info
//! }
//!
//! fn log(&self, record: &LogRecord) {
//! if self.enabled(record.metadata()) {
//! println!("{} - {}", record.level(), record.args());
//! }
//! }
//! }
//!
//! # fn main() {}
//! ```
//!
//! Loggers are installed by calling the `set_logger` function. It takes a
//! closure which is provided a `MaxLogLevelFilter` token and returns a `Log`
//! trait object. The `MaxLogLevelFilter` token controls the global maximum log
//! level. The
//! logging facade uses this as an optimization to improve performance of log
//! messages at levels that are disabled. In the case of our example logger,
//! we'll want to set the maximum log level to `Info`, since we ignore any
//! `Debug` or `Trace` level log messages. A logging framework should provide a
//! function that wraps a call to `set_logger`, handling initialization of the
//! logger:
//!
//! ```rust
//! # extern crate log;
//! # use log::{LogLevel, LogLevelFilter, SetLoggerError, LogMetadata};
//! # struct SimpleLogger;
//! # impl log::Log for SimpleLogger {
//! # fn enabled(&self, _: &LogMetadata) -> bool { false }
//! # fn log(&self, _: &log::LogRecord) {}
//! # }
//! # fn main() {}
//! pub fn init() -> Result<(), SetLoggerError> {
//! log::set_logger(|max_log_level| {
//! max_log_level.set(LogLevelFilter::Info);
//! Box::new(SimpleLogger)
//! })
//! }
//! ```
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/log/")]
#![warn(missing_docs)]
extern crate libc;
use std::ascii::AsciiExt;
use std::cmp;
use std::error;
use std::fmt;
use std::mem;
use std::ops::Deref;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
mod macros;
// The setup here is a bit weird to make at_exit work.
//
// There are four different states that we care about: the logger's
// uninitialized, the logger's initializing (set_logger's been called but
// LOGGER hasn't actually been set yet), the logger's active, or the logger's
// shutting down inside of at_exit.
//
// The LOGGER static is normally a Box<Box<Log>> with some special possible
// values as well. The uninitialized and initializing states are represented by
// the values 0 and 1 respectively. The shutting down state is also represented
// by 1. Any other value is a valid pointer to the logger.
//
// The at_exit routine needs to make sure that no threads are actively logging
// when it deallocates the logger. The number of actively logging threads is
// tracked in the REFCOUNT static. The routine first sets LOGGER back to 1.
// All logging calls past that point will immediately return without accessing
// the logger. At that point, the at_exit routine just waits for the refcount
// to reach 0 before deallocating the logger. Note that the refcount does not
// necessarily monotonically decrease at this point, as new log calls still
// increment and decrement it, but the interval in between is small enough that
// the wait is really just for the active log calls to finish.
static LOGGER: AtomicUsize = ATOMIC_USIZE_INIT;
static REFCOUNT: AtomicUsize = ATOMIC_USIZE_INIT;
const UNINITIALIZED: usize = 0;
const INITIALIZING: usize = 1;
static MAX_LOG_LEVEL_FILTER: AtomicUsize = ATOMIC_USIZE_INIT;
static LOG_LEVEL_NAMES: [&'static str; 6] = ["OFF", "ERROR", "WARN", "INFO",
"DEBUG", "TRACE"];
/// An enum representing the available verbosity levels of the logging framework
///
/// A `LogLevel` may be compared directly to a `LogLevelFilter`.
#[repr(usize)]
#[derive(Copy, Eq, Debug)]
pub enum LogLevel {
/// The "error" level.
///
/// Designates very serious errors.
Error = 1, // This way these line up with the discriminants for LogLevelFilter below
/// The "warn" level.
///
/// Designates hazardous situations.
Warn,
/// The "info" level.
///
/// Designates useful information.
Info,
/// The "debug" level.
///
/// Designates lower priority information.
Debug,
/// The "trace" level.
///
/// Designates very low priority, often extremely verbose, information.
Trace,
}
impl Clone for LogLevel {
#[inline]
fn clone(&self) -> LogLevel {
*self
}
}
impl PartialEq for LogLevel {
#[inline]
fn eq(&self, other: &LogLevel) -> bool {
*self as usize == *other as usize
}
}
impl PartialEq<LogLevelFilter> for LogLevel {
#[inline]
fn eq(&self, other: &LogLevelFilter) -> bool {
*self as usize == *other as usize
}
}
impl PartialOrd for LogLevel {
#[inline]
fn partial_cmp(&self, other: &LogLevel) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl PartialOrd<LogLevelFilter> for LogLevel {
#[inline]
fn partial_cmp(&self, other: &LogLevelFilter) -> Option<cmp::Ordering> {
Some((*self as usize).cmp(&(*other as usize)))
}
}
impl Ord for LogLevel {
#[inline]
fn cmp(&self, other: &LogLevel) -> cmp::Ordering {
(*self as usize).cmp(&(*other as usize))
}
}
fn ok_or<T, E>(t: Option<T>, e: E) -> Result<T, E> {
match t {
Some(t) => Ok(t),
None => Err(e),
}
}
impl FromStr for LogLevel {
type Err = ();
fn from_str(level: &str) -> Result<LogLevel, ()> {
ok_or(LOG_LEVEL_NAMES.iter()
.position(|&name| name.eq_ignore_ascii_case(level))
.into_iter()
.filter(|&idx| idx != 0)
.map(|idx| LogLevel::from_usize(idx).unwrap())
.next(), ())
}
}
impl fmt::Display for LogLevel {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.pad(LOG_LEVEL_NAMES[*self as usize])
}
}
impl LogLevel {
fn from_usize(u: usize) -> Option<LogLevel> {
match u {
1 => Some(LogLevel::Error),
2 => Some(LogLevel::Warn),
3 => Some(LogLevel::Info),
4 => Some(LogLevel::Debug),
5 => Some(LogLevel::Trace),
_ => None
}
}
/// Returns the most verbose logging level.
#[inline]
pub fn max() -> LogLevel {
LogLevel::Trace
}
/// Converts the `LogLevel` to the equivalent `LogLevelFilter`.
#[inline]
pub fn to_log_level_filter(&self) -> LogLevelFilter {
LogLevelFilter::from_usize(*self as usize).unwrap()
}
}
/// An enum representing the available verbosity level filters of the logging
/// framework.
///
/// A `LogLevelFilter` may be compared directly to a `LogLevel`.
#[repr(usize)]
#[derive(Copy, Eq, Debug)]
pub enum LogLevelFilter {
/// A level lower than all log levels.
Off,
/// Corresponds to the `Error` log level.
Error,
/// Corresponds to the `Warn` log level.
Warn,
/// Corresponds to the `Info` log level.
Info,
/// Corresponds to the `Debug` log level.
Debug,
/// Corresponds to the `Trace` log level.
Trace,
}
// Deriving generates terrible impls of these traits
impl Clone for LogLevelFilter {
#[inline]
fn clone(&self) -> LogLevelFilter {
*self
}
}
impl PartialEq for LogLevelFilter {
#[inline]
fn eq(&self, other: &LogLevelFilter) -> bool {
*self as usize == *other as usize
}
}
impl PartialEq<LogLevel> for LogLevelFilter {
#[inline]
fn eq(&self, other: &LogLevel) -> bool {
other.eq(self)
}
}
impl PartialOrd for LogLevelFilter {
#[inline]
fn partial_cmp(&self, other: &LogLevelFilter) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl PartialOrd<LogLevel> for LogLevelFilter {
#[inline]
fn partial_cmp(&self, other: &LogLevel) -> Option<cmp::Ordering> {
other.partial_cmp(self).map(|x| x.reverse())
}
}
impl Ord for LogLevelFilter {
#[inline]
fn cmp(&self, other: &LogLevelFilter) -> cmp::Ordering {
(*self as usize).cmp(&(*other as usize))
}
}
impl FromStr for LogLevelFilter {
type Err = ();
fn from_str(level: &str) -> Result<LogLevelFilter, ()> {
ok_or(LOG_LEVEL_NAMES.iter()
.position(|&name| name.eq_ignore_ascii_case(level))
.map(|p| LogLevelFilter::from_usize(p).unwrap()), ())
}
}
impl fmt::Display for LogLevelFilter {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", LOG_LEVEL_NAMES[*self as usize])
}
}
impl LogLevelFilter {
fn from_usize(u: usize) -> Option<LogLevelFilter> {
match u {
0 => Some(LogLevelFilter::Off),
1 => Some(LogLevelFilter::Error),
2 => Some(LogLevelFilter::Warn),
3 => Some(LogLevelFilter::Info),
4 => Some(LogLevelFilter::Debug),
5 => Some(LogLevelFilter::Trace),
_ => None
}
}
/// Returns the most verbose logging level filter.
#[inline]
pub fn max() -> LogLevelFilter {
LogLevelFilter::Trace
}
/// Converts `self` to the equivalent `LogLevel`.
///
/// Returns `None` if `self` is `LogLevelFilter::Off`.
#[inline]
pub fn to_log_level(&self) -> Option<LogLevel> {
LogLevel::from_usize(*self as usize)
}
}
/// The "payload" of a log message.
pub struct LogRecord<'a> {
metadata: LogMetadata<'a>,
location: &'a LogLocation,
args: fmt::Arguments<'a>,
}
impl<'a> LogRecord<'a> {
/// The message body.
pub fn args(&self) -> &fmt::Arguments<'a> {
&self.args
}
/// Metadata about the log directive.
pub fn metadata(&self) -> &LogMetadata {
&self.metadata
}
/// The location of the log directive.
pub fn location(&self) -> &LogLocation {
self.location
}
/// The verbosity level of the message.
pub fn level(&self) -> LogLevel {
self.metadata.level()
}
/// The name of the target of the directive.
pub fn target(&self) -> &str {
self.metadata.target()
}
}
/// Metadata about a log message.
pub struct LogMetadata<'a> {
level: LogLevel,
target: &'a str,
}
impl<'a> LogMetadata<'a> {
/// The verbosity level of the message.
pub fn level(&self) -> LogLevel {
self.level
}
/// The name of the target of the directive.
pub fn target(&self) -> &str {
self.target
}
}
/// A trait encapsulating the operations required of a logger
pub trait Log: Sync+Send {
/// Determines if a log message with the specified metadata would be
/// logged.
///
/// This is used by the `log_enabled!` macro to allow callers to avoid
/// expensive computation of log message arguments if the message would be
/// discarded anyway.
fn enabled(&self, metadata: &LogMetadata) -> bool;
/// Logs the `LogRecord`.
///
/// Note that `enabled` is *not* necessarily called before this method.
/// Implementations of `log` should perform all necessary filtering
/// internally.
fn log(&self, record: &LogRecord);
}
/// The location of a log message.
///
/// # Warning
///
/// The fields of this struct are public so that they may be initialized by the
/// `log!` macro. They are subject to change at any time and should never be
/// accessed directly.
#[derive(Copy, Clone, Debug)]
pub struct LogLocation {
#[doc(hidden)]
pub __module_path: &'static str,
#[doc(hidden)]
pub __file: &'static str,
#[doc(hidden)]
pub __line: u32,
}
impl LogLocation {
/// The module path of the message.
pub fn module_path(&self) -> &str {
self.__module_path
}
/// The source file containing the message.
pub fn file(&self) -> &str {
self.__file
}
/// The line containing the message.
pub fn line(&self) -> u32 {
self.__line
}
}
/// A token providing read and write access to the global maximum log level
/// filter.
///
/// The maximum log level is used as an optimization to avoid evaluating log
/// messages that will be ignored by the logger. Any message with a level
/// higher than the maximum log level filter will be ignored. A logger should
/// make sure to keep the maximum log level filter in sync with its current
/// configuration.
#[allow(missing_copy_implementations)]
pub struct MaxLogLevelFilter(());
impl fmt::Debug for MaxLogLevelFilter {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "MaxLogLevelFilter")
}
}
impl MaxLogLevelFilter {
/// Gets the current maximum log level filter.
pub fn get(&self) -> LogLevelFilter {
max_log_level()
}
/// Sets the maximum log level.
pub fn set(&self, level: LogLevelFilter) {
MAX_LOG_LEVEL_FILTER.store(level as usize, Ordering::SeqCst)
}
}
/// Returns the current maximum log level.
///
/// The `log!`, `error!`, `warn!`, `info!`, `debug!`, and `trace!` macros check
/// this value and discard any message logged at a higher level. The maximum
/// log level is set by the `MaxLogLevelFilter` token passed to loggers.
#[inline(always)]
pub fn max_log_level() -> LogLevelFilter {
unsafe { mem::transmute(MAX_LOG_LEVEL_FILTER.load(Ordering::Relaxed)) }
}
/// Sets the global logger.
///
/// The `make_logger` closure is passed a `MaxLogLevelFilter` object, which the
/// logger should use to keep the global maximum log level in sync with the
/// highest log level that the logger will not ignore.
///
/// This function may only be called once in the lifetime of a program. Any log
/// events that occur before the call to `set_logger` completes will be
/// ignored.
///
/// This function does not typically need to be called manually. Logger
/// implementations should provide an initialization method that calls
/// `set_logger` internally.
pub fn set_logger<M>(make_logger: M) -> Result<(), SetLoggerError>
where M: FnOnce(MaxLogLevelFilter) -> Box<Log> {
if LOGGER.compare_and_swap(UNINITIALIZED, INITIALIZING,
Ordering::SeqCst) != UNINITIALIZED {
return Err(SetLoggerError(()));
}
let logger = Box::new(make_logger(MaxLogLevelFilter(())));
let logger = unsafe { mem::transmute::<Box<Box<Log>>, usize>(logger) };
LOGGER.store(logger, Ordering::SeqCst);
unsafe {
assert_eq!(libc::atexit(shutdown), 0);
}
return Ok(());
extern fn shutdown() {
// Set to INITIALIZING to prevent re-initialization after
let logger = LOGGER.swap(INITIALIZING, Ordering::SeqCst);
while REFCOUNT.load(Ordering::SeqCst) != 0 {
// FIXME add a sleep here when it doesn't involve timers
}
unsafe { mem::transmute::<usize, Box<Box<Log>>>(logger); }
}
}
/// The type returned by `set_logger` if `set_logger` has already been called.
#[allow(missing_copy_implementations)]
#[derive(Debug)]
pub struct SetLoggerError(());
impl fmt::Display for SetLoggerError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "attempted to set a logger after the logging system \
was already initialized")
}
}
impl error::Error for SetLoggerError {
fn description(&self) -> &str { "set_logger() called multiple times" }
}
struct LoggerGuard(usize);
impl Drop for LoggerGuard {
fn drop(&mut self) {
REFCOUNT.fetch_sub(1, Ordering::SeqCst);
}
}
impl Deref for LoggerGuard {
type Target = Box<Log>;
fn deref(&self) -> &Box<Log+'static> {
unsafe { mem::transmute(self.0) }
}
}
fn logger() -> Option<LoggerGuard> {
REFCOUNT.fetch_add(1, Ordering::SeqCst);
let logger = LOGGER.load(Ordering::SeqCst);
if logger == UNINITIALIZED || logger == INITIALIZING {
REFCOUNT.fetch_sub(1, Ordering::SeqCst);
None
} else {
Some(LoggerGuard(logger))
}
}
// WARNING
// This is not considered part of the crate's public API. It is subject to
// change at any time.
#[doc(hidden)]
pub fn __enabled(level: LogLevel, target: &str) -> bool {
if let Some(logger) = logger() {
logger.enabled(&LogMetadata { level: level, target: target })
} else |
}
// WARNING
// This is not considered part of the crate's public API. It is subject to
// change at any time.
#[doc(hidden)]
pub fn __log(level: LogLevel, target: &str, loc: &LogLocation,
args: fmt::Arguments) {
if let Some(logger) = logger() {
let record = LogRecord {
metadata: LogMetadata {
level: level,
target: target,
},
location: loc,
args: args
};
logger.log(&record)
}
}
#[cfg(test)]
mod tests {
use std::error::Error;
use super::{LogLevel, LogLevelFilter, SetLoggerError};
#[test]
fn test_loglevelfilter_from_str() {
let tests = [
("off", Ok(LogLevelFilter::Off)),
("error", Ok(LogLevelFilter::Error)),
("warn", Ok(LogLevelFilter::Warn)),
("info", Ok(LogLevelFilter::Info)),
("debug", Ok(LogLevelFilter::Debug)),
("trace", Ok(LogLevelFilter::Trace)),
("OFF", Ok(LogLevelFilter::Off)),
("ERROR", Ok(LogLevelFilter::Error)),
("WARN", Ok(LogLevelFilter::Warn)),
("INFO", Ok(LogLevelFilter::Info)),
("DEBUG", Ok(LogLevelFilter::Debug)),
("TRACE", Ok(LogLevelFilter::Trace)),
("asdf", Err(())),
];
for &(s, ref expected) in &tests {
assert_eq!(expected, &s.parse());
}
}
#[test]
fn test_loglevel_from_str() {
let tests = [
("OFF", Err(())),
("error", Ok(LogLevel::Error)),
("warn", Ok(LogLevel::Warn)),
("info", Ok(LogLevel::Info)),
("debug", Ok(LogLevel::Debug)),
("trace", Ok(LogLevel::Trace)),
("ERROR", Ok(LogLevel::Error)),
("WARN", Ok(LogLevel::Warn)),
("INFO", Ok(LogLevel::Info)),
("DEBUG", Ok(LogLevel::Debug)),
("TRACE", Ok(LogLevel::Trace)),
("asdf", Err(())),
];
for &(s, ref expected) in &tests {
assert_eq!(expected, &s.parse());
}
}
#[test]
fn test_loglevel_show() {
assert_eq!("INFO", LogLevel::Info.to_string());
assert_eq!("ERROR", LogLevel::Error.to_string());
}
#[test]
fn test_loglevelfilter_show() {
assert_eq!("OFF", LogLevelFilter::Off.to_string());
assert_eq!("ERROR", LogLevelFilter::Error.to_string());
}
#[test]
fn test_cross_cmp() {
assert!(LogLevel::Debug > LogLevelFilter::Error);
assert!(LogLevelFilter::Warn < LogLevel::Trace);
assert!(LogLevelFilter::Off < LogLevel::Error);
}
#[test]
fn test_cross_eq() {
assert!(LogLevel::Error == LogLevelFilter::Error);
assert!(LogLevelFilter::Off != LogLevel::Error);
assert!(LogLevel::Trace == LogLevelFilter::Trace);
}
#[test]
fn test_to_log_level() {
assert_eq!(Some(LogLevel::Error), LogLevelFilter::Error.to_log_level());
assert_eq!(None, LogLevelFilter::Off.to_log_level());
assert_eq!(Some(LogLevel::Debug), LogLevelFilter::Debug.to_log_level());
}
#[test]
fn test_to_log_level_filter() {
assert_eq!(LogLevelFilter::Error, LogLevel::Error.to_log_level_filter());
assert_eq!(LogLevelFilter::Trace, LogLevel::Trace.to_log_level_filter());
}
#[test]
fn test_error_trait() {
let e = SetLoggerError(());
assert_eq!(e.description(), "set_logger() called multiple times");
}
}
| {
false
} | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.