code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
# -*- coding: utf-8 -*-
import uuid
from openerp import api, fields, models, _
class Rating(models.Model):
    # Stores one rating given by a partner (customer) on an arbitrary record,
    # identified generically by the (res_model, res_id) pair.
    _name = "rating.rating"
    _description = "Rating"
    _order = 'create_date desc'
    _rec_name = 'res_name'

    # DB-level guard: -1 is the "not rated yet" sentinel (see the `rating`
    # field default below); real grades live in [0, 10].
    _sql_constraints = [
        ('rating_range', 'check(rating >= -1 and rating <= 10)', 'Rating should be between -1 to 10'),
    ]

    @api.one
    @api.depends('res_model', 'res_id')
    def _compute_res_name(self):
        # Display name of the rated record via name_get(); falls back to
        # "model/id" when name_get() returns nothing usable.
        name = self.env[self.res_model].sudo().browse(self.res_id).name_get()
        self.res_name = name and name[0][1] or ('%s/%s') % (self.res_model, self.res_id)

    @api.model
    def new_access_token(self):
        # Fresh 32-char hex token; authenticates public "rate this" links.
        return uuid.uuid4().hex

    res_name = fields.Char(string='Resource Name', compute='_compute_res_name', store=True, help="The name of the rated resource.")
    res_model = fields.Char(string='Document Model', required=True, help="Model name of the rated object", index=True)
    res_id = fields.Integer(string='Document ID', required=True, help="Identifier of the rated object", index=True)
    rated_partner_id = fields.Many2one('res.partner', string="Rated Partner", help="Owner of the rated resource")
    partner_id = fields.Many2one('res.partner', string='Customer', help="Author of the rating")
    rating = fields.Float(string="Rating", group_operator="avg", default=-1, help="Rating value")
    feedback = fields.Text('Feedback reason', help="Reason of the rating")
    access_token = fields.Char(string='Security Token', default=new_access_token, help="Access token to set the rating of the value")
    message_id = fields.Many2one('mail.message', string="Linked message", help="Associated message when posting a review. Mainly used in website addons.", index=True)

    @api.model
    def apply_rating(self, rate, res_model=None, res_id=None, token=None):
        """ apply a rating for given res_model/res_id or token. If the res_model is a mail.thread
            object, a message will be posted in the chatter.
            :param rate : the rating value to apply
            :type rate : float
            :param res_id : id of the rated object.
            :param res_model : name of model.
            :param token : access token
            :returns rating.rating record
        """
        # An access token (public link) takes precedence over model/id lookup.
        domain = [('access_token', '=', token)] if token else [('res_model', '=', res_model), ('res_id', '=', res_id)]
        rating = self.search(domain, limit=1)
        if rating:
            rating.write({'rating' : rate})
            # Post a chatter message when the rated model supports messaging.
            if hasattr(self.env[rating.res_model], 'message_post'):
                record = self.env[rating.res_model].sudo().browse(rating.res_id)
                record.sudo().message_post(
                    body="%s %s <br/><img src='/rating/static/src/img/rating_%s.png' style='width:20px;height:20px'/>"
                        % (rating.sudo().partner_id.name, _('rated it'), rate),
                    subtype='mail.mt_comment',
                    author_id=rating.partner_id and rating.partner_id.id or None # None will set the default author in mail_thread.py
                )
        return rating

    @api.multi
    def reset(self):
        # Clear grade and feedback, and rotate the access token so the old
        # public rating link can no longer be reused.
        for record in self:
            record.write({
                'rating': -1,
                'access_token': record.new_access_token(),
                'feedback' : False
            })
class RatingMixin(models.AbstractModel):
    # Mixin adding rating support (request emails + statistics helpers) to
    # any model that inherits it.
    _name = 'rating.mixin'
    _description = "Rating Mixin"

    # Ratings linked to the inheriting record through the generic
    # (res_model, res_id) reference on rating.rating.
    rating_ids = fields.One2many('rating.rating', 'res_id', string='Rating', domain=lambda self: [('res_model', '=', self._name)])

    @api.multi
    def rating_send_request(self, template, partner_id, rated_partner_id, reuse_rating=True):
        """ This method create (empty) rating objects for the current recordsets
            and send this request by mail (given email template) with the given
            rated_partner_id and partner_id as recipient and sender of the email.
            :param template : the email template to send. The res_model of the
                template must be 'rating.rating'.
            :type template : mail.template
            :param partner_id : the recipient partner
            :type partner_id : res.partner
            :param rated_partner_id : the sender partner
            :type rated_partner_id : res.partner
            :param reuse_rating : if True, the rating of the current objects will
                be reset. Otherwise a new rating will be create
            :type reuse_rating : boolean
        """
        # Both sides need an email address, otherwise the template cannot be sent.
        if not rated_partner_id.email or not partner_id.email:
            return False
        Rating = self.env['rating.rating']
        res_model = self._name
        for record in self:
            res_id = record.id
            values = {
                'res_model': res_model,
                'res_id': res_id,
                'partner_id': partner_id.id,
                'rated_partner_id': rated_partner_id.id
            }
            if reuse_rating:
                # search the existing rating for the given res_model/res_id
                rating = Rating.search([('res_id', '=', res_id), ('res_model', '=', res_model)], limit=1)
                if rating: # reset the rating
                    rating.reset()
                else: # create a new one
                    rating = Rating.create(values)
            else:
                rating = Rating.create(values)
            # send the mail
            template.send_mail(rating.id, force_send=True)

    @api.multi
    def rating_get_repartition(self, add_stats=False, domain=None):
        """ get the repatition of rating grade for the given res_ids.
            :param add_stats : flag to add stat to the result
            :type add_stats : boolean
            :param domain : optional extra domain of the rating to include/exclude in repartition
            :return dictionnary
                if not add_stats, the dict is like
                    - key is the rating value (integer)
                    - value is the number of object (res_model, res_id) having the value
                otherwise, key is the value of the information (string) : either stat name (avg, total, ...) or 'repartition'
                containing the same dict if add_stats was False.
        """
        # Only real grades (rating >= 0); -1 means "not rated yet" and is excluded.
        base_domain = [('res_model', '=', self._name), ('res_id', 'in', self.ids), ('rating', '>=', 0)]
        if domain:
            base_domain += domain
        data = self.env['rating.rating'].read_group(base_domain, ['rating'], ['rating', 'res_id'])
        # init dict with all posible rate value, except -1 (no value for the rating)
        values = dict.fromkeys(range(11), 0)
        values.update((d['rating'], d['rating_count']) for d in data)
        # add other stats
        if add_stats:
            rating_number = sum(values.values())
            result = {
                'repartition': values,
                'avg': sum([float(key*values[key]) for key in values])/rating_number if rating_number > 0 else 0,
                'total': reduce(lambda x, y: y['rating_count']+x, data, 0),  # Python 2 builtin reduce (openerp-era module)
            }
            return result
        return values

    @api.multi
    def rating_get_grades(self, domain=None):
        """ get the repatition of rating grade for the given res_ids.
            :param domain : optional domain of the rating to include/exclude in grades computation
            :return dictionnary where the key is the grade (great, okay, bad), and the value, the number of object (res_model, res_id) having the grade
                the grade are compute as    0-30% : Bad
                                            31-69%: Okay
                                            70-100%: Great
        """
        data = self.rating_get_repartition(domain=domain)
        res = dict.fromkeys(['great', 'okay', 'bad'], 0)
        # Bucket the 0..10 grades: >= 7 great, 4..6 okay, <= 3 bad.
        for key in data:
            if key >= 7:
                res['great'] += data[key]
            elif key > 3:
                res['okay'] += data[key]
            else:
                res['bad'] += data[key]
        return res

    @api.multi
    def rating_get_stats(self, domain=None):
        """ get the statistics of the rating repatition
            :param domain : optional domain of the rating to include/exclude in statistic computation
            :return dictionnary where
                - key is the the name of the information (stat name)
                - value is statistic value : 'percent' contains the repartition in percentage, 'avg' is the average rate
                  and 'total' is the number of rating
        """
        data = self.rating_get_repartition(domain=domain, add_stats=True)
        result = {
            'avg': data['avg'],
            'total': data['total'],
            'percent': dict.fromkeys(range(11), 0),
        }
        # NOTE(review): Python 2 integer division here truncates the percentage
        # to a whole number — presumably intentional; confirm before porting.
        for rate in data['repartition']:
            result['percent'][rate] = (data['repartition'][rate] * 100) / data['total'] if data['total'] > 0 else 0
        return result
#!/usr/bin/env python3
# Copyright 2008 the V8 project authors.
# Copyright 2023 Microsoft Inc.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
from utils import SearchFiles
if __name__ == '__main__':
    try:
        # argv[1] is the base directory; the remaining args are forwarded
        # to SearchFiles verbatim.
        matches = SearchFiles(*sys.argv[2:])
        base_dir = sys.argv[1]
        relative = [os.path.relpath(match, base_dir) for match in matches]
        # Apply the same transform in SearchFiles after relpath
        if sys.platform == 'win32':
            relative = [path.replace('\\', '/') for path in relative]
        print('\n'.join(relative))
    except Exception as e:
        print(str(e))
        sys.exit(1)
import * as ts from "../../_namespaces/ts.js";
import { jsonToReadableText } from "../helpers.js";
describe("unittests:: services:: PreProcessFile:", () => {
// Runs ts.preProcessFile over `sourceText` and asserts the result matches
// `expectedPreProcess` field by field (lib flag, imports, references,
// type/lib directives, and ambient module names).
function test(sourceText: string, readImportFile: boolean, detectJavaScriptImports: boolean, expectedPreProcess: ts.PreProcessedFileInfo): void {
    const actual = ts.preProcessFile(sourceText, readImportFile, detectJavaScriptImports);
    assert.equal(actual.isLibFile, expectedPreProcess.isLibFile, "Pre-processed file has different value for isLibFile. Expected: " + expectedPreProcess.isLibFile + ". Actual: " + actual.isLibFile);
    checkFileReferenceList("Imported files", expectedPreProcess.importedFiles, actual.importedFiles);
    checkFileReferenceList("Referenced files", expectedPreProcess.referencedFiles, actual.referencedFiles);
    checkFileReferenceList("Type reference directives", expectedPreProcess.typeReferenceDirectives, actual.typeReferenceDirectives);
    checkFileReferenceList("Lib reference directives", expectedPreProcess.libReferenceDirectives, actual.libReferenceDirectives);
    assert.deepEqual(actual.ambientExternalModules, expectedPreProcess.ambientExternalModules);
}
// Deep-compares two file-reference lists; reference-identical inputs
// (including both being undefined) pass without further inspection.
function checkFileReferenceList(kind: string, expected: ts.FileReference[], actual: ts.FileReference[]) {
    if (expected !== actual) {
        assert.deepEqual(actual, expected, `Expected [${kind}] ${jsonToReadableText(expected)}, got ${jsonToReadableText(actual)}`);
    }
}
describe("Test preProcessFiles,", () => {
it("Correctly return referenced files from triple slash", () => {
test('///<reference path = "refFile1.ts" />' + "\n" + '///<reference path ="refFile2.ts"/>' + "\n" + '///<reference path="refFile3.ts" />' + "\n" + '///<reference path= "..\\refFile4d.ts" />', /*readImportFile*/ true, /*detectJavaScriptImports*/ false, {
referencedFiles: [{ fileName: "refFile1.ts", pos: 22, end: 33 }, { fileName: "refFile2.ts", pos: 59, end: 70 }, { fileName: "refFile3.ts", pos: 94, end: 105 }, { fileName: "..\\refFile4d.ts", pos: 131, end: 146 }],
importedFiles: [] as ts.FileReference[],
typeReferenceDirectives: [],
libReferenceDirectives: [],
ambientExternalModules: undefined,
isLibFile: false,
});
});
it("Do not return reference path because of invalid triple-slash syntax", () => {
test('///<reference path"refFile1.ts" />' + "\n" + '///<reference path ="refFile2.ts">' + "\n" + '///<referencepath="refFile3.ts" />' + "\n" + '///<reference pat= "refFile4d.ts" />', /*readImportFile*/ true, /*detectJavaScriptImports*/ false, {
referencedFiles: [] as ts.FileReference[],
importedFiles: [] as ts.FileReference[],
typeReferenceDirectives: [],
libReferenceDirectives: [],
ambientExternalModules: undefined,
isLibFile: false,
});
});
it("Do not return reference path of non-imports", () => {
test("Quill.import('delta');", /*readImportFile*/ true, /*detectJavaScriptImports*/ false, {
referencedFiles: [] as ts.FileReference[],
importedFiles: [] as ts.FileReference[],
typeReferenceDirectives: [],
libReferenceDirectives: [],
ambientExternalModules: undefined,
isLibFile: false,
});
});
it("Do not return reference path of nested non-imports", () => {
test("a.b.import('c');", /*readImportFile*/ true, /*detectJavaScriptImports*/ false, {
referencedFiles: [] as ts.FileReference[],
importedFiles: [] as ts.FileReference[],
typeReferenceDirectives: [],
libReferenceDirectives: [],
ambientExternalModules: undefined,
isLibFile: false,
});
});
it("Correctly return imported files", () => {
test('import i1 = require("r1.ts"); import i2 =require("r2.ts"); import i3= require("r3.ts"); import i4=require("r4.ts"); import i5 = require ("r5.ts");', /*readImportFile*/ true, /*detectJavaScriptImports*/ false, {
referencedFiles: [] as ts.FileReference[],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [{ fileName: "r1.ts", pos: 20, end: 25 }, { fileName: "r2.ts", pos: 49, end: 54 }, { fileName: "r3.ts", pos: 78, end: 83 }, { fileName: "r4.ts", pos: 106, end: 111 }, { fileName: "r5.ts", pos: 138, end: 143 }],
ambientExternalModules: undefined,
isLibFile: false,
});
});
it("Do not return imported files if readImportFiles argument is false", () => {
test('import i1 = require("r1.ts"); import i2 =require("r2.ts"); import i3= require("r3.ts"); import i4=require("r4.ts"); import i5 = require ("r5.ts");', /*readImportFile*/ false, /*detectJavaScriptImports*/ false, {
referencedFiles: [] as ts.FileReference[],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [] as ts.FileReference[],
ambientExternalModules: undefined,
isLibFile: false,
});
});
it("Do not return import path because of invalid import syntax", () => {
test('import i1 require("r1.ts"); import = require("r2.ts") import i3= require("r3.ts"); import i5', /*readImportFile*/ true, /*detectJavaScriptImports*/ false, {
referencedFiles: [] as ts.FileReference[],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [{ fileName: "r3.ts", pos: 73, end: 78 }],
ambientExternalModules: undefined,
isLibFile: false,
});
});
it("Correctly return referenced files and import files", () => {
test('///<reference path="refFile1.ts" />' + "\n" + '///<reference path ="refFile2.ts"/>' + "\n" + 'import i1 = require("r1.ts"); import i2 =require("r2.ts");', /*readImportFile*/ true, /*detectJavaScriptImports*/ false, {
referencedFiles: [{ fileName: "refFile1.ts", pos: 20, end: 31 }, { fileName: "refFile2.ts", pos: 57, end: 68 }],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [{ fileName: "r1.ts", pos: 92, end: 97 }, { fileName: "r2.ts", pos: 121, end: 126 }],
ambientExternalModules: undefined,
isLibFile: false,
});
});
it("Correctly return referenced files and import files even with some invalid syntax", () => {
test('///<reference path="refFile1.ts" />' + "\n" + '///<reference path "refFile2.ts"/>' + "\n" + 'import i1 = require("r1.ts"); import = require("r2.ts"); import i2 = require("r3.ts");', /*readImportFile*/ true, /*detectJavaScriptImports*/ false, {
referencedFiles: [{ fileName: "refFile1.ts", pos: 20, end: 31 }],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [{ fileName: "r1.ts", pos: 91, end: 96 }, { fileName: "r3.ts", pos: 148, end: 153 }],
ambientExternalModules: undefined,
isLibFile: false,
});
});
it("Correctly return ES6 imports", () => {
test(
'import * as ns from "m1";' + "\n" +
'import def, * as ns from "m2";' + "\n" +
'import def from "m3";' + "\n" +
'import {a} from "m4";' + "\n" +
'import {a as A} from "m5";' + "\n" +
'import {a as A, b, c as C} from "m6";' + "\n" +
'import def , {a, b, c as C} from "m7";' + "\n",
/*readImportFile*/ true,
/*detectJavaScriptImports*/ false,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "m1", pos: 20, end: 22 },
{ fileName: "m2", pos: 51, end: 53 },
{ fileName: "m3", pos: 73, end: 75 },
{ fileName: "m4", pos: 95, end: 97 },
{ fileName: "m5", pos: 122, end: 124 },
{ fileName: "m6", pos: 160, end: 162 },
{ fileName: "m7", pos: 199, end: 201 },
],
ambientExternalModules: undefined,
isLibFile: false,
},
);
});
it("Correctly ignore commented imports following template expression", () => {
/* eslint-disable no-template-curly-in-string */
test(
"/**" + "\n" +
" * Before" + "\n" +
" * ```" + "\n" +
' * import * as a from "a";' + "\n" +
" * ```" + "\n" +
" */" + "\n" +
"type Foo = `${string}`;" + "\n" +
"/**" + "\n" +
" * After" + "\n" +
" * ```" + "\n" +
' * import { B } from "b";' + "\n" +
' * import * as c from "c";' + "\n" +
" * ```" + "\n" +
" */",
/*readImportFile*/ true,
/*detectJavaScriptImports*/ true,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [],
ambientExternalModules: undefined,
isLibFile: false,
},
);
/* eslint-enable no-template-curly-in-string */
});
it("Ignores imports in template strings", () => {
/* eslint-disable no-template-curly-in-string */
test('a ? `&${a}` : `#${b}`;\n\n `import("${moduleSpecifier}").${id}`;', /*readImportFile*/ true, /*detectJavaScriptImports*/ true, {
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [],
ambientExternalModules: undefined,
isLibFile: false,
});
/* eslint-enable no-template-curly-in-string */
});
it("Correctly returns imports after a template expression", () => {
/* eslint-disable no-template-curly-in-string */
test('`${foo}`; import "./foo";', /*readImportFile*/ true, /*detectJavaScriptImports*/ true, {
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "./foo", pos: 17, end: 22 },
],
ambientExternalModules: undefined,
isLibFile: false,
});
/* eslint-enable no-template-curly-in-string */
});
it("Correctly returns dynamic imports from template expression", () => {
/* eslint-disable no-template-curly-in-string */
test(
"`${(<div>Text `` ${} text {} " + "\n" +
'${import("a")} {import("b")} ' + "\n" +
'${/* A comment */} ${/* import("ignored") */} </div>)}`',
/*readImportFile*/ true,
/*detectJavaScriptImports*/ true,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "a", pos: 39, end: 40 },
{ fileName: "b", pos: 53, end: 54 },
],
ambientExternalModules: undefined,
isLibFile: false,
},
);
/* eslint-enable no-template-curly-in-string */
});
it("Correctly returns dynamic imports from nested template expression", () => {
/* eslint-disable no-template-curly-in-string */
test('`${foo(`${bar(`${import("a")} ${import("b")}`, `${baz(`${import("c") ${import("d")}`)}`)}`)}`', /*readImportFile*/ true, /*detectJavaScriptImports*/ true, {
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "a", pos: 24, end: 25 },
{ fileName: "b", pos: 39, end: 40 },
{ fileName: "c", pos: 64, end: 65 },
{ fileName: "d", pos: 78, end: 79 },
],
ambientExternalModules: undefined,
isLibFile: false,
});
/* eslint-enable no-template-curly-in-string */
});
it("Correctly returns dynamic imports from tagged template expression", () => {
/* eslint-disable no-template-curly-in-string */
test('foo`${ fn({ a: 100 }, import("a"), `${import("b")}`, import("c"), `${import("d")} foo`, import("e")) }`', /*readImportFile*/ true, /*detectJavaScriptImports*/ true, {
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "a", pos: 29, end: 30 },
{ fileName: "b", pos: 45, end: 46 },
{ fileName: "c", pos: 60, end: 61 },
{ fileName: "d", pos: 76, end: 77 },
{ fileName: "e", pos: 95, end: 96 },
],
ambientExternalModules: undefined,
isLibFile: false,
});
/* eslint-enable no-template-curly-in-string */
});
it("Correctly returns dynamic imports from template expression and imports following it", () => {
/* eslint-disable no-template-curly-in-string */
test(
'const x = `hello ${await import("a").default}`;' + "\n\n" +
'import { y } from "b";',
/*readImportFile*/ true,
/*detectJavaScriptImports*/ true,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "a", pos: 32, end: 33 },
{ fileName: "b", pos: 67, end: 68 },
],
ambientExternalModules: undefined,
isLibFile: false,
},
);
/* eslint-enable no-template-curly-in-string */
});
it("Correctly returns dynamic imports from template expressions and other imports", () => {
/* eslint-disable no-template-curly-in-string */
test(
'const x = `x ${await import("a").default}`;' + "\n\n" +
'import { y } from "b";' + "\n" +
'const y = `y ${import("c")}`;' + "\n\n" +
'import { d } from "d";',
/*readImportFile*/ true,
/*detectJavaScriptImports*/ true,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "a", pos: 28, end: 29 },
{ fileName: "b", pos: 63, end: 64 },
{ fileName: "c", pos: 90, end: 91 },
{ fileName: "d", pos: 117, end: 118 },
],
ambientExternalModules: undefined,
isLibFile: false,
},
);
/* eslint-enable no-template-curly-in-string */
});
it("Correctly returns empty importedFiles with incorrect template expression", () => {
test("const foo = `${", /*readImportFile*/ true, /*detectJavaScriptImports*/ true, {
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [],
ambientExternalModules: undefined,
isLibFile: false,
});
});
it("Correctly return ES6 exports", () => {
test(
'export * from "m1";' + "\n" +
'export {a} from "m2";' + "\n" +
'export {a as A} from "m3";' + "\n" +
'export {a as A, b, c as C} from "m4";' + "\n",
/*readImportFile*/ true,
/*detectJavaScriptImports*/ false,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "m1", pos: 14, end: 16 },
{ fileName: "m2", pos: 36, end: 38 },
{ fileName: "m3", pos: 63, end: 65 },
{ fileName: "m4", pos: 101, end: 103 },
],
ambientExternalModules: undefined,
isLibFile: false,
},
);
});
it("Correctly handles import types", () => {
test(
'import type * as ns from "m1";' + "\n" +
'import type def, * as ns from "m2";' + "\n" +
'import type def from "m3";' + "\n" +
'import type {a} from "m4";' + "\n" +
'import type {a as A} from "m5";' + "\n" +
'import type {a as A, b, c as C} from "m6";' + "\n" +
'import type def , {a, b, c as C} from "m7";' + "\n" +
'import type from "m8";' + "\n" +
'import type T = require("m9");' + "\n" +
'import type = require("m10");' + "\n" +
'export import type T = require("m11");' + "\n" +
'export import type = require("m12");' + "\n",
/*readImportFile*/ true,
/*detectJavaScriptImports*/ false,
{
referencedFiles: [] as ts.FileReference[],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "m1", pos: 25, end: 27 },
{ fileName: "m2", pos: 61, end: 63 },
{ fileName: "m3", pos: 88, end: 90 },
{ fileName: "m4", pos: 115, end: 117 },
{ fileName: "m5", pos: 147, end: 149 },
{ fileName: "m6", pos: 190, end: 192 },
{ fileName: "m7", pos: 234, end: 236 },
{ fileName: "m8", pos: 257, end: 259 },
{ fileName: "m9", pos: 287, end: 289 },
{ fileName: "m10", pos: 316, end: 319 },
{ fileName: "m11", pos: 355, end: 358 },
{ fileName: "m12", pos: 392, end: 395 },
],
ambientExternalModules: undefined,
isLibFile: false,
},
);
});
it("Correctly handles export types", () => {
test(
'export type * from "m1";' + "\n" +
'export type {a} from "m2";' + "\n" +
'export type {a as A} from "m3";' + "\n" +
'export type {a as A, b, c as C} from "m4";' + "\n",
/*readImportFile*/ true,
/*detectJavaScriptImports*/ false,
{
referencedFiles: [] as ts.FileReference[],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "m1", pos: 19, end: 21 },
{ fileName: "m2", pos: 46, end: 48 },
{ fileName: "m3", pos: 78, end: 80 },
{ fileName: "m4", pos: 121, end: 123 },
],
ambientExternalModules: undefined,
isLibFile: false,
},
);
});
it("Correctly handles import type node", () => {
test(
'const x: import("m1") = { x: 0, y: 0 };' + "\n" +
'let y: import("m2").Bar.I = { a: "", b: 0 };' + "\n" +
'let shim: typeof import("m3") = { Bar: Bar2 };' + "\n",
/*readImportFile*/ true,
/*detectJavaScriptImports*/ false,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "m1", pos: 16, end: 18 },
{ fileName: "m2", pos: 54, end: 56 },
{ fileName: "m3", pos: 109, end: 111 },
],
ambientExternalModules: undefined,
isLibFile: false,
},
);
});
it("Correctly return ambient external modules", () => {
test(
`
declare module A {}
declare module "B" {}
function foo() {
}
`,
/*readImportFile*/ true,
/*detectJavaScriptImports*/ false,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [],
ambientExternalModules: ["B"],
isLibFile: false,
},
);
});
it("Correctly handles export import declarations", () => {
test('export import a = require("m1");', /*readImportFile*/ true, /*detectJavaScriptImports*/ false, {
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "m1", pos: 26, end: 28 },
],
ambientExternalModules: undefined,
isLibFile: false,
});
});
it("Correctly handles export require calls in JavaScript files", () => {
test(
`
export import a = require("m1");
var x = require('m2');
foo(require('m3'));
var z = { f: require('m4') }
`,
/*readImportFile*/ true,
/*detectJavaScriptImports*/ true,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "m1", pos: 39, end: 41 },
{ fileName: "m2", pos: 74, end: 76 },
{ fileName: "m3", pos: 105, end: 107 },
{ fileName: "m4", pos: 146, end: 148 },
],
ambientExternalModules: undefined,
isLibFile: false,
},
);
});
it("Correctly handles dependency lists in define([deplist]) calls in JavaScript files", () => {
test(
`
define(["mod1", "mod2"], (m1, m2) => {
});
`,
/*readImportFile*/ true,
/*detectJavaScriptImports*/ true,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "mod1", pos: 21, end: 25 },
{ fileName: "mod2", pos: 29, end: 33 },
],
ambientExternalModules: undefined,
isLibFile: false,
},
);
});
it("Correctly handles dependency lists in define(modName, [deplist]) calls in JavaScript files", () => {
test(
`
define("mod", ["mod1", "mod2"], (m1, m2) => {
});
`,
/*readImportFile*/ true,
/*detectJavaScriptImports*/ true,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "mod1", pos: 28, end: 32 },
{ fileName: "mod2", pos: 36, end: 40 },
],
ambientExternalModules: undefined,
isLibFile: false,
},
);
});
it("correctly handles augmentations in external modules - 1", () => {
test(
`
declare module "../Observable" {
interface I {}
}
export {}
`,
/*readImportFile*/ true,
/*detectJavaScriptImports*/ false,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "../Observable", pos: 28, end: 41 },
],
ambientExternalModules: undefined,
isLibFile: false,
},
);
});
it("correctly handles augmentations in external modules - 2", () => {
test(
`
declare module "../Observable" {
interface I {}
}
import * as x from "m";
`,
/*readImportFile*/ true,
/*detectJavaScriptImports*/ false,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "m", pos: 123, end: 124 },
{ fileName: "../Observable", pos: 28, end: 41 },
],
ambientExternalModules: undefined,
isLibFile: false,
},
);
});
it("correctly handles augmentations in external modules - 3", () => {
test(
`
declare module "../Observable" {
interface I {}
}
import m = require("m");
`,
/*readImportFile*/ true,
/*detectJavaScriptImports*/ false,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "m", pos: 123, end: 124 },
{ fileName: "../Observable", pos: 28, end: 41 },
],
ambientExternalModules: undefined,
isLibFile: false,
},
);
});
it("correctly handles augmentations in external modules - 4", () => {
test(
`
declare module "../Observable" {
interface I {}
}
namespace N {}
export = N;
`,
/*readImportFile*/ true,
/*detectJavaScriptImports*/ false,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "../Observable", pos: 28, end: 41 },
],
ambientExternalModules: undefined,
isLibFile: false,
},
);
});
it("correctly handles augmentations in external modules - 5", () => {
test(
`
declare module "../Observable" {
interface I {}
}
namespace N {}
export import IN = N;
`,
/*readImportFile*/ true,
/*detectJavaScriptImports*/ false,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "../Observable", pos: 28, end: 41 },
],
ambientExternalModules: undefined,
isLibFile: false,
},
);
});
it("correctly handles augmentations in external modules - 6", () => {
test(
`
declare module "../Observable" {
interface I {}
}
export let x = 1;
`,
/*readImportFile*/ true,
/*detectJavaScriptImports*/ false,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "../Observable", pos: 28, end: 41 },
],
ambientExternalModules: undefined,
isLibFile: false,
},
);
});
it("correctly handles augmentations in ambient external modules - 1", () => {
test(
`
declare module "m1" {
export * from "m2";
declare module "augmentation" {
interface I {}
}
}
`,
/*readImportFile*/ true,
/*detectJavaScriptImports*/ false,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "m2", pos: 65, end: 67 },
{ fileName: "augmentation", pos: 102, end: 114 },
],
ambientExternalModules: ["m1"],
isLibFile: false,
},
);
});
it("correctly handles augmentations in ambient external modules - 2", () => {
test(
`
namespace M { var x; }
import IM = M;
declare module "m1" {
export * from "m2";
declare module "augmentation" {
interface I {}
}
}
`,
/*readImportFile*/ true,
/*detectJavaScriptImports*/ false,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "m2", pos: 127, end: 129 },
{ fileName: "augmentation", pos: 164, end: 176 },
],
ambientExternalModules: ["m1"],
isLibFile: false,
},
);
});
it("correctly recognizes type reference directives", () => {
test(
`
/// <reference path="a"/>
/// <reference types="a1"/>
/// <reference path="a2"/>
/// <reference types="a3"/>
`,
/*readImportFile*/ true,
/*detectJavaScriptImports*/ false,
{
referencedFiles: [
{ pos: 34, end: 35, fileName: "a" },
{ pos: 112, end: 114, fileName: "a2" },
],
typeReferenceDirectives: [
{ pos: 73, end: 75, fileName: "a1" },
{ pos: 152, end: 154, fileName: "a3" },
],
libReferenceDirectives: [],
importedFiles: [],
ambientExternalModules: undefined,
isLibFile: false,
},
);
});
it("correctly recognizes lib reference directives", () => {
test(
`
/// <reference path="a"/>
/// <reference lib="a1"/>
/// <reference path="a2"/>
/// <reference lib="a3"/>
`,
/*readImportFile*/ true,
/*detectJavaScriptImports*/ false,
{
referencedFiles: [
{ pos: 34, end: 35, fileName: "a" },
{ pos: 110, end: 112, fileName: "a2" },
],
typeReferenceDirectives: [],
libReferenceDirectives: [
{ pos: 71, end: 73, fileName: "a1" },
{ pos: 148, end: 150, fileName: "a3" },
],
importedFiles: [],
ambientExternalModules: undefined,
isLibFile: false,
},
);
});
it("Correctly handles dynamic imports with template literals", () => {
test(
"const m1 = import('mod1');" + "\n" +
"const m2 = import(`mod2`);" + "\n" +
"Promise.all([import('mod3'), import(`mod4`)]);" + "\n" +
"import(/* webpackChunkName: 'module5' */ `mod5`);" + "\n",
/*readImportFile*/ true,
/*detectJavaScriptImports*/ false,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "mod1", pos: 18, end: 22 },
{ fileName: "mod2", pos: 45, end: 49 },
{ fileName: "mod3", pos: 74, end: 78 },
{ fileName: "mod4", pos: 90, end: 94 },
{ fileName: "mod5", pos: 142, end: 146 },
],
ambientExternalModules: undefined,
isLibFile: false,
},
);
});
it("Correctly handles require calls with template literals in JS files", () => {
test(
"const m1 = require(`mod1`);" + "\n" +
"f(require(`mod2`));" + "\n" +
"const a = { x: require(`mod3`) };" + "\n",
/*readImportFile*/ true,
/*detectJavaScriptImports*/ true,
{
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "mod1", pos: 19, end: 23 },
{ fileName: "mod2", pos: 38, end: 42 },
{ fileName: "mod3", pos: 71, end: 75 },
],
ambientExternalModules: undefined,
isLibFile: false,
},
);
});
it("Correctly handles dependency lists in define(modName, [deplist]) calls with template literals in JS files", () => {
test("define(`mod`, [`mod1`, `mod2`], (m1, m2) => {});", /*readImportFile*/ true, /*detectJavaScriptImports*/ true, {
referencedFiles: [],
typeReferenceDirectives: [],
libReferenceDirectives: [],
importedFiles: [
{ fileName: "mod1", pos: 15, end: 19 },
{ fileName: "mod2", pos: 23, end: 27 },
],
ambientExternalModules: undefined,
isLibFile: false,
});
});
});
}); | typescript | github | https://github.com/microsoft/TypeScript | src/testRunner/unittests/services/preProcessFile.ts |
# frozen_string_literal: true
require "active_support/log_subscriber"
module ActiveJob
class LogSubscriber < ActiveSupport::EventReporter::LogSubscriber # :nodoc:
class_attribute :backtrace_cleaner, default: ActiveSupport::BacktraceCleaner.new
self.namespace = "active_job"
def enqueued(event)
payload = event[:payload]
if payload[:exception_class]
error do
"Failed enqueuing #{payload[:job_class]} to #{queue_name(event)}: #{payload[:exception_class]} (#{payload[:exception_message]})"
end
elsif payload[:aborted]
info do
"Failed enqueuing #{payload[:job_class]} to #{queue_name(event)}, a before_enqueue callback halted the enqueuing execution."
end
else
info do
"Enqueued #{payload[:job_class]} (Job ID: #{payload[:job_id]}) to #{queue_name(event)}" + args_info(event)
end
end
end
event_log_level :enqueued, :info
def enqueued_at(event)
payload = event[:payload]
if payload[:exception_class]
error do
"Failed enqueuing #{payload[:job_class]} to #{queue_name(event)}: #{payload[:exception_class]} (#{payload[:exception_message]})"
end
elsif payload[:aborted]
info do
"Failed enqueuing #{payload[:job_class]} to #{queue_name(event)}, a before_enqueue callback halted the enqueuing execution."
end
else
info do
"Enqueued #{payload[:job_class]} (Job ID: #{payload[:job_id]}) to #{queue_name(event)} at #{event[:payload][:scheduled_at]}" + args_info(event)
end
end
end
event_log_level :enqueued_at, :info
def bulk_enqueued(event)
payload = event[:payload]
info do
if payload[:enqueued_count] == payload[:job_count]
enqueued_jobs_message(event)
elsif payload[:enqueued_count] > 0
if payload[:failed_enqueue_count] == 0
enqueued_jobs_message(event)
else
"#{enqueued_jobs_message(event)}. "\
"Failed enqueuing #{payload[:failed_enqueue_count]} #{'job'.pluralize(payload[:failed_enqueue_count])}"
end
else
"Failed enqueuing #{payload[:failed_enqueue_count]} #{'job'.pluralize(payload[:failed_enqueue_count])} "\
"to #{payload[:adapter]}"
end
end
end
event_log_level :bulk_enqueued, :info
def started(event)
payload = event[:payload]
info do
enqueue_info = payload[:enqueued_at].present? ? " enqueued at #{payload[:enqueued_at]}" : ""
"Performing #{payload[:job_class]} (Job ID: #{payload[:job_id]}) from #{queue_name(event)}" + enqueue_info + args_info(event)
end
end
event_log_level :started, :info
def completed(event)
payload = event[:payload]
if payload[:exception_class]
cleaned_backtrace = backtrace_cleaner.clean(payload[:exception_backtrace])
error do
"Error performing #{payload[:job_class]} (Job ID: #{payload[:job_id]}) from #{queue_name(event)} in #{payload[:duration]}ms: #{payload[:exception_class]} (#{payload[:exception_message]}):\n" + Array(cleaned_backtrace).join("\n")
end
elsif payload[:aborted]
error do
"Error performing #{payload[:job_class]} (Job ID: #{payload[:job_id]}) from #{queue_name(event)} in #{payload[:duration]}ms: a before_perform callback halted the job execution"
end
else
info do
"Performed #{payload[:job_class]} (Job ID: #{payload[:job_id]}) from #{queue_name(event)} in #{payload[:duration]}ms"
end
end
end
event_log_level :completed, :info
def retry_scheduled(event)
payload = event[:payload]
info do
if payload[:exception_class]
"Retrying #{payload[:job_class]} (Job ID: #{payload[:job_id]}) after #{payload[:executions]} attempts in #{payload[:wait_seconds]} seconds, due to a #{payload[:exception_class]} (#{payload[:exception_message]})."
else
"Retrying #{payload[:job_class]} (Job ID: #{payload[:job_id]}) after #{payload[:executions]} attempts in #{payload[:wait_seconds]} seconds."
end
end
end
event_log_level :retry_scheduled, :info
def retry_stopped(event)
payload = event[:payload]
error do
"Stopped retrying #{payload[:job_class]} (Job ID: #{payload[:job_id]}) due to a #{payload[:exception_class]} (#{payload[:exception_message]}), which reoccurred on #{payload[:executions]} attempts."
end
end
event_log_level :retry_stopped, :error
def discarded(event)
payload = event[:payload]
error do
"Discarded #{payload[:job_class]} (Job ID: #{payload[:job_id]}) due to a #{payload[:exception_class]} (#{payload[:exception_message]})."
end
end
event_log_level :discarded, :error
def interrupt(event)
payload = event[:payload]
info do
"Interrupted #{payload[:job_class]} (Job ID: #{payload[:job_id]}) #{payload[:description]} (#{payload[:reason]})"
end
end
event_log_level :interrupt, :info
def resume(event)
payload = event[:payload]
info do
"Resuming #{payload[:job_class]} (Job ID: #{payload[:job_id]}) #{payload[:description]}"
end
end
event_log_level :resume, :info
def step_skipped(event)
payload = event[:payload]
info do
"Step '#{payload[:step]}' skipped #{payload[:job_class]}"
end
end
event_log_level :step_skipped, :info
def step_started(event)
payload = event[:payload]
info do
if payload[:resumed]
"Step '#{payload[:step]}' resumed from cursor '#{payload[:cursor]}' for #{payload[:job_class]} (Job ID: #{payload[:job_id]})"
else
"Step '#{payload[:step]}' started for #{payload[:job_class]} (Job ID: #{payload[:job_id]})"
end
end
end
event_log_level :step_started, :info
def step(event)
payload = event[:payload]
if payload[:interrupted]
info do
"Step '#{payload[:step]}' interrupted at cursor '#{payload[:cursor]}' for #{payload[:job_class]} (Job ID: #{payload[:job_id]}) in #{payload[:duration]}ms"
end
elsif payload[:exception_class]
error do
"Error during step '#{payload[:step]}' at cursor '#{payload[:cursor]}' for #{payload[:job_class]} (Job ID: #{payload[:job_id]}) in #{payload[:duration]}ms: #{payload[:exception_class]} (#{payload[:exception_message]})"
end
else
info do
"Step '#{payload[:step]}' completed for #{payload[:job_class]} (Job ID: #{payload[:job_id]}) in #{payload[:duration]}ms"
end
end
end
event_log_level :step, :error
def self.default_logger
ActiveJob::Base.logger
end
private
def queue_name(event)
adapter, queue = event[:payload].values_at(:adapter, :queue)
"#{adapter}(#{queue})"
end
def args_info(event)
if (arguments = event[:payload][:arguments])
" with arguments: " +
arguments.map { |arg| format(arg).inspect }.join(", ")
else
""
end
end
def format(arg)
case arg
when Hash
arg.transform_values { |value| format(value) }
when Array
arg.map { |value| format(value) }
when GlobalID::Identification
arg.to_global_id rescue arg
else
arg
end
end
def info(progname = nil, &block)
return unless super
if ActiveJob.verbose_enqueue_logs
log_enqueue_source
end
end
def error(progname = nil, &block)
return unless super
if ActiveJob.verbose_enqueue_logs
log_enqueue_source
end
end
def log_enqueue_source
source = enqueue_source_location
if source
logger.info("↳ #{source}")
end
end
def enqueue_source_location
backtrace_cleaner.first_clean_frame
end
def enqueued_jobs_message(event)
payload = event[:payload]
enqueued_count = payload[:enqueued_count]
job_classes_counts = payload[:enqueued_classes].sort_by { |_k, v| -v }
"Enqueued #{enqueued_count} #{'job'.pluralize(enqueued_count)} to #{payload[:adapter]}"\
" (#{job_classes_counts.map { |klass, count| "#{count} #{klass}" }.join(', ')})"
end
end
end
ActiveSupport.event_reporter.subscribe(
ActiveJob::LogSubscriber.new, &ActiveJob::LogSubscriber.subscription_filter
) | ruby | github | https://github.com/rails/rails | activejob/lib/active_job/log_subscriber.rb |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffxparser(RPackage):
"""Package for parsing Affymetrix files (CDF, CEL, CHP, BPMAP, BAR).
It provides methods for fast and memory efficient parsing of
Affymetrix files using the Affymetrix' Fusion SDK. Both ASCII-
and binary-based files are supported. Currently, there are methods
for reading chip definition file (CDF) and a cell intensity file (CEL).
These files can be read either in full or in part. For example,
probe signals from a few probesets can be extracted very quickly
from a set of CEL files into a convenient list structure."""
homepage = "https://www.bioconductor.org/packages/affxparser/"
git = "https://git.bioconductor.org/packages/affxparser.git"
version('1.48.0', commit='2461ea88f310b59c4a9a997a4b3dadedbd65a4aa')
depends_on('r@3.4.0:3.4.9', when='@1.48.0') | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from django.conf import settings
from django.test import TestCase
from zerver.lib import bugdown
from zerver.lib.actions import (
check_add_realm_emoji,
do_remove_realm_emoji,
do_set_alert_words,
get_realm,
)
from zerver.lib.camo import get_camo_url
from zerver.lib.request import (
JsonableError,
)
from zerver.lib.test_helpers import (
ZulipTestCase,
)
from zerver.models import (
get_client,
get_user_profile_by_email,
Message,
RealmFilter,
Recipient,
)
import mock
import os
import ujson
import six
class FencedBlockPreprocessorTest(TestCase):
def test_simple_quoting(self):
processor = bugdown.fenced_code.FencedBlockPreprocessor(None)
markdown = [
'~~~ quote',
'hi',
'bye',
'',
''
]
expected = [
'',
'> hi',
'> bye',
'',
'',
''
]
lines = processor.run(markdown)
self.assertEqual(lines, expected)
def test_serial_quoting(self):
processor = bugdown.fenced_code.FencedBlockPreprocessor(None)
markdown = [
'~~~ quote',
'hi',
'~~~',
'',
'~~~ quote',
'bye',
'',
''
]
expected = [
'',
'> hi',
'',
'',
'',
'> bye',
'',
'',
''
]
lines = processor.run(markdown)
self.assertEqual(lines, expected)
def test_serial_code(self):
processor = bugdown.fenced_code.FencedBlockPreprocessor(None)
# Simulate code formatting.
processor.format_code = lambda lang, code: lang + ':' + code
processor.placeholder = lambda s: '**' + s.strip('\n') + '**'
markdown = [
'``` .py',
'hello()',
'```',
'',
'``` .py',
'goodbye()',
'```',
'',
''
]
expected = [
'',
'**py:hello()**',
'',
'',
'',
'**py:goodbye()**',
'',
'',
''
]
lines = processor.run(markdown)
self.assertEqual(lines, expected)
def test_nested_code(self):
processor = bugdown.fenced_code.FencedBlockPreprocessor(None)
# Simulate code formatting.
processor.format_code = lambda lang, code: lang + ':' + code
processor.placeholder = lambda s: '**' + s.strip('\n') + '**'
markdown = [
'~~~ quote',
'hi',
'``` .py',
'hello()',
'```',
'',
''
]
expected = [
'',
'> hi',
'',
'> **py:hello()**',
'',
'',
''
]
lines = processor.run(markdown)
self.assertEqual(lines, expected)
def bugdown_convert(text):
return bugdown.convert(text, "zulip.com")
class BugdownTest(TestCase):
def common_bugdown_test(self, text, expected):
converted = bugdown_convert(text)
self.assertEqual(converted, expected)
def load_bugdown_tests(self):
test_fixtures = {}
data_file = open(os.path.join(os.path.dirname(__file__), '../fixtures/bugdown-data.json'), 'r')
data = ujson.loads('\n'.join(data_file.readlines()))
for test in data['regular_tests']:
test_fixtures[test['name']] = test
return test_fixtures, data['linkify_tests']
def test_bugdown_fixtures(self):
format_tests, linkify_tests = self.load_bugdown_tests()
self.maxDiff = None
for name, test in six.iteritems(format_tests):
converted = bugdown_convert(test['input'])
print("Running Bugdown test %s" % (name,))
self.assertEqual(converted, test['expected_output'])
def replaced(payload, url, phrase=''):
target = " target=\"_blank\""
if url[:4] == 'http':
href = url
elif '@' in url:
href = 'mailto:' + url
target = ""
else:
href = 'http://' + url
return payload % ("<a href=\"%s\"%s title=\"%s\">%s</a>" % (href, target, href, url),)
print("Running Bugdown Linkify tests")
self.maxDiff = None
for inline_url, reference, url in linkify_tests:
try:
match = replaced(reference, url, phrase=inline_url)
except TypeError:
match = reference
converted = bugdown_convert(inline_url)
self.assertEqual(match, converted)
def test_inline_youtube(self):
msg = 'Check out the debate: http://www.youtube.com/watch?v=hx1mjT73xYE'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>Check out the debate: <a href="http://www.youtube.com/watch?v=hx1mjT73xYE" target="_blank" title="http://www.youtube.com/watch?v=hx1mjT73xYE">http://www.youtube.com/watch?v=hx1mjT73xYE</a></p>\n<div class="message_inline_image"><a href="http://www.youtube.com/watch?v=hx1mjT73xYE" target="_blank" title="http://www.youtube.com/watch?v=hx1mjT73xYE"><img src="https://i.ytimg.com/vi/hx1mjT73xYE/default.jpg"></a></div>')
def test_inline_dropbox(self):
msg = 'Look at how hilarious our old office was: https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG'
image_info = {'image': 'https://photos-4.dropbox.com/t/2/AABIre1oReJgPYuc_53iv0IHq1vUzRaDg2rrCfTpiWMccQ/12/129/jpeg/1024x1024/2/_/0/4/IMG_0923.JPG/CIEBIAEgAiAHKAIoBw/ymdijjcg67hv2ta/AABz2uuED1ox3vpWWvMpBxu6a/IMG_0923.JPG', 'desc': 'Shared with Dropbox', 'title': 'IMG_0923.JPG'}
with mock.patch('zerver.lib.bugdown.fetch_open_graph_image', return_value=image_info):
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>Look at how hilarious our old office was: <a href="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG" target="_blank" title="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG">https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG" target="_blank" title="IMG_0923.JPG"><img src="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG?dl=1"></a></div>')
msg = 'Look at my hilarious drawing folder: https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl='
image_info = {'image': 'https://cf.dropboxstatic.com/static/images/icons128/folder_dropbox.png', 'desc': 'Shared with Dropbox', 'title': 'Saves'}
with mock.patch('zerver.lib.bugdown.fetch_open_graph_image', return_value=image_info):
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>Look at my hilarious drawing folder: <a href="https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=" target="_blank" title="https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=">https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=</a></p>\n<div class="message_inline_ref"><a href="https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=" target="_blank" title="Saves"><img src="https://cf.dropboxstatic.com/static/images/icons128/folder_dropbox.png"></a><div><div class="message_inline_image_title">Saves</div><desc class="message_inline_image_desc"></desc></div></div>')
def test_inline_dropbox_preview(self):
# Test photo album previews
msg = 'https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5'
image_info = {'image': 'https://photos-6.dropbox.com/t/2/AAAlawaeD61TyNewO5vVi-DGf2ZeuayfyHFdNTNzpGq-QA/12/271544745/jpeg/1024x1024/2/_/0/5/baby-piglet.jpg/CKnjvYEBIAIgBygCKAc/tditp9nitko60n5/AADX03VAIrQlTl28CtujDcMla/0', 'desc': 'Shared with Dropbox', 'title': '1 photo'}
with mock.patch('zerver.lib.bugdown.fetch_open_graph_image', return_value=image_info):
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p><a href="https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5" target="_blank" title="https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5">https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5" target="_blank" title="1 photo"><img src="https://photos-6.dropbox.com/t/2/AAAlawaeD61TyNewO5vVi-DGf2ZeuayfyHFdNTNzpGq-QA/12/271544745/jpeg/1024x1024/2/_/0/5/baby-piglet.jpg/CKnjvYEBIAIgBygCKAc/tditp9nitko60n5/AADX03VAIrQlTl28CtujDcMla/0"></a></div>')
def test_inline_dropbox_negative(self):
# Make sure we're not overzealous in our conversion:
msg = 'Look at the new dropbox logo: https://www.dropbox.com/static/images/home_logo.png'
with mock.patch('zerver.lib.bugdown.fetch_open_graph_image', return_value=None):
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>Look at the new dropbox logo: <a href="https://www.dropbox.com/static/images/home_logo.png" target="_blank" title="https://www.dropbox.com/static/images/home_logo.png">https://www.dropbox.com/static/images/home_logo.png</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/static/images/home_logo.png" target="_blank" title="https://www.dropbox.com/static/images/home_logo.png"><img src="https://www.dropbox.com/static/images/home_logo.png"></a></div>')
def test_inline_dropbox_bad(self):
# Don't fail on bad dropbox links
msg = "https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM"
with mock.patch('zerver.lib.bugdown.fetch_open_graph_image', return_value=None):
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p><a href="https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM" target="_blank" title="https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM">https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM</a></p>')
def test_twitter_id_extraction(self):
self.assertEqual(bugdown.get_tweet_id('http://twitter.com/#!/VizzQuotes/status/409030735191097344'), '409030735191097344')
self.assertEqual(bugdown.get_tweet_id('http://twitter.com/VizzQuotes/status/409030735191097344'), '409030735191097344')
self.assertEqual(bugdown.get_tweet_id('http://twitter.com/VizzQuotes/statuses/409030735191097344'), '409030735191097344')
self.assertEqual(bugdown.get_tweet_id('https://twitter.com/wdaher/status/1017581858'), '1017581858')
self.assertEqual(bugdown.get_tweet_id('https://twitter.com/wdaher/status/1017581858/'), '1017581858')
self.assertEqual(bugdown.get_tweet_id('https://twitter.com/windyoona/status/410766290349879296/photo/1'), '410766290349879296')
self.assertEqual(bugdown.get_tweet_id('https://twitter.com/windyoona/status/410766290349879296/'), '410766290349879296')
def test_inline_interesting_links(self):
def make_link(url):
return '<a href="%s" target="_blank" title="%s">%s</a>' % (url, url, url)
normal_tweet_html = ('<a href="https://twitter.com/twitter" target="_blank"'
' title="https://twitter.com/twitter">@twitter</a> '
'meets @seepicturely at #tcdisrupt cc.'
'<a href="https://twitter.com/boscomonkey" target="_blank"'
' title="https://twitter.com/boscomonkey">@boscomonkey</a> '
'<a href="https://twitter.com/episod" target="_blank"'
' title="https://twitter.com/episod">@episod</a> '
'<a href="http://t.co/6J2EgYM" target="_blank"'
' title="http://t.co/6J2EgYM">http://instagram.com/p/MuW67/</a>')
mention_in_link_tweet_html = """<a href="http://t.co/@foo" target="_blank" title="http://t.co/@foo">http://foo.com</a>"""
media_tweet_html = ('<a href="http://t.co/xo7pAhK6n3" target="_blank" title="http://t.co/xo7pAhK6n3">'
'http://twitter.com/NEVNBoston/status/421654515616849920/photo/1</a>')
def make_inline_twitter_preview(url, tweet_html, image_html=''):
## As of right now, all previews are mocked to be the exact same tweet
return ('<div class="inline-preview-twitter">'
'<div class="twitter-tweet">'
'<a href="%s" target="_blank">'
'<img class="twitter-avatar"'
' src="https://si0.twimg.com/profile_images/1380912173/Screen_shot_2011-06-03_at_7.35.36_PM_normal.png">'
'</a>'
'<p>%s</p>'
'<span>- Eoin McMillan (@imeoin)</span>'
'%s'
'</div>'
'</div>') % (url, tweet_html, image_html)
msg = 'http://www.twitter.com'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>%s</p>' % make_link('http://www.twitter.com'))
msg = 'http://www.twitter.com/wdaher/'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>%s</p>' % make_link('http://www.twitter.com/wdaher/'))
msg = 'http://www.twitter.com/wdaher/status/3'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>%s</p>' % make_link('http://www.twitter.com/wdaher/status/3'))
# id too long
msg = 'http://www.twitter.com/wdaher/status/2879779692873154569'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>%s</p>' % make_link('http://www.twitter.com/wdaher/status/2879779692873154569'))
# id too large (i.e. tweet doesn't exist)
msg = 'http://www.twitter.com/wdaher/status/999999999999999999'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>%s</p>' % make_link('http://www.twitter.com/wdaher/status/999999999999999999'))
msg = 'http://www.twitter.com/wdaher/status/287977969287315456'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>%s</p>\n%s' % (
make_link('http://www.twitter.com/wdaher/status/287977969287315456'),
make_inline_twitter_preview('http://www.twitter.com/wdaher/status/287977969287315456', normal_tweet_html)))
msg = 'https://www.twitter.com/wdaher/status/287977969287315456'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>%s</p>\n%s' % (
make_link('https://www.twitter.com/wdaher/status/287977969287315456'),
make_inline_twitter_preview('https://www.twitter.com/wdaher/status/287977969287315456', normal_tweet_html)))
msg = 'http://twitter.com/wdaher/status/287977969287315456'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>%s</p>\n%s' % (
make_link('http://twitter.com/wdaher/status/287977969287315456'),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315456', normal_tweet_html)))
# A max of 3 will be converted
msg = ('http://twitter.com/wdaher/status/287977969287315456 '
'http://twitter.com/wdaher/status/287977969287315457 '
'http://twitter.com/wdaher/status/287977969287315457 '
'http://twitter.com/wdaher/status/287977969287315457')
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>%s %s %s %s</p>\n%s%s%s' % (
make_link('http://twitter.com/wdaher/status/287977969287315456'),
make_link('http://twitter.com/wdaher/status/287977969287315457'),
make_link('http://twitter.com/wdaher/status/287977969287315457'),
make_link('http://twitter.com/wdaher/status/287977969287315457'),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315456', normal_tweet_html),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315457', normal_tweet_html),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315457', normal_tweet_html)))
# Tweet has a mention in a URL, only the URL is linked
msg = 'http://twitter.com/wdaher/status/287977969287315458'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>%s</p>\n%s' % (
make_link('http://twitter.com/wdaher/status/287977969287315458'),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315458', mention_in_link_tweet_html)))
# Tweet with an image
msg = 'http://twitter.com/wdaher/status/287977969287315459'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>%s</p>\n%s' % (
make_link('http://twitter.com/wdaher/status/287977969287315459'),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315459',
media_tweet_html,
('<div class="twitter-image">'
'<a href="http://t.co/xo7pAhK6n3" target="_blank" title="http://t.co/xo7pAhK6n3">'
'<img src="https://pbs.twimg.com/media/BdoEjD4IEAIq86Z.jpg:small">'
'</a>'
'</div>'))))
def test_fetch_tweet_data_settings_validation(self):
with self.settings(TEST_SUITE=False, TWITTER_CONSUMER_KEY=None):
self.assertIs(None, bugdown.fetch_tweet_data('287977969287315459'))
def test_realm_emoji(self):
def emoji_img(name, url):
return '<img alt="%s" class="emoji" src="%s" title="%s">' % (name, get_camo_url(url), name)
zulip_realm = get_realm('zulip.com')
url = "https://zulip.com/test_realm_emoji.png"
check_add_realm_emoji(zulip_realm, "test", url)
# Needs to mock an actual message because that's how bugdown obtains the realm
msg = Message(sender=get_user_profile_by_email("hamlet@zulip.com"))
converted = bugdown.convert(":test:", "zulip.com", msg)
self.assertEqual(converted, '<p>%s</p>' %(emoji_img(':test:', url)))
do_remove_realm_emoji(zulip_realm, 'test')
converted = bugdown.convert(":test:", "zulip.com", msg)
self.assertEqual(converted, '<p>:test:</p>')
def test_unicode_emoji(self):
msg = u'\u2615' # ☕
converted = bugdown_convert(msg)
self.assertEqual(converted, u'<p><img alt="\u2615" class="emoji" src="/static/third/gemoji/images/emoji/unicode/2615.png" title="\u2615"></p>')
msg = u'\u2615\u2615' # ☕☕
converted = bugdown_convert(msg)
self.assertEqual(converted, u'<p><img alt="\u2615" class="emoji" src="/static/third/gemoji/images/emoji/unicode/2615.png" title="\u2615"><img alt="\u2615" class="emoji" src="/static/third/gemoji/images/emoji/unicode/2615.png" title="\u2615"></p>')
def test_realm_patterns(self):
realm = get_realm('zulip.com')
url_format_string = r"https://trac.zulip.net/ticket/%(id)s"
realm_filter = RealmFilter(realm=realm,
pattern=r"#(?P<id>[0-9]{2,8})",
url_format_string=url_format_string)
realm_filter.save()
self.assertEqual(
str(realm_filter),
'<RealmFilter(zulip.com): #(?P<id>[0-9]{2,8})'
' https://trac.zulip.net/ticket/%(id)s>')
msg = Message(sender=get_user_profile_by_email("othello@zulip.com"),
subject="#444")
content = "We should fix #224 and #115, but not issue#124 or #1124z or [trac #15](https://trac.zulip.net/ticket/16) today."
converted = bugdown.convert(content, realm_domain='zulip.com', message=msg)
converted_subject = bugdown.subject_links(realm.domain.lower(), msg.subject)
self.assertEqual(converted, '<p>We should fix <a href="https://trac.zulip.net/ticket/224" target="_blank" title="https://trac.zulip.net/ticket/224">#224</a> and <a href="https://trac.zulip.net/ticket/115" target="_blank" title="https://trac.zulip.net/ticket/115">#115</a>, but not issue#124 or #1124z or <a href="https://trac.zulip.net/ticket/16" target="_blank" title="https://trac.zulip.net/ticket/16">trac #15</a> today.</p>')
self.assertEqual(converted_subject, [u'https://trac.zulip.net/ticket/444'])
def test_realm_patterns_negative(self):
realm = get_realm('zulip.com')
RealmFilter(realm=realm, pattern=r"#(?P<id>[0-9]{2,8})",
url_format_string=r"https://trac.zulip.net/ticket/%(id)s").save()
boring_msg = Message(sender=get_user_profile_by_email("othello@zulip.com"),
subject=u"no match here")
converted_boring_subject = bugdown.subject_links(realm.domain.lower(), boring_msg.subject)
self.assertEqual(converted_boring_subject, [])
def test_alert_words(self):
user_profile = get_user_profile_by_email("othello@zulip.com")
do_set_alert_words(user_profile, ["ALERTWORD", "scaryword"])
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "We have an ALERTWORD day today!"
self.assertEqual(msg.render_markdown(content), "<p>We have an ALERTWORD day today!</p>")
self.assertEqual(msg.user_ids_with_alert_words, set([user_profile.id]))
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "We have a NOTHINGWORD day today!"
self.assertEqual(msg.render_markdown(content), "<p>We have a NOTHINGWORD day today!</p>")
self.assertEqual(msg.user_ids_with_alert_words, set())
def test_mention_wildcard(self):
user_profile = get_user_profile_by_email("othello@zulip.com")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@all test"
self.assertEqual(msg.render_markdown(content),
'<p><span class="user-mention" data-user-email="*">@all</span> test</p>')
self.assertTrue(msg.mentions_wildcard)
def test_mention_everyone(self):
user_profile = get_user_profile_by_email("othello@zulip.com")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@everyone test"
self.assertEqual(msg.render_markdown(content),
'<p><span class="user-mention" data-user-email="*">@everyone</span> test</p>')
self.assertTrue(msg.mentions_wildcard)
def test_mention_single(self):
sender_user_profile = get_user_profile_by_email("othello@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "@**King Hamlet**"
self.assertEqual(msg.render_markdown(content),
'<p><span class="user-mention" data-user-email="hamlet@zulip.com">@King Hamlet</span></p>')
self.assertEqual(msg.mentions_user_ids, set([user_profile.id]))
def test_mention_shortname(self):
sender_user_profile = get_user_profile_by_email("othello@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "@**hamlet**"
self.assertEqual(msg.render_markdown(content),
'<p><span class="user-mention" data-user-email="hamlet@zulip.com">@King Hamlet</span></p>')
self.assertEqual(msg.mentions_user_ids, set([user_profile.id]))
def test_mention_multiple(self):
sender_user_profile = get_user_profile_by_email("othello@zulip.com")
hamlet = get_user_profile_by_email("hamlet@zulip.com")
cordelia = get_user_profile_by_email("cordelia@zulip.com")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "@**King Hamlet** and @**cordelia**, check this out"
self.assertEqual(msg.render_markdown(content),
'<p>'
'<span class="user-mention" '
'data-user-email="hamlet@zulip.com">@King Hamlet</span> and '
'<span class="user-mention" '
'data-user-email="cordelia@zulip.com">@Cordelia Lear</span>, '
'check this out</p>')
self.assertEqual(msg.mentions_user_ids, set([hamlet.id, cordelia.id]))
def test_mention_invalid(self):
sender_user_profile = get_user_profile_by_email("othello@zulip.com")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "Hey @**Nonexistent User**"
self.assertEqual(msg.render_markdown(content),
'<p>Hey @<strong>Nonexistent User</strong></p>')
self.assertEqual(msg.mentions_user_ids, set())
def test_stream_subscribe_button_simple(self):
msg = '!_stream_subscribe_button(simple)'
converted = bugdown_convert(msg)
self.assertEqual(
converted,
'<p>'
'<span class="inline-subscribe" data-stream-name="simple">'
'<button class="inline-subscribe-button btn">Subscribe to simple</button>'
'<span class="inline-subscribe-error"></span>'
'</span>'
'</p>'
)
def test_stream_subscribe_button_in_name(self):
msg = '!_stream_subscribe_button(simple (not\\))'
converted = bugdown_convert(msg)
self.assertEqual(
converted,
'<p>'
'<span class="inline-subscribe" data-stream-name="simple (not)">'
'<button class="inline-subscribe-button btn">Subscribe to simple (not)</button>'
'<span class="inline-subscribe-error"></span>'
'</span>'
'</p>'
)
def test_stream_subscribe_button_after_name(self):
msg = '!_stream_subscribe_button(simple) (not)'
converted = bugdown_convert(msg)
self.assertEqual(
converted,
'<p>'
'<span class="inline-subscribe" data-stream-name="simple">'
'<button class="inline-subscribe-button btn">Subscribe to simple</button>'
'<span class="inline-subscribe-error"></span>'
'</span>'
' (not)</p>'
)
def test_stream_subscribe_button_slash(self):
msg = '!_stream_subscribe_button(simple\\\\)'
converted = bugdown_convert(msg)
self.assertEqual(
converted,
'<p>'
'<span class="inline-subscribe" data-stream-name="simple\\">'
'<button class="inline-subscribe-button btn">Subscribe to simple\\</button>'
'<span class="inline-subscribe-error"></span>'
'</span>'
'</p>'
)
def test_in_app_modal_link(self):
msg = '!modal_link(#settings, Settings page)'
converted = bugdown_convert(msg)
self.assertEqual(
converted,
'<p>'
'<a data-toggle="modal" href="#settings" title="#settings">Settings page</a>'
'</p>'
)
def test_mit_rendering(self):
    """Test the markdown configs for the MIT Zephyr mirroring system;
    verifies almost all inline patterns are disabled, but
    inline_interesting_links is still enabled"""
    url = "https://lists.debian.org/debian-ctte/2014/02/msg00173.html"
    # (input, expected output) pairs, checked in order.
    cases = [
        ("**test**", "<p>**test**</p>"),
        ("* test", "<p>* test</p>"),
        (url,
         '<p><a href="%s" target="_blank" title="%s">%s</a></p>' % (url, url, url)),
    ]
    for msg, expected in cases:
        self.assertEqual(bugdown.convert(msg, "zephyr_mirror"), expected)
class BugdownApiTests(ZulipTestCase):
    def test_render_message_api(self):
        # type: () -> None
        """The /messages/render endpoint returns server-side rendered markdown."""
        response = self.client_get(
            '/api/v1/messages/render',
            dict(content='That is a **bold** statement'),
            **self.api_auth('othello@zulip.com')
        )
        self.assert_json_success(response)
        payload = ujson.loads(response.content)
        self.assertEqual(
            payload['rendered'],
            u'<p>That is a <strong>bold</strong> statement</p>')
class BugdownErrorTests(ZulipTestCase):
    def test_bugdown_error_handling(self):
        # type: () -> None
        """A markdown processor failure surfaces as BugdownRenderingException."""
        with self.simulated_markdown_failure():
            with self.assertRaises(bugdown.BugdownRenderingException):
                bugdown.convert('', 'zulip.com')

    def test_send_message_errors(self):
        # type: () -> None
        """Sending a message while rendering fails raises a JsonableError."""
        with self.simulated_markdown_failure():
            # We don't use assertRaisesRegexp because it seems to not
            # handle i18n properly here on some systems.
            with self.assertRaises(JsonableError):
                self.send_message("othello@zulip.com", "Denmark",
                                  Recipient.STREAM, 'whatever')
'''
Created on 10/11/2009
@author: Fernando
Copyright 2009-2013 Fernando J. V. da Silva
This file is part of centering_py.
centering_py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
centering_py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with centering_py. If not, see <http://www.gnu.org/licenses/>.
'''
from corpus.Word import *
from anaphor_resolution.Centering_Algorithm import *
class Centering_Element:
    '''
    Base class with the state shared by Centering Theory related objects:
    utility links, anaphora bookkeeping, resolved referents and the
    candidate centering sets computed for the element.
    '''

    def __init__(self):
        # Objects which are linked to this element by utility.
        self.linked_objects = {}
        # Anaphors found for this element.
        self.anaphors = []
        # The single anaphor "owning" this element, when applicable.
        self.anaphor = None
        # Referents resolved for this element.
        self.referent_list = []
        # Set of possible Cf's and Cb's for this sentence.
        self.centeringSets = []
        # The centering set finally selected for this element.
        self.centeringSet = None
class Un(Centering_Element):
    '''
    Represents a sentence (utterance) from the centering viewpoint.
    '''

    def __init__(self):
        Centering_Element.__init__(self)
        # Referring expressions contained in this utterance.
        self.re_set = []
        # Position of this utterance in the discourse.
        self.index = 0

    def addCenteringSet(self, cb, cf, anaphor=None):
        """Build a CenteringSet for (cb, cf) and attach it to the given
        anaphor when one is supplied, otherwise to this utterance."""
        candidate = CenteringSet(cb, cf, anaphor)
        owner = anaphor if anaphor is not None else self
        owner.centeringSets.append(candidate)
class CenteringSet(Centering_Element):
    '''
    Represents a possible Forward Looking Center (Cf) set and a possible
    Backward Looking Center (Cb) for one utterance.
    '''

    def __init__(self, cb=None, cf=None, anaphor=None):
        Centering_Element.__init__(self)
        # RE objects representing the Referential Expressions of this
        # candidate Cf set.
        if cf != None:
            self.Cf = cf
        else:
            self.Cf = []
        # Word which represents the Referential Expression for this possible Cb.
        self.Cb = cb
        # Type of center transition, assuming this Cf (one of the
        # Centering_Algorithm transition constants once computed).
        self.transition_type = None
        # The anaphor which "owns" this centeringSet, if any.
        self.anaphor = anaphor
        self.referent_list = []
        # When both a Cb and an owning anaphor exist, the Cb is this
        # set's first referent.
        if cb != None and anaphor != None:
            self.referent_list.append(cb)
        # Marks the filter used by the BFP algorithm.
        self.filtered = None

    def transition_asString(self):
        """Return the transition type as an upper-case label.

        Falls through (returns None) when transition_type matches none of
        the known Centering_Algorithm constants, including when unset.
        """
        if self.transition_type == Centering_Algorithm.CONTINUING:
            return 'CONTINUING'
        if self.transition_type == Centering_Algorithm.RETAINING:
            return 'RETAINING'
        if self.transition_type == Centering_Algorithm.SMOOTH_SHIFT:
            return 'SMOOTH-SHIFT'
        if self.transition_type == Centering_Algorithm.SHIFT:
            return 'SHIFT'

    def asString(self):
        """Render this set (Cf list, Cb, transition) as a tab-indented,
        human-readable multi-line string."""
        result = '\t\tCf = {'
        for cf in self.Cf:
            # Cf entries are either {'anaphor': RE, 'referent': RE} dicts
            # or plain RE objects.
            if type(cf) is dict:
                result = result + cf['anaphor'].word.properties['text']
                if cf['referent'] != None:
                    result = result + '=' + cf['referent'].word.properties['text']
                result += ', '
            else:
                #result = result + cf.word.properties['text'] + ', '
                result = result + cf.referents_asString() + ', '
        result = result + '}\n'
        if self.Cb != None:
            result = result + '\t\tCb = ' + self.Cb.word.properties['text'] + '\n'
        else:
            result = result + '\t\tCb = None\n'
        result = result + '\t\tTransition = '+ str(self.transition_asString()) + '\n'
        return result

    def referents_asString(self):
        """Return the 'anaphor=referent=...' chain for this set's anaphor."""
        result = self.anaphor.word.properties['text']
        if len(self.referent_list) > 0:
            for rf in self.referent_list:
                result = result + '=' + rf.referents_asString()
        return result
class RE(Centering_Element):
    '''
    Represents a Referring Expression (a word that is a pronoun or a noun
    phrase or a proper name).
    '''

    def __init__(self, word=None, re=None):
        Centering_Element.__init__(self)
        # Word which represents this RE; when cloning (re given), reuse
        # the source RE's word.
        if word != None:
            self.word = word
        elif re != None:
            self.word = re.word
        else:
            self.word = None
        self.marked = False
        # Rank information used by the SH_Order algorithm.
        self.inf_status = -1
        ''' Represents an utterance index in the following form:
        1001 for the first entity on the first utterance
        2010 for the 10th entity on the second utterance
        10003 for the 3rd entity on the 10th utterance '''
        self.utt = -1

    def referents_asString(self):
        """Return this RE's text chained with '=' to its resolved referents.

        Pronouns with a selected centering set delegate to that set, so the
        chain shows the resolved entity rather than the pronoun alone.
        """
        if self.word.properties['tag'] == Word.PRON and\
        self.centeringSet != None:
            return self.centeringSet.referents_asString()
        else:
            if self.referent_list != None and len(self.referent_list) > 0:
                result = self.word.properties['text'] + '='
                for ref in self.referent_list:
                    result = result + ref.referents_asString()
                return result
            else:
                return self.word.properties['text']

    def get_entity_referent(self):
        """Follow the referent chain until a proper-name (PROP) or noun (N)
        referent is found; return None if the chain is empty or broken."""
        if len(self.referent_list) == 0 or self.referent_list[0] == None:
            return None
        elif self.referent_list[0].word.properties['tag'] in [Word.PROP, Word.N]:
            return self.referent_list[0]
        else:
            return self.referent_list[0].get_entity_referent()

    def __getitem__(self, key):
        # Dictionary-style access used by ordering algorithms.
        # NOTE(review): unknown keys fall through and return None.
        if key == 'rank':
            return self.word['rank']
        if key == 'utt':
            return self.utt
        if key == 'word_id':
            return self.word.properties['id']

    def has_key(self, key):
        # Python2-style membership test matching __getitem__'s keys.
        return key in ['rank', 'utt', 'word_id']

    @staticmethod
    def word_set_to_re_set(word_set):
        # Wrap each Word of word_set in a fresh RE.
        re_set = []
        for word in word_set:
            re_set.append(RE(word))
        return re_set

    @staticmethod
    def re_set_clone(re_set):
        # Shallow-clone a list of REs; each clone shares its source's word.
        cp_re_set = []
        for re in re_set:
            cp_re_set.append(RE(None, re))
        return cp_re_set
"""
Django-environ allows you to utilize 12factor inspired environment
variables to configure your Django application.
"""
import json
import logging
import os
import re
import sys
import warnings
try:
    from django.core.exceptions import ImproperlyConfigured
except ImportError:
    # Allow using this module without Django installed: provide a minimal
    # stand-in so callers can still raise/catch configuration errors.
    class ImproperlyConfigured(Exception):
        pass

from six.moves import urllib
from six import string_types

logger = logging.getLogger(__name__)

# Package version metadata.
VERSION = '0.4.1'
__author__ = 'joke2k'
__version__ = tuple(VERSION.split('.'))
# return int if possible
def _cast_int(v):
return int(v) if hasattr(v, 'isdigit') and v.isdigit() else v
def _cast_urlstr(v):
return urllib.parse.unquote_plus(v) if isinstance(v, str) else v
# back compatibility with redis_cache package
DJANGO_REDIS_DRIVER = 'django_redis.cache.RedisCache'
DJANGO_REDIS_CACHE_DRIVER = 'redis_cache.RedisCache'

# Prefer the django_redis backend; fall back to the legacy redis_cache
# backend only when that package is importable.
REDIS_DRIVER = DJANGO_REDIS_DRIVER
try:
    import redis_cache
    REDIS_DRIVER = DJANGO_REDIS_CACHE_DRIVER
except ImportError:
    # Bug fix: the previous bare "except:" also swallowed SystemExit,
    # KeyboardInterrupt and genuine errors raised while importing
    # redis_cache. Only a missing package should trigger the fallback.
    pass
class NoValue(object):
    """Sentinel type used to detect "no default supplied" (None itself is a
    legitimate default value, so it cannot serve as the sentinel)."""

    def __repr__(self):
        return '<%s>' % self.__class__.__name__
class Env(object):
    """Provide scheme-based lookups of environment variables so that each
    caller doesn't have to pass in `cast` and `default` parameters.

    Usage:::

        env = Env(MAIL_ENABLED=bool, SMTP_LOGIN=(str, 'DEFAULT'))
        if env('MAIL_ENABLED'):
            ...
    """

    # Mapping consulted on every lookup; tests may replace or patch it.
    ENVIRON = os.environ
    # Sentinel meaning "no default supplied" (None is a valid default).
    NOTSET = NoValue()
    # Lower-cased strings interpreted as True by the bool cast.
    BOOLEAN_TRUE_STRINGS = ('true', 'on', 'ok', 'y', 'yes', '1')
    # Type produced by url() and accepted by the *_url_config parsers.
    URL_CLASS = urllib.parse.ParseResult

    DEFAULT_DATABASE_ENV = 'DATABASE_URL'
    # URL scheme -> Django database ENGINE dotted path.
    DB_SCHEMES = {
        'postgres': 'django.db.backends.postgresql_psycopg2',
        'postgresql': 'django.db.backends.postgresql_psycopg2',
        'psql': 'django.db.backends.postgresql_psycopg2',
        'pgsql': 'django.db.backends.postgresql_psycopg2',
        'postgis': 'django.contrib.gis.db.backends.postgis',
        'mysql': 'django.db.backends.mysql',
        'mysql2': 'django.db.backends.mysql',
        'mysqlgis': 'django.contrib.gis.db.backends.mysql',
        'oracle': 'django.db.backends.oracle',
        'spatialite': 'django.contrib.gis.db.backends.spatialite',
        'sqlite': 'django.db.backends.sqlite3',
        'ldap': 'ldapdb.backends.ldap',
    }
    # Query-string keys promoted to the top level of a database config
    # (everything else goes into OPTIONS).
    _DB_BASE_OPTIONS = ['CONN_MAX_AGE', 'ATOMIC_REQUESTS', 'AUTOCOMMIT']

    DEFAULT_CACHE_ENV = 'CACHE_URL'
    # URL scheme -> Django cache BACKEND dotted path.
    CACHE_SCHEMES = {
        'dbcache': 'django.core.cache.backends.db.DatabaseCache',
        'dummycache': 'django.core.cache.backends.dummy.DummyCache',
        'filecache': 'django.core.cache.backends.filebased.FileBasedCache',
        'locmemcache': 'django.core.cache.backends.locmem.LocMemCache',
        'memcache': 'django.core.cache.backends.memcached.MemcachedCache',
        'pymemcache': 'django.core.cache.backends.memcached.PyLibMCCache',
        'rediscache': REDIS_DRIVER,
        'redis': REDIS_DRIVER,
    }
    # Query-string keys promoted to the top level of a cache config.
    _CACHE_BASE_OPTIONS = ['TIMEOUT', 'KEY_PREFIX', 'VERSION', 'KEY_FUNCTION', 'BINARY']

    DEFAULT_EMAIL_ENV = 'EMAIL_URL'
    # URL scheme -> Django EMAIL_BACKEND dotted path.
    EMAIL_SCHEMES = {
        'smtp': 'django.core.mail.backends.smtp.EmailBackend',
        'smtps': 'django.core.mail.backends.smtp.EmailBackend',
        'smtp+tls': 'django.core.mail.backends.smtp.EmailBackend',
        'smtp+ssl': 'django.core.mail.backends.smtp.EmailBackend',
        'consolemail': 'django.core.mail.backends.console.EmailBackend',
        'filemail': 'django.core.mail.backends.filebased.EmailBackend',
        'memorymail': 'django.core.mail.backends.locmem.EmailBackend',
        'dummymail': 'django.core.mail.backends.dummy.EmailBackend'
    }
    # Query-string keys promoted to the top level of an email config.
    _EMAIL_BASE_OPTIONS = ['EMAIL_USE_TLS', 'EMAIL_USE_SSL']

    DEFAULT_SEARCH_ENV = 'SEARCH_URL'
    # URL scheme -> Haystack search ENGINE dotted path.
    SEARCH_SCHEMES = {
        "elasticsearch": "haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine",
        "solr": "haystack.backends.solr_backend.SolrEngine",
        "whoosh": "haystack.backends.whoosh_backend.WhooshEngine",
        "xapian": "haystack.backends.xapian_backend.XapianEngine",
        "simple": "haystack.backends.simple_backend.SimpleEngine",
    }

    def __init__(self, **scheme):
        # scheme maps VAR_NAME -> cast, or VAR_NAME -> (cast, default).
        self.scheme = scheme

    def __call__(self, var, cast=None, default=NOTSET, parse_default=False):
        return self.get_value(var, cast=cast, default=default, parse_default=parse_default)

    # Shortcuts

    def str(self, var, default=NOTSET):
        """
        :rtype: str
        """
        return self.get_value(var, default=default)

    def unicode(self, var, default=NOTSET):
        """Helper for python2
        :rtype: unicode
        """
        return self.get_value(var, cast=str, default=default)

    def bool(self, var, default=NOTSET):
        """
        :rtype: bool
        """
        return self.get_value(var, cast=bool, default=default)

    def int(self, var, default=NOTSET):
        """
        :rtype: int
        """
        return self.get_value(var, cast=int, default=default)

    def float(self, var, default=NOTSET):
        """
        :rtype: float
        """
        return self.get_value(var, cast=float, default=default)

    def json(self, var, default=NOTSET):
        """
        :returns: Json parsed
        """
        return self.get_value(var, cast=json.loads, default=default)

    def list(self, var, cast=None, default=NOTSET):
        """
        :rtype: list
        """
        return self.get_value(var, cast=list if not cast else [cast], default=default)

    def tuple(self, var, cast=None, default=NOTSET):
        """
        :rtype: tuple
        """
        return self.get_value(var, cast=tuple if not cast else (cast,), default=default)

    def dict(self, var, cast=dict, default=NOTSET):
        """
        :rtype: dict
        """
        return self.get_value(var, cast=cast, default=default)

    def url(self, var, default=NOTSET):
        """
        :rtype: urlparse.ParseResult
        """
        return self.get_value(var, cast=urllib.parse.urlparse, default=default, parse_default=True)

    def db_url(self, var=DEFAULT_DATABASE_ENV, default=NOTSET, engine=None):
        """Returns a config dictionary, defaulting to DATABASE_URL.
        :rtype: dict
        """
        return self.db_url_config(self.get_value(var, default=default), engine=engine)
    db = db_url

    def cache_url(self, var=DEFAULT_CACHE_ENV, default=NOTSET, backend=None):
        """Returns a config dictionary, defaulting to CACHE_URL.
        :rtype: dict
        """
        return self.cache_url_config(self.url(var, default=default), backend=backend)
    cache = cache_url

    def email_url(self, var=DEFAULT_EMAIL_ENV, default=NOTSET, backend=None):
        """Returns a config dictionary, defaulting to EMAIL_URL.
        :rtype: dict
        """
        return self.email_url_config(self.url(var, default=default), backend=backend)
    email = email_url

    def search_url(self, var=DEFAULT_SEARCH_ENV, default=NOTSET, engine=None):
        """Returns a config dictionary, defaulting to SEARCH_URL.
        :rtype: dict
        """
        return self.search_url_config(self.url(var, default=default), engine=engine)

    def path(self, var, default=NOTSET, **kwargs):
        """
        :rtype: Path
        """
        return Path(self.get_value(var, default=default), **kwargs)

    def get_value(self, var, cast=None, default=NOTSET, parse_default=False):
        """Return value for given environment variable.
        :param var: Name of variable.
        :param cast: Type to cast return value as.
        :param default: If var not present in environ, return this instead.
        :param parse_default: force to parse default..
        :returns: Value from environment or default (if set)
        """
        logger.debug("get '{0}' casted as '{1}' with default '{2}'".format(
            var, cast, default
        ))
        # A scheme entry may supply just a cast, or a (cast, default) pair;
        # explicit arguments always win over scheme-declared values.
        if var in self.scheme:
            var_info = self.scheme[var]
            try:
                has_default = len(var_info) == 2
            except TypeError:
                has_default = False
            if has_default:
                if not cast:
                    cast = var_info[0]
                if default is self.NOTSET:
                    try:
                        default = var_info[1]
                    except IndexError:
                        pass
            else:
                if not cast:
                    cast = var_info
        try:
            value = self.ENVIRON[var]
        except KeyError:
            if default is self.NOTSET:
                error_msg = "Set the {0} environment variable".format(var)
                raise ImproperlyConfigured(error_msg)
            value = default
        # Resolve any proxied values: "$OTHER_VAR" points at another variable.
        # NOTE(review): lstrip('$') strips every leading '$', so "$$X" also
        # resolves as "X" — confirm this is intended.
        if hasattr(value, 'startswith') and value.startswith('$'):
            value = value.lstrip('$')
            value = self.get_value(value, cast=cast, default=default)
        # Defaults are returned un-parsed unless parse_default is requested.
        if value != default or (parse_default and value):
            value = self.parse_value(value, cast)
        return value

    # Class and static methods

    @classmethod
    def parse_value(cls, value, cast):
        """Parse and cast provided value
        :param value: Stringed value.
        :param cast: Type to cast return value as.
        :returns: Casted value
        """
        if cast is None:
            return value
        elif cast is bool:
            try:
                value = int(value) != 0
            except ValueError:
                # Not numeric: fall back to the whitelist of "true" strings.
                value = value.lower() in cls.BOOLEAN_TRUE_STRINGS
        elif isinstance(cast, list):
            # [item_cast]: comma-separated list of cast items.
            value = list(map(cast[0], [x for x in value.split(',') if x]))
        elif isinstance(cast, tuple):
            # (item_cast,): optionally parenthesized comma-separated tuple.
            val = value.strip('(').strip(')').split(',')
            value = tuple(map(cast[0], [x for x in val if x]))
        elif isinstance(cast, dict):
            # {'key': ..., 'value': ..., 'cast': {...}} describes how to cast
            # each "k=v" pair of a ';'-separated mapping; per-key casts in
            # 'cast' override the generic value cast.
            key_cast = cast.get('key', str)
            value_cast = cast.get('value', str)
            value_cast_by_key = cast.get('cast', dict())
            value = dict(map(
                lambda kv: (
                    key_cast(kv[0]),
                    cls.parse_value(kv[1], value_cast_by_key.get(kv[0], value_cast))
                ),
                [val.split('=') for val in value.split(';') if val]
            ))
        elif cast is dict:
            # Plain dict cast: comma-separated "k=v" pairs, values kept as str.
            value = dict([val.split('=') for val in value.split(',') if val])
        elif cast is list:
            value = [x for x in value.split(',') if x]
        elif cast is tuple:
            val = value.strip('(').strip(')').split(',')
            value = tuple([x for x in val if x])
        elif cast is float:
            # clean string
            float_str = re.sub(r'[^\d,\.]', '', value)
            # split for avoid thousand separator and different locale comma/dot symbol
            parts = re.split(r'[,\.]', float_str)
            if len(parts) == 1:
                float_str = parts[0]
            else:
                float_str = "{0}.{1}".format(''.join(parts[0:-1]), parts[-1])
            value = float(float_str)
        else:
            value = cast(value)
        return value

    @classmethod
    def db_url_config(cls, url, engine=None):
        """Pulled from DJ-Database-URL, parse an arbitrary Database URL.
        Support currently exists for PostgreSQL, PostGIS, MySQL, Oracle and SQLite.

        SQLite connects to file based databases. The same URL format is used, omitting the hostname,
        and using the "file" portion as the filename of the database.
        This has the effect of four slashes being present for an absolute file path:

        >>> from environ import Env
        >>> Env.db_url_config('sqlite:////full/path/to/your/file.sqlite')
        {'ENGINE': 'django.db.backends.sqlite3', 'HOST': '', 'NAME': '/full/path/to/your/file.sqlite', 'PASSWORD': '', 'PORT': '', 'USER': ''}
        >>> Env.db_url_config('postgres://uf07k1i6d8ia0v:wegauwhgeuioweg@ec2-107-21-253-135.compute-1.amazonaws.com:5431/d8r82722r2kuvn')
        {'ENGINE': 'django.db.backends.postgresql_psycopg2', 'HOST': 'ec2-107-21-253-135.compute-1.amazonaws.com', 'NAME': 'd8r82722r2kuvn', 'PASSWORD': 'wegauwhgeuioweg', 'PORT': 5431, 'USER': 'uf07k1i6d8ia0v'}
        """
        if not isinstance(url, cls.URL_CLASS):
            if url == 'sqlite://:memory:':
                # this is a special case, because if we pass this URL into
                # urlparse, urlparse will choke trying to interpret "memory"
                # as a port number
                return {
                    'ENGINE': cls.DB_SCHEMES['sqlite'],
                    'NAME': ':memory:'
                }
                # note: no other settings are required for sqlite
            url = urllib.parse.urlparse(url)
        config = {}
        # Remove query strings.
        path = url.path[1:]
        path = urllib.parse.unquote_plus(path.split('?', 2)[0])
        # if we are using sqlite and we have no path, then assume we
        # want an in-memory database (this is the behaviour of sqlalchemy)
        if url.scheme == 'sqlite' and path == '':
            path = ':memory:'
        if url.scheme == 'ldap':
            # ldapdb expects the full server URL as NAME.
            path = '{scheme}://{hostname}'.format(scheme=url.scheme, hostname=url.hostname)
            if url.port:
                path += ':{port}'.format(port=url.port)
        # Update with environment configuration.
        config.update({
            'NAME': path or '',
            'USER': _cast_urlstr(url.username) or '',
            'PASSWORD': _cast_urlstr(url.password) or '',
            'HOST': url.hostname or '',
            'PORT': _cast_int(url.port) or '',
        })
        if url.scheme == 'oracle' and path == '':
            # Oracle TNS names come in the host position, not the path.
            config['NAME'] = config['HOST']
            config['HOST'] = ''
        if url.scheme == 'oracle':
            # Django oracle/base.py strips port and fails on non-string value
            if not config['PORT']:
                del(config['PORT'])
            else:
                config['PORT'] = str(config['PORT'])
        if url.query:
            config_options = {}
            for k, v in urllib.parse.parse_qs(url.query).items():
                if k.upper() in cls._DB_BASE_OPTIONS:
                    config.update({k.upper(): _cast_int(v[0])})
                else:
                    config_options.update({k: _cast_int(v[0])})
            config['OPTIONS'] = config_options
        if engine:
            config['ENGINE'] = engine
        if url.scheme in Env.DB_SCHEMES:
            config['ENGINE'] = Env.DB_SCHEMES[url.scheme]
        if not config.get('ENGINE', False):
            warnings.warn("Engine not recognized from url: {0}".format(config))
            return {}
        return config

    @classmethod
    def cache_url_config(cls, url, backend=None):
        """Pulled from DJ-Cache-URL, parse an arbitrary Cache URL.
        :param url:
        :param backend:
        :return:
        """
        url = urllib.parse.urlparse(url) if not isinstance(url, cls.URL_CLASS) else url
        # Multiple comma-separated hosts become a list LOCATION.
        location = url.netloc.split(',')
        if len(location) == 1:
            location = location[0]
        config = {
            'BACKEND': cls.CACHE_SCHEMES[url.scheme],
            'LOCATION': location,
        }
        # Add the drive to LOCATION
        if url.scheme == 'filecache':
            config.update({
                'LOCATION': url.netloc + url.path,
            })
        # A path with memcache means a unix socket location.
        if url.path and url.scheme in ['memcache', 'pymemcache']:
            config.update({
                'LOCATION': 'unix:' + url.path,
            })
        elif url.scheme.startswith('redis'):
            # No hostname means a unix-socket redis URL.
            if url.hostname:
                scheme = url.scheme.replace('cache', '')
            else:
                scheme = 'unix'
            config['LOCATION'] = scheme + '://' + url.netloc + url.path
        if url.query:
            config_options = {}
            for k, v in urllib.parse.parse_qs(url.query).items():
                opt = {k.upper(): _cast_int(v[0])}
                if k.upper() in cls._CACHE_BASE_OPTIONS:
                    config.update(opt)
                else:
                    config_options.update(opt)
            config['OPTIONS'] = config_options
        if backend:
            config['BACKEND'] = backend
        return config

    @classmethod
    def email_url_config(cls, url, backend=None):
        """Parses an email URL."""
        config = {}
        url = urllib.parse.urlparse(url) if not isinstance(url, cls.URL_CLASS) else url
        # Remove query strings
        path = url.path[1:]
        path = urllib.parse.unquote_plus(path.split('?', 2)[0])
        # Update with environment configuration
        config.update({
            'EMAIL_FILE_PATH': path,
            'EMAIL_HOST_USER': _cast_urlstr(url.username),
            'EMAIL_HOST_PASSWORD': _cast_urlstr(url.password),
            'EMAIL_HOST': url.hostname,
            'EMAIL_PORT': _cast_int(url.port),
        })
        if backend:
            config['EMAIL_BACKEND'] = backend
        elif url.scheme not in cls.EMAIL_SCHEMES:
            raise ImproperlyConfigured('Invalid email schema %s' % url.scheme)
        elif url.scheme in cls.EMAIL_SCHEMES:
            config['EMAIL_BACKEND'] = cls.EMAIL_SCHEMES[url.scheme]
        # TLS/SSL flags derived from the scheme suffix.
        if url.scheme in ('smtps', 'smtp+tls'):
            config['EMAIL_USE_TLS'] = True
        elif url.scheme == 'smtp+ssl':
            config['EMAIL_USE_SSL'] = True
        if url.query:
            config_options = {}
            for k, v in urllib.parse.parse_qs(url.query).items():
                opt = {k.upper(): _cast_int(v[0])}
                if k.upper() in cls._EMAIL_BASE_OPTIONS:
                    config.update(opt)
                else:
                    config_options.update(opt)
            config['OPTIONS'] = config_options
        return config

    @classmethod
    def search_url_config(cls, url, engine=None):
        """Parse a Haystack search URL into an engine config dict."""
        config = {}
        url = urllib.parse.urlparse(url) if not isinstance(url, cls.URL_CLASS) else url
        # Remove query strings.
        path = url.path[1:]
        path = urllib.parse.unquote_plus(path.split('?', 2)[0])
        if url.scheme not in cls.SEARCH_SCHEMES:
            raise ImproperlyConfigured('Invalid search schema %s' % url.scheme)
        config["ENGINE"] = cls.SEARCH_SCHEMES[url.scheme]
        # check commons params
        params = {}
        if url.query:
            params = urllib.parse.parse_qs(url.query)
            if 'EXCLUDED_INDEXES' in params.keys():
                config['EXCLUDED_INDEXES'] = params['EXCLUDED_INDEXES'][0].split(',')
            if 'INCLUDE_SPELLING' in params.keys():
                config['INCLUDE_SPELLING'] = cls.parse_value(params['INCLUDE_SPELLING'][0], bool)
            if 'BATCH_SIZE' in params.keys():
                config['BATCH_SIZE'] = cls.parse_value(params['BATCH_SIZE'][0], int)
        if url.scheme == 'simple':
            return config
        elif url.scheme in ['solr', 'elasticsearch']:
            if 'KWARGS' in params.keys():
                config['KWARGS'] = params['KWARGS'][0]
        # remove trailing slash
        if path.endswith("/"):
            path = path[:-1]
        if url.scheme == 'solr':
            config['URL'] = urllib.parse.urlunparse(('http',) + url[1:2] + (path,) + ('', '', ''))
            if 'TIMEOUT' in params.keys():
                config['TIMEOUT'] = cls.parse_value(params['TIMEOUT'][0], int)
            return config
        if url.scheme == 'elasticsearch':
            # The last path component is the index name; the rest is the URL path.
            split = path.rsplit("/", 1)
            if len(split) > 1:
                path = "/".join(split[:-1])
                index = split[-1]
            else:
                path = ""
                index = split[0]
            config['URL'] = urllib.parse.urlunparse(('http',) + url[1:2] + (path,) + ('', '', ''))
            if 'TIMEOUT' in params.keys():
                config['TIMEOUT'] = cls.parse_value(params['TIMEOUT'][0], int)
            config['INDEX_NAME'] = index
            return config
        # File-backed engines (whoosh, xapian) use a filesystem PATH.
        config['PATH'] = '/' + path
        if url.scheme == 'whoosh':
            if 'STORAGE' in params.keys():
                config['STORAGE'] = params['STORAGE'][0]
            if 'POST_LIMIT' in params.keys():
                config['POST_LIMIT'] = cls.parse_value(params['POST_LIMIT'][0], int)
        elif url.scheme == 'xapian':
            if 'FLAGS' in params.keys():
                config['FLAGS'] = params['FLAGS'][0]
        if engine:
            config['ENGINE'] = engine
        return config

    @classmethod
    def read_env(cls, env_file=None, **overrides):
        """Read a .env file into os.environ.

        If not given a path to a dotenv path, does filthy magic stack backtracking
        to find manage.py and then find the dotenv.

        http://www.wellfireinteractive.com/blog/easier-12-factor-django/
        https://gist.github.com/bennylope/2999704
        """
        if env_file is None:
            # Guess "<caller's directory>/.env" from the call stack.
            frame = sys._getframe()
            env_file = os.path.join(os.path.dirname(frame.f_back.f_code.co_filename), '.env')
            if not os.path.exists(env_file):
                warnings.warn(
                    "%s doesn't exist - if you're not configuring your "
                    "environment separately, create one." % env_file)
                return
        try:
            # env_file may be a path or an already-open file object.
            with open(env_file) if isinstance(env_file, string_types) else env_file as f:
                content = f.read()
        except IOError:
            warnings.warn(
                "Error reading %s - if you're not configuring your "
                "environment separately, check this." % env_file)
            return
        logger.debug('Read environment variables from: {0}'.format(env_file))
        for line in content.splitlines():
            # KEY=value lines only; anything else is ignored.
            m1 = re.match(r'\A([A-Za-z_0-9]+)=(.*)\Z', line)
            if m1:
                key, val = m1.group(1), m1.group(2)
                # Strip single quotes verbatim; double quotes also unescape
                # backslash escapes inside the value.
                m2 = re.match(r"\A'(.*)'\Z", val)
                if m2:
                    val = m2.group(1)
                m3 = re.match(r'\A"(.*)"\Z', val)
                if m3:
                    val = re.sub(r'\\(.)', r'\1', m3.group(1))
                # setdefault: a variable already present in the real
                # environment wins over the .env file.
                cls.ENVIRON.setdefault(key, str(val))
        # set defaults
        for key, value in overrides.items():
            cls.ENVIRON.setdefault(key, value)
class Path(object):
    """Inspired to Django Two-scoops, handling File Paths in Settings.

    >>> from environ import Path
    >>> root = Path('/home')
    >>> root, root(), root('dev')
    (<Path:/home>, '/home', '/home/dev')
    >>> root == Path('/home')
    True
    >>> root in Path('/'), root not in Path('/other/path')
    (True, True)
    >>> root('dev', 'not_existing_dir', required=True)
    Traceback (most recent call last):
    environ.environ.ImproperlyConfigured: Create required path: /home/not_existing_dir
    >>> public = root.path('public')
    >>> public, public.root, public('styles')
    (<Path:/home/public>, '/home/public', '/home/public/styles')
    >>> assets, scripts = public.path('assets'), public.path('assets', 'scripts')
    >>> assets.root, scripts.root
    ('/home/public/assets', '/home/public/assets/scripts')
    >>> assets + 'styles', str(assets + 'styles'), ~assets
    (<Path:/home/public/assets/styles>, '/home/public/assets/styles', <Path:/home/public>)
    """

    def path(self, *paths, **kwargs):
        """Create new Path based on self.root and provided paths.
        :param paths: List of sub paths
        :param kwargs: required=False
        :rtype: Path
        """
        return self.__class__(self.__root__, *paths, **kwargs)

    def file(self, name, *args, **kwargs):
        """Open a file.
        :param name: Filename appended to self.root
        :param args: passed to open()
        :param kwargs: passed to open()
        :rtype: file
        """
        return open(self(name), *args, **kwargs)

    @property
    def root(self):
        """Current directory for this Path"""
        return self.__root__

    def __init__(self, start='', *paths, **kwargs):
        super(Path, self).__init__()
        # is_file=True means `start` is a file path; use its directory.
        if kwargs.get('is_file', False):
            start = os.path.dirname(start)
        self.__root__ = self._absolute_join(start, *paths, **kwargs)

    def __call__(self, *paths, **kwargs):
        """Retrieve the absolute path, with appended paths
        :param paths: List of sub path of self.root
        :param kwargs: required=False
        """
        return self._absolute_join(self.__root__, *paths, **kwargs)

    def __eq__(self, other):
        return self.__root__ == other.__root__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Bug fix: defining __eq__ without __hash__ made Path unhashable on
        # Python 3 (could not be used as a dict key or in sets). Hash by the
        # same value __eq__ compares.
        return hash(self.__root__)

    def __add__(self, other):
        return Path(self.__root__, other if not isinstance(other, Path) else other.__root__)

    def __sub__(self, other):
        """Subtracting an int pops that many trailing components; subtracting
        a string removes that exact suffix from the path.

        :raises ValueError: when a string operand is not a suffix of the path.
        :raises TypeError: for unsupported operand types.
        """
        if isinstance(other, int):
            return self.path('../' * other)
        elif isinstance(other, string_types):
            # Bug fix: the previous `rstrip(other)` stripped any run of
            # *characters* found in `other`, not the suffix string itself
            # (e.g. Path('/home/dev') - 'dev' also ate trailing 'e'/'v'
            # characters of the remaining path). Remove the exact suffix.
            if not other:
                return Path(self.__root__)
            if self.__root__.endswith(other):
                return Path(self.__root__[:-len(other)])
            raise ValueError(
                "Cannot subtract {0!r} from {1!r}: not a suffix".format(
                    other, self.__root__))
        raise TypeError(
            "unsupported operand type(s) for -: '{0}' and '{1}'".format(self, type(other)))

    def __invert__(self):
        return self.path('..')

    def __contains__(self, item):
        base_path = self.__root__
        if len(base_path) > 1:
            base_path = os.path.join(base_path, '')
        return item.__root__.startswith(base_path)

    def __repr__(self):
        return "<Path:{0}>".format(self.__root__)

    def __str__(self):
        return self.__root__

    def __unicode__(self):
        return self.__str__()

    def __getitem__(self, *args, **kwargs):
        return self.__str__().__getitem__(*args, **kwargs)

    def rfind(self, *args, **kwargs):
        return self.__str__().rfind(*args, **kwargs)

    def find(self, *args, **kwargs):
        return self.__str__().find(*args, **kwargs)

    @staticmethod
    def _absolute_join(base, *paths, **kwargs):
        # Normalize to an absolute path; required=True additionally demands
        # that the resulting path already exists.
        absolute_path = os.path.abspath(os.path.join(base, *paths))
        if kwargs.get('required', False) and not os.path.exists(absolute_path):
            raise ImproperlyConfigured(
                "Create required path: {0}".format(absolute_path))
        return absolute_path
def register_scheme(scheme):
    """Teach urllib.parse to treat `scheme` like a standard URL scheme, so
    netloc/params/query/fragment are split correctly for it."""
    uses_lists = [getattr(urllib.parse, attr)
                  for attr in dir(urllib.parse) if attr.startswith('uses_')]
    for uses in uses_lists:
        uses.append(scheme)
def register_schemes(schemes):
    """Register every scheme in `schemes` with urllib.parse."""
    for entry in schemes:
        register_scheme(entry)
# Register database and cache schemes in URLs.
# Done at import time so urlparse splits netloc/query correctly for every
# custom scheme this module understands (e.g. "postgres://", "rediscache://").
register_schemes(Env.DB_SCHEMES.keys())
register_schemes(Env.CACHE_SCHEMES.keys())
register_schemes(Env.SEARCH_SCHEMES.keys())
register_schemes(Env.EMAIL_SCHEMES.keys())
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The testing Environment class.
It holds the WebsiteTest instances, provides them with credentials,
provides clean browser environment, runs the tests, and gathers the
results.
"""
import os
import shutil
import time
from xml.etree import ElementTree
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
# Message strings to look for in chrome://password-manager-internals.
MESSAGE_ASK = "Message: Decision: ASK the user"
MESSAGE_SAVE = "Message: Decision: SAVE the password"

# URL of the password manager internals (log) page.
INTERNALS_PAGE_URL = "chrome://password-manager-internals/"
class Environment:
  """Sets up the testing Environment.

  Holds the WebsiteTest instances, provides them with credentials,
  provides a clean browser environment, runs the tests, and gathers
  the results.
  """
def __init__(self, chrome_path, chromedriver_path, profile_path,
             passwords_path, enable_automatic_password_saving):
  """Creates a new testing Environment, starts Chromedriver.

  Args:
    chrome_path: The chrome binary file.
    chromedriver_path: The chromedriver binary file.
    profile_path: The chrome testing profile folder.
    passwords_path: The usernames and passwords file.
    enable_automatic_password_saving: If True, the passwords are going to be
        saved without showing the prompt.

  Raises:
    IOError: When the passwords file cannot be accessed.
    ParseError: When the passwords file cannot be parsed.
    Exception: An exception is raised if |profile_path| folder could not be
        removed.
  """
  # Cleaning the chrome testing profile folder.
  if os.path.exists(profile_path):
    shutil.rmtree(profile_path)
  options = Options()
  if enable_automatic_password_saving:
    options.add_argument("enable-automatic-password-saving")
  # TODO(vabr): show_prompt is used in WebsiteTest for asserting that
  # Chrome set-up corresponds to the test type. Remove that knowledge
  # about Environment from the WebsiteTest.
  self.show_prompt = not enable_automatic_password_saving
  options.binary_location = chrome_path
  options.add_argument("user-data-dir=%s" % profile_path)
  # The webdriver. It's possible to choose the port the service is going to
  # run on. If it's left to 0, a free port will be found.
  self.driver = webdriver.Chrome(chromedriver_path, 0, options)
  # Password internals page tab/window handle.
  self.internals_window = self.driver.current_window_handle
  # An xml tree filled with logins and passwords.
  self.passwords_tree = ElementTree.parse(passwords_path).getroot()
  # Separate tab for the tested websites, so the internals page stays open.
  self.website_window = self._OpenNewTab()
  self.websitetests = []
  # Map messages to the number of their appearance in the log.
  self.message_count = { MESSAGE_ASK: 0, MESSAGE_SAVE: 0 }
  # A list of (test_name, test_type, test_success, failure_log).
  self.tests_results = []
def AddWebsiteTest(self, websitetest):
  """Adds a WebsiteTest to the testing Environment.

  Wires the test to this environment and its webdriver, and fills in any
  missing credentials from the passwords XML tree (looked up by the
  website's name).

  TODO(vabr): Currently, this is only called at most once for each
  Environment instance. That is because to run all tests efficiently in
  parallel, each test gets its own process spawned (outside of Python).
  That makes sense, but then we should flatten the hierarchy of calls
  and consider making the 1:1 relation of environment to tests more
  explicit.

  Args:
    websitetest: The WebsiteTest instance to be added.
  """
  websitetest.environment = self
  # TODO(vabr): Make driver a property of WebsiteTest.
  websitetest.driver = self.driver
  for credential in ("username", "password"):
    if not getattr(websitetest, credential):
      xpath = ".//*[@name='%s']/%s" % (websitetest.name, credential)
      setattr(websitetest, credential, self.passwords_tree.find(xpath).text)
  self.websitetests.append(websitetest)
def _ClearBrowserDataInit(self):
"""Opens and resets the chrome://settings/clearBrowserData dialog.
It unchecks all checkboxes, and sets the time range to the "beginning of
time".
"""
self.driver.get("chrome://settings-frame/clearBrowserData")
time_range_selector = "#clear-browser-data-time-period"
# TODO(vabr): Wait until time_range_selector is displayed instead.
time.sleep(2)
set_time_range = (
"var range = document.querySelector('{0}');".format(
time_range_selector) +
"range.value = 4" # 4 == the beginning of time
)
self.driver.execute_script(set_time_range)
all_cboxes_selector = (
"#clear-data-checkboxes [type=\"checkbox\"]")
uncheck_all = (
"var checkboxes = document.querySelectorAll('{0}');".format(
all_cboxes_selector ) +
"for (var i = 0; i < checkboxes.length; ++i) {"
" checkboxes[i].checked = false;"
"}"
)
self.driver.execute_script(uncheck_all)
def _ClearDataForCheckbox(self, selector):
"""Causes the data associated with |selector| to be cleared.
Opens chrome://settings/clearBrowserData, unchecks all checkboxes, then
checks the one described by |selector|, then clears the corresponding
browsing data for the full time range.
Args:
selector: describes the checkbox through which to delete the data.
"""
self._ClearBrowserDataInit()
check_cookies_and_submit = (
"document.querySelector('{0}').checked = true;".format(selector) +
"document.querySelector('#clear-browser-data-commit').click();"
)
self.driver.execute_script(check_cookies_and_submit)
def _EnablePasswordSaving(self):
"""Make sure that password manager is enabled."""
# TODO(melandory): We should check why it's off in a first place.
# TODO(melandory): Investigate, maybe there is no need to enable it that
# often.
self.driver.get("chrome://settings-frame")
script = "document.getElementById('advanced-settings-expander').click();"
self.driver.execute_script(script)
# TODO(vabr): Wait until element is displayed instead.
time.sleep(2)
script = (
"if (!document.querySelector('#password-manager-enabled').checked) {"
" document.querySelector('#password-manager-enabled').click();"
"}")
self.driver.execute_script(script)
time.sleep(2)
  def _OpenNewTab(self):
    """Open a new tab, and loads the internals page in the old tab.

    Returns:
      A handle to the new tab.
    """
    number_old_tabs = len(self.driver.window_handles)
    # There is no straightforward way to open a new tab with chromedriver.
    # One work-around is to go to a website, insert a link that is going
    # to be opened in a new tab, and click on it.
    self.driver.get("about:blank")
    a = self.driver.execute_script(
        "var a = document.createElement('a');"
        "a.target = '_blank';"
        "a.href = 'about:blank';"
        "a.innerHTML = '.';"
        "document.body.appendChild(a);"
        "return a;")
    a.click()
    while number_old_tabs == len(self.driver.window_handles):
      time.sleep(1)  # Wait until the new tab is opened.
    new_tab = self.driver.window_handles[-1]
    # Focus is still in the original tab after the click, so this loads the
    # internals page there; only then switch the driver to the new tab.
    self.driver.get(INTERNALS_PAGE_URL)
    self.driver.switch_to_window(new_tab)
    return new_tab
def _DidStringAppearUntilTimeout(self, strings, timeout):
"""Checks whether some of |strings| appeared in the current page.
Waits for up to |timeout| seconds until at least one of |strings| is
shown in the current page. Updates self.message_count with the current
number of occurrences of the shown string. Assumes that at most
one of |strings| is newly shown.
Args:
strings: A list of strings to look for.
timeout: If any such string does not appear within the first |timeout|
seconds, it is considered a no-show.
Returns:
True if one of |strings| is observed until |timeout|, False otherwise.
"""
log = self.driver.find_element_by_css_selector("#log-entries")
while timeout:
for string in strings:
count = log.text.count(string)
if count > self.message_count[string]:
self.message_count[string] = count
return True
time.sleep(1)
timeout -= 1
return False
def CheckForNewString(self, strings, string_should_show_up, error):
"""Checks that |strings| show up on the internals page as it should.
Switches to the internals page and looks for a new instances of |strings|
being shown up there. It checks that |string_should_show_up| is true if
and only if at leas one string from |strings| shows up, and throws an
Exception if that check fails.
Args:
strings: A list of strings to look for in the internals page.
string_should_show_up: Whether or not at least one string from |strings|
is expected to be shown.
error: Error message for the exception.
Raises:
Exception: (See above.)
"""
self.driver.switch_to_window(self.internals_window)
try:
if (self._DidStringAppearUntilTimeout(strings, 15) !=
string_should_show_up):
raise Exception(error)
finally:
self.driver.switch_to_window(self.website_window)
  def DeleteCookies(self):
    """Deletes cookies via the settings page.

    Delegates to _ClearDataForCheckbox with the cookies checkbox selector.
    """
    self._ClearDataForCheckbox("#delete-cookies-checkbox")
def RunTestsOnSites(self, test_case_name):
"""Runs the specified test on the known websites.
Also saves the test results in the environment. Note that test types
differ in their requirements on whether the save password prompt
should be displayed. Make sure that such requirements are consistent
with the enable_automatic_password_saving argument passed to |self|
on construction.
Args:
test_case_name: A test name which is a method of WebsiteTest.
"""
self.DeleteCookies()
self._ClearDataForCheckbox("#delete-passwords-checkbox")
self._EnablePasswordSaving()
for websitetest in self.websitetests:
successful = True
error = ""
try:
# TODO(melandory): Implement a decorator for WesiteTest methods
# which allows to mark them as test cases. And then add a check if
# test_case_name is a valid test case.
getattr(websitetest, test_case_name)()
except Exception as e:
successful = False
# httplib.CannotSendRequest doesn't define a message,
# so type(e).__name__ will at least log exception name as a reason.
# TODO(melandory): logging.exception(e) produces meaningful result
# for httplib.CannotSendRequest, so we can try to propagate information
# that reason is an exception to the logging phase.
error = "Exception %s %s" % (type(e).__name__, e)
self.tests_results.append(
(websitetest.name, test_case_name, successful, error))
  def Quit(self):
    """Shuts down the driver, closing all browser windows it controls."""
    self.driver.quit()
package v0alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
common "k8s.io/kube-openapi/pkg/common"
spec "k8s.io/kube-openapi/pkg/validation/spec"
)
// GetOpenAPIDefinitions returns the OpenAPI definition for every type in this
// package, keyed by each type's OpenAPI model name. The ref callback is
// forwarded to the per-type schema builders to resolve cross-type references.
func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
	return map[string]common.OpenAPIDefinition{
		AlertRule{}.OpenAPIModelName():                                schema_pkg_apis_alerting_v0alpha1_AlertRule(ref),
		AlertRuleExpression{}.OpenAPIModelName():                      schema_pkg_apis_alerting_v0alpha1_AlertRuleExpression(ref),
		AlertRuleIntervalTrigger{}.OpenAPIModelName():                 schema_pkg_apis_alerting_v0alpha1_AlertRuleIntervalTrigger(ref),
		AlertRuleList{}.OpenAPIModelName():                            schema_pkg_apis_alerting_v0alpha1_AlertRuleList(ref),
		AlertRuleRelativeTimeRange{}.OpenAPIModelName():               schema_pkg_apis_alerting_v0alpha1_AlertRuleRelativeTimeRange(ref),
		AlertRuleSpec{}.OpenAPIModelName():                            schema_pkg_apis_alerting_v0alpha1_AlertRuleSpec(ref),
		AlertRuleStatus{}.OpenAPIModelName():                          schema_pkg_apis_alerting_v0alpha1_AlertRuleStatus(ref),
		AlertRuleV0alpha1SpecNotificationSettings{}.OpenAPIModelName(): schema_pkg_apis_alerting_v0alpha1_AlertRuleV0alpha1SpecNotificationSettings(ref),
		AlertRuleV0alpha1SpecPanelRef{}.OpenAPIModelName():            schema_pkg_apis_alerting_v0alpha1_AlertRuleV0alpha1SpecPanelRef(ref),
		AlertRulestatusOperatorState{}.OpenAPIModelName():             schema_pkg_apis_alerting_v0alpha1_AlertRulestatusOperatorState(ref),
		RecordingRule{}.OpenAPIModelName():                            schema_pkg_apis_alerting_v0alpha1_RecordingRule(ref),
		RecordingRuleExpression{}.OpenAPIModelName():                  schema_pkg_apis_alerting_v0alpha1_RecordingRuleExpression(ref),
		RecordingRuleIntervalTrigger{}.OpenAPIModelName():             schema_pkg_apis_alerting_v0alpha1_RecordingRuleIntervalTrigger(ref),
		RecordingRuleList{}.OpenAPIModelName():                        schema_pkg_apis_alerting_v0alpha1_RecordingRuleList(ref),
		RecordingRuleRelativeTimeRange{}.OpenAPIModelName():           schema_pkg_apis_alerting_v0alpha1_RecordingRuleRelativeTimeRange(ref),
		RecordingRuleSpec{}.OpenAPIModelName():                        schema_pkg_apis_alerting_v0alpha1_RecordingRuleSpec(ref),
		RecordingRuleStatus{}.OpenAPIModelName():                      schema_pkg_apis_alerting_v0alpha1_RecordingRuleStatus(ref),
		RecordingRulestatusOperatorState{}.OpenAPIModelName():         schema_pkg_apis_alerting_v0alpha1_RecordingRulestatusOperatorState(ref),
	}
}
// schema_pkg_apis_alerting_v0alpha1_AlertRule returns the OpenAPI definition
// for the AlertRule kind (standard TypeMeta/ObjectMeta plus spec and status).
func schema_pkg_apis_alerting_v0alpha1_AlertRule(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref(metav1.ObjectMeta{}.OpenAPIModelName()),
						},
					},
					"spec": {
						SchemaProps: spec.SchemaProps{
							Description: "Spec is the spec of the AlertRule",
							Default:     map[string]interface{}{},
							Ref:         ref(AlertRuleSpec{}.OpenAPIModelName()),
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref(AlertRuleStatus{}.OpenAPIModelName()),
						},
					},
				},
				Required: []string{"metadata", "spec", "status"},
			},
		},
		Dependencies: []string{
			AlertRuleSpec{}.OpenAPIModelName(), AlertRuleStatus{}.OpenAPIModelName(), metav1.ObjectMeta{}.OpenAPIModelName()},
	}
}
// schema_pkg_apis_alerting_v0alpha1_AlertRuleExpression returns the OpenAPI
// definition for a single expression node of an AlertRule; only "model" is
// required.
func schema_pkg_apis_alerting_v0alpha1_AlertRuleExpression(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"queryType": {
						SchemaProps: spec.SchemaProps{
							Description: "The type of query if this is a query expression",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"relativeTimeRange": {
						SchemaProps: spec.SchemaProps{
							Ref: ref(AlertRuleRelativeTimeRange{}.OpenAPIModelName()),
						},
					},
					"datasourceUID": {
						SchemaProps: spec.SchemaProps{
							Description: "The UID of the datasource to run this expression against. If omitted, the expression will be run against the `__expr__` datasource",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"model": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"object"},
							Format: "",
						},
					},
					"source": {
						SchemaProps: spec.SchemaProps{
							Description: "Used to mark the expression to be used as the final source for the rule evaluation Only one expression in a rule can be marked as the source For AlertRules, this is the expression that will be evaluated against the alerting condition For RecordingRules, this is the expression that will be recorded",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
				},
				Required: []string{"model"},
			},
		},
		Dependencies: []string{
			AlertRuleRelativeTimeRange{}.OpenAPIModelName()},
	}
}
// schema_pkg_apis_alerting_v0alpha1_AlertRuleIntervalTrigger returns the
// OpenAPI definition for an AlertRule's interval trigger. ref is accepted for
// signature uniformity with the other builders but is unused here (no
// cross-type references).
func schema_pkg_apis_alerting_v0alpha1_AlertRuleIntervalTrigger(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"interval": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
				},
				Required: []string{"interval"},
			},
		},
	}
}
// schema_pkg_apis_alerting_v0alpha1_AlertRuleList returns the OpenAPI
// definition for the list wrapper of AlertRule objects.
func schema_pkg_apis_alerting_v0alpha1_AlertRuleList(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref(metav1.ListMeta{}.OpenAPIModelName()),
						},
					},
					"items": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref(AlertRule{}.OpenAPIModelName()),
									},
								},
							},
						},
					},
				},
				Required: []string{"metadata", "items"},
			},
		},
		Dependencies: []string{
			AlertRule{}.OpenAPIModelName(), metav1.ListMeta{}.OpenAPIModelName()},
	}
}
// schema_pkg_apis_alerting_v0alpha1_AlertRuleRelativeTimeRange returns the
// OpenAPI definition for a relative time range (required "from"/"to" string
// fields). ref is unused here (no cross-type references).
func schema_pkg_apis_alerting_v0alpha1_AlertRuleRelativeTimeRange(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"from": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"to": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
				},
				Required: []string{"from", "to"},
			},
		},
	}
}
// schema_pkg_apis_alerting_v0alpha1_AlertRuleSpec returns the OpenAPI
// definition for the AlertRule spec: title, trigger, expressions and the
// alerting-specific fields (noDataState, execErrState, notification
// settings, optional panel reference).
func schema_pkg_apis_alerting_v0alpha1_AlertRuleSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"title": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"paused": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"boolean"},
							Format: "",
						},
					},
					"trigger": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref(AlertRuleIntervalTrigger{}.OpenAPIModelName()),
						},
					},
					"labels": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"annotations": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"for": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"keepFiringFor": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"missingSeriesEvalsToResolve": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"integer"},
							Format: "int64",
						},
					},
					"noDataState": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"execErrState": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"notificationSettings": {
						SchemaProps: spec.SchemaProps{
							Ref: ref(AlertRuleV0alpha1SpecNotificationSettings{}.OpenAPIModelName()),
						},
					},
					"expressions": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref(AlertRuleExpression{}.OpenAPIModelName()),
									},
								},
							},
						},
					},
					"panelRef": {
						SchemaProps: spec.SchemaProps{
							Ref: ref(AlertRuleV0alpha1SpecPanelRef{}.OpenAPIModelName()),
						},
					},
				},
				Required: []string{"title", "trigger", "noDataState", "execErrState", "expressions"},
			},
		},
		Dependencies: []string{
			AlertRuleExpression{}.OpenAPIModelName(), AlertRuleIntervalTrigger{}.OpenAPIModelName(), AlertRuleV0alpha1SpecNotificationSettings{}.OpenAPIModelName(), AlertRuleV0alpha1SpecPanelRef{}.OpenAPIModelName()},
	}
}
// schema_pkg_apis_alerting_v0alpha1_AlertRuleStatus returns the OpenAPI
// definition for the AlertRule status (per-operator states plus a reserved
// additionalFields map).
func schema_pkg_apis_alerting_v0alpha1_AlertRuleStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"operatorStates": {
						SchemaProps: spec.SchemaProps{
							Description: "operatorStates is a map of operator ID to operator state evaluations. Any operator which consumes this kind SHOULD add its state evaluation information to this field.",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref(AlertRulestatusOperatorState{}.OpenAPIModelName()),
									},
								},
							},
						},
					},
					"additionalFields": {
						SchemaProps: spec.SchemaProps{
							Description: "additionalFields is reserved for future use",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Type:   []string{"object"},
										Format: "",
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			AlertRulestatusOperatorState{}.OpenAPIModelName()},
	}
}
// schema_pkg_apis_alerting_v0alpha1_AlertRuleV0alpha1SpecNotificationSettings
// returns the OpenAPI definition for an AlertRule's notification settings;
// only "receiver" is required. ref is unused here (no cross-type references).
func schema_pkg_apis_alerting_v0alpha1_AlertRuleV0alpha1SpecNotificationSettings(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"receiver": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"groupBy": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"groupWait": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"groupInterval": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"repeatInterval": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"muteTimeIntervals": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"activeTimeIntervals": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
				},
				Required: []string{"receiver"},
			},
		},
	}
}
// schema_pkg_apis_alerting_v0alpha1_AlertRuleV0alpha1SpecPanelRef returns the
// OpenAPI definition linking an AlertRule to a dashboard panel (both fields
// required). ref is unused here (no cross-type references).
func schema_pkg_apis_alerting_v0alpha1_AlertRuleV0alpha1SpecPanelRef(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"dashboardUID": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"panelID": {
						SchemaProps: spec.SchemaProps{
							Default: 0,
							Type:    []string{"integer"},
							Format:  "int64",
						},
					},
				},
				Required: []string{"dashboardUID", "panelID"},
			},
		},
	}
}
// schema_pkg_apis_alerting_v0alpha1_AlertRulestatusOperatorState returns the
// OpenAPI definition for one operator's state evaluation of an AlertRule.
// ref is unused here (no cross-type references).
func schema_pkg_apis_alerting_v0alpha1_AlertRulestatusOperatorState(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"lastEvaluation": {
						SchemaProps: spec.SchemaProps{
							Description: "lastEvaluation is the ResourceVersion last evaluated",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"state": {
						SchemaProps: spec.SchemaProps{
							Description: "state describes the state of the lastEvaluation. It is limited to three possible states for machine evaluation.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"descriptiveState": {
						SchemaProps: spec.SchemaProps{
							Description: "descriptiveState is an optional more descriptive state field which has no requirements on format",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"details": {
						SchemaProps: spec.SchemaProps{
							Description: "details contains any extra information that is operator-specific",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Type:   []string{"object"},
										Format: "",
									},
								},
							},
						},
					},
				},
				Required: []string{"lastEvaluation", "state"},
			},
		},
	}
}
// schema_pkg_apis_alerting_v0alpha1_RecordingRule returns the OpenAPI
// definition for the RecordingRule kind (standard TypeMeta/ObjectMeta plus
// spec and status).
func schema_pkg_apis_alerting_v0alpha1_RecordingRule(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref(metav1.ObjectMeta{}.OpenAPIModelName()),
						},
					},
					"spec": {
						SchemaProps: spec.SchemaProps{
							Description: "Spec is the spec of the RecordingRule",
							Default:     map[string]interface{}{},
							Ref:         ref(RecordingRuleSpec{}.OpenAPIModelName()),
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref(RecordingRuleStatus{}.OpenAPIModelName()),
						},
					},
				},
				Required: []string{"metadata", "spec", "status"},
			},
		},
		Dependencies: []string{
			RecordingRuleSpec{}.OpenAPIModelName(), RecordingRuleStatus{}.OpenAPIModelName(), metav1.ObjectMeta{}.OpenAPIModelName()},
	}
}
// schema_pkg_apis_alerting_v0alpha1_RecordingRuleExpression returns the
// OpenAPI definition for a single expression node of a RecordingRule; only
// "model" is required.
func schema_pkg_apis_alerting_v0alpha1_RecordingRuleExpression(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"queryType": {
						SchemaProps: spec.SchemaProps{
							Description: "The type of query if this is a query expression",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"relativeTimeRange": {
						SchemaProps: spec.SchemaProps{
							Ref: ref(RecordingRuleRelativeTimeRange{}.OpenAPIModelName()),
						},
					},
					"datasourceUID": {
						SchemaProps: spec.SchemaProps{
							Description: "The UID of the datasource to run this expression against. If omitted, the expression will be run against the `__expr__` datasource",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"model": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"object"},
							Format: "",
						},
					},
					"source": {
						SchemaProps: spec.SchemaProps{
							Description: "Used to mark the expression to be used as the final source for the rule evaluation Only one expression in a rule can be marked as the source For AlertRules, this is the expression that will be evaluated against the alerting condition For RecordingRules, this is the expression that will be recorded",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
				},
				Required: []string{"model"},
			},
		},
		Dependencies: []string{
			RecordingRuleRelativeTimeRange{}.OpenAPIModelName()},
	}
}
// schema_pkg_apis_alerting_v0alpha1_RecordingRuleIntervalTrigger returns the
// OpenAPI definition for a RecordingRule's interval trigger. ref is unused
// here (no cross-type references).
func schema_pkg_apis_alerting_v0alpha1_RecordingRuleIntervalTrigger(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"interval": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
				},
				Required: []string{"interval"},
			},
		},
	}
}
// schema_pkg_apis_alerting_v0alpha1_RecordingRuleList returns the OpenAPI
// definition for the list wrapper of RecordingRule objects.
func schema_pkg_apis_alerting_v0alpha1_RecordingRuleList(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref(metav1.ListMeta{}.OpenAPIModelName()),
						},
					},
					"items": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref(RecordingRule{}.OpenAPIModelName()),
									},
								},
							},
						},
					},
				},
				Required: []string{"metadata", "items"},
			},
		},
		Dependencies: []string{
			RecordingRule{}.OpenAPIModelName(), metav1.ListMeta{}.OpenAPIModelName()},
	}
}
// schema_pkg_apis_alerting_v0alpha1_RecordingRuleRelativeTimeRange returns
// the OpenAPI definition for a relative time range (required "from"/"to"
// string fields). ref is unused here (no cross-type references).
func schema_pkg_apis_alerting_v0alpha1_RecordingRuleRelativeTimeRange(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"from": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"to": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
				},
				Required: []string{"from", "to"},
			},
		},
	}
}
// schema_pkg_apis_alerting_v0alpha1_RecordingRuleSpec returns the OpenAPI
// definition for the RecordingRule spec: title, trigger, expressions, the
// recorded metric name and the target datasource UID.
func schema_pkg_apis_alerting_v0alpha1_RecordingRuleSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"title": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"paused": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"boolean"},
							Format: "",
						},
					},
					"trigger": {
						SchemaProps: spec.SchemaProps{
							Default: map[string]interface{}{},
							Ref:     ref(RecordingRuleIntervalTrigger{}.OpenAPIModelName()),
						},
					},
					"labels": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"metric": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"expressions": {
						SchemaProps: spec.SchemaProps{
							Type: []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref(RecordingRuleExpression{}.OpenAPIModelName()),
									},
								},
							},
						},
					},
					"targetDatasourceUID": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
				},
				Required: []string{"title", "trigger", "metric", "expressions", "targetDatasourceUID"},
			},
		},
		Dependencies: []string{
			RecordingRuleExpression{}.OpenAPIModelName(), RecordingRuleIntervalTrigger{}.OpenAPIModelName()},
	}
}
// schema_pkg_apis_alerting_v0alpha1_RecordingRuleStatus returns the OpenAPI
// definition for the RecordingRule status (per-operator states plus a
// reserved additionalFields map).
func schema_pkg_apis_alerting_v0alpha1_RecordingRuleStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"operatorStates": {
						SchemaProps: spec.SchemaProps{
							Description: "operatorStates is a map of operator ID to operator state evaluations. Any operator which consumes this kind SHOULD add its state evaluation information to this field.",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref(RecordingRulestatusOperatorState{}.OpenAPIModelName()),
									},
								},
							},
						},
					},
					"additionalFields": {
						SchemaProps: spec.SchemaProps{
							Description: "additionalFields is reserved for future use",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Type:   []string{"object"},
										Format: "",
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			RecordingRulestatusOperatorState{}.OpenAPIModelName()},
	}
}
// schema_pkg_apis_alerting_v0alpha1_RecordingRulestatusOperatorState returns
// the OpenAPI definition for one operator's state evaluation of a
// RecordingRule. ref is unused here (no cross-type references).
func schema_pkg_apis_alerting_v0alpha1_RecordingRulestatusOperatorState(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"lastEvaluation": {
						SchemaProps: spec.SchemaProps{
							Description: "lastEvaluation is the ResourceVersion last evaluated",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"state": {
						SchemaProps: spec.SchemaProps{
							Description: "state describes the state of the lastEvaluation. It is limited to three possible states for machine evaluation.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"descriptiveState": {
						SchemaProps: spec.SchemaProps{
							Description: "descriptiveState is an optional more descriptive state field which has no requirements on format",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"details": {
						SchemaProps: spec.SchemaProps{
							Description: "details contains any extra information that is operator-specific",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Type:   []string{"object"},
										Format: "",
									},
								},
							},
						},
					},
				},
				Required: []string{"lastEvaluation", "state"},
			},
		},
	}
}
import os
from django.test import RequestFactory
from onadata.apps.api.tests.viewsets.test_abstract_viewset import \
TestAbstractViewSet
from onadata.apps.api.viewsets.osm_viewset import OsmViewSet
from onadata.apps.api.viewsets.xform_viewset import XFormViewSet
from onadata.apps.logger.models import Attachment
class TestOSM(TestAbstractViewSet):
    """Tests for the OSM endpoints: per-instance .osm retrieval, per-form
    and global .osm listings, and the ``instances_with_osm`` form filter."""

    def setUp(self):
        # BUG FIX: super(self.__class__, self) recurses infinitely if this
        # class is ever subclassed; name the class explicitly instead.
        super(TestOSM, self).setUp()
        self._login_user_and_profile()
        self.factory = RequestFactory()
        self.extra = {
            'HTTP_AUTHORIZATION': 'Token %s' % self.user.auth_token}

    def test_data_retrieve_instance_osm_format(self):
        """Submit OSM media files and verify the osm endpoints serve the
        combined OSM document, and that the form filter reflects them."""
        filenames = [
            'OSMWay234134797.osm',
            'OSMWay34298972.osm',
        ]
        osm_fixtures_dir = os.path.realpath(os.path.join(
            os.path.dirname(__file__), '..', 'fixtures', 'osm'))
        paths = [
            os.path.join(osm_fixtures_dir, filename)
            for filename in filenames]
        xlsform_path = os.path.join(osm_fixtures_dir, 'osm.xlsx')
        combined_osm_path = os.path.join(osm_fixtures_dir, 'combined.osm')
        self._publish_xls_form_to_project(xlsform_path=xlsform_path)

        # before any submission, the instances_with_osm filter yields nothing
        request = self.factory.get('/', {'instances_with_osm': 'True'},
                                   **self.extra)
        view = XFormViewSet.as_view({'get': 'list'})
        response = view(request, format='json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data, [])

        submission_path = os.path.join(osm_fixtures_dir, 'instance_a.xml')
        files = [open(path) for path in paths]
        try:
            count = Attachment.objects.filter(extension='osm').count()
            self._make_submission(submission_path, media_file=files)
        finally:
            # BUG FIX: close the fixture handles instead of leaking them
            for f in files:
                f.close()
        self.assertTrue(
            Attachment.objects.filter(extension='osm').count() > count)
        formid = self.xform.pk
        dataid = self.xform.instances.latest('date_created').pk
        request = self.factory.get('/')

        # look at the data/[pk]/[dataid].osm endpoint
        view = OsmViewSet.as_view({'get': 'retrieve'})
        response = view(request, pk=formid, dataid=dataid, format='osm')
        self.assertEqual(response.status_code, 200)
        with open(combined_osm_path) as f:
            osm = f.read()
            response.render()
            self.assertMultiLineEqual(response.content, osm)

        # look at the data/[pk].osm endpoint
        view = OsmViewSet.as_view({'get': 'list'})
        response = view(request, pk=formid, format='osm')
        self.assertEqual(response.status_code, 200)
        response.render()
        self.assertMultiLineEqual(response.content, osm)

        # the bare data.osm endpoint redirects to the json listing
        view = OsmViewSet.as_view({'get': 'list'})
        response = view(request, format='osm')
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response['Location'],
                         'http://testserver/api/v1/osm.json')
        response = view(request, format='json')
        self.assertEqual(response.status_code, 200)
        data = [{
            'url': 'http://testserver/api/v1/osm/{}'.format(self.xform.pk),
            'title': self.xform.title,
            'id_string': self.xform.id_string, 'user': self.xform.user.username
        }]
        self.assertEqual(response.data, data)

        # after submission, the instances_with_osm filter is non-empty
        request = self.factory.get('/', {'instances_with_osm': 'True'},
                                   **self.extra)
        view = XFormViewSet.as_view({'get': 'list'})
        response = view(request, format='json')
        self.assertEqual(response.status_code, 200)
        self.assertNotEqual(response.data, [])
{
"title": "Add routing",
"type": "local",
"answerSrc": "../11-details-page/src",
"answerRootDir": "../11-details-page/",
"openFiles": ["src/main.ts", "src/app/app.ts"]
} | json | github | https://github.com/angular/angular | adev/src/content/tutorials/first-app/steps/10-routing/config.json |
"""
Enhanced Metafile backend. See http://pyemf.sourceforge.net for the EMF
driver library.
"""
from __future__ import division
try:
import pyemf
except ImportError:
raise ImportError('You must first install pyemf from http://pyemf.sf.net')
import os,sys,math,re
from matplotlib import verbose, __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.figure import Figure
from matplotlib.transforms import Bbox
from matplotlib.font_manager import findfont, FontProperties
from matplotlib.ft2font import FT2Font, KERNING_UNFITTED, KERNING_DEFAULT, KERNING_UNSCALED
# Font handling stuff snarfed from backend_ps, but only using TTF fonts
_fontd = {}          # cache: hash(FontProperties) -> FT2Font (see _get_font_ttf)

# Debug print stuff
debugHandle = False  # trace GDI handle creation and reuse
debugPrint = False   # trace drawing primitives
debugText = False    # trace text measurement and rendering
# Hashable font properties class. In EMF, angle of rotation is a part
# of the font properties, so a handle to a new font must be obtained
# if the rotation changes.
class EMFFontProperties(FontProperties):
    """Font properties extended with a rotation angle.

    In EMF the rotation is baked into the font definition, so the angle
    must participate in hashing to obtain a distinct font handle per
    (properties, angle) pair.
    """

    def __init__(self, other, angle):
        # Copy every base property from *other*, then record the angle.
        FontProperties.__init__(self, other.get_family(),
                                other.get_style(),
                                other.get_variant(),
                                other.get_weight(),
                                other.get_stretch(),
                                other.get_size())
        self.__angle = angle

    def __hash__(self):
        return hash((FontProperties.__hash__(self), self.__angle))

    def __str__(self):
        return str((FontProperties.__str__(self), self.__angle))

    def set_angle(self, angle):
        self.__angle = angle
# Hashable pen (line style) properties.
class EMFPen:
    """Hashable pen (line style) properties: dash style, width and RGB color.

    Hashability lets the renderer cache and reuse EMF pen handles rather
    than creating a new one for every drawing call.
    """
    def __init__(self,emf,gc):
        self.emf=emf
        self.gc=gc
        # colors arrive on the unit interval; EMF wants 8-bit channels
        r,g,b=gc.get_rgb()
        self.r=int(r*255)
        self.g=int(g*255)
        self.b=int(b*255)
        self.width=int(gc.get_linewidth())
        self.style=0
        self.set_linestyle()
        if debugHandle: print "EMFPen: style=%d width=%d rgb=(%d,%d,%d)" % (self.style,self.width,self.r,self.g,self.b)
    def __hash__(self):
        return hash((self.style,self.width,self.r,self.g,self.b))
    def set_linestyle(self):
        # Hack. Negative width lines will not get drawn.
        if self.width<0:
            self.style=pyemf.PS_NULL
        else:
            # map matplotlib line styles to pyemf pen styles; anything
            # unrecognized falls back to a solid pen
            styles={'solid':pyemf.PS_SOLID, 'dashed':pyemf.PS_DASH,
                    'dashdot':pyemf.PS_DASHDOT, 'dotted':pyemf.PS_DOT}
            #style=styles.get(self.gc.get_linestyle('solid'))
            style=self.gc.get_linestyle('solid')
            if debugHandle: print "EMFPen: style=%d" % style
            if style in styles:
                self.style=styles[style]
            else:
                self.style=pyemf.PS_SOLID
    def get_handle(self):
        # Create the (uncached) EMF pen handle; caching is the renderer's job.
        handle=self.emf.CreatePen(self.style,self.width,(self.r,self.g,self.b))
        return handle
# Hashable brush (fill style) properties.
class EMFBrush:
def __init__(self,emf,rgb):
self.emf=emf
r,g,b=rgb
self.r=int(r*255)
self.g=int(g*255)
self.b=int(b*255)
if debugHandle: print "EMFBrush: rgb=(%d,%d,%d)" % (self.r,self.g,self.b)
def __hash__(self):
return hash((self.r,self.g,self.b))
def get_handle(self):
handle=self.emf.CreateSolidBrush((self.r,self.g,self.b))
return handle
class RendererEMF(RendererBase):
    """
    The renderer handles drawing/rendering operations through a
    pyemf.EMF instance.

    All drawing primitives flip y via ``self.height - y`` because EMF's
    origin differs from matplotlib's (see ``flipy``).
    """
    def __init__(self, outfile, width, height, dpi):
        "Initialize the renderer with a gd image instance"
        # width/height are in inches; pixel dimensions are derived below
        self.outfile = outfile
        # a map from get_color args to colors
        self._cached = {}
        # dict of hashed properties to already created font handles
        # (also reused as the cache for pen and brush handles)
        self._fontHandle = {}
        self.lastHandle = {'font':-1, 'pen':-1, 'brush':-1}
        self.emf=pyemf.EMF(width,height,dpi,'in')
        self.width=int(width*dpi)
        self.height=int(height*dpi)
        self.dpi = dpi
        self.pointstodpi = dpi/72.0
        self.hackPointsForMathExponent = 2.0
        # set background transparent for text
        self.emf.SetBkMode(pyemf.TRANSPARENT)
        # set baseline for text to be bottom left corner
        self.emf.SetTextAlign( pyemf.TA_BOTTOM|pyemf.TA_LEFT)
        if debugPrint: print "RendererEMF: (%f,%f) %s dpi=%f" % (self.width,self.height,outfile,dpi)
    def save(self):
        # Flush the accumulated EMF records to the output file.
        self.emf.save(self.outfile)
    def draw_arc(self, gcEdge, rgbFace, x, y, width, height, angle1, angle2, rotation):
        """
        Draw an arc using GraphicsContext instance gcEdge, centered at x,y,
        with width and height and angles from 0.0 to 360.0
        0 degrees is at 3-o'clock
        positive angles are anti-clockwise
        If the color rgbFace is not None, fill the arc with it.
        """
        if debugPrint: print "draw_arc: (%f,%f) angles=(%f,%f) w,h=(%f,%f)" % (x,y,angle1,angle2,width,height)
        pen=self.select_pen(gcEdge)
        brush=self.select_brush(rgbFace)
        # This algorithm doesn't work very well on small circles
        # because of rounding error. This shows up most obviously on
        # legends where the circles are small anyway, and it is
        # compounded by the fact that it puts several circles right
        # next to each other so the differences are obvious.
        hw=width/2
        hh=height/2
        # NOTE(review): x1/y1 are computed but never used below
        x1=int(x-width/2)
        y1=int(y-height/2)
        # filled arcs use Pie (wedge), outlines use Arc
        if brush:
            self.emf.Pie(int(x-hw),int(self.height-(y-hh)),int(x+hw),int(self.height-(y+hh)),int(x+math.cos(angle1*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle1*math.pi/180.0)*hh)),int(x+math.cos(angle2*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle2*math.pi/180.0)*hh)))
        else:
            self.emf.Arc(int(x-hw),int(self.height-(y-hh)),int(x+hw),int(self.height-(y+hh)),int(x+math.cos(angle1*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle1*math.pi/180.0)*hh)),int(x+math.cos(angle2*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle2*math.pi/180.0)*hh)))
    def draw_image(self, x, y, im, bbox):
        """
        Draw the Image instance into the current axes; x is the
        distance in pixels from the left hand side of the canvas. y is
        the distance from the origin. That is, if origin is upper, y
        is the distance from top. If origin is lower, y is the
        distance from bottom
        bbox is a matplotlib.transforms.BBox instance for clipping, or
        None
        """
        # pyemf2 currently doesn't support bitmaps.
        pass
    def draw_line(self, gc, x1, y1, x2, y2):
        """
        Draw a single line from x1,y1 to x2,y2
        """
        if debugPrint: print "draw_line: (%f,%f) - (%f,%f)" % (x1,y1,x2,y2)
        # a PS_NULL pen produces no output, so skip the record entirely
        if self.select_pen(gc):
            self.emf.Polyline([(long(x1),long(self.height-y1)),(long(x2),long(self.height-y2))])
        else:
            if debugPrint: print "draw_line: optimizing away (%f,%f) - (%f,%f)" % (x1,y1,x2,y2)
    def draw_lines(self, gc, x, y):
        """
        x and y are equal length arrays, draw lines connecting each
        point in x, y
        """
        if debugPrint: print "draw_lines: %d points" % len(str(x))
        # optimize away anything that won't actually be drawn. Edge
        # style must not be PS_NULL for it to appear on screen.
        if self.select_pen(gc):
            points = [(long(x[i]), long(self.height-y[i])) for i in range(len(x))]
            self.emf.Polyline(points)
    def draw_point(self, gc, x, y):
        """
        Draw a single point at x,y
        Where 'point' is a device-unit point (or pixel), not a matplotlib point
        """
        if debugPrint: print "draw_point: (%f,%f)" % (x,y)
        # don't cache this pen: only its RGB is needed, not a GDI handle
        pen=EMFPen(self.emf,gc)
        self.emf.SetPixel(long(x),long(self.height-y),(pen.r,pen.g,pen.b))
    def draw_polygon(self, gcEdge, rgbFace, points):
        """
        Draw a polygon using the GraphicsContext instance gc.
        points is a len vertices tuple, each element
        giving the x,y coords a vertex
        If the color rgbFace is not None, fill the polygon with it
        """
        if debugPrint: print "draw_polygon: %d points" % len(points)
        # optimize away anything that won't actually draw. Either a
        # face color or edge style must be defined
        pen=self.select_pen(gcEdge)
        brush=self.select_brush(rgbFace)
        if pen or brush:
            points = [(long(x), long(self.height-y)) for x,y in points]
            self.emf.Polygon(points)
        else:
            points = [(long(x), long(self.height-y)) for x,y in points]
            if debugPrint: print "draw_polygon: optimizing away polygon: %d points = %s" % (len(points),str(points))
    def draw_rectangle(self, gcEdge, rgbFace, x, y, width, height):
        """
        Draw a non-filled rectangle using the GraphicsContext instance gcEdge,
        with lower left at x,y with width and height.
        If rgbFace is not None, fill the rectangle with it.
        """
        if debugPrint: print "draw_rectangle: (%f,%f) w=%f,h=%f" % (x,y,width,height)
        # optimize away anything that won't actually draw. Either a
        # face color or edge style must be defined
        pen=self.select_pen(gcEdge)
        brush=self.select_brush(rgbFace)
        if pen or brush:
            self.emf.Rectangle(int(x),int(self.height-y),int(x)+int(width),int(self.height-y)-int(height))
        else:
            if debugPrint: print "draw_rectangle: optimizing away (%f,%f) w=%f,h=%f" % (x,y,width,height)
    def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
        """
        Draw the text.Text instance s at x,y (display coords) with font
        properties instance prop at angle in degrees, using GraphicsContext gc
        **backend implementers note**
        When you are trying to determine if you have gotten your bounding box
        right (which is what enables the text layout/alignment to work
        properly), it helps to change the line in text.py
        if 0: bbox_artist(self, renderer)
        to if 1, and then the actual bounding box will be blotted along with
        your text.
        """
        if debugText: print "draw_text: (%f,%f) %d degrees: '%s'" % (x,y,angle,s)
        # dispatch: TeX-ish strings get the limited math renderer below
        if ismath:
            self.draw_math_text(gc,x,y,s,prop,angle)
        else:
            self.draw_plain_text(gc,x,y,s,prop,angle)
    def draw_plain_text(self, gc, x, y, s, prop, angle):
        """
        Draw a text string verbatim; no conversion is done.
        """
        if debugText: print "draw_plain_text: (%f,%f) %d degrees: '%s'" % (x,y,angle,s)
        if debugText: print " properties:\n"+str(prop)
        self.select_font(prop,angle)
        # haxor follows! The subtleties of text placement in EMF
        # still elude me a bit. It always seems to be too high on the
        # page, about 10 pixels too high on a 300dpi resolution image.
        # So, I'm adding this hack for the moment:
        hackoffsetper300dpi=10
        xhack=math.sin(angle*math.pi/180.0)*hackoffsetper300dpi*self.dpi/300.0
        yhack=math.cos(angle*math.pi/180.0)*hackoffsetper300dpi*self.dpi/300.0
        self.emf.TextOut(long(x+xhack),long(y+yhack),s)
    def draw_math_text(self, gc, x, y, s, prop, angle):
        """
        Draw a subset of TeX, currently handles exponents only. Since
        pyemf doesn't have any raster functionality yet, the
        texmanager.get_rgba won't help.
        """
        if debugText: print "draw_math_text: (%f,%f) %d degrees: '%s'" % (x,y,angle,s)
        s = s[1:-1]  # strip the $ from front and back
        # only the pattern "10^{...}" is understood; anything else is drawn raw
        match=re.match("10\^\{(.+)\}",s)
        if match:
            exp=match.group(1)
            if debugText: print " exponent=%s" % exp
            font = self._get_font_ttf(prop)
            font.set_text("10", 0.0)
            w, h = font.get_width_height()
            w /= 64.0  # convert from subpixels
            h /= 64.0
            self.draw_plain_text(gc,x,y,"10",prop,angle)
            # exponent is drawn smaller, raised by half the base height
            propexp=prop.copy()
            propexp.set_size(prop.get_size_in_points()*.8)
            self.draw_plain_text(gc,x+w+self.points_to_pixels(self.hackPointsForMathExponent),y-(h/2),exp,propexp,angle)
        else:
            # if it isn't an exponent, then render the raw TeX string.
            self.draw_plain_text(gc,x,y,s,prop,angle)
    def get_math_text_width_height(self, s, prop):
        """
        get the width and height in display coords of the string s
        with FontPropertry prop, ripped right out of backend_ps. This
        method must be kept in sync with draw_math_text.
        """
        if debugText: print "get_math_text_width_height:"
        s = s[1:-1]  # strip the $ from front and back
        match=re.match("10\^\{(.+)\}",s)
        if match:
            exp=match.group(1)
            if debugText: print " exponent=%s" % exp
            font = self._get_font_ttf(prop)
            font.set_text("10", 0.0)
            w1, h1 = font.get_width_height()
            propexp=prop.copy()
            propexp.set_size(prop.get_size_in_points()*.8)
            fontexp=self._get_font_ttf(propexp)
            fontexp.set_text(exp, 0.0)
            w2, h2 = fontexp.get_width_height()
            # mirror the layout used by draw_math_text above
            w=w1+w2
            h=h1+(h2/2)
            w /= 64.0  # convert from subpixels
            h /= 64.0
            w+=self.points_to_pixels(self.hackPointsForMathExponent)
            if debugText: print " math string=%s w,h=(%f,%f)" % (s, w, h)
        else:
            w,h=self.get_text_width_height(s,prop,False)
        return w, h
    def flipy(self):
        """return true if y small numbers are top for renderer
        Is used for drawing text (text.py) and images (image.py) only
        """
        return True
    def get_canvas_width_height(self):
        """
        return the canvas width and height in display coords
        """
        return self.width,self.height
    def set_handle(self,type,handle):
        """
        Update the EMF file with the current handle, but only if it
        isn't the same as the last one. Don't want to flood the file
        with duplicate info.
        """
        if self.lastHandle[type] != handle:
            self.emf.SelectObject(handle)
            self.lastHandle[type]=handle
    def get_font_handle(self, prop, angle):
        """
        Look up the handle for the font based on the dict of
        properties *and* the rotation angle, since in EMF the font
        rotation is a part of the font definition.
        """
        prop=EMFFontProperties(prop,angle)
        size=int(prop.get_size_in_points()*self.pointstodpi)
        face=prop.get_name()
        key = hash(prop)
        handle = self._fontHandle.get(key)
        if handle is None:
            # negative height selects character height rather than cell
            # height; angle is in tenths of a degree for EMF
            handle=self.emf.CreateFont(-size, 0, int(angle)*10, int(angle)*10,
                                       pyemf.FW_NORMAL, 0, 0, 0,
                                       pyemf.ANSI_CHARSET, pyemf.OUT_DEFAULT_PRECIS,
                                       pyemf.CLIP_DEFAULT_PRECIS, pyemf.DEFAULT_QUALITY,
                                       pyemf.DEFAULT_PITCH | pyemf.FF_DONTCARE, face);
            if debugHandle: print "get_font_handle: creating handle=%d for face=%s size=%d" % (handle,face,size)
            self._fontHandle[key]=handle
        if debugHandle: print " found font handle %d for face=%s size=%d" % (handle,face,size)
        self.set_handle("font",handle)
        return handle
    def select_font(self,prop,angle):
        # get_font_handle already selects the handle; the second
        # set_handle call below is a no-op thanks to lastHandle caching
        handle=self.get_font_handle(prop,angle)
        self.set_handle("font",handle)
    def select_pen(self, gc):
        """
        Select a pen that includes the color, line width and line
        style. Return the pen if it will draw a line, or None if the
        pen won't produce any output (i.e. the style is PS_NULL)
        """
        pen=EMFPen(self.emf,gc)
        key=hash(pen)
        # pens share the _fontHandle cache (keys are hashes, so no clashes
        # with fonts/brushes in practice)
        handle=self._fontHandle.get(key)
        if handle is None:
            handle=pen.get_handle()
            self._fontHandle[key]=handle
        if debugHandle: print " found pen handle %d" % handle
        self.set_handle("pen",handle)
        if pen.style != pyemf.PS_NULL:
            return pen
        else:
            return None
    def select_brush(self, rgb):
        """
        Select a fill color, and return the brush if the color is
        valid or None if this won't produce a fill operation.
        """
        if rgb is not None:
            brush=EMFBrush(self.emf,rgb)
            key=hash(brush)
            handle=self._fontHandle.get(key)
            if handle is None:
                handle=brush.get_handle()
                self._fontHandle[key]=handle
            if debugHandle: print " found brush handle %d" % handle
            self.set_handle("brush",handle)
            return brush
        else:
            return None
    def _get_font_ttf(self, prop):
        """
        get the true type font properties, used because EMFs on
        windows will use true type fonts.
        """
        key = hash(prop)
        font = _fontd.get(key)
        if font is None:
            fname = findfont(prop)
            if debugText: print "_get_font_ttf: name=%s" % fname
            font = FT2Font(str(fname))
            _fontd[key] = font
        font.clear()
        size = prop.get_size_in_points()
        font.set_size(size, self.dpi)
        return font
    def get_text_width_height(self, s, prop, ismath):
        """
        get the width and height in display coords of the string s
        with FontPropertry prop, ripped right out of backend_ps
        """
        if debugText: print "get_text_width_height: ismath=%s properties: %s" % (str(ismath),str(prop))
        if ismath:
            if debugText: print " MATH TEXT! = %s" % str(ismath)
            w,h = self.get_math_text_width_height(s, prop)
            return w,h
        font = self._get_font_ttf(prop)
        font.set_text(s, 0.0)
        w, h = font.get_width_height()
        w /= 64.0  # convert from subpixels
        h /= 64.0
        if debugText: print " text string=%s w,h=(%f,%f)" % (s, w, h)
        return w, h
    def new_gc(self):
        # Fresh graphics context; all state mapping happens in this renderer.
        return GraphicsContextEMF()
    def points_to_pixels(self, points):
        # if backend doesn't have dpi, eg, postscript or svg
        #return points
        # elif backend assumes a value for pixels_per_inch
        #return points/72.0 * self.dpi.get() * pixels_per_inch/72.0
        # else
        return points/72.0 * self.dpi
class GraphicsContextEMF(GraphicsContextBase):
    """
    Graphics context for the EMF backend.

    All mapping of graphics-context state (colors, line widths, dash
    styles) onto EMF constructs happens in RendererEMF when pens and
    brushes are created — the postscript-style approach — so nothing
    needs to be overridden here.  The base class stores colors as RGB
    tuples on the unit interval; EMFPen/EMFBrush convert them to the
    8-bit channels EMF expects.
    """
    pass
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
    """
    No-op for this file-based backend; only GUI backends need to redraw
    in interactive mode.
    """
    pass
def show():
    """
    For a non-interactive backend there is nothing to display; iterate
    the registered figure managers anyway to mirror the protocol a GUI
    backend would follow (each would raise its window here).
    """
    for fig_manager in Gcf.get_all_fig_managers():
        pass
def new_figure_manager(num, *args, **kwargs):
    """
    Create a new figure manager instance wrapping an EMF canvas.
    """
    # honor a custom Figure subclass if the caller supplied one; no
    # main-level GUI app is needed for this backend
    figure_class = kwargs.pop('FigureClass', Figure)
    figure = figure_class(*args, **kwargs)
    return FigureManagerEMF(FigureCanvasEMF(figure), num)
class FigureCanvasEMF(FigureCanvasBase):
    """
    Canvas that renders its Figure into an Enhanced Metafile.

    Public attribute:
        figure - a Figure instance
    """

    filetypes = {'emf': 'Enhanced Metafile'}

    def draw(self):
        """Nothing to do until a file is actually written."""
        pass

    def print_emf(self, filename, dpi=300, **kwargs):
        # Render the figure at the requested resolution, then flush the
        # accumulated EMF records to disk.
        width, height = self.figure.get_size_inches()
        emf_renderer = RendererEMF(filename, width, height, dpi)
        self.figure.draw(emf_renderer)
        emf_renderer.save()

    def get_default_filetype(self):
        return 'emf'
class FigureManagerEMF(FigureManagerBase):
    """
    Figure manager for the EMF backend.  There is no window to manage
    for a non-interactive backend, so the base class does all the work.
    """
    pass
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################

# Alias used by matplotlib's backend loader to locate this backend's manager.
FigureManager = FigureManagerEMF
{
"description": "i18n",
"files":[
"!**/*.d.ts",
"!**/*.js",
"!**/*.[0-9].*",
"!doc-files/**/*",
"**/*.xlf"
],
"file": "src/app/app.component.ts",
"tags": ["Angular", "i18n", "internationalization"],
"devDependencies": ["@angular/compiler-cli", "typescript"]
} | json | github | https://github.com/angular/angular | adev/src/content/examples/i18n/stackblitz.json |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.python.tasks.checkstyle.common import Nit, PythonFile
from pants.backend.python.tasks.checkstyle.variable_names import (PEP8VariableNames,
allow_underscores,
is_builtin_name, is_lower_snake,
is_reserved_name,
is_reserved_with_trailing_underscore,
is_upper_camel)
def test_allow_underscores():
    """@allow_underscores(n) tolerates up to n leading underscores (stripping
    them before calling through) and returns False for more."""
    @allow_underscores(0)
    def strict(name):
        return name

    assert strict('foo') == 'foo'
    assert strict('foo_') == 'foo_'
    for rejected in ('_foo', '__foo'):
        assert strict(rejected) is False

    @allow_underscores(1)
    def lenient(name):
        return name

    assert lenient('foo') == 'foo'
    assert lenient('_foo') == 'foo'
    assert lenient('_foo_') == 'foo_'
    for rejected in ('__foo', '___foo'):
        assert lenient(rejected) is False
# Fixture names that is_upper_camel should accept (see tests below).
UPPER_CAMEL = (
    'Rate',
    'HTTPRate',
    'HttpRate',
    'Justastringofwords'
)

# Fixture names that is_lower_snake should accept.
LOWER_SNAKE = (
    'quiet',
    'quiet_noises',
)
def test_is_upper_camel():
    """UpperCamel names match, with at most one leading underscore and no
    trailing underscore; snake_case never matches."""
    for word in UPPER_CAMEL:
        accepted = (word, '_' + word)
        rejected = ('__' + word, word + '_')
        assert all(is_upper_camel(w) for w in accepted)
        assert not any(is_upper_camel(w) for w in rejected)
    for word in LOWER_SNAKE:
        assert not any(
            is_upper_camel(w) for w in (word, '_' + word, word + '_'))
def test_is_lower_snake():
    """snake_case names match even with one or two leading underscores;
    CamelCase never matches."""
    for word in LOWER_SNAKE:
        assert all(is_lower_snake(prefix + word) for prefix in ('', '_', '__'))
    for word in UPPER_CAMEL:
        assert not any(is_lower_snake(prefix + word) for prefix in ('', '_'))
def test_is_builtin_name():
    """Only balanced, all-lowercase dunder names count as builtin names."""
    assert is_builtin_name('__foo__')
    # inner underscores, capital letters, or unbalanced dunders disqualify
    for name in ('__fo_o__', '__Foo__', '__fOo__', '__foo', 'foo__'):
        assert not is_builtin_name(name)
def test_is_reserved_name():
    """Keywords and soft-reserved builtins are recognized; 'none' (lowercase)
    is not reserved."""
    assert all(is_reserved_name(n) for n in ('for', 'super', 'id', 'type', 'class'))
    assert not is_reserved_name('none')
def test_is_reserved_with_trailing_underscore():
    """Exactly one trailing underscore on a reserved name is the sanctioned
    escape hatch; two underscores or a non-reserved base never match."""
    reserved = ('super', 'id', 'type', 'class')
    assert all(is_reserved_with_trailing_underscore(n + '_') for n in reserved)
    assert not any(
        is_reserved_with_trailing_underscore(n + '__') for n in reserved)
    assert not any(
        is_reserved_with_trailing_underscore(n + '_')
        for n in ('garbage', 'slots', 'metaclass'))
def test_class_names():
    """A snake_case class name produces a single T000 error on its def line."""
    checker = PEP8VariableNames(PythonFile.from_statement("""
class dhis_not_right(object):
  pass
"""))
    nits = list(checker.nits())
    assert len(nits) == 1
    nit = nits[0]
    assert (nit.code, nit._line_number, nit.severity) == ('T000', 1, Nit.ERROR)
def test_class_globals():
    """A class-level constant that is not UPPER_SNAKE (notRight) is flagged
    once as T001; the conforming RIGHT is not."""
    checker = PEP8VariableNames(PythonFile.from_statement("""
class DhisRight(object):
  RIGHT = 123
  notRight = 321
"""))
    nits = list(checker.nits())
    assert len(nits) == 1
    nit = nits[0]
    assert (nit.code, nit._line_number, nit.severity) == ('T001', 3, Nit.ERROR)
def test_builtin_overrides():
    """Shadowing a builtin with a module-level def (range) is flagged T801;
    a method of the same name inside a class body (any) is not."""
    checker = PEP8VariableNames(PythonFile.from_statement("""
def range():
  print("Not in a class body")

class DhisRight(object):
  def any(self):
    print("In a class body")
"""))
    nits = list(checker.nits())
    assert len(nits) == 1
    nit = nits[0]
    assert (nit.code, nit._line_number, nit.severity) == ('T801', 1, Nit.ERROR)
def test_lower_snake_method_names():
    """Function and method names must be lower_snake_case (T002); overriding
    a camelCase method inherited from another module (e.g. unittest's setUp)
    is allowed."""

    def assert_single_t002(statement, line_number):
        # Each offending definition should yield exactly one T002 ERROR
        # anchored at the definition's line.
        p8 = PEP8VariableNames(PythonFile.from_statement(statement))
        nits = list(p8.nits())
        assert len(nits) == 1
        assert nits[0].code == 'T002'
        assert nits[0]._line_number == line_number
        assert nits[0].severity == Nit.ERROR

    assert_single_t002("""
def totally_fine():
  print("Not in a class body")

class DhisRight(object):
  def clearlyNotThinking(self):
    print("In a class body")
""", 5)

    assert_single_t002("""
class DhisRight:
  def clearlyNotThinking(self):
    print("In a class body")
""", 2)

    # Allow derivations from other modules to be ok.
    p8 = PEP8VariableNames(PythonFile.from_statement("""
class TestCase(unittest.TestCase):
  def setUp(self):
    pass
"""))
    # BUG FIX: the original materialized `nits` and then pointlessly re-ran
    # p8.nits() inside the assertion; assert on the list we already built.
    nits = list(p8.nits())
    assert len(nits) == 0

    assert_single_t002("""
def clearlyNotThinking():
  print("Not in a class body")

class DhisRight(object):
  def totally_fine(self):
    print("In a class body")
""", 1)
import pytest
import numpy as np
from numpy._core.multiarray import _vec_string
from numpy.testing import (
assert_,
assert_array_equal,
assert_equal,
assert_raises,
assert_raises_regex,
)
# Keyword sets toggling bytes- vs unicode-backed results from np.char.array.
kw_unicode_true = {'unicode': True}  # make 2to3 work properly
kw_unicode_false = {'unicode': False}

# chararray is deprecated; reusable mark to silence the warning in tests
# that exercise it directly.
ignore_charray_deprecation = pytest.mark.filterwarnings(
    r"ignore:\w+ chararray \w+:DeprecationWarning"
)
class TestBasic:
def test_from_object_array(self):
A = np.array([['abc', 2],
['long ', '0123456789']], dtype='O')
B = np.char.array(A)
assert_equal(B.dtype.itemsize, 10)
assert_array_equal(B, [[b'abc', b'2'],
[b'long', b'0123456789']])
def test_from_object_array_unicode(self):
A = np.array([['abc', 'Sigma \u03a3'],
['long ', '0123456789']], dtype='O')
assert_raises(ValueError, np.char.array, (A,))
B = np.char.array(A, **kw_unicode_true)
assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize)
assert_array_equal(B, [['abc', 'Sigma \u03a3'],
['long', '0123456789']])
def test_from_string_array(self):
A = np.array([[b'abc', b'foo'],
[b'long ', b'0123456789']])
assert_equal(A.dtype.type, np.bytes_)
B = np.char.array(A)
assert_array_equal(B, A)
assert_equal(B.dtype, A.dtype)
assert_equal(B.shape, A.shape)
B[0, 0] = 'changed'
assert_(B[0, 0] != A[0, 0])
C = np.char.asarray(A)
assert_array_equal(C, A)
assert_equal(C.dtype, A.dtype)
C[0, 0] = 'changed again'
assert_(C[0, 0] != B[0, 0])
assert_(C[0, 0] == A[0, 0])
def test_from_unicode_array(self):
A = np.array([['abc', 'Sigma \u03a3'],
['long ', '0123456789']])
assert_equal(A.dtype.type, np.str_)
B = np.char.array(A)
assert_array_equal(B, A)
assert_equal(B.dtype, A.dtype)
assert_equal(B.shape, A.shape)
B = np.char.array(A, **kw_unicode_true)
assert_array_equal(B, A)
assert_equal(B.dtype, A.dtype)
assert_equal(B.shape, A.shape)
def fail():
np.char.array(A, **kw_unicode_false)
assert_raises(UnicodeEncodeError, fail)
def test_unicode_upconvert(self):
A = np.char.array(['abc'])
B = np.char.array(['\u03a3'])
assert_(issubclass((A + B).dtype.type, np.str_))
def test_from_string(self):
A = np.char.array(b'abc')
assert_equal(len(A), 1)
assert_equal(len(A[0]), 3)
assert_(issubclass(A.dtype.type, np.bytes_))
def test_from_unicode(self):
A = np.char.array('\u03a3')
assert_equal(len(A), 1)
assert_equal(len(A[0]), 1)
assert_equal(A.itemsize, 4)
assert_(issubclass(A.dtype.type, np.str_))
class TestVecString:
def test_non_existent_method(self):
def fail():
_vec_string('a', np.bytes_, 'bogus')
assert_raises(AttributeError, fail)
def test_non_string_array(self):
def fail():
_vec_string(1, np.bytes_, 'strip')
assert_raises(TypeError, fail)
def test_invalid_args_tuple(self):
def fail():
_vec_string(['a'], np.bytes_, 'strip', 1)
assert_raises(TypeError, fail)
def test_invalid_type_descr(self):
def fail():
_vec_string(['a'], 'BOGUS', 'strip')
assert_raises(TypeError, fail)
def test_invalid_function_args(self):
def fail():
_vec_string(['a'], np.bytes_, 'strip', (1,))
assert_raises(TypeError, fail)
def test_invalid_result_type(self):
def fail():
_vec_string(['a'], np.int_, 'strip')
assert_raises(TypeError, fail)
def test_broadcast_error(self):
def fail():
_vec_string([['abc', 'def']], np.int_, 'find', (['a', 'd', 'j'],))
assert_raises(ValueError, fail)
@ignore_charray_deprecation
class TestWhitespace:
    """Trailing whitespace is ignored in chararray comparisons."""

    def test1(self):
        padded = np.array([['abc ', '123 '],
                           ['789 ', 'xyz ']]).view(np.char.chararray)
        trimmed = np.array([['abc', '123'],
                            ['789', 'xyz']]).view(np.char.chararray)
        # every position compares equal, so all orderings collapse
        assert_(np.all(padded == trimmed))
        assert_(np.all(padded >= trimmed))
        assert_(np.all(padded <= trimmed))
        assert_(not np.any(padded > trimmed))
        assert_(not np.any(padded < trimmed))
        assert_(not np.any(padded != trimmed))
@ignore_charray_deprecation
class TestChar:
    """A dtype='c' array views cleanly as a per-character chararray."""

    def test_it(self):
        chars = np.array('abc1', dtype='c').view(np.char.chararray)
        assert_equal(chars.shape, (4,))
        assert_equal(chars.upper()[:2].tobytes(), b'AB')
@ignore_charray_deprecation
class TestComparisons:
    """Elementwise rich comparisons between two chararrays.

    Subclasses override A()/B() to vary the operand dtypes (ticket #1276).
    """

    def A(self):
        return np.array([['abc', 'abcc', '123'],
                         ['789', 'abc', 'xyz']]).view(np.char.chararray)

    def B(self):
        return np.array([['efg', 'efg', '123 '],
                         ['051', 'efgg', 'tuv']]).view(np.char.chararray)

    def test_not_equal(self):
        lhs, rhs = self.A(), self.B()
        expected = [[True, True, False], [True, True, True]]
        assert_array_equal(lhs != rhs, expected)

    def test_equal(self):
        lhs, rhs = self.A(), self.B()
        expected = [[False, False, True], [False, False, False]]
        assert_array_equal(lhs == rhs, expected)

    def test_greater_equal(self):
        lhs, rhs = self.A(), self.B()
        expected = [[False, False, True], [True, False, True]]
        assert_array_equal(lhs >= rhs, expected)

    def test_less_equal(self):
        lhs, rhs = self.A(), self.B()
        expected = [[True, True, True], [False, True, False]]
        assert_array_equal(lhs <= rhs, expected)

    def test_greater(self):
        lhs, rhs = self.A(), self.B()
        expected = [[False, False, False], [True, False, True]]
        assert_array_equal(lhs > rhs, expected)

    def test_less(self):
        lhs, rhs = self.A(), self.B()
        expected = [[True, True, False], [False, True, False]]
        assert_array_equal(lhs < rhs, expected)

    def test_type(self):
        lhs, rhs = self.A(), self.B()
        # np.char comparison helpers return plain ndarrays, for both
        # array and scalar arguments
        for result in (np.char.equal(lhs, rhs), np.char.equal('a', 'a')):
            assert_(isinstance(result, np.ndarray))
@ignore_charray_deprecation
class TestComparisonsMixed1(TestComparisons):
    """Ticket #1276: same comparisons with B built from an explicit str_
    array."""

    def B(self):
        raw = np.array([['efg', 'efg', '123 '],
                        ['051', 'efgg', 'tuv']], np.str_)
        return raw.view(np.char.chararray)
@ignore_charray_deprecation
class TestComparisonsMixed2(TestComparisons):
    """Ticket #1276: same comparisons with A built from an explicit str_
    array."""

    def A(self):
        raw = np.array([['abc', 'abcc', '123'],
                        ['789', 'abc', 'xyz']], np.str_)
        return raw.view(np.char.chararray)
@ignore_charray_deprecation
class TestInformation:
    """Query methods on chararrays: str_len, count, find/index, is*, ...

    A() is plain ASCII data (with an embedded tab and NUL in row 2);
    B() swaps in a non-ASCII character (GREEK CAPITAL SIGMA) to exercise
    the unicode code paths.
    """

    def A(self):
        return np.array([[' abc ', ''],
                         ['12345', 'MixedCase'],
                         ['123 \t 345 \0 ', 'UPPER']]) \
            .view(np.char.chararray)

    def B(self):
        return np.array([[' \u03a3 ', ''],
                         ['12345', 'MixedCase'],
                         ['123 \t 345 \0 ', 'UPPER']]) \
            .view(np.char.chararray)

    def test_len(self):
        A, B = self.A(), self.B()
        assert_(issubclass(np.char.str_len(A).dtype.type, np.integer))
        # The sigma entry is 3 code points even though its UTF-8 is longer.
        assert_array_equal(np.char.str_len(A), [[5, 0], [5, 9], [12, 5]])
        assert_array_equal(np.char.str_len(B), [[3, 0], [5, 9], [12, 5]])

    def test_count(self):
        A, B = self.A(), self.B()
        assert_(issubclass(A.count('').dtype.type, np.integer))
        assert_array_equal(A.count('a'), [[1, 0], [0, 1], [0, 0]])
        assert_array_equal(A.count('123'), [[0, 0], [1, 0], [1, 0]])
        # Python doesn't seem to like counting NULL characters
        assert_array_equal(A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]])
        assert_array_equal(B.count('a'), [[0, 0], [0, 1], [0, 0]])
        assert_array_equal(B.count('123'), [[0, 0], [1, 0], [1, 0]])

    def test_endswith(self):
        A = self.A()
        assert_(issubclass(A.endswith('').dtype.type, np.bool))
        assert_array_equal(A.endswith(' '), [[1, 0], [0, 0], [1, 0]])
        assert_array_equal(A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]])

        def fail():
            # A non-integer slice bound must raise TypeError.
            A.endswith('3', 'fdjk')

        assert_raises(TypeError, fail)

    @pytest.mark.parametrize(
        "dtype, encode",
        [("U", str),
         ("S", lambda x: x.encode('ascii')),
         ])
    def test_find(self, dtype, encode):
        # Run the same expectations for both unicode and bytes dtypes.
        A = self.A().astype(dtype)
        assert_(issubclass(A.find(encode('a')).dtype.type, np.integer))
        assert_array_equal(A.find(encode('a')),
                           [[1, -1], [-1, 6], [-1, -1]])
        assert_array_equal(A.find(encode('3')),
                           [[-1, -1], [2, -1], [2, -1]])
        assert_array_equal(A.find(encode('a'), 0, 2),
                           [[1, -1], [-1, -1], [-1, -1]])
        assert_array_equal(A.find([encode('1'), encode('P')]),
                           [[-1, -1], [0, -1], [0, 1]])
        C = (np.array(['ABCDEFGHIJKLMNOPQRSTUVWXYZ',
                       '01234567890123456789012345'])
             .view(np.char.chararray)).astype(dtype)
        assert_array_equal(C.find(encode('M')), [12, -1])

    def test_index(self):
        A = self.A()

        def fail():
            # Unlike find(), index() raises when the substring is absent.
            A.index('a')

        assert_raises(ValueError, fail)
        assert_(np.char.index('abcba', 'b') == 1)
        assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer))

    def test_isalnum(self):
        A = self.A()
        assert_(issubclass(A.isalnum().dtype.type, np.bool))
        assert_array_equal(A.isalnum(), [[False, False], [True, True], [False, True]])

    def test_isalpha(self):
        A = self.A()
        assert_(issubclass(A.isalpha().dtype.type, np.bool))
        assert_array_equal(A.isalpha(), [[False, False], [False, True], [False, True]])

    def test_isdigit(self):
        A = self.A()
        assert_(issubclass(A.isdigit().dtype.type, np.bool))
        assert_array_equal(A.isdigit(), [[False, False], [True, False], [False, False]])

    def test_islower(self):
        A = self.A()
        assert_(issubclass(A.islower().dtype.type, np.bool))
        assert_array_equal(A.islower(), [[True, False], [False, False], [False, False]])

    def test_isspace(self):
        A = self.A()
        assert_(issubclass(A.isspace().dtype.type, np.bool))
        assert_array_equal(
            A.isspace(),
            [[False, False], [False, False], [False, False]],
        )

    def test_istitle(self):
        A = self.A()
        assert_(issubclass(A.istitle().dtype.type, np.bool))
        assert_array_equal(
            A.istitle(),
            [[False, False], [False, False], [False, False]],
        )

    def test_isupper(self):
        A = self.A()
        assert_(issubclass(A.isupper().dtype.type, np.bool))
        assert_array_equal(A.isupper(), [[False, False], [False, False], [False, True]])

    def test_rfind(self):
        A = self.A()
        assert_(issubclass(A.rfind('a').dtype.type, np.integer))
        assert_array_equal(A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]])
        assert_array_equal(A.rfind('3'), [[-1, -1], [2, -1], [6, -1]])
        assert_array_equal(A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
        assert_array_equal(A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]])

    def test_rindex(self):
        A = self.A()

        def fail():
            # rindex() raises when the substring is absent (rfind returns -1).
            A.rindex('a')

        assert_raises(ValueError, fail)
        assert_(np.char.rindex('abcba', 'b') == 3)
        assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer))

    def test_startswith(self):
        A = self.A()
        assert_(issubclass(A.startswith('').dtype.type, np.bool))
        assert_array_equal(A.startswith(' '), [[1, 0], [0, 0], [0, 0]])
        assert_array_equal(A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]])

        def fail():
            # A non-integer slice bound must raise TypeError.
            A.startswith('3', 'fdjk')

        assert_raises(TypeError, fail)
@ignore_charray_deprecation
class TestMethods:
    """Transformation methods on chararrays (case-mapping, padding,
    splitting, stripping, replacing).

    A() holds bytes data (dtype 'S'); B() holds str data including a
    non-ASCII character (GREEK CAPITAL SIGMA).

    NOTE(review): the space runs inside some padded target literals
    (center/ljust/rjust/expandtabs) look whitespace-collapsed in this
    copy of the file -- verify the exact padding against upstream.
    """

    def A(self):
        return np.array([[' abc ', ''],
                         ['12345', 'MixedCase'],
                         ['123 \t 345 \0 ', 'UPPER']],
                        dtype='S').view(np.char.chararray)

    def B(self):
        return np.array([[' \u03a3 ', ''],
                         ['12345', 'MixedCase'],
                         ['123 \t 345 \0 ', 'UPPER']]) \
            .view(np.char.chararray)

    def test_capitalize(self):
        A, B = self.A(), self.B()
        tgt = [[b' abc ', b''],
               [b'12345', b'Mixedcase'],
               [b'123 \t 345 \0 ', b'Upper']]
        assert_(issubclass(A.capitalize().dtype.type, np.bytes_))
        assert_array_equal(A.capitalize(), tgt)

        # Capitalizing sigma lowercases it to final/medial sigma U+03C3.
        tgt = [[' \u03c3 ', ''],
               ['12345', 'Mixedcase'],
               ['123 \t 345 \0 ', 'Upper']]
        assert_(issubclass(B.capitalize().dtype.type, np.str_))
        assert_array_equal(B.capitalize(), tgt)

    def test_center(self):
        A = self.A()
        assert_(issubclass(A.center(10).dtype.type, np.bytes_))
        # Per-element widths broadcast; entries longer than the width are kept.
        C = A.center([10, 20])
        assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])

        C = A.center(20, b'#')
        assert_(np.all(C.startswith(b'#')))
        assert_(np.all(C.endswith(b'#')))

        C = np.char.center(b'FOO', [[10, 20], [15, 8]])
        tgt = [[b' FOO ', b' FOO '],
               [b' FOO ', b' FOO ']]
        assert_(issubclass(C.dtype.type, np.bytes_))
        assert_array_equal(C, tgt)

    def test_decode(self):
        A = np.char.array([b'\\u03a3'])
        assert_(A.decode('unicode-escape')[0] == '\u03a3')

    def test_encode(self):
        B = self.B().encode('unicode_escape')
        assert_(B[0][0] == ' \\u03a3 '.encode('latin1'))

    def test_expandtabs(self):
        T = self.A().expandtabs()
        assert_(T[2, 0] == b'123 345 \0')

    def test_join(self):
        # NOTE: list(b'123') == [49, 50, 51]
        # so that b','.join(b'123') results to an error on Py3
        A0 = self.A().decode('ascii')

        A = np.char.join([',', '#'], A0)
        assert_(issubclass(A.dtype.type, np.str_))
        tgt = np.array([[' ,a,b,c, ', ''],
                        ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'],
                        ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']])
        assert_array_equal(np.char.join([',', '#'], A0), tgt)

    def test_ljust(self):
        A = self.A()
        assert_(issubclass(A.ljust(10).dtype.type, np.bytes_))

        C = A.ljust([10, 20])
        assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])

        C = A.ljust(20, b'#')
        # Only the empty string starts with the fill character.
        assert_array_equal(C.startswith(b'#'), [
            [False, True], [False, False], [False, False]])
        assert_(np.all(C.endswith(b'#')))

        C = np.char.ljust(b'FOO', [[10, 20], [15, 8]])
        tgt = [[b'FOO ', b'FOO '],
               [b'FOO ', b'FOO ']]
        assert_(issubclass(C.dtype.type, np.bytes_))
        assert_array_equal(C, tgt)

    def test_lower(self):
        A, B = self.A(), self.B()
        tgt = [[b' abc ', b''],
               [b'12345', b'mixedcase'],
               [b'123 \t 345 \0 ', b'upper']]
        assert_(issubclass(A.lower().dtype.type, np.bytes_))
        assert_array_equal(A.lower(), tgt)

        tgt = [[' \u03c3 ', ''],
               ['12345', 'mixedcase'],
               ['123 \t 345 \0 ', 'upper']]
        assert_(issubclass(B.lower().dtype.type, np.str_))
        assert_array_equal(B.lower(), tgt)

    def test_lstrip(self):
        A, B = self.A(), self.B()
        tgt = [[b'abc ', b''],
               [b'12345', b'MixedCase'],
               [b'123 \t 345 \0 ', b'UPPER']]
        assert_(issubclass(A.lstrip().dtype.type, np.bytes_))
        assert_array_equal(A.lstrip(), tgt)

        # Per-column strip characters broadcast across the rows.
        tgt = [[b' abc', b''],
               [b'2345', b'ixedCase'],
               [b'23 \t 345 \x00', b'UPPER']]
        assert_array_equal(A.lstrip([b'1', b'M']), tgt)

        tgt = [['\u03a3 ', ''],
               ['12345', 'MixedCase'],
               ['123 \t 345 \0 ', 'UPPER']]
        assert_(issubclass(B.lstrip().dtype.type, np.str_))
        assert_array_equal(B.lstrip(), tgt)

    def test_partition(self):
        A = self.A()
        P = A.partition([b'3', b'M'])
        tgt = [[(b' abc ', b'', b''), (b'', b'', b'')],
               [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
               [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]]
        assert_(issubclass(P.dtype.type, np.bytes_))
        assert_array_equal(P, tgt)

    def test_replace(self):
        A = self.A()
        R = A.replace([b'3', b'a'],
                      [b'##########', b'@'])
        tgt = [[b' abc ', b''],
               [b'12##########45', b'MixedC@se'],
               [b'12########## \t ##########45 \x00 ', b'UPPER']]
        assert_(issubclass(R.dtype.type, np.bytes_))
        assert_array_equal(R, tgt)

        # Test special cases that should just return the input array,
        # since replacements are not possible or do nothing.
        S1 = A.replace(b'A very long byte string, longer than A', b'')
        assert_array_equal(S1, A)
        S2 = A.replace(b'', b'')
        assert_array_equal(S2, A)
        S3 = A.replace(b'3', b'3')
        assert_array_equal(S3, A)
        S4 = A.replace(b'3', b'', count=0)
        assert_array_equal(S4, A)

    def test_replace_count_and_size(self):
        a = np.array(['0123456789' * i for i in range(4)]
                     ).view(np.char.chararray)
        # Each '5' grows by 4 chars; itemsize is in bytes (4 per UCS-4 char).
        r1 = a.replace('5', 'ABCDE')
        assert r1.dtype.itemsize == (3 * 10 + 3 * 4) * 4
        assert_array_equal(r1, np.array(['01234ABCDE6789' * i
                                         for i in range(4)]))
        r2 = a.replace('5', 'ABCDE', count=1)
        assert r2.dtype.itemsize == (3 * 10 + 4) * 4
        r3 = a.replace('5', 'ABCDE', count=0)
        assert r3.dtype.itemsize == a.dtype.itemsize
        assert_array_equal(r3, a)
        # Negative values mean to replace all.
        r4 = a.replace('5', 'ABCDE', count=-1)
        assert r4.dtype.itemsize == (3 * 10 + 3 * 4) * 4
        assert_array_equal(r4, r1)
        # We can do count on an element-by-element basis.
        r5 = a.replace('5', 'ABCDE', count=[-1, -1, -1, 1])
        assert r5.dtype.itemsize == (3 * 10 + 4) * 4
        assert_array_equal(r5, np.array(
            ['01234ABCDE6789' * i for i in range(3)]
            + ['01234ABCDE6789' + '0123456789' * 2]))

    def test_replace_broadcasting(self):
        a = np.array('0,0,0').view(np.char.chararray)
        # A 0-d input broadcasts against the count array.
        r1 = a.replace('0', '1', count=np.arange(3))
        assert r1.dtype == a.dtype
        assert_array_equal(r1, np.array(['0,0,0', '1,0,0', '1,1,0']))
        r2 = a.replace('0', [['1'], ['2']], count=np.arange(1, 4))
        assert_array_equal(r2, np.array([['1,0,0', '1,1,0', '1,1,1'],
                                         ['2,0,0', '2,2,0', '2,2,2']]))
        r3 = a.replace(['0', '0,0', '0,0,0'], 'X')
        assert_array_equal(r3, np.array(['X,X,X', 'X,0', 'X']))

    def test_rjust(self):
        A = self.A()
        assert_(issubclass(A.rjust(10).dtype.type, np.bytes_))

        C = A.rjust([10, 20])
        assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])

        C = A.rjust(20, b'#')
        assert_(np.all(C.startswith(b'#')))
        # Only the empty string ends with the fill character.
        assert_array_equal(C.endswith(b'#'),
                           [[False, True], [False, False], [False, False]])

        C = np.char.rjust(b'FOO', [[10, 20], [15, 8]])
        tgt = [[b' FOO', b' FOO'],
               [b' FOO', b' FOO']]
        assert_(issubclass(C.dtype.type, np.bytes_))
        assert_array_equal(C, tgt)

    def test_rpartition(self):
        A = self.A()
        P = A.rpartition([b'3', b'M'])
        tgt = [[(b'', b'', b' abc '), (b'', b'', b'')],
               [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
               [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]]
        assert_(issubclass(P.dtype.type, np.bytes_))
        assert_array_equal(P, tgt)

    def test_rsplit(self):
        # Variable-length results come back as an object array of lists.
        A = self.A().rsplit(b'3')
        tgt = [[[b' abc '], [b'']],
               [[b'12', b'45'], [b'MixedCase']],
               [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
        assert_(issubclass(A.dtype.type, np.object_))
        assert_equal(A.tolist(), tgt)

    def test_rstrip(self):
        A, B = self.A(), self.B()
        assert_(issubclass(A.rstrip().dtype.type, np.bytes_))

        # Trailing NUL bytes are stripped like whitespace for bytes arrays.
        tgt = [[b' abc', b''],
               [b'12345', b'MixedCase'],
               [b'123 \t 345', b'UPPER']]
        assert_array_equal(A.rstrip(), tgt)

        tgt = [[b' abc ', b''],
               [b'1234', b'MixedCase'],
               [b'123 \t 345 \x00', b'UPP']
               ]
        assert_array_equal(A.rstrip([b'5', b'ER']), tgt)

        tgt = [[' \u03a3', ''],
               ['12345', 'MixedCase'],
               ['123 \t 345', 'UPPER']]
        assert_(issubclass(B.rstrip().dtype.type, np.str_))
        assert_array_equal(B.rstrip(), tgt)

    def test_strip(self):
        A, B = self.A(), self.B()
        tgt = [[b'abc', b''],
               [b'12345', b'MixedCase'],
               [b'123 \t 345', b'UPPER']]
        assert_(issubclass(A.strip().dtype.type, np.bytes_))
        assert_array_equal(A.strip(), tgt)

        tgt = [[b' abc ', b''],
               [b'234', b'ixedCas'],
               [b'23 \t 345 \x00', b'UPP']]
        assert_array_equal(A.strip([b'15', b'EReM']), tgt)

        tgt = [['\u03a3', ''],
               ['12345', 'MixedCase'],
               ['123 \t 345', 'UPPER']]
        assert_(issubclass(B.strip().dtype.type, np.str_))
        assert_array_equal(B.strip(), tgt)

    def test_split(self):
        # Variable-length results come back as an object array of lists.
        A = self.A().split(b'3')
        tgt = [
            [[b' abc '], [b'']],
            [[b'12', b'45'], [b'MixedCase']],
            [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
        assert_(issubclass(A.dtype.type, np.object_))
        assert_equal(A.tolist(), tgt)

    def test_splitlines(self):
        A = np.char.array(['abc\nfds\nwer']).splitlines()
        assert_(issubclass(A.dtype.type, np.object_))
        assert_(A.shape == (1,))
        assert_(len(A[0]) == 3)

    def test_swapcase(self):
        A, B = self.A(), self.B()
        tgt = [[b' ABC ', b''],
               [b'12345', b'mIXEDcASE'],
               [b'123 \t 345 \0 ', b'upper']]
        assert_(issubclass(A.swapcase().dtype.type, np.bytes_))
        assert_array_equal(A.swapcase(), tgt)

        tgt = [[' \u03c3 ', ''],
               ['12345', 'mIXEDcASE'],
               ['123 \t 345 \0 ', 'upper']]
        assert_(issubclass(B.swapcase().dtype.type, np.str_))
        assert_array_equal(B.swapcase(), tgt)

    def test_title(self):
        A, B = self.A(), self.B()
        tgt = [[b' Abc ', b''],
               [b'12345', b'Mixedcase'],
               [b'123 \t 345 \0 ', b'Upper']]
        assert_(issubclass(A.title().dtype.type, np.bytes_))
        assert_array_equal(A.title(), tgt)

        tgt = [[' \u03a3 ', ''],
               ['12345', 'Mixedcase'],
               ['123 \t 345 \0 ', 'Upper']]
        assert_(issubclass(B.title().dtype.type, np.str_))
        assert_array_equal(B.title(), tgt)

    def test_upper(self):
        A, B = self.A(), self.B()
        tgt = [[b' ABC ', b''],
               [b'12345', b'MIXEDCASE'],
               [b'123 \t 345 \0 ', b'UPPER']]
        assert_(issubclass(A.upper().dtype.type, np.bytes_))
        assert_array_equal(A.upper(), tgt)

        tgt = [[' \u03a3 ', ''],
               ['12345', 'MIXEDCASE'],
               ['123 \t 345 \0 ', 'UPPER']]
        assert_(issubclass(B.upper().dtype.type, np.str_))
        assert_array_equal(B.upper(), tgt)

    def test_isnumeric(self):
        A, B = self.A(), self.B()

        def fail():
            # isnumeric is only defined for str arrays; bytes must raise.
            A.isnumeric()

        assert_raises(TypeError, fail)
        assert_(issubclass(B.isnumeric().dtype.type, np.bool))
        assert_array_equal(B.isnumeric(), [
            [False, False], [True, False], [False, False]])

    def test_isdecimal(self):
        A, B = self.A(), self.B()

        def fail():
            # isdecimal is only defined for str arrays; bytes must raise.
            A.isdecimal()

        assert_raises(TypeError, fail)
        assert_(issubclass(B.isdecimal().dtype.type, np.bool))
        assert_array_equal(B.isdecimal(), [
            [False, False], [True, False], [False, False]])
@ignore_charray_deprecation
class TestOperations:
    """Arithmetic-style operators on chararrays: +, *, % and slicing."""

    def A(self):
        return np.array([['abc', '123'],
                         ['789', 'xyz']]).view(np.char.chararray)

    def B(self):
        return np.array([['efg', '456'],
                         ['051', 'tuv']]).view(np.char.chararray)

    def test_argsort(self):
        # Equal elements: stable sort must preserve the original order.
        arr = np.array(['abc'] * 4).view(np.char.chararray)
        actual = arr.argsort(stable=True)
        assert_array_equal(actual, [0, 1, 2, 3])

    def test_add(self):
        A, B = self.A(), self.B()
        AB = np.array([['abcefg', '123456'],
                       ['789051', 'xyztuv']]).view(np.char.chararray)
        assert_array_equal(AB, (A + B))
        # Concatenation widens the itemsize to hold both operands.
        assert_(len((A + B)[0][0]) == 6)

    def test_radd(self):
        A = self.A()
        QA = np.array([['qabc', 'q123'],
                       ['q789', 'qxyz']]).view(np.char.chararray)
        assert_array_equal(QA, ('q' + A))

    def test_mul(self):
        A = self.A()
        for r in (2, 3, 5, 7, 197):
            Ar = np.array([[A[0, 0] * r, A[0, 1] * r],
                           [A[1, 0] * r, A[1, 1] * r]]).view(np.char.chararray)
            assert_array_equal(Ar, (A * r))

        # Multiplying by anything but an integer must fail.
        for ob in [object(), 'qrs']:
            with assert_raises_regex(ValueError,
                                     'Can only multiply by integers'):
                A * ob

    def test_rmul(self):
        A = self.A()
        for r in (2, 3, 5, 7, 197):
            Ar = np.array([[A[0, 0] * r, A[0, 1] * r],
                           [A[1, 0] * r, A[1, 1] * r]]).view(np.char.chararray)
            assert_array_equal(Ar, (r * A))

        for ob in [object(), 'qrs']:
            with assert_raises_regex(ValueError,
                                     'Can only multiply by integers'):
                ob * A

    def test_mod(self):
        """Ticket #856"""
        F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.char.chararray)
        C = np.array([[3, 7], [19, 1]], dtype=np.int64)
        FC = np.array([['3', '7.000000'],
                       ['19', 'np.int64(1)']]).view(np.char.chararray)
        assert_array_equal(FC, F % C)

        A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.char.chararray)
        A1 = np.array([['1.000', '1'],
                       ['1', repr(np.array(1)[()])]]).view(np.char.chararray)
        assert_array_equal(A1, (A % 1))

        A2 = np.array([['1.000', '2'],
                       ['3', repr(np.array(4)[()])]]).view(np.char.chararray)
        assert_array_equal(A2, (A % [[1, 2], [3, 4]]))

    def test_rmod(self):
        A = self.A()
        assert_(f"{A}" == str(A))
        assert_(f"{A!r}" == repr(A))

        # A chararray on the right of % with a non-format lhs must raise.
        for ob in [42, object()]:
            with assert_raises_regex(
                    TypeError, "unsupported operand type.* and 'chararray'"):
                ob % A

    def test_slice(self):
        """Regression test for https://github.com/numpy/numpy/issues/5982"""
        arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']],
                       dtype='S4').view(np.char.chararray)

        # Slices keep the chararray as base rather than collapsing it.
        sl1 = arr[:]
        assert_array_equal(sl1, arr)
        assert_(sl1.base is arr)
        assert_(sl1.base.base is arr.base)

        sl2 = arr[:, :]
        assert_array_equal(sl2, arr)
        assert_(sl2.base is arr)
        assert_(sl2.base.base is arr.base)

        assert_(arr[0, 0] == b'abc')

    @pytest.mark.parametrize('data', [['plate', ' ', 'shrimp'],
                                      [b'retro', b' ', b'encabulator']])
    def test_getitem_length_zero_item(self, data):
        # Regression test for gh-26375.
        a = np.char.array(data)
        # a.dtype.type() will be an empty string or bytes instance.
        # The equality test will fail if a[1] has the wrong type
        # or does not have length 0.
        assert_equal(a[1], a.dtype.type())
class TestMethodsEmptyArray:
def test_encode(self):
res = np.char.encode(np.array([], dtype='U'))
assert_array_equal(res, [])
assert_(res.dtype.char == 'S')
def test_decode(self):
res = np.char.decode(np.array([], dtype='S'))
assert_array_equal(res, [])
assert_(res.dtype.char == 'U')
def test_decode_with_reshape(self):
res = np.char.decode(np.array([], dtype='S').reshape((1, 0, 1)))
assert_(res.shape == (1, 0, 1))
class TestMethodsScalarValues:
def test_mod(self):
A = np.array([[' abc ', ''],
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']], dtype='S')
tgt = [[b'123 abc ', b'123'],
[b'12312345', b'123MixedCase'],
[b'123123 \t 345 \0 ', b'123UPPER']]
assert_array_equal(np.char.mod(b"123%s", A), tgt)
def test_decode(self):
bytestring = b'\x81\xc1\x81\xc1\x81\xc1'
assert_equal(np.char.decode(bytestring, encoding='cp037'),
'aAaAaA')
def test_encode(self):
unicode = 'aAaAaA'
assert_equal(np.char.encode(unicode, encoding='cp037'),
b'\x81\xc1\x81\xc1\x81\xc1')
def test_expandtabs(self):
s = "\tone level of indentation\n\t\ttwo levels of indentation"
assert_equal(
np.char.expandtabs(s, tabsize=2),
" one level of indentation\n two levels of indentation"
)
def test_join(self):
seps = np.array(['-', '_'])
assert_array_equal(np.char.join(seps, 'hello'),
['h-e-l-l-o', 'h_e_l_l_o'])
def test_partition(self):
assert_equal(np.char.partition('This string', ' '),
['This', ' ', 'string'])
def test_rpartition(self):
assert_equal(np.char.rpartition('This string here', ' '),
['This string', ' ', 'here'])
def test_replace(self):
assert_equal(np.char.replace('Python is good', 'good', 'great'),
'Python is great')
@ignore_charray_deprecation
def test_empty_indexing():
    """Regression test for ticket 1948."""
    # Fancy-indexing a chararray with an empty list must yield an empty
    # chararray, not a length-1 array holding a single empty string.
    arr = np.char.chararray((4,))
    assert_(arr[[]].size == 0)
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["ResponseMcpCallInProgress"]
class ResponseMcpCallInProgress(BaseModel):
    """Returned when an MCP tool call has started and is in progress."""

    # NOTE: this module is generated from the OpenAPI spec (see file header);
    # change the spec rather than hand-editing these fields.

    event_id: str
    """The unique ID of the server event."""

    item_id: str
    """The ID of the MCP tool call item."""

    output_index: int
    """The index of the output item in the response."""

    type: Literal["response.mcp_call.in_progress"]
    """The event type, must be `response.mcp_call.in_progress`."""
import attr
import importscan
import sentaku
from cfme.generic_objects.definition.button_groups import GenericObjectButtonGroupsCollection
from cfme.generic_objects.definition.button_groups import GenericObjectButtonsCollection
from cfme.generic_objects.instance import GenericObjectInstanceCollection
from cfme.modeling.base import BaseCollection
from cfme.modeling.base import BaseEntity
from cfme.utils.update import Updateable
@attr.s
class GenericObjectDefinition(BaseEntity, Updateable, sentaku.modeling.ElementMixin):
    """Generic Objects Definition class to context switch between UI and REST.

    Read/Update/Delete functionality.
    """

    # Child collections reachable from a definition entity.
    _collections = {
        'generic_objects': GenericObjectInstanceCollection,
        'generic_object_groups_buttons': GenericObjectButtonGroupsCollection,
        'generic_object_buttons': GenericObjectButtonsCollection
    }

    # sentaku dispatches these to the active implementation context
    # (UI or REST; see the imports at the bottom of the module).
    update = sentaku.ContextualMethod()
    delete = sentaku.ContextualMethod()
    exists = sentaku.ContextualProperty()
    add_button = sentaku.ContextualMethod()
    add_button_group = sentaku.ContextualMethod()
    generic_objects = sentaku.ContextualProperty()
    generic_object_buttons = sentaku.ContextualProperty()
    instance_count = sentaku.ContextualProperty()

    # NOTE: attr.ib declaration order defines the generated __init__
    # signature -- do not reorder.
    name = attr.ib()
    description = attr.ib()
    attributes = attr.ib(default=None)  # e.g. {'address': 'string'}
    associations = attr.ib(default=None)  # e.g. {'services': 'Service'}
    methods = attr.ib(default=None)  # e.g. ['method1', 'method2']
    custom_image_file_path = attr.ib(default=None)
    # Last REST response; populated by the REST implementation, not __init__.
    rest_response = attr.ib(default=None, init=False)
@attr.s
class GenericObjectDefinitionCollection(BaseCollection, sentaku.modeling.ElementMixin):
    """Collection of :class:`GenericObjectDefinition` entities."""

    ENTITY = GenericObjectDefinition

    # Implemented per-context (UI or REST) via sentaku dispatch.
    create = sentaku.ContextualMethod()
    all = sentaku.ContextualMethod()
from cfme.generic_objects.definition import rest, ui # NOQA last for import cycles
importscan.scan(rest)
importscan.scan(ui) | unknown | codeparrot/codeparrot-clean | ||
---
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/plugins/current/analysis-icu-normalization.html
---
# ICU normalization token filter [analysis-icu-normalization]
Normalizes characters as explained [here](https://unicode-org.github.io/icu/userguide/transforms/normalization/). It registers itself as the `icu_normalizer` token filter, which is available to all indices without any further configuration. The type of normalization can be specified with the `name` parameter, which accepts `nfc`, `nfkc`, and `nfkc_cf` (default).
Which letters are normalized can be controlled by specifying the `unicode_set_filter` parameter, which accepts a [UnicodeSet](https://icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.md).
You should probably prefer the [Normalization character filter](/reference/elasticsearch-plugins/analysis-icu-normalization-charfilter.md).
Here are two examples: the default usage and a customized token filter:
```console
PUT icu_sample
{
"settings": {
"index": {
"analysis": {
"analyzer": {
"nfkc_cf_normalized": { <1>
"tokenizer": "icu_tokenizer",
"filter": [
"icu_normalizer"
]
},
"nfc_normalized": { <2>
"tokenizer": "icu_tokenizer",
"filter": [
"nfc_normalizer"
]
}
},
"filter": {
"nfc_normalizer": {
"type": "icu_normalizer",
"name": "nfc"
}
}
}
}
}
}
```
1. Uses the default `nfkc_cf` normalization.
2. Uses the customized `nfc_normalizer` token filter, which is set to use `nfc` normalization. | unknown | github | https://github.com/elastic/elasticsearch | docs/reference/elasticsearch-plugins/analysis-icu-normalization.md |
"""
This module parse an UPnP device's XML definition in an Object.
@author: Raphael Slinckx
@copyright: Copyright 2005
@license: LGPL
@contact: U{raphael@slinckx.net<mailto:raphael@slinckx.net>}
@version: 0.1.0
"""
__revision__ = "$id"
from xml.dom import minidom
import logging
# Allowed UPnP services to use when mapping ports/external addresses
WANSERVICES = ['urn:schemas-upnp-org:service:WANIPConnection:1',
               'urn:schemas-upnp-org:service:WANPPPConnection:1']


class UPnPXml:
    """
    This object parses the XML definition, and stores the useful
    results in attributes.

    The device infos dictionary may contain the following keys:
        - friendlyname: A friendly name to call the device.
        - manufacturer: A manufacturer name for the device.

    Here are the different attributes:
        - deviceinfos: A dictionary of device infos as defined above.
        - controlurl: The control url, this is the url to use when sending
          SOAP requests to the device, relative to the base url.
        - wanservice: The WAN service to be used, one of the L{WANSERVICES}
        - urlbase: The base url to use when talking in SOAP to the device.
          The full url to use is obtained by urljoin(urlbase, controlurl)

    Each attribute is None (deviceinfos: empty dict) when the
    corresponding element was not found in the description.
    """

    def __init__(self, xml):
        """
        Parse the given XML string for UPnP infos. This creates the
        attributes when they are found, or None if no value was found.

        @param xml: a xml string to parse
        """
        logging.debug("Got UPNP Xml description:\n%s", xml)
        doc = minidom.parseString(xml)

        # Fetch various device info
        self.deviceinfos = {}
        try:
            attributes = {
                'friendlyname': 'friendlyName',
                'manufacturer': 'manufacturer'
            }
            device = doc.getElementsByTagName('device')[0]
            for name, tag in attributes.items():
                try:
                    # BUG FIX: this read 'firstChild.datas' (a typo); the
                    # AttributeError was silently swallowed below, so the
                    # device infos were never populated.
                    self.deviceinfos[name] = device.getElementsByTagName(
                        tag)[0].firstChild.data.encode('utf-8')
                except Exception:
                    # Tag missing or empty: skip just this device info.
                    pass
        except Exception:
            # No <device> element at all: leave deviceinfos empty.
            pass

        # Fetch device control url
        self.controlurl = None
        self.wanservice = None
        for service in doc.getElementsByTagName('service'):
            try:
                stype = service.getElementsByTagName(
                    'serviceType')[0].firstChild.data
                # Compare as text so the WANSERVICES match also works on
                # Python 3; the stored values stay utf-8 encoded as before.
                if stype in WANSERVICES:
                    self.controlurl = service.getElementsByTagName(
                        'controlURL')[0].firstChild.data.encode('utf-8')
                    self.wanservice = stype.encode('utf-8')
                    break
            except Exception:
                # Malformed <service> entry: try the next one.
                pass

        # Find base url
        self.urlbase = None
        try:
            self.urlbase = doc.getElementsByTagName(
                'URLBase')[0].firstChild.data.encode('utf-8')
        except Exception:
            pass
#include <c10/util/Exception.h>
#include <c10/util/ThreadLocal.h>
#include <c10/util/ThreadLocalDebugInfo.h>
#include <utility>
namespace c10 {

// Per-thread head of a stack of debug-info entries; entries form a
// singly-linked list through parent_info_. The macro below gives the
// thread-local slot a short alias.
C10_DEFINE_TLS_static(std::shared_ptr<ThreadLocalDebugInfo>, tls_debug_info);
#define debug_info (tls_debug_info.get())

/* static */
// Walk the current thread's stack and return the innermost entry of the
// requested kind, or nullptr when none is active.
DebugInfoBase* ThreadLocalDebugInfo::get(DebugInfoKind kind) {
  ThreadLocalDebugInfo* cur = debug_info.get();
  while (cur) {
    if (cur->kind_ == kind) {
      return cur->info_.get();
    }
    cur = cur->parent_info_.get();
  }
  return nullptr;
}

/* static */
// Snapshot the whole stack (shared ownership), e.g. to hand it to a task
// that runs on another thread.
std::shared_ptr<ThreadLocalDebugInfo> ThreadLocalDebugInfo::current() {
  return debug_info;
}

/* static */
// Unconditionally replace this thread's stack; used to restore a snapshot
// taken with current().
void ThreadLocalDebugInfo::_forceCurrentDebugInfo(
    std::shared_ptr<ThreadLocalDebugInfo> info) {
  debug_info = std::move(info);
}

/* static */
// Push a new entry of the given kind on top of this thread's stack.
void ThreadLocalDebugInfo::_push(
    DebugInfoKind kind,
    std::shared_ptr<DebugInfoBase> info) {
  auto prev_info = debug_info;
  debug_info = std::make_shared<ThreadLocalDebugInfo>();
  debug_info->parent_info_ = prev_info;
  debug_info->kind_ = kind;
  debug_info->info_ = std::move(info);
}

/* static */
// Pop and return the top entry; TORCH_CHECK fails if the stack is empty
// or the top entry is not of the expected kind.
std::shared_ptr<DebugInfoBase> ThreadLocalDebugInfo::_pop(DebugInfoKind kind) {
  TORCH_CHECK(
      debug_info && debug_info->kind_ == kind,
      "Expected debug info of type ",
      (size_t)kind);
  auto res = debug_info;
  debug_info = debug_info->parent_info_;
  return res->info_;
}

/* static */
// Like _pop(), but leaves the stack unchanged.
std::shared_ptr<DebugInfoBase> ThreadLocalDebugInfo::_peek(DebugInfoKind kind) {
  TORCH_CHECK(
      debug_info && debug_info->kind_ == kind,
      "Expected debug info of type ",
      (size_t)kind);
  return debug_info->info_;
}

// RAII guard: pushes `info` for the guard's lifetime and restores the
// previous stack in the destructor. A null `info` makes it a no-op.
DebugInfoGuard::DebugInfoGuard(
    DebugInfoKind kind,
    std::shared_ptr<DebugInfoBase> info) {
  if (!info) {
    return;
  }
  prev_info_ = debug_info;
  ThreadLocalDebugInfo::_push(kind, std::move(info));
  active_ = true;
}

DebugInfoGuard::~DebugInfoGuard() {
  // Only restore when the constructor actually installed something.
  if (active_) {
    debug_info = prev_info_;
  }
}

// Used only for setting a debug info after crossing the thread boundary;
// in this case we assume that thread pool's thread does not have an
// active debug info
DebugInfoGuard::DebugInfoGuard(std::shared_ptr<ThreadLocalDebugInfo> info) {
  if (!info) {
    return;
  }
  prev_info_ = std::move(debug_info);
  debug_info = std::move(info);
  active_ = true;
}

} // namespace c10
{
"compileOnSave": false,
"compilerOptions": {
"baseUrl": "./",
"outDir": "./dist/out-tsc",
"sourceMap": true,
"esModuleInterop": true,
"declaration": false,
"experimentalDecorators": true,
"module": "es2022",
"moduleResolution": "bundler",
"importHelpers": true,
"target": "es2022",
"typeRoots": ["node_modules/@types"],
"lib": ["es2018", "dom"]
},
"angularCompilerOptions": {
"fullTemplateTypeCheck": true,
"strictInjectionParameters": true
}
} | json | github | https://github.com/angular/angular | integration/cli-hello-world-ivy-i18n/tsconfig.json |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.misc.crashdialog."""
from qutebrowser.misc import crashdialog
# In the sample crash texts below, '_' marks positions where the real
# interpreter output contains a significant space; the tests apply
# .replace('_', ' ') before parsing so editors can't strip the trailing
# whitespace from these literals.
# NOTE(review): quoted filenames like "" look mangled in this copy
# (likely a stripped "<string>") -- verify against upstream.
VALID_CRASH_TEXT = """
Fatal Python error: Segmentation fault
_
Current thread 0x00007f09b538d700 (most recent call first):
File "", line 1 in testfunc
File "filename", line 88 in func
"""

# Same as above but with an empty function name after 'in'.
VALID_CRASH_TEXT_EMPTY = """
Fatal Python error: Aborted
_
Current thread 0x00007f09b538d700 (most recent call first):
File "", line 1 in_
File "filename", line 88 in func
"""

# Crash reported from a non-main thread ('Thread' instead of
# 'Current thread').
VALID_CRASH_TEXT_THREAD = """
Fatal Python error: Segmentation fault
_
Thread 0x00007fa135ac7700 (most recent call first):
File "", line 1 in testfunc
"""

# Input that does not look like a fatal stacktrace at all.
INVALID_CRASH_TEXT = """
Hello world!
"""
class TestParseFatalStacktrace:

    """Tests for parse_fatal_stacktrace."""

    def _parse(self, text):
        # Undo the '_' space placeholders used in the sample texts, then
        # run the parser under test.
        prepared = text.strip().replace('_', ' ')
        return crashdialog.parse_fatal_stacktrace(prepared)

    def test_valid_text(self):
        """Test parse_fatal_stacktrace with a valid text."""
        typ, func = self._parse(VALID_CRASH_TEXT)
        assert (typ, func) == ("Segmentation fault", 'testfunc')

    def test_valid_text_thread(self):
        """Test parse_fatal_stacktrace with a valid text #2."""
        typ, func = self._parse(VALID_CRASH_TEXT_THREAD)
        assert (typ, func) == ("Segmentation fault", 'testfunc')

    def test_valid_text_empty(self):
        """Test parse_fatal_stacktrace with a valid text but empty function."""
        typ, func = self._parse(VALID_CRASH_TEXT_EMPTY)
        assert (typ, func) == ('Aborted', '')

    def test_invalid_text(self):
        """Test parse_fatal_stacktrace with an invalid text."""
        typ, func = self._parse(INVALID_CRASH_TEXT)
        assert (typ, func) == ('', '')
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Assets Management',
'version': '1.0',
'depends': ['account'],
'author': 'OpenERP S.A.',
'description': """
Financial and accounting asset management.
==========================================
This Module manages the assets owned by a company or an individual. It will keep
track of depreciation's occurred on those assets. And it allows to create Move's
of the depreciation lines.
""",
'website': 'https://www.odoo.com/page/accounting',
'category': 'Accounting & Finance',
'sequence': 32,
'demo': [ 'account_asset_demo.xml'],
'test': [
'test/account_asset_demo.yml',
'test/account_asset.yml',
'test/account_asset_wizard.yml',
],
'data': [
'security/account_asset_security.xml',
'security/ir.model.access.csv',
'wizard/account_asset_change_duration_view.xml',
'wizard/wizard_asset_compute_view.xml',
'account_asset_view.xml',
'account_asset_invoice_view.xml',
'report/account_asset_report_view.xml',
],
'auto_install': False,
'installable': True,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
#!/bin/bash
# Script used only in CD pipeline
# Clones, builds and installs the Arm Compute Library (ACL) into /acl.
set -eux

# Release tag to build; overridable from the environment by the pipeline.
ACL_VERSION=${ACL_VERSION:-"v52.6.0"}
ACL_INSTALL_DIR="/acl"

# Clone ACL
# Shallow clone of only the requested tag keeps the checkout small.
git clone https://github.com/ARM-software/ComputeLibrary.git -b "${ACL_VERSION}" --depth 1 --shallow-submodules
ACL_CHECKOUT_DIR="ComputeLibrary"

# Build with scons
# NEON-only native armv8a build, no OpenCL.
# NOTE(review): fixed_format_kernels/openmp settings presumably match what
# the downstream consumer expects -- confirm against the CI build config.
pushd $ACL_CHECKOUT_DIR
scons -j8 Werror=0 debug=0 neon=1 opencl=0 embed_kernels=0 \
    os=linux arch=armv8a build=native multi_isa=1 \
    fixed_format_kernels=1 openmp=1 cppthreads=0
popd

# Install ACL
# Headers plus the src/ and build/ trees are copied because consumers
# compile against the build artifacts directly.
sudo mkdir -p ${ACL_INSTALL_DIR}
for d in arm_compute include utils support src build
do
    sudo cp -r ${ACL_CHECKOUT_DIR}/${d} ${ACL_INSTALL_DIR}/${d}
done
rm -rf $ACL_CHECKOUT_DIR
//===--- ArgsToFrontendOutputsConverter.cpp -------------------------------===//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#include "ArgsToFrontendOutputsConverter.h"
#include "ArgsToFrontendInputsConverter.h"
#include "ArgsToFrontendOptionsConverter.h"
#include "swift/AST/DiagnosticsFrontend.h"
#include "swift/Basic/Assertions.h"
#include "swift/Basic/OutputFileMap.h"
#include "swift/Basic/Platform.h"
#include "swift/Frontend/Frontend.h"
#include "swift/Option/Options.h"
#include "swift/Option/SanitizerOptions.h"
#include "swift/Strings.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/LineIterator.h"
#include "llvm/Support/Path.h"
using namespace swift;
using namespace llvm::opt;
/// Compute all output paths from the parsed frontend arguments.
///
/// \param mainOutputs filled with one primary output path per
///        main-output-producing input.
/// \param mainOutputsForIndexUnits filled (parallel to \p mainOutputs) only
///        when -index-unit-output-path or its filelist form was passed.
/// \param supplementaryOutputs filled with the supplementary output paths.
/// \return true on error; a diagnostic has already been emitted by the
///         helper that failed.
bool ArgsToFrontendOutputsConverter::convert(
    std::vector<std::string> &mainOutputs,
    std::vector<std::string> &mainOutputsForIndexUnits,
    std::vector<SupplementaryOutputPaths> &supplementaryOutputs) {
  // Main outputs come from -o / -output-filelist.
  std::optional<OutputFilesComputer> ofc = OutputFilesComputer::create(
      Args, Diags, InputsAndOutputs,
      {"output", options::OPT_o, options::OPT_output_filelist, "-o"});
  if (!ofc)
    return true;
  std::optional<std::vector<std::string>> mains = ofc->computeOutputFiles();
  if (!mains)
    return true;
  // Index-unit output paths are computed only when explicitly requested.
  std::optional<std::vector<std::string>> indexMains;
  if (Args.hasArg(options::OPT_index_unit_output_path,
                  options::OPT_index_unit_output_path_filelist)) {
    std::optional<OutputFilesComputer> iuofc = OutputFilesComputer::create(
        Args, Diags, InputsAndOutputs,
        {"index unit output path", options::OPT_index_unit_output_path,
         options::OPT_index_unit_output_path_filelist,
         "-index-unit-output-path"});
    if (!iuofc)
      return true;
    indexMains = iuofc->computeOutputFiles();
    if (!indexMains)
      return true;
    // Both computers ran the same per-input counting checks, so the lists
    // must be parallel.
    assert(mains->size() == indexMains->size() && "checks not equivalent?");
  }
  std::optional<std::vector<SupplementaryOutputPaths>> supplementaries =
      SupplementaryOutputPathsComputer(Args, Diags, InputsAndOutputs, *mains,
                                       ModuleName)
          .computeOutputPaths();
  if (!supplementaries)
    return true;
  // Commit results only after every stage succeeded.
  mainOutputs = std::move(*mains);
  if (indexMains)
    mainOutputsForIndexUnits = std::move(*indexMains);
  supplementaryOutputs = std::move(*supplementaries);
  return false;
}
std::optional<std::vector<std::string>>
ArgsToFrontendOutputsConverter::readOutputFileList(const StringRef filelistPath,
DiagnosticEngine &diags) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> buffer =
llvm::MemoryBuffer::getFile(filelistPath);
if (!buffer) {
diags.diagnose(SourceLoc(), diag::cannot_open_file, filelistPath,
buffer.getError().message());
return std::nullopt;
}
std::vector<std::string> outputFiles;
for (StringRef line : make_range(llvm::line_iterator(*buffer.get()), {})) {
outputFiles.push_back(line.str());
}
return outputFiles;
}
std::optional<std::vector<std::string>>
OutputFilesComputer::getOutputFilenamesFromCommandLineOrFilelist(
const ArgList &args, DiagnosticEngine &diags, options::ID singleOpt,
options::ID filelistOpt) {
if (const Arg *A = args.getLastArg(filelistOpt)) {
assert(!args.hasArg(singleOpt) &&
"don't use -o with -output-filelist or -index-unit-output-path with "
" -index-unit-output-filelist");
return ArgsToFrontendOutputsConverter::readOutputFileList(A->getValue(),
diags);
}
return args.getAllArgValues(singleOpt);
}
/// Build an OutputFilesComputer for the output flavor described by
/// \p optInfo (main outputs via -o, or index-unit outputs).
/// Returns std::nullopt after emitting a diagnostic on error.
std::optional<OutputFilesComputer> OutputFilesComputer::create(
    const llvm::opt::ArgList &args, DiagnosticEngine &diags,
    const FrontendInputsAndOutputs &inputsAndOutputs, OutputOptInfo optInfo) {
  std::optional<std::vector<std::string>> outputArguments =
      getOutputFilenamesFromCommandLineOrFilelist(args, diags, optInfo.SingleID,
                                                  optInfo.FilelistID);
  if (!outputArguments)
    return std::nullopt;
  // A single argument that names an existing directory means "derive each
  // output file name, but place it in that directory".
  // NOTE(review): this StringRef points into the local `outputArguments`
  // vector; it looks like it must be copied (not retained as a reference) by
  // the constructor below to stay valid after create() returns — confirm.
  const StringRef outputDirectoryArgument =
      outputArguments->size() == 1 &&
              llvm::sys::fs::is_directory(outputArguments->front())
          ? StringRef(outputArguments->front())
          : StringRef();
  // If the argument was a directory, there are no per-input file arguments.
  ArrayRef<std::string> outputFileArguments =
      outputDirectoryArgument.empty() ? ArrayRef<std::string>(*outputArguments)
                                      : ArrayRef<std::string>();
  const StringRef firstInput =
      inputsAndOutputs.hasSingleInput()
          ? StringRef(inputsAndOutputs.getFilenameOfFirstInput())
          : StringRef();
  const FrontendOptions::ActionType requestedAction =
      ArgsToFrontendOptionsConverter::determineRequestedAction(args);
  // If any explicit output files were given, there must be exactly one per
  // main-output-producing input.
  if (!outputFileArguments.empty() &&
      outputFileArguments.size() !=
          inputsAndOutputs.countOfInputsProducingMainOutputs()) {
    diags.diagnose(
        SourceLoc(),
        diag::error_if_any_output_files_are_specified_they_all_must_be,
        optInfo.PrettyName);
    return std::nullopt;
  }
  const file_types::ID outputType =
      FrontendOptions::formatForPrincipalOutputFileForAction(requestedAction);
  return OutputFilesComputer(
      diags, inputsAndOutputs, std::move(outputFileArguments),
      outputDirectoryArgument, firstInput, requestedAction,
      args.getLastArg(options::OPT_module_name),
      file_types::getExtension(outputType),
      FrontendOptions::doesActionProduceTextualOutput(requestedAction),
      optInfo);
}
/// Member-wise initializer; all validation and path computation happens in
/// create() and computeOutputFiles().
OutputFilesComputer::OutputFilesComputer(
    DiagnosticEngine &diags,
    const FrontendInputsAndOutputs &inputsAndOutputs,
    std::vector<std::string> outputFileArguments,
    const StringRef outputDirectoryArgument, const StringRef firstInput,
    const FrontendOptions::ActionType requestedAction,
    const llvm::opt::Arg *moduleNameArg, const StringRef suffix,
    const bool hasTextualOutput, OutputOptInfo optInfo)
    : Diags(diags), InputsAndOutputs(inputsAndOutputs),
      OutputFileArguments(outputFileArguments),
      OutputDirectoryArgument(outputDirectoryArgument), FirstInput(firstInput),
      RequestedAction(requestedAction), ModuleNameArg(moduleNameArg),
      Suffix(suffix), HasTextualOutput(hasTextualOutput),
      OutputInfo(optInfo) {}
/// Produce one output path per main-output-producing input, consuming the
/// explicit output arguments (if any) in input order.
std::optional<std::vector<std::string>>
OutputFilesComputer::computeOutputFiles() const {
  std::vector<std::string> result;
  unsigned nextArgIndex = 0;
  const bool failed = InputsAndOutputs.forEachInputProducingAMainOutputFile(
      [&](const InputFile &input) -> bool {
        // Pair this input with the next explicit -o argument, if present.
        StringRef explicitOutput;
        if (!OutputFileArguments.empty())
          explicitOutput = OutputFileArguments[nextArgIndex++];
        std::optional<std::string> file =
            computeOutputFile(explicitOutput, input);
        if (!file)
          return true; // error; diagnostic already emitted
        result.push_back(*file);
        return false;
      });
  if (failed)
    return std::nullopt;
  return result;
}
/// Decide the output path for a single input.
std::optional<std::string>
OutputFilesComputer::computeOutputFile(StringRef outputArg,
                                       const InputFile &input) const {
  // Actions that produce no output get an empty path. The frontend does not
  // currently diagnose a -o argument for such an action, for instance
  //   swiftc -frontend -o foo -interpret foo.swift
  if (!FrontendOptions::doesActionProduceOutput(RequestedAction))
    return std::string();
  // Precedence: explicit directory, then explicit file path, then a name
  // derived from the input itself.
  if (!OutputDirectoryArgument.empty())
    return deriveOutputFileForDirectory(input);
  if (outputArg.empty())
    return deriveOutputFileFromInput(input);
  return outputArg.str();
}
/// Derive an output file name from the input when no -o was given.
std::optional<std::string>
OutputFilesComputer::deriveOutputFileFromInput(const InputFile &input) const {
  // Textual output, or stdin as the input, goes to stdout.
  if (HasTextualOutput || input.getFileName() == "-")
    return std::string("-");
  const std::string stem = determineBaseNameOfOutput(input);
  if (!stem.empty())
    return deriveOutputFileFromParts("", stem);
  // Assuming FrontendOptions::doesActionProduceOutput(RequestedAction)
  Diags.diagnose(SourceLoc(), diag::error_no_output_filename_specified,
                 OutputInfo.PrettyName);
  return std::nullopt;
}
/// Derive an output file name to be placed inside the directory passed as -o.
std::optional<std::string> OutputFilesComputer::deriveOutputFileForDirectory(
    const InputFile &input) const {
  const std::string stem = determineBaseNameOfOutput(input);
  if (stem.empty()) {
    // No usable base name, so an implicit file inside the directory cannot
    // be named.
    Diags.diagnose(SourceLoc(), diag::error_implicit_output_file_is_directory,
                   OutputDirectoryArgument, OutputInfo.SingleOptSpelling);
    return std::nullopt;
  }
  return deriveOutputFileFromParts(OutputDirectoryArgument, stem);
}
std::string
OutputFilesComputer::determineBaseNameOfOutput(const InputFile &input) const {
std::string nameToStem =
input.isPrimary()
? input.getFileName()
: ModuleNameArg ? ModuleNameArg->getValue() : FirstInput;
return llvm::sys::path::stem(nameToStem).str();
}
/// Join \p dir and \p base and apply the action's output extension.
std::string
OutputFilesComputer::deriveOutputFileFromParts(StringRef dir,
                                               StringRef base) const {
  assert(!base.empty());
  llvm::SmallString<128> result(dir);
  llvm::sys::path::append(result, base);
  llvm::sys::path::replace_extension(result, Suffix);
  return result.str().str();
}
/// Member-wise initializer; also caches the requested frontend action, which
/// several path-derivation helpers below consult.
SupplementaryOutputPathsComputer::SupplementaryOutputPathsComputer(
    const ArgList &args, DiagnosticEngine &diags,
    const FrontendInputsAndOutputs &inputsAndOutputs,
    ArrayRef<std::string> outputFiles, StringRef moduleName)
    : Args(args), Diags(diags), InputsAndOutputs(inputsAndOutputs),
      OutputFiles(outputFiles), ModuleName(moduleName),
      RequestedAction(
          ArgsToFrontendOptionsConverter::determineRequestedAction(Args)) {}
/// Compute the supplementary output paths for every relevant input.
///
/// Paths come either from -supplementary-output-file-map or directly from
/// command-line options, then are normalized per input. Multi-threaded WMO
/// (no primary inputs, several main outputs) is special-cased in two places
/// below. Returns std::nullopt on error (diagnostics already emitted).
std::optional<std::vector<SupplementaryOutputPaths>>
SupplementaryOutputPathsComputer::computeOutputPaths() const {
  std::optional<std::vector<SupplementaryOutputPaths>> pathsFromUser =
      Args.hasArg(options::OPT_supplementary_output_file_map)
          ? readSupplementaryOutputFileMap()
          : getSupplementaryOutputPathsFromArguments();
  if (!pathsFromUser)
    return std::nullopt;
  // Sanity-check the relationship between main outputs, inputs, and the
  // user-supplied supplementary path sets.
  if (InputsAndOutputs.hasPrimaryInputs())
    assert(OutputFiles.size() == pathsFromUser->size());
  else {
    if (!InputsAndOutputs.isSingleThreadedWMO()) {
      assert(OutputFiles.size() == InputsAndOutputs.inputCount());
    }
    assert(pathsFromUser->size() == 1 ||
           pathsFromUser->size() == InputsAndOutputs.inputCount());
  }
  // For other cases, process the paths normally
  std::vector<SupplementaryOutputPaths> outputPaths;
  unsigned i = 0;
  bool hadError = false;
  // In multi-threaded WMO with supplementary output file map, we have paths
  // for all inputs, so process them all through computeOutputPathsForOneInput
  if (!InputsAndOutputs.hasPrimaryInputs() && OutputFiles.size() > 1 &&
      pathsFromUser->size() == InputsAndOutputs.inputCount()) {
    hadError = InputsAndOutputs.forEachInput([&](const InputFile &input) -> bool {
      if (auto suppPaths = computeOutputPathsForOneInput(
              OutputFiles[i], (*pathsFromUser)[i], input)) {
        ++i;
        outputPaths.push_back(*suppPaths);
        return false;
      }
      return true;
    });
  } else {
    // Standard path: process inputs that produce supplementary output
    // NOTE(review): this indexes (*pathsFromUser)[i] per supplementary-
    // output-producing input; presumably pathsFromUser->size() == 1 only
    // ever occurs together with a single such input, else this would read
    // out of bounds — confirm against the asserts above.
    hadError = InputsAndOutputs.forEachInputProducingSupplementaryOutput(
        [&](const InputFile &input) -> bool {
          if (auto suppPaths = computeOutputPathsForOneInput(
                  OutputFiles[i], (*pathsFromUser)[i], input)) {
            ++i;
            outputPaths.push_back(*suppPaths);
            return false;
          }
          return true;
        });
  }
  if (hadError)
    return std::nullopt;
  // In WMO mode without supplementary output file map, compute supplementary
  // output paths for optimization records for inputs beyond the first one.
  if (!InputsAndOutputs.hasPrimaryInputs() && OutputFiles.size() > 1 &&
      pathsFromUser->size() != InputsAndOutputs.inputCount()) {
    unsigned i = 0;
    InputsAndOutputs.forEachInput([&](const InputFile &input) -> bool {
      // First input is already computed.
      if (InputsAndOutputs.firstInput().getFileName() == input.getFileName()) {
        ++i;
        return false;
      }
      SupplementaryOutputPaths outputs;
      // Compute auxiliar opt record paths.
      if(OutputFiles.size() > 1) {
        StringRef defaultSupplementaryOutputPathExcludingExtension =
            deriveDefaultSupplementaryOutputPathExcludingExtension(
              OutputFiles[i], input);
        // The `true` forces the default path even without an explicit
        // -save-optimization-record-path argument for this input.
        auto YAMLOptRecordPath = determineSupplementaryOutputFilename(
            options::OPT_save_optimization_record,
            "",
            file_types::TY_YAMLOptRecord, "",
            defaultSupplementaryOutputPathExcludingExtension, true);
        outputs.YAMLOptRecordPath = YAMLOptRecordPath;
        auto bitstreamOptRecordPath = determineSupplementaryOutputFilename(
            options::OPT_save_optimization_record,
            "",
            file_types::TY_BitstreamOptRecord, "",
            defaultSupplementaryOutputPathExcludingExtension, true);
        outputs.BitstreamOptRecordPath = bitstreamOptRecordPath;
      }
      outputPaths.emplace_back(std::move(outputs));
      ++i;
      return false;
    });
  }
  return outputPaths;
}
/// Collect supplementary output paths given explicitly on the command line.
///
/// Each path option is gathered independently via
/// getSupplementaryFilenamesFromArguments, which diagnoses and yields
/// std::nullopt when the argument count is wrong; the combined check below
/// propagates any such failure. Returns one SupplementaryOutputPaths per
/// supplementary-output-producing file (or per input in multi-threaded WMO
/// with multiple IR outputs).
std::optional<std::vector<SupplementaryOutputPaths>>
SupplementaryOutputPathsComputer::getSupplementaryOutputPathsFromArguments()
    const {
  auto clangHeaderOutput = getSupplementaryFilenamesFromArguments(
      options::OPT_emit_objc_header_path);
  auto moduleOutput =
      getSupplementaryFilenamesFromArguments(options::OPT_emit_module_path);
  auto moduleDocOutput =
      getSupplementaryFilenamesFromArguments(options::OPT_emit_module_doc_path);
  auto dependenciesFile = getSupplementaryFilenamesFromArguments(
      options::OPT_emit_dependencies_path);
  auto referenceDependenciesFile = getSupplementaryFilenamesFromArguments(
      options::OPT_emit_reference_dependencies_path);
  auto serializedDiagnostics = getSupplementaryFilenamesFromArguments(
      options::OPT_serialize_diagnostics_path);
  auto loadedModuleTrace = getSupplementaryFilenamesFromArguments(
      options::OPT_emit_loaded_module_trace_path);
  auto TBD = getSupplementaryFilenamesFromArguments(options::OPT_emit_tbd_path);
  auto moduleInterfaceOutput = getSupplementaryFilenamesFromArguments(
      options::OPT_emit_module_interface_path);
  auto privateModuleInterfaceOutput = getSupplementaryFilenamesFromArguments(
      options::OPT_emit_private_module_interface_path);
  auto packageModuleInterfaceOutput = getSupplementaryFilenamesFromArguments(options::OPT_emit_package_module_interface_path);
  auto moduleSourceInfoOutput = getSupplementaryFilenamesFromArguments(
      options::OPT_emit_module_source_info_path);
  auto moduleSummaryOutput = getSupplementaryFilenamesFromArguments(
      options::OPT_emit_module_summary_path);
  auto abiDescriptorOutput = getSupplementaryFilenamesFromArguments(
      options::OPT_emit_abi_descriptor_path);
  auto apiDescriptorOutput = getSupplementaryFilenamesFromArguments(
      options::OPT_emit_api_descriptor_path);
  auto constValuesOutput = getSupplementaryFilenamesFromArguments(
      options::OPT_emit_const_values_path);
  auto moduleSemanticInfoOutput = getSupplementaryFilenamesFromArguments(
      options::OPT_emit_module_semantic_info_path);
  auto optRecordOutput = getSupplementaryFilenamesFromArguments(
      options::OPT_save_optimization_record_path);
  auto silOutput =
      getSupplementaryFilenamesFromArguments(options::OPT_sil_output_path);
  auto irOutput =
      getSupplementaryFilenamesFromArguments(options::OPT_ir_output_path);
  // Bug fix: apiDescriptorOutput and constValuesOutput were missing from this
  // check, yet both are dereferenced unconditionally below; a wrong argument
  // count for -emit-api-descriptor-path or -emit-const-values-path would have
  // dereferenced an empty optional (undefined behavior).
  if (!clangHeaderOutput || !moduleOutput || !moduleDocOutput ||
      !dependenciesFile || !referenceDependenciesFile ||
      !serializedDiagnostics || !loadedModuleTrace || !TBD ||
      !moduleInterfaceOutput || !privateModuleInterfaceOutput ||
      !packageModuleInterfaceOutput || !moduleSourceInfoOutput ||
      !moduleSummaryOutput || !abiDescriptorOutput || !apiDescriptorOutput ||
      !constValuesOutput || !moduleSemanticInfoOutput || !optRecordOutput ||
      !silOutput || !irOutput) {
    return std::nullopt;
  }
  std::vector<SupplementaryOutputPaths> result;
  // In WMO mode with multiple IR output paths, we need to create one
  // SupplementaryOutputPaths per input file, not just one for the module
  unsigned N = InputsAndOutputs.countOfFilesProducingSupplementaryOutput();
  if (!InputsAndOutputs.hasPrimaryInputs() && irOutput->size() > 1) {
    // WMO mode with multiple IR outputs: use input count instead of 1
    N = InputsAndOutputs.inputCount();
  }
  // Find the index of SIL output path matching module name
  auto findSILIndexForModuleName = [&]() -> unsigned {
    if (!InputsAndOutputs.hasPrimaryInputs() && silOutput->size() > 1) {
      // In WMO mode with multiple SIL output paths, find the one whose matches
      // module name
      for (unsigned i = 0; i < silOutput->size(); ++i) {
        StringRef silPath = (*silOutput)[i];
        if (!silPath.empty()) {
          StringRef basename = llvm::sys::path::stem(silPath);
          if (basename == ModuleName) {
            return i;
          }
        }
      }
      // If no match found, fall back to first
      return 0;
    }
    return 0;
  };
  unsigned silOutputIndex = findSILIndexForModuleName();
  for (unsigned i = 0; i < N; ++i) {
    SupplementaryOutputPaths sop;
    // In multi-threaded WMO with multiple IR outputs, most supplementary outputs
    // are per-module (size 1), only IR is per-file. Use index 0 for module outputs.
    unsigned moduleIndex = (!InputsAndOutputs.hasPrimaryInputs() && irOutput->size() > 1) ? 0 : i;
    sop.ClangHeaderOutputPath = (*clangHeaderOutput)[moduleIndex];
    sop.ModuleOutputPath = (*moduleOutput)[moduleIndex];
    sop.ModuleDocOutputPath = (*moduleDocOutput)[moduleIndex];
    sop.DependenciesFilePath = (*dependenciesFile)[moduleIndex];
    sop.ReferenceDependenciesFilePath = (*referenceDependenciesFile)[moduleIndex];
    sop.SerializedDiagnosticsPath = (*serializedDiagnostics)[moduleIndex];
    sop.LoadedModuleTracePath = (*loadedModuleTrace)[moduleIndex];
    sop.TBDPath = (*TBD)[moduleIndex];
    sop.ModuleInterfaceOutputPath = (*moduleInterfaceOutput)[moduleIndex];
    sop.PrivateModuleInterfaceOutputPath = (*privateModuleInterfaceOutput)[moduleIndex];
    sop.PackageModuleInterfaceOutputPath = (*packageModuleInterfaceOutput)[moduleIndex];
    sop.ModuleSourceInfoOutputPath = (*moduleSourceInfoOutput)[moduleIndex];
    sop.ModuleSummaryOutputPath = (*moduleSummaryOutput)[moduleIndex];
    sop.ABIDescriptorOutputPath = (*abiDescriptorOutput)[moduleIndex];
    sop.APIDescriptorOutputPath = (*apiDescriptorOutput)[moduleIndex];
    sop.ConstValuesOutputPath = (*constValuesOutput)[moduleIndex];
    sop.ModuleSemanticInfoOutputPath = (*moduleSemanticInfoOutput)[moduleIndex];
    // Optimization record paths are per-file in multi-threaded WMO, like IR
    sop.YAMLOptRecordPath = (*optRecordOutput)[i];
    sop.BitstreamOptRecordPath = (*optRecordOutput)[i];
    sop.SILOutputPath = (*silOutput)[silOutputIndex];
    sop.LLVMIROutputPath = (*irOutput)[i];
    result.push_back(sop);
  }
  return result;
}
// Extend this routine for filelists if/when we have them.
/// Collect all values of one supplementary-path option.
///
/// Normally returns exactly one entry per file producing supplementary
/// output (padding with empty strings when the option is absent); diagnoses
/// error_wrong_number_of_arguments on a count mismatch, except for the
/// special cases commented inline.
std::optional<std::vector<std::string>>
SupplementaryOutputPathsComputer::getSupplementaryFilenamesFromArguments(
    options::ID pathID) const {
  std::vector<std::string> paths = Args.getAllArgValues(pathID);
  const unsigned N =
      InputsAndOutputs.countOfFilesProducingSupplementaryOutput();
  if (paths.size() == N)
    return paths;
  else if (pathID == options::OPT_emit_loaded_module_trace_path &&
           paths.size() < N) {
    // We only need one file to output the module trace file because they
    // are all equivalent. Add additional empty output paths for module trace to
    // make sure the compiler won't panic for diag::error_wrong_number_of_arguments.
    for(unsigned I = paths.size(); I != N; I ++)
      paths.emplace_back();
    return paths;
  }
  // Special handling for IR and optimization record output paths: allow multiple paths per file
  // type. Note: SIL is NOT included here because in WMO mode, SIL is generated once
  // per module, not per source file.
  else if ((pathID == options::OPT_ir_output_path ||
            pathID == options::OPT_save_optimization_record_path) &&
           paths.size() > N) {
    // For parallel compilation, we can have multiple IR/opt-record output paths
    // so return all the paths.
    return paths;
  }
  if (paths.empty()) {
    // For IR and optimization records in multi-threaded WMO, we need one entry per input file.
    // Check if WMO is enabled and we have multiple output files (multi-threaded WMO).
    if ((pathID == options::OPT_ir_output_path ||
         pathID == options::OPT_save_optimization_record_path) &&
        Args.hasArg(options::OPT_whole_module_optimization) &&
        OutputFiles.size() > 1) {
      return std::vector<std::string>(OutputFiles.size(), std::string());
    }
    return std::vector<std::string>(N, std::string());
  }
  // Count mismatch with at least one explicit path: report it against the
  // option's spelled name. (paths is non-empty here, so getLastArg is
  // non-null.)
  Diags.diagnose(SourceLoc(), diag::error_wrong_number_of_arguments,
                 Args.getLastArg(pathID)->getOption().getPrefixedName(), N,
                 paths.size());
  return std::nullopt;
}
/// Whether \p action is a full compilation for which the fine-grained module
/// trace file should be produced.
static bool shouldEmitFineModuleTrace(FrontendOptions::ActionType action) {
  // Only full compilation jobs should emit fine module tracing file.
  // Other partial compilation jobs, such as emitting modules, only typecheck partially
  // so walking into every function bodies may be risky.
  // NOTE(review): the switch covers every current ActionType, so control
  // cannot fall off the end today; a newly added case without a matching
  // entry here would make the function return garbage — keep this exhaustive.
  switch(action) {
  case swift::FrontendOptions::ActionType::Typecheck:
  case swift::FrontendOptions::ActionType::EmitSILGen:
  case swift::FrontendOptions::ActionType::EmitSIL:
  case swift::FrontendOptions::ActionType::EmitAssembly:
  case swift::FrontendOptions::ActionType::EmitLoweredSIL:
  case swift::FrontendOptions::ActionType::EmitIRGen:
  case swift::FrontendOptions::ActionType::EmitIR:
  case swift::FrontendOptions::ActionType::EmitBC:
  case swift::FrontendOptions::ActionType::EmitObject:
    return true;
  case swift::FrontendOptions::ActionType::NoneAction:
  case swift::FrontendOptions::ActionType::Parse:
  case swift::FrontendOptions::ActionType::ResolveImports:
  case swift::FrontendOptions::ActionType::DumpParse:
  case swift::FrontendOptions::ActionType::DumpInterfaceHash:
  case swift::FrontendOptions::ActionType::DumpAST:
  case swift::FrontendOptions::ActionType::PrintAST:
  case swift::FrontendOptions::ActionType::PrintASTDecl:
  case swift::FrontendOptions::ActionType::DumpScopeMaps:
  case swift::FrontendOptions::ActionType::EmitImportedModules:
  case swift::FrontendOptions::ActionType::EmitPCH:
  case swift::FrontendOptions::ActionType::EmitModuleOnly:
  case swift::FrontendOptions::ActionType::MergeModules:
  case swift::FrontendOptions::ActionType::CompileModuleFromInterface:
  case swift::FrontendOptions::ActionType::TypecheckModuleFromInterface:
  case swift::FrontendOptions::ActionType::EmitSIBGen:
  case swift::FrontendOptions::ActionType::EmitSIB:
  case swift::FrontendOptions::ActionType::Immediate:
  case swift::FrontendOptions::ActionType::REPL:
  case swift::FrontendOptions::ActionType::DumpTypeInfo:
  case swift::FrontendOptions::ActionType::EmitPCM:
  case swift::FrontendOptions::ActionType::DumpPCM:
  case swift::FrontendOptions::ActionType::ScanDependencies:
  case swift::FrontendOptions::ActionType::PrintVersion:
  case swift::FrontendOptions::ActionType::PrintArguments:
    return false;
  }
}
/// Resolve the final supplementary output paths for one input, combining the
/// user-supplied paths in \p pathsFromArguments with defaults derived from
/// \p outputFile / \p input where an emit option was given without a path.
std::optional<SupplementaryOutputPaths>
SupplementaryOutputPathsComputer::computeOutputPathsForOneInput(
    StringRef outputFile, const SupplementaryOutputPaths &pathsFromArguments,
    const InputFile &input) const {
  StringRef defaultSupplementaryOutputPathExcludingExtension =
      deriveDefaultSupplementaryOutputPathExcludingExtension(outputFile, input);
  using namespace options;
  auto dependenciesFilePath = determineSupplementaryOutputFilename(
      OPT_emit_dependencies, pathsFromArguments.DependenciesFilePath,
      file_types::TY_Dependencies, "",
      defaultSupplementaryOutputPathExcludingExtension);
  auto referenceDependenciesFilePath = determineSupplementaryOutputFilename(
      OPT_emit_reference_dependencies,
      pathsFromArguments.ReferenceDependenciesFilePath,
      file_types::TY_SwiftDeps, "",
      defaultSupplementaryOutputPathExcludingExtension);
  auto constValuesOutputPath = determineSupplementaryOutputFilename(
      OPT_emit_const_values,
      pathsFromArguments.ConstValuesOutputPath,
      file_types::TY_ConstValues, "",
      defaultSupplementaryOutputPathExcludingExtension);
  auto serializedDiagnosticsPath = determineSupplementaryOutputFilename(
      OPT_serialize_diagnostics, pathsFromArguments.SerializedDiagnosticsPath,
      file_types::TY_SerializedDiagnostics, "",
      defaultSupplementaryOutputPathExcludingExtension);
  // There is no non-path form of -emit-fixits-path
  auto fixItsOutputPath = pathsFromArguments.FixItsOutputPath;
  auto clangHeaderOutputPath = determineSupplementaryOutputFilename(
      OPT_emit_objc_header, pathsFromArguments.ClangHeaderOutputPath,
      file_types::TY_ClangHeader, "",
      defaultSupplementaryOutputPathExcludingExtension);
  auto loadedModuleTracePath = determineSupplementaryOutputFilename(
      OPT_emit_loaded_module_trace, pathsFromArguments.LoadedModuleTracePath,
      file_types::TY_ModuleTrace, "",
      defaultSupplementaryOutputPathExcludingExtension);
  // We piggy-back on the loadedModuleTracePath to decide (1) whether
  // to emit the fine module Trace file, and (2) where to emit the fine module
  // trace file if the path isn't explicitly given by
  // SWIFT_COMPILER_FINE_GRAINED_TRACE_PATH.
  // FIXME: we probably need to move this to a frontend argument.
  llvm::SmallString<128> FineModuleTracePath;
  if (!loadedModuleTracePath.empty() &&
      shouldEmitFineModuleTrace(RequestedAction) &&
      !Args.hasArg(OPT_disable_fine_module_tracing)) {
    if (const char *P = ::getenv("SWIFT_COMPILER_FINE_GRAINED_TRACE_PATH")) {
      // Environment variable overrides the derived location entirely.
      StringRef FilePath = P;
      llvm::sys::path::append(FineModuleTracePath, FilePath);
    } else {
      // Otherwise: same directory as the module trace, fixed file name.
      llvm::sys::path::append(FineModuleTracePath, loadedModuleTracePath);
      llvm::sys::path::remove_filename(FineModuleTracePath);
      llvm::sys::path::append(FineModuleTracePath,
                              ".SWIFT_FINE_DEPENDENCY_TRACE.json");
    }
  }
  auto tbdPath = determineSupplementaryOutputFilename(
      OPT_emit_tbd, pathsFromArguments.TBDPath, file_types::TY_TBD, "",
      defaultSupplementaryOutputPathExcludingExtension);
  auto moduleDocOutputPath = determineSupplementaryOutputFilename(
      OPT_emit_module_doc, pathsFromArguments.ModuleDocOutputPath,
      file_types::TY_SwiftModuleDocFile, "",
      defaultSupplementaryOutputPathExcludingExtension);
  auto moduleSourceInfoOutputPath = determineSupplementaryOutputFilename(
      OPT_emit_module_source_info, pathsFromArguments.ModuleSourceInfoOutputPath,
      file_types::TY_SwiftSourceInfoFile, "",
      defaultSupplementaryOutputPathExcludingExtension);
  auto moduleSummaryOutputPath = determineSupplementaryOutputFilename(
      OPT_emit_module_summary, pathsFromArguments.ModuleSummaryOutputPath,
      file_types::TY_SwiftModuleSummaryFile, "",
      defaultSupplementaryOutputPathExcludingExtension);
  // There is no non-path form of -emit-interface-path
  auto ModuleInterfaceOutputPath =
      pathsFromArguments.ModuleInterfaceOutputPath;
  auto PrivateModuleInterfaceOutputPath =
      pathsFromArguments.PrivateModuleInterfaceOutputPath;
  auto PackageModuleInterfaceOutputPath =
      pathsFromArguments.PackageModuleInterfaceOutputPath;
  // There is no non-path form of -emit-abi-descriptor-path
  auto ABIDescriptorOutputPath = pathsFromArguments.ABIDescriptorOutputPath;
  // There is no non-path form of -emit-api-descriptor-path
  auto APIDescriptorOutputPath = pathsFromArguments.APIDescriptorOutputPath;
  // There is no non-path form of -emit-module-semantic-info-path
  auto ModuleSemanticInfoOutputPath =
      pathsFromArguments.ModuleSemanticInfoOutputPath;
  // The module path option/extension/default depend on the requested action
  // (.swiftmodule vs .sib); a helper picks them.
  ID emitModuleOption;
  std::string moduleExtension;
  std::string mainOutputIfUsableForModule;
  deriveModulePathParameters(outputFile, emitModuleOption, moduleExtension,
                             mainOutputIfUsableForModule);
  auto moduleOutputPath = determineSupplementaryOutputFilename(
      emitModuleOption, pathsFromArguments.ModuleOutputPath,
      file_types::TY_SwiftModuleFile, mainOutputIfUsableForModule,
      defaultSupplementaryOutputPathExcludingExtension);
  auto YAMLOptRecordPath = determineSupplementaryOutputFilename(
      OPT_save_optimization_record, pathsFromArguments.YAMLOptRecordPath,
      file_types::TY_YAMLOptRecord, "",
      defaultSupplementaryOutputPathExcludingExtension);
  auto bitstreamOptRecordPath = determineSupplementaryOutputFilename(
      OPT_save_optimization_record, pathsFromArguments.BitstreamOptRecordPath,
      file_types::TY_BitstreamOptRecord, "",
      defaultSupplementaryOutputPathExcludingExtension);
  auto SILOutputPath = pathsFromArguments.SILOutputPath;
  auto LLVMIROutputPath = pathsFromArguments.LLVMIROutputPath;
  // Assemble the final result struct.
  SupplementaryOutputPaths sop;
  sop.ClangHeaderOutputPath = clangHeaderOutputPath;
  sop.ModuleOutputPath = moduleOutputPath;
  sop.ModuleDocOutputPath = moduleDocOutputPath;
  sop.DependenciesFilePath = dependenciesFilePath;
  sop.ReferenceDependenciesFilePath = referenceDependenciesFilePath;
  sop.SerializedDiagnosticsPath = serializedDiagnosticsPath;
  sop.FixItsOutputPath = fixItsOutputPath;
  sop.LoadedModuleTracePath = loadedModuleTracePath;
  sop.FineModuleTracePath = FineModuleTracePath.str().str();
  sop.TBDPath = tbdPath;
  sop.ModuleInterfaceOutputPath = ModuleInterfaceOutputPath;
  sop.PrivateModuleInterfaceOutputPath = PrivateModuleInterfaceOutputPath;
  sop.PackageModuleInterfaceOutputPath = PackageModuleInterfaceOutputPath;
  sop.ModuleSourceInfoOutputPath = moduleSourceInfoOutputPath;
  sop.ModuleSummaryOutputPath = moduleSummaryOutputPath;
  sop.ABIDescriptorOutputPath = ABIDescriptorOutputPath;
  sop.APIDescriptorOutputPath = APIDescriptorOutputPath;
  sop.ConstValuesOutputPath = constValuesOutputPath;
  sop.ModuleSemanticInfoOutputPath = ModuleSemanticInfoOutputPath;
  sop.YAMLOptRecordPath = YAMLOptRecordPath;
  sop.BitstreamOptRecordPath = bitstreamOptRecordPath;
  sop.SILOutputPath = SILOutputPath;
  sop.LLVMIROutputPath = LLVMIROutputPath;
  return sop;
}
/// Pick the extension-less default location for supplementary outputs.
StringRef SupplementaryOutputPathsComputer::
    deriveDefaultSupplementaryOutputPathExcludingExtension(
        StringRef outputFilename, const InputFile &input) const {
  // Prefer siting the supplementary output next to the main output file.
  const bool mainOutputUsable =
      !outputFilename.empty() && outputFilename != "-";
  if (mainOutputUsable)
    return outputFilename;
  // Next choice: the primary input's file name (unless it is stdin).
  if (input.isPrimary() && input.getFileName() != "-")
    return llvm::sys::path::filename(input.getFileName());
  // Last resort: the module name.
  return ModuleName;
}
/// Resolve one supplementary output path.
///
/// Precedence (normal mode): explicit path argument, then (only if the
/// corresponding emit option was passed) the usable main output, then a
/// default derived from \p defaultSupplementaryOutputPathExcludingExtension.
/// With \p forceDefaultSupplementaryOutputPathExcludingExtension set, the
/// explicit path and main-output fallbacks are skipped entirely.
/// Returns "" when the output is not requested at all.
std::string
SupplementaryOutputPathsComputer::determineSupplementaryOutputFilename(
    options::ID emitOpt, std::string pathFromArguments, file_types::ID type,
    StringRef mainOutputIfUsable,
    StringRef defaultSupplementaryOutputPathExcludingExtension,
    bool forceDefaultSupplementaryOutputPathExcludingExtension) const {
  // -save-optimization-record is also implied by its =<format> variant.
  auto hasEmitOptArg = [&] () -> bool {
    if (Args.hasArg(emitOpt))
      return true;
    if (emitOpt == options::OPT_save_optimization_record &&
        Args.hasArg(options::OPT_save_optimization_record_EQ))
      return true;
    return false;
  };
  // Default path: the shared extension-less stem with this type's extension.
  auto computeDefaultSupplementaryOutputPathExcludingExtension =
      [&] () -> std::string {
    llvm::SmallString<128> path(
        defaultSupplementaryOutputPathExcludingExtension);
    llvm::sys::path::replace_extension(path, file_types::getExtension(type));
    return path.str().str();
  };
  if (forceDefaultSupplementaryOutputPathExcludingExtension) {
    if (!hasEmitOptArg()) {
      return std::string();
    }
    return computeDefaultSupplementaryOutputPathExcludingExtension();
  }
  if (!pathFromArguments.empty())
    return pathFromArguments;
  if (!hasEmitOptArg())
    return std::string();
  if (!mainOutputIfUsable.empty()) {
    return mainOutputIfUsable.str();
  }
  return computeDefaultSupplementaryOutputPathExcludingExtension();
}
/// Work out the module-emission option, file extension, and whether the main
/// output may double as the module path, based on the requested action
/// (.sib for SIB actions, .swiftmodule otherwise).
void SupplementaryOutputPathsComputer::deriveModulePathParameters(
    StringRef mainOutputFile, options::ID &emitOption, std::string &extension,
    std::string &mainOutputIfUsable) const {
  const bool isSIB = RequestedAction == FrontendOptions::ActionType::EmitSIB ||
                     RequestedAction == FrontendOptions::ActionType::EmitSIBGen;
  if (!isSIB)
    emitOption = options::OPT_emit_module;
  else if (RequestedAction == FrontendOptions::ActionType::EmitSIB)
    emitOption = options::OPT_emit_sib;
  else
    emitOption = options::OPT_emit_sibgen;
  // The main output *is* the module for module-only / merge / SIB actions.
  const bool canUseMainOutputForModule =
      RequestedAction == FrontendOptions::ActionType::MergeModules ||
      RequestedAction == FrontendOptions::ActionType::EmitModuleOnly || isSIB;
  extension = file_types::getExtension(isSIB ? file_types::TY_SIB
                                             : file_types::TY_SwiftModuleFile)
                  .str();
  if (canUseMainOutputForModule && !OutputFiles.empty())
    mainOutputIfUsable = mainOutputFile.str();
  else
    mainOutputIfUsable = "";
}
/// Translate one output-file-map entry into a SupplementaryOutputPaths,
/// leaving every field empty when \p map is null or lacks that file type.
static SupplementaryOutputPaths
createFromTypeToPathMap(const TypeToPathMap *map) {
  SupplementaryOutputPaths paths;
  if (!map)
    return paths;
  // Pair each supported file type with a reference to the matching field of
  // `paths`; the OUTPUT x-macro keeps this table in sync with the struct.
  const std::pair<file_types::ID, std::string &> typesAndStrings[] = {
#define OUTPUT(NAME, TYPE) {file_types::TYPE, paths.NAME},
#include "swift/Basic/SupplementaryOutputPaths.def"
#undef OUTPUT
  };
  for (const std::pair<file_types::ID, std::string &> &typeAndString :
       typesAndStrings) {
    auto const out = map->find(typeAndString.first);
    // Missing entries clear the field rather than leaving it unset.
    typeAndString.second = out == map->end() ? "" : out->second;
  }
  return paths;
}
/// Read per-input supplementary output paths from the file passed via
/// -supplementary-output-file-map. Returns std::nullopt (after emitting a
/// diagnostic) on any error.
std::optional<std::vector<SupplementaryOutputPaths>>
SupplementaryOutputPathsComputer::readSupplementaryOutputFileMap() const {
  // The map file is mutually exclusive with every individual
  // supplementary-output path flag; diagnose if any of those was also given.
  if (Arg *A = Args.getLastArg(
          options::OPT_emit_objc_header_path, options::OPT_emit_module_path,
          options::OPT_emit_module_doc_path,
          options::OPT_emit_dependencies_path,
          options::OPT_emit_reference_dependencies_path,
          options::OPT_serialize_diagnostics_path,
          options::OPT_emit_loaded_module_trace_path,
          options::OPT_emit_module_interface_path,
          options::OPT_emit_private_module_interface_path,
          options::OPT_emit_package_module_interface_path,
          options::OPT_emit_module_source_info_path, options::OPT_emit_tbd_path,
          options::OPT_sil_output_path, options::OPT_ir_output_path)) {
    Diags.diagnose(SourceLoc(),
                   diag::error_cannot_have_supplementary_outputs,
                   A->getSpelling(), "-supplementary-output-file-map");
    return std::nullopt;
  }
  const StringRef supplementaryFileMapPath =
      Args.getLastArgValue(options::OPT_supplementary_output_file_map);

  // Optional retry count for transient EBADF failures when opening the map
  // file (see the retry loop below).
  unsigned BadFileDescriptorRetryCount = 0;
  if (const Arg *A = Args.getLastArg(options::OPT_bad_file_descriptor_retry_count)) {
    if (StringRef(A->getValue()).getAsInteger(10, BadFileDescriptorRetryCount)) {
      Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value,
                     A->getAsString(Args), A->getValue());
      return std::nullopt;
    }
  }

  llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> buffer = nullptr;
  // Retry the open up to BadFileDescriptorRetryCount extra times, but only
  // while the failure is EBADF; any other error aborts immediately.
  for (unsigned I = 0; I < BadFileDescriptorRetryCount + 1; ++I) {
    buffer = llvm::MemoryBuffer::getFile(supplementaryFileMapPath);
    if (buffer)
      break;
    if (buffer.getError().value() != EBADF)
      break;
  }
  if (!buffer) {
    Diags.diagnose(SourceLoc(), diag::cannot_open_file,
                   supplementaryFileMapPath, buffer.getError().message());
    return std::nullopt;
  }
  llvm::Expected<OutputFileMap> OFM =
      OutputFileMap::loadFromBuffer(std::move(buffer.get()), "");
  if (auto Err = OFM.takeError()) {
    Diags.diagnose(SourceLoc(),
                   diag::error_unable_to_load_supplementary_output_file_map,
                   supplementaryFileMapPath, llvm::toString(std::move(Err)));
    return std::nullopt;
  }

  std::vector<SupplementaryOutputPaths> outputPaths;
  bool hadError = false;
  // Each input that produces supplementary output must have a map entry;
  // a missing entry is an error (unlike the extra multi-threaded WMO inputs
  // handled below).
  InputsAndOutputs.forEachInputProducingSupplementaryOutput(
      [&](const InputFile &input) -> bool {
        const TypeToPathMap *mapForInput =
            OFM->getOutputMapForInput(input.getFileName());
        if (!mapForInput) {
          Diags.diagnose(
              SourceLoc(),
              diag::error_missing_entry_in_supplementary_output_file_map,
              supplementaryFileMapPath, input.getFileName());
          hadError = true;
        }
        outputPaths.push_back(createFromTypeToPathMap(mapForInput));
        return false;
      });
  if (hadError)
    return std::nullopt;

  // In multi-threaded WMO mode, we need to read supplementary output paths
  // for all inputs beyond the first one (which was already processed above).
  // Entries in the map are optional, so if an input is missing, the regular
  // WMO path generation logic will handle it.
  if (!InputsAndOutputs.hasPrimaryInputs() && OutputFiles.size() > 1) {
    InputsAndOutputs.forEachInput([&](const InputFile &input) -> bool {
      // Skip the first input, which was already processed above
      if (InputsAndOutputs.firstInput().getFileName() == input.getFileName()) {
        return false;
      }
      // Check if this input has an entry in the supplementary output file map
      const TypeToPathMap *mapForInput =
          OFM->getOutputMapForInput(input.getFileName());
      if (mapForInput) {
        // Entry exists, use it
        outputPaths.push_back(createFromTypeToPathMap(mapForInput));
      }
      // If no entry exists, skip - the regular WMO logic will generate paths
      return false;
    });
  }
  return outputPaths;
}
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import time
import math
import argparse
import pickle
import jieba
import numpy as np
import tensorflow as tf
from data_loader import TextLoader
from rnn_model import RNNModel
from rnn_model import BIDIRNNModel
from cnn_model import CNNModel
class RNNClassifier(object):
    """RNN text classifier backed by a TF1 session.

    Wraps model construction, checkpoint persistence, training, prediction
    and evaluation. The configuration/vocabulary/labels triple is pickled
    next to the checkpoints inside ``model_path``.
    """

    def __init__(self, model_path, args):
        assert os.path.isdir(model_path), '%s must be a path' % model_path
        self.model_path = model_path
        # Pickled (args, vocab, labels) triple saved next to the checkpoints.
        self.config_vocab_labels_file = os.path.join(self.model_path, 'config_vocab_labels.pkl')
        self.args = args
        self.args.label_size = None
        self.args.vocab_size = None
        self.vocab = None
        self.labels = None
        # id2labels (class id -> label name) is filled by _load_config();
        # initialize it so predict()/test() can safely test it before then.
        self.id2labels = None
        self.model = None
        self.sess = tf.Session()
        if os.path.exists(self.config_vocab_labels_file):
            self._load_config()

    def _load_config(self):
        """Restore pickled args/vocab/labels from a previous training run."""
        with open(self.config_vocab_labels_file, 'rb') as f:
            saved_args, vocab, labels = pickle.load(f)
        assert saved_args, 'load config error'
        assert vocab, 'load vocab error'
        assert labels, 'load labels error'
        self.args = saved_args
        self.vocab = vocab
        self.labels = labels
        # Reverse mapping used to turn predicted class ids back into names.
        self.id2labels = dict(list(zip(list(labels.values()), list(labels.keys()))))

    def _load_model(self, batch_size=None):
        """Build the graph and restore the latest checkpoint if one exists.

        :param batch_size: optional override for the stored batch size.
        """
        print('loading model ... ')
        if batch_size:
            self.args.batch_size = batch_size
        self._init_model()
        saver = tf.train.Saver(tf.global_variables())
        ckpt = tf.train.get_checkpoint_state(self.args.model_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(self.sess, ckpt.model_checkpoint_path)

    def _init_model(self):
        """Create the RNN model, reusing the variable scope if it already exists."""
        try:
            with tf.variable_scope('classifier'):
                self.model = RNNModel(self.args)
        except ValueError:
            # The scope is already populated (model rebuilt in the same
            # process): recreate with reuse=True so variables are shared.
            with tf.variable_scope('classifier', reuse=True):
                self.model = RNNModel(self.args)

    def _transform(self, text):
        """Convert raw text into a fixed-length list of vocabulary ids.

        Unknown words map to id 0; the sequence is truncated or zero-padded
        to ``seq_length``.
        """
        text = text if isinstance(text, str) else text.decode('utf-8')
        text = [word for word in jieba.cut(text)] if self.args.segment else text
        x = list(map(self.vocab.get, text))
        # vocab.get returns None for OOV words; map those to id 0.
        x = [i if i else 0 for i in x]
        x_len = len(x)
        x = x[:self.args.seq_length] if x_len >= self.args.seq_length else x + [0] * (self.args.seq_length - x_len)
        return x

    def load(self):
        """Drop all cached state and reload the config and model from disk."""
        self.close()
        # close() destroys the session and clears the config, so both must be
        # rebuilt before _load_model() touches self.args / self.sess;
        # previously this crashed on self.args being None.
        self.sess = tf.Session()
        if os.path.exists(self.config_vocab_labels_file):
            self._load_config()
        self._load_model()

    def close(self):
        """Release the TF session and drop all cached config/model state."""
        self.args = None
        self.vocab = None
        self.labels = None
        self.id2labels = None
        self.model = None
        if self.sess:
            self.sess.close()
            self.sess = None

    def train(self, data_file=None, data=None, dev_data_file=None, vocab_corpus_file=None, args=None, continued=True):
        """Train the classifier.

        :param data_file: CSV file with the training samples.
        :param data: unused; kept for interface compatibility.
        :param dev_data_file: optional CSV file used for periodic evaluation.
        :param vocab_corpus_file: corpus used to build the vocabulary.
        :param args: argparse namespace; falls back to ``self.args``.
        :param continued: resume from the last checkpoint when possible.
        """
        if args is None:
            # Fall back to stored configuration so save_every/num_epochs are
            # always available below (previously a None args crashed).
            args = self.args
        train_data_loader = TextLoader(model_dir=self.args.model_path,
                                       data_file=data_file,
                                       vocab_corpus_file=vocab_corpus_file,
                                       batch_size=self.args.batch_size,
                                       seq_length=self.args.seq_length,
                                       vocab=self.vocab,
                                       labels=self.labels,
                                       segment=self.args.segment)
        dev_data_loader = None  # stays None when no dev file is supplied
        if dev_data_file:
            if self.vocab and self.labels:
                vocab = self.vocab
                labels = self.labels
            else:
                vocab = train_data_loader.vocab
                labels = train_data_loader.labels
            # Use the dev file here; the previous version re-used data_file
            # and therefore silently evaluated on the training data.
            dev_data_loader = TextLoader(model_dir=self.args.model_path,
                                         data_file=dev_data_file,
                                         batch_size=self.args.batch_size,
                                         seq_length=self.args.seq_length,
                                         vocab=vocab,
                                         labels=labels,
                                         segment=self.args.segment)
        if not self.args.vocab_size and not self.args.label_size:
            self.args.vocab_size = train_data_loader.vocab_size
            self.args.label_size = train_data_loader.label_size
        self._init_model()
        init = tf.global_variables_initializer()
        self.sess.run(init)
        saver = tf.train.Saver(tf.global_variables())
        if os.path.isfile(self.config_vocab_labels_file) and continued:
            ckpt = tf.train.get_checkpoint_state(self.args.model_path)
            assert ckpt, 'No checkpoint found'
            assert ckpt.model_checkpoint_path, 'No model path found in checkpoint'
            with open(self.config_vocab_labels_file, 'rb') as f:
                saved_args, vocab, labels = pickle.load(f)
            # The graph must be structurally identical to the saved one.
            need_be_same = ['model', 'rnn_size', 'num_layers', 'seq_length']
            for checkme in need_be_same:
                assert vars(saved_args)[checkme] == vars(self.args)[checkme], 'command line argument and saved model disagree on %s' % checkme
            assert len(self.vocab) == len(train_data_loader.vocab), 'data and loaded model disagree on dictionary mappings'
            assert len(self.labels) == len(train_data_loader.labels), 'data and loaded model disagree on label dictionary mappings'
            print('loading last training model and continue')
            saver.restore(self.sess, ckpt.model_checkpoint_path)
        else:
            self.vocab = train_data_loader.vocab
            self.labels = train_data_loader.labels
            self.args.vocab_size = train_data_loader.vocab_size
            self.args.label_size = train_data_loader.label_size
            with open(self.config_vocab_labels_file, 'wb') as f:
                pickle.dump([self.args, self.vocab, self.labels], f)
        with tf.Graph().as_default():
            # Summaries for loss and accuracy
            loss_summary = tf.summary.scalar('loss', self.model.loss)
            acc_summary = tf.summary.scalar('accuracy', self.model.accuracy)
            # Train summaries
            train_summary_op = tf.summary.merge([loss_summary, acc_summary])
            train_summary_dir = os.path.join(self.model_path, 'summaries', 'train')
            train_summary_writer = tf.summary.FileWriter(train_summary_dir, self.sess.graph)
            dev_summary_op = None
            dev_summary_writer = None
            if dev_data_loader:
                # Dev summaries (only when a dev set was supplied; previously
                # the unconditional references below raised NameError).
                dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
                dev_summary_dir = os.path.join(self.model_path, 'summaries', 'dev')
                dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, self.sess.graph)
            dev_batch_count = 0
            for epoch in range(self.args.num_epochs):
                # Exponentially decayed learning rate per epoch.
                self.sess.run(tf.assign(self.model.lr, self.args.learning_rate * (self.args.decay_rate ** epoch)))
                train_data_loader.reset_batch_pointer()
                for batch in range(train_data_loader.num_batches):
                    start = time.time()
                    x, y = train_data_loader.next_batch()
                    feed = {self.model.input_data: x, self.model.targets: y}
                    train_loss, _, accuracy, summaries = self.sess.run([self.model.loss, self.model.optimizer, self.model.accuracy, train_summary_op], feed_dict=feed)
                    end = time.time()
                    global_batch = epoch * train_data_loader.num_batches + batch + 1
                    print('{}/{} (epoch {}/{}), loss = {:.5f}, accuracy = {:.3f}, time/batch = {:.3f}'
                          .format(global_batch,
                                  self.args.num_epochs * train_data_loader.num_batches,
                                  epoch + 1,
                                  self.args.num_epochs,
                                  train_loss,
                                  accuracy,
                                  end - start))
                    train_summary_writer.add_summary(summaries, global_batch)
                    if global_batch % args.save_every == 0 \
                            or (epoch == args.num_epochs - 1 and batch == train_data_loader.num_batches - 1):
                        checkpoint_path = os.path.join(self.args.model_path, 'model.ckpt')
                        saver.save(self.sess, checkpoint_path, global_step=global_batch)
                        print('model saved to {}'.format(checkpoint_path))
                        if dev_data_loader:
                            # Cycle through the dev set one batch per checkpoint.
                            dev_batch_count += 1
                            if dev_batch_count == dev_data_loader.num_batches:
                                dev_data_loader.reset_batch_pointer()
                                dev_batch_count = 0
                            x, y = dev_data_loader.next_batch()
                            feed = {self.model.input_data: x, self.model.targets: y}
                            # Evaluation only: do NOT run the optimizer on dev
                            # data (the previous version trained on it).
                            dev_loss, dev_accuracy, dev_summaries = self.sess.run([self.model.loss, self.model.accuracy, dev_summary_op], feed_dict=feed)
                            print('dev_loss = {:.5f}, dev_accuracy = {:.3f}'.format(dev_loss, dev_accuracy))
                            if dev_summary_writer:
                                dev_summary_writer.add_summary(dev_summaries, global_batch)

    def predict(self, contents, batch_size=64):
        """Predict label names for a list of raw text strings.

        :param contents: iterable of raw text strings.
        :param batch_size: batch size used for inference.
        :return: list of predicted label names, in input order.
        """
        if not self.model or not self.args or self.args.batch_size != batch_size or not self.vocab or not self.sess or not self.id2labels:
            self._load_model(batch_size=batch_size)
        x = [self._transform(i.strip()) for i in contents]
        n_chunks = math.ceil(len(x) / self.args.batch_size)
        x = np.array_split(x[:self.args.batch_size * n_chunks], n_chunks, axis=0)
        results = []
        for m in range(n_chunks):
            results.extend(self.model.predict_label(self.sess, self.id2labels, x[m]))
        return results

    def test(self, test_file=None, data=None, batch_size=64):
        """Evaluate accuracy over a labelled test CSV and return it.

        :return: overall accuracy in [0, 1].
        """
        if not self.model or not self.args or self.args.batch_size != batch_size or not self.vocab or not self.sess or not self.id2labels or not self.labels:
            self._load_model(batch_size=batch_size)
        data_loader = TextLoader(model_dir=self.args.model_path,
                                 data_file=test_file,
                                 batch_size=self.args.batch_size,
                                 seq_length=self.args.seq_length,
                                 vocab=self.vocab,
                                 labels=self.labels,
                                 segment=self.args.segment)
        data = data_loader.tensor.copy()
        n_chunks = math.ceil(len(data) / self.args.batch_size)
        data_list = np.array_split(data[:self.args.batch_size * n_chunks], n_chunks, axis=0)
        correct_total = 0.0
        num_total = 0.0
        for m in range(n_chunks):
            start = time.time()
            # Last column holds the label id; the rest is the padded sequence.
            x = data_list[m][:, :-1]
            y = data_list[m][:, -1]
            results = self.model.predict_class(self.sess, x)
            correct_num = np.sum(results == y)
            end = time.time()
            print(('batch {}/{} time = {:.3f}, sub_accuracy = {:.6f}'.format(m + 1, n_chunks, end - start, correct_num * 1.0 / len(x))))
            correct_total += correct_num
            num_total += len(x)
        accuracy_total = correct_total / num_total
        print(('total_num = {}, total_accuracy = {:.6f}'.format(int(num_total), accuracy_total)))
        return accuracy_total
class CNNClassifier(object):
    """CNN text classifier backed by a TF1 session.

    Mirrors RNNClassifier's persistence scheme: the (args, vocab, labels)
    triple is pickled next to the checkpoints inside ``model_path``.

    NOTE(review): unlike RNNClassifier, this class defines no predict()/test()
    methods, yet cnn_classifier_train_test() calls both — confirm the intended
    inference API before using this class end to end.
    """

    def __init__(self, model_path, args):
        assert os.path.isdir(model_path), '%s must be a path' % model_path
        self.model_path = model_path
        # Pickled (args, vocab, labels) triple saved next to the checkpoints.
        self.config_vocab_labels_file = os.path.join(self.model_path, 'config_vocab_labels.pkl')
        self.args = args
        self.args.label_size = None
        self.args.vocab_size = None
        self.vocab = None
        self.labels = None
        self.model = None
        session_conf = tf.ConfigProto(
            allow_soft_placement=self.args.allow_soft_placement,
            log_device_placement=self.args.log_device_placement)
        self.sess = tf.Session(config=session_conf)
        if os.path.exists(self.config_vocab_labels_file):
            self._load_config()

    def _load_config(self):
        """Restore pickled args/vocab/labels from a previous training run."""
        with open(self.config_vocab_labels_file, 'rb') as f:
            saved_args, vocab, labels = pickle.load(f)
        assert saved_args, 'load config error'
        assert vocab, 'load vocab error'
        assert labels, 'load labels error'
        self.args = saved_args
        self.vocab = vocab
        self.labels = labels
        # Reverse mapping used to turn predicted class ids back into names.
        self.id2labels = dict(list(zip(list(labels.values()), list(labels.keys()))))

    def _load_model(self, batch_size=None):
        """Build the graph and restore the latest checkpoint if one exists."""
        print('loading model ... ')
        if batch_size:
            self.args.batch_size = batch_size
        self._init_model()
        saver = tf.train.Saver(tf.global_variables())
        ckpt = tf.train.get_checkpoint_state(self.args.model_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(self.sess, ckpt.model_checkpoint_path)

    def _init_model(self):
        """Create the CNN model, reusing the variable scope if it already exists."""
        # Build the kwargs once so both branches construct identical models;
        # the previous reuse branch referenced an undefined ``seq_length``
        # name and raised NameError instead of reusing the scope.
        model_kwargs = dict(
            seq_length=self.args.seq_length,
            label_size=self.args.label_size,
            vocab_size=self.args.vocab_size,
            embedding_size=self.args.embedding_dim,
            filter_sizes=list(map(int, self.args.filter_sizes.split(','))),
            num_filters=self.args.num_filters,
            l2_reg_lambda=self.args.l2_reg_lambda)
        try:
            with tf.variable_scope('classifier'):
                self.model = CNNModel(**model_kwargs)
        except ValueError:
            # Scope already populated: rebuild with shared variables.
            with tf.variable_scope('classifier', reuse=True):
                self.model = CNNModel(**model_kwargs)

    def _transform(self, text):
        """Convert raw text into a fixed-length list of vocabulary ids.

        Unknown words map to id 0; the sequence is truncated or zero-padded
        to ``seq_length``.
        """
        text = text if isinstance(text, str) else text.decode('utf-8')
        text = [word for word in jieba.cut(text)] if self.args.segment else text
        x = list(map(self.vocab.get, text))
        # vocab.get returns None for OOV words; map those to id 0.
        x = [i if i else 0 for i in x]
        x_len = len(x)
        x = x[:self.args.seq_length] if x_len >= self.args.seq_length else x + [0] * (self.args.seq_length - x_len)
        return x

    def train(self, data_file=None, data=None, dev_data_file=None, vocab_corpus_file=None, args=None, continued=False):
        """Train the CNN classifier for one pass over the training data.

        :param data_file: CSV file with the training samples.
        :param data: unused; kept for interface compatibility.
        :param dev_data_file: optional CSV file for the dev loader.
        :param vocab_corpus_file: corpus used to build the vocabulary.
        :param args: unused here; configuration comes from ``self.args``.
        :param continued: resume from the last checkpoint when possible.

        NOTE(review): no optimizer/train op is run in train_step below (the
        original training procedure is commented out), so weights are not
        updated unless CNNModel wires its own train op — confirm.
        """
        train_data_loader = TextLoader(model_dir=self.args.model_path,
                                       data_file=data_file,
                                       vocab_corpus_file=vocab_corpus_file,
                                       batch_size=self.args.batch_size,
                                       seq_length=self.args.seq_length,
                                       vocab=self.vocab,
                                       labels=self.labels,
                                       segment=self.args.segment)
        dev_data_loader = None  # stays None when no dev file is supplied
        if dev_data_file:
            if self.vocab and self.labels:
                vocab = self.vocab
                labels = self.labels
            else:
                vocab = train_data_loader.vocab
                labels = train_data_loader.labels
            # Use the dev file here; the previous version re-used data_file
            # and therefore built the dev loader over the training data.
            dev_data_loader = TextLoader(model_dir=self.args.model_path,
                                         data_file=dev_data_file,
                                         batch_size=self.args.batch_size,
                                         seq_length=self.args.seq_length,
                                         vocab=vocab,
                                         labels=labels,
                                         segment=self.args.segment)
        if not self.args.vocab_size and not self.args.label_size:
            self.args.vocab_size = train_data_loader.vocab_size
            self.args.label_size = train_data_loader.label_size
        # Kept for checkpoint compatibility; never incremented because no
        # train op runs (see NOTE in the docstring).
        global_step = tf.Variable(0, name='global_step', trainable=False)
        self._init_model()
        init = tf.global_variables_initializer()
        self.sess.run(init)
        saver = tf.train.Saver(tf.global_variables())
        if os.path.isfile(self.config_vocab_labels_file) and continued:
            ckpt = tf.train.get_checkpoint_state(self.args.model_path)
            assert ckpt, 'No checkpoint found'
            assert ckpt.model_checkpoint_path, 'No model path found in checkpoint'
            with open(self.config_vocab_labels_file, 'rb') as f:
                saved_args, vocab, labels = pickle.load(f)
            # The graph must be structurally identical to the saved one.
            need_be_same = ['model', 'rnn_size', 'num_layers', 'seq_length']
            for checkme in need_be_same:
                assert vars(saved_args)[checkme] == vars(self.args)[checkme], 'command line argument and saved model disagree on %s' % checkme
            assert len(self.vocab) == len(train_data_loader.vocab), 'data and loaded model disagree on dictionary mappings'
            assert len(self.labels) == len(train_data_loader.labels), 'data and loaded model disagree on label dictionary mappings'
            print('loading last training model and continue')
            saver.restore(self.sess, ckpt.model_checkpoint_path)
        else:
            self.vocab = train_data_loader.vocab
            self.labels = train_data_loader.labels
            self.args.vocab_size = train_data_loader.vocab_size
            self.args.label_size = train_data_loader.label_size
            with open(self.config_vocab_labels_file, 'wb') as f:
                pickle.dump([self.args, self.vocab, self.labels], f)
        with tf.Graph().as_default():
            # Output directory for models and summaries
            timestamp = str(int(time.time()))
            out_dir = os.path.abspath(os.path.join(os.path.curdir, 'runs', timestamp))
            print('Writing to {}\n'.format(out_dir))
            # Summaries for loss and accuracy
            loss_summary = tf.summary.scalar('loss', self.model.loss)
            acc_summary = tf.summary.scalar('accuracy', self.model.accuracy)
            # Train summaries
            train_summary_op = tf.summary.merge([loss_summary, acc_summary])
            train_summary_dir = os.path.join(out_dir, 'summaries', 'train')
            train_summary_writer = tf.summary.FileWriter(train_summary_dir, self.sess.graph)
            # Dev summaries
            dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
            dev_summary_dir = os.path.join(out_dir, 'summaries', 'dev')
            dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, self.sess.graph)
            # Checkpoint directory; TensorFlow assumes it already exists.
            checkpoint_dir = os.path.abspath(os.path.join(out_dir, 'checkpoints'))
            checkpoint_prefix = os.path.join(checkpoint_dir, 'model')
            if not os.path.exists(checkpoint_dir):
                os.makedirs(checkpoint_dir)

            def train_step(x_batch, y_batch, step):
                """Run one forward pass on a training batch and log summaries."""
                feed_dict = {
                    self.model.input_x: x_batch,
                    self.model.input_y: y_batch,
                    self.model.dropout_keep_prob: self.args.dropout_keep_prob
                }
                summaries, loss, accuracy = self.sess.run(
                    [train_summary_op, self.model.loss, self.model.accuracy],
                    feed_dict)
                print('step {}, loss {:g}, acc {:g}'.format(step, loss, accuracy))
                train_summary_writer.add_summary(summaries, step)

            def dev_step(x_batch, y_batch, step, writer=None):
                """Evaluate the model on a batch with dropout disabled."""
                feed_dict = {
                    self.model.input_x: x_batch,
                    self.model.input_y: y_batch,
                    self.model.dropout_keep_prob: 1.0
                }
                summaries, loss, accuracy = self.sess.run(
                    [dev_summary_op, self.model.loss, self.model.accuracy],
                    feed_dict)
                print('step {}, loss {:g}, acc {:g}'.format(step, loss, accuracy))
                if writer:
                    writer.add_summary(summaries, step)

            step = 0
            # Generate batches and loop over them once.
            train_data_loader.reset_batch_pointer()
            for batch in range(train_data_loader.num_batches):
                step += 1
                x_batch, y_batch = train_data_loader.next_batch()
                train_step(x_batch, y_batch, step)
                # ``global_step`` is never incremented (no train op runs), so
                # the previous tf.train.global_step() read was stuck at 0 and
                # fired evaluation/checkpointing on every batch. Use the local
                # batch counter to honour evaluate_every/checkpoint_every.
                if step % self.args.evaluate_every == 0:
                    print('\nEvaluation:')
                    dev_step(x_batch, y_batch, step, writer=dev_summary_writer)
                    print('')
                if step % self.args.checkpoint_every == 0:
                    path = saver.save(self.sess, checkpoint_prefix, global_step=step)
                    print('Saved model checkpoint to {}\n'.format(path))
# class Config(object):
# model_path = '../../data/test-model'
# train_file = '../../data/input.csv'
# vocab_corpus_file = '../../data/corpus.txt'
# init_from = None
# model = 'lstm'
# state_is_tuple = True
# learning_rate = 0.001
# decay_rate = 0.9
# keep_prob = 0.8
# rnn_size = 64
# num_layers = 2
# seq_length = 20
# batch_size = 16
# num_epochs = 20
# num_epochs = 50
# save_every = 100
# vocab_size = None
# label_size = None
def rnn_classifier_train_test():
    """Build RNN hyper-parameters from the command line, train on the sample
    data set and print a prediction."""
    model_path = '../../data/test-model'
    # (flag, type, default, help) — fed to argparse below.
    option_specs = [
        ('--model_path', str, model_path, 'directory to store checkpointed models'),
        ('--model', str, 'LSTM', 'RNN, GRU or LSTM, default LSTM'),
        ('--rnn_size', int, 128, 'size of RNN hidden state'),
        ('--num_layers', int, 4, 'number of layers in RNN'),
        ('--batch_size', int, 64, 'minibatch size'),
        ('--seq_length', int, 20, 'RNN sequence length'),
        ('--num_epochs', int, 1, 'number of epochs'),
        ('--save_every', int, 1000, 'save frequency'),
        ('--learning_rate', float, 0.001, 'learning rate'),
        ('--decay_rate', float, 0.9, 'decay rate for rmsprop'),
        ('--keep_prob', float, 0.8, 'dropout keep probability'),
        ('--state_is_tuple', bool, True, 'state_is_tuple'),
        ('--segment', bool, True, 'text segmentation'),
    ]
    parser = argparse.ArgumentParser()
    for flag, flag_type, default, help_text in option_specs:
        parser.add_argument(flag, type=flag_type, default=default, help=help_text)
    args = parser.parse_args()
    classifier = RNNClassifier(model_path, args)
    classifier.train(data_file='../../data/train.csv',
                     dev_data_file='../../data/test.csv',
                     vocab_corpus_file='../../data/corpus.csv',
                     args=args)
    print(classifier.predict(['英超-曼联3-1米堡升至第5 红魔迎来英超600胜']))
    # print((classifier.test(test_file='../../data/test.csv', batch_size=64)))
def cnn_classifier_train_test():
    """Build CNN hyper-parameters from the command line, train on the sample
    data set and print a prediction plus the held-out accuracy."""
    model_path = '../../data/test-model-cnn'
    # (flag, type, default, help) — fed to argparse below.
    option_specs = [
        ('--model_path', str, model_path, 'directory to store checkpointed models'),
        ('--dev_sample_percentage', float, .1, 'Percentage of the training data to use for validation'),
        ('--embedding_dim', int, 128, 'Dimensionality of character embedding (default: 128)'),
        ('--cnn_size', int, 128, 'size of CNN hidden state'),
        ('--filter_sizes', str, '3,4,5', 'Comma-separated filter sizes (default: "3,4,5")'),
        ('--num_filters', int, 128, 'Number of filters per filter size (default: 128)'),
        ('--dropout_keep_prob', float, 0.5, 'Dropout keep probability (default: 0.5)'),
        ('--l2_reg_lambda', float, 0.0, 'L2 regularization lambda (default: 0.0)'),
        ('--batch_size', int, 64, 'minibatch size (default: 64)'),
        ('--seq_length', int, 25, 'sequence length (default: 25)'),
        ('--num_epochs', int, 200, 'Number of training epochs (default: 200)'),
        ('--evaluate_every', int, 100, 'Evaluate model on dev set after this many steps (default: 100)'),
        ('--checkpoint_every', int, 100, 'Save model after this many steps (default: 100)'),
        ('--num_checkpoints', int, 5, 'Number of checkpoints to store (default: 5)'),
        ('--allow_soft_placement', bool, True, 'Allow device soft device placement'),
        ('--log_device_placement', bool, False, 'Log placement of ops on devices'),
        ('--label_size', int, 4, 'Classes number'),
        ('--segment', bool, True, 'text segmentation'),
    ]
    parser = argparse.ArgumentParser()
    for flag, flag_type, default, help_text in option_specs:
        parser.add_argument(flag, type=flag_type, default=default, help=help_text)
    args = parser.parse_args()
    classifier = CNNClassifier(model_path, args)
    classifier.train(data_file='../../data/train.csv',
                     dev_data_file='../../data/test.csv',
                     vocab_corpus_file='../../data/corpus.csv',
                     args=args)
    print(classifier.predict(['英超-曼联3-1米堡升至第5 红魔迎来英超600胜']))
    print(classifier.test(test_file='../../data/test.csv', batch_size=32))
# Script entry point: run the RNN smoke test by default; swap the call below
# to exercise the CNN variant instead.
if __name__ == '__main__':
    rnn_classifier_train_test()
    # cnn_classifier_train_test()
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""DB related custom exceptions."""
from neutron.openstack.common.gettextutils import _
class DBError(Exception):
    """Generic database error wrapping a driver-specific exception.

    The wrapped exception remains reachable through ``inner_exception``
    while its string form becomes this exception's message.
    """

    def __init__(self, inner_exception=None):
        message = str(inner_exception)
        super(DBError, self).__init__(message)
        # Keep the driver-level exception around for callers needing details.
        self.inner_exception = inner_exception
class DBDuplicateEntry(DBError):
    """Duplicate-key violation wrapping a driver-specific exception.

    :param columns: names of the columns involved in the violated unique
        constraint.
    :param inner_exception: the underlying driver exception, if any.
    """

    def __init__(self, columns=None, inner_exception=None):
        # A mutable default ([]) would be shared across every instance that
        # omits ``columns``; use None as the sentinel and build a fresh list.
        self.columns = columns if columns is not None else []
        super(DBDuplicateEntry, self).__init__(inner_exception)
class DBDeadlock(DBError):
    """Database deadlock error wrapping a driver-specific exception.

    DBError.__init__ already accepts an optional ``inner_exception``, so the
    former pass-through ``__init__`` override was redundant and is omitted.
    """
class DBInvalidUnicodeParameter(Exception):
    # Raised when a unicode value is passed to a backend that cannot store
    # it. ``_`` is the gettext translation hook imported at module level.
    message = _("Invalid Parameter: "
                "Unicode is not supported by the current database.")
# -*- coding: utf-8 -*-
import codecs
import logging
import os
import chardet
import pysrt
from .score import get_equivalent_release_groups
from .video import Episode, Movie
from .utils import sanitize, sanitize_release_group
# Module-level logger for this package.
logger = logging.getLogger(__name__)

#: Subtitle extensions recognized when scanning for existing subtitle files.
SUBTITLE_EXTENSIONS = ('.srt', '.sub', '.smi', '.txt', '.ssa', '.ass', '.mpl')
class Subtitle(object):
"""Base class for subtitle.
:param language: language of the subtitle.
:type language: :class:`~babelfish.language.Language`
:param bool hearing_impaired: whether or not the subtitle is hearing impaired.
:param page_link: URL of the web page from which the subtitle can be downloaded.
:type page_link: str
:param encoding: Text encoding of the subtitle.
:type encoding: str
"""
#: Name of the provider that returns that class of subtitle
provider_name = ''
def __init__(self, language, hearing_impaired=False, page_link=None, encoding=None):
#: Language of the subtitle
self.language = language
#: Whether or not the subtitle is hearing impaired
self.hearing_impaired = hearing_impaired
#: URL of the web page from which the subtitle can be downloaded
self.page_link = page_link
#: Content as bytes
self.content = None
#: Encoding to decode with when accessing :attr:`text`
self.encoding = None
# validate the encoding
if encoding:
try:
self.encoding = codecs.lookup(encoding).name
except (TypeError, LookupError):
logger.debug('Unsupported encoding %s', encoding)
@property
def id(self):
"""Unique identifier of the subtitle"""
raise NotImplementedError
@property
def text(self):
"""Content as string
If :attr:`encoding` is None, the encoding is guessed with :meth:`guess_encoding`
"""
if not self.content:
return
if self.encoding:
return self.content.decode(self.encoding, errors='replace')
return self.content.decode(self.guess_encoding(), errors='replace')
def is_valid(self):
"""Check if a :attr:`text` is a valid SubRip format.
:return: whether or not the subtitle is valid.
:rtype: bool
"""
if not self.text:
return False
try:
pysrt.from_string(self.text, error_handling=pysrt.ERROR_RAISE)
except pysrt.Error as e:
if e.args[0] < 80:
return False
return True
def guess_encoding(self):
"""Guess encoding using the language, falling back on chardet.
:return: the guessed encoding.
:rtype: str
"""
logger.info('Guessing encoding for language %s', self.language)
# always try utf-8 first
encodings = ['utf-8']
# add language-specific encodings
if self.language.alpha3 == 'zho':
encodings.extend(['gb18030', 'big5'])
elif self.language.alpha3 == 'jpn':
encodings.append('shift-jis')
elif self.language.alpha3 == 'ara':
encodings.append('windows-1256')
elif self.language.alpha3 == 'heb':
encodings.append('windows-1255')
elif self.language.alpha3 == 'tur':
encodings.extend(['iso-8859-9', 'windows-1254'])
elif self.language.alpha3 == 'pol':
# Eastern European Group 1
encodings.extend(['windows-1250'])
elif self.language.alpha3 == 'bul':
# Eastern European Group 2
encodings.extend(['windows-1251'])
else:
# Western European (windows-1252)
encodings.append('latin-1')
# try to decode
logger.debug('Trying encodings %r', encodings)
for encoding in encodings:
try:
self.content.decode(encoding)
except UnicodeDecodeError:
pass
else:
logger.info('Guessed encoding %s', encoding)
return encoding
logger.warning('Could not guess encoding from language')
# fallback on chardet
encoding = chardet.detect(self.content)['encoding']
logger.info('Chardet found encoding %s', encoding)
return encoding
def get_matches(self, video):
"""Get the matches against the `video`.
:param video: the video to get the matches with.
:type video: :class:`~subliminal.video.Video`
:return: matches of the subtitle.
:rtype: set
"""
raise NotImplementedError
def __hash__(self):
return hash(self.provider_name + '-' + self.id)
def __repr__(self):
    """Unambiguous representation: class name, subtitle id and language."""
    return '<{} {!r} [{}]>'.format(self.__class__.__name__, self.id, self.language)
def get_subtitle_path(video_path, language=None, extension='.srt'):
    """Get the subtitle path using the `video_path` and `language`.

    :param str video_path: path to the video.
    :param language: language of the subtitle to put in the path.
    :type language: :class:`~babelfish.language.Language`
    :param str extension: extension of the subtitle.
    :return: path of the subtitle.
    :rtype: str

    """
    root, _ = os.path.splitext(video_path)
    # optional language code is inserted between the stem and the extension
    parts = [root]
    if language:
        parts.append(str(language))
    return '.'.join(parts) + extension
def guess_matches(video, guess, partial=False):
    """Get matches between a `video` and a `guess`.

    If a guess is `partial`, the absence of information (e.g. a missing year
    on a series that never had one) won't be counted as a match.

    Episode- and movie-specific properties are checked first depending on the
    video type; common properties (release group, resolution, format, codecs)
    are checked for both.

    :param video: the video.
    :type video: :class:`~subliminal.video.Video`
    :param guess: the guess, as returned by guessit.
    :type guess: dict
    :param bool partial: whether or not the guess is partial.
    :return: matches between the `video` and the `guess`.
    :rtype: set

    """
    matches = set()
    if isinstance(video, Episode):
        # series
        if video.series and 'title' in guess and sanitize(guess['title']) == sanitize(video.series):
            matches.add('series')
        # title
        if video.title and 'episode_title' in guess and sanitize(guess['episode_title']) == sanitize(video.title):
            matches.add('title')
        # season
        if video.season and 'season' in guess and guess['season'] == video.season:
            matches.add('season')
        # episode
        # Currently we only have single-ep support (guessit returns a multi-ep as a list with int values)
        # Most providers only support single-ep, so make sure it contains only 1 episode
        # In case of multi-ep, take the lowest episode (subtitles will normally be available on lowest episode number)
        if video.episode and 'episode' in guess:
            episode_guess = guess['episode']
            episode = min(episode_guess) if episode_guess and isinstance(episode_guess, list) else episode_guess
            if episode == video.episode:
                matches.add('episode')
        # year
        if video.year and 'year' in guess and guess['year'] == video.year:
            matches.add('year')
        # count "no year" as an information
        if not partial and video.original_series and 'year' not in guess:
            matches.add('year')
    elif isinstance(video, Movie):
        # year
        if video.year and 'year' in guess and guess['year'] == video.year:
            matches.add('year')
        # title
        if video.title and 'title' in guess and sanitize(guess['title']) == sanitize(video.title):
            matches.add('title')
    # release_group
    if (video.release_group and 'release_group' in guess and
            sanitize_release_group(guess['release_group']) in
            get_equivalent_release_groups(sanitize_release_group(video.release_group))):
        matches.add('release_group')
    # resolution
    if video.resolution and 'screen_size' in guess and guess['screen_size'] == video.resolution:
        matches.add('resolution')
    # format
    # Guessit may return a list for `format`, which indicates a conflict in the guessing.
    # We should match `format` only when it returns single value to avoid false `format` matches
    if video.format and guess.get('format') and not isinstance(guess['format'], list) \
            and guess['format'].lower() == video.format.lower():
        matches.add('format')
    # video_codec
    if video.video_codec and 'video_codec' in guess and guess['video_codec'] == video.video_codec:
        matches.add('video_codec')
    # audio_codec
    if video.audio_codec and 'audio_codec' in guess and guess['audio_codec'] == video.audio_codec:
        matches.add('audio_codec')
    return matches
def fix_line_ending(content):
    r"""Fix line ending of `content` by changing it to \n.

    :param bytes content: content of the subtitle.
    :return: the content with fixed line endings.
    :rtype: bytes

    """
    # normalize CRLF first so the bare-CR pass cannot double-convert
    normalized = content.replace(b'\r\n', b'\n')
    return normalized.replace(b'\r', b'\n')
"""Routine to "compile" a .py file to a .pyc (or .pyo) file.
This module has intimate knowledge of the format of .pyc files.
"""
import builtins
import errno
import imp
import marshal
import os
import sys
import tokenize
import traceback
# Magic number identifying this interpreter's bytecode format; it is written
# into the header of every .pyc file produced by compile() below.
MAGIC = imp.get_magic()
# Public API of this module.
__all__ = ["compile", "main", "PyCompileError"]
class PyCompileError(Exception):
    """Exception raised when an error occurs while attempting to
    compile the file.

    To raise this exception, use

        raise PyCompileError(exc_type, exc_value, file[, msg])

    where

        exc_type:   exception type to be used in error message
                    type name can be accessed as class variable 'exc_type_name'

        exc_value:  exception value to be used in error message
                    can be accessed as class variable 'exc_value'

        file:       name of file being compiled to be used in error message
                    can be accessed as class variable 'file'

        msg:        string message to be written as error message
                    If no value is given, a default exception message will be
                    given, consistent with 'standard' py_compile output.
                    message (or default) can be accessed as class variable 'msg'

    """

    def __init__(self, exc_type, exc_value, file, msg=''):
        exc_type_name = exc_type.__name__
        if exc_type is SyntaxError:
            # substitute the real file name for compile()'s '<string>' placeholder
            formatted = ''.join(traceback.format_exception_only(exc_type, exc_value))
            errmsg = formatted.replace('File "<string>"', 'File "%s"' % file)
        else:
            errmsg = "Sorry: %s: %s" % (exc_type_name, exc_value)

        super().__init__(msg or errmsg, exc_type_name, exc_value, file)

        self.exc_type_name = exc_type_name
        self.exc_value = exc_value
        self.file = file
        self.msg = msg or errmsg

    def __str__(self):
        return self.msg
def wr_long(f, x):
    """Internal; write a 32-bit int to a file in little-endian order."""
    # keep only the low 32 bits (handles negative and oversized values the
    # same way the per-byte masking did), then emit them LSB-first
    f.write((x & 0xFFFFFFFF).to_bytes(4, 'little'))
def compile(file, cfile=None, dfile=None, doraise=False, optimize=-1):
    """Byte-compile one Python source file to Python bytecode.

    :param file: The source file name.
    :param cfile: The target byte compiled file name.  When not given, this
        defaults to the PEP 3147 location.
    :param dfile: Purported file name, i.e. the file name that shows up in
        error messages.  Defaults to the source file name.
    :param doraise: Flag indicating whether or not an exception should be
        raised when a compile error is found.  If an exception occurs and this
        flag is set to False, a string indicating the nature of the exception
        will be printed, and the function will return to the caller. If an
        exception occurs and this flag is set to True, a PyCompileError
        exception will be raised.
    :param optimize: The optimization level for the compiler.  Valid values
        are -1, 0, 1 and 2.  A value of -1 means to use the optimization
        level of the current interpreter, as given by -O command line options.
    :return: Path to the resulting byte compiled file.

    Note that it isn't necessary to byte-compile Python modules for
    execution efficiency -- Python itself byte-compiles a module when
    it is loaded, and if it can, writes out the bytecode to the
    corresponding .pyc (or .pyo) file.

    However, if a Python installation is shared between users, it is a
    good idea to byte-compile all modules upon installation, since
    other users may not be able to write in the source directories,
    and thus they won't be able to write the .pyc/.pyo file, and then
    they would be byte-compiling every module each time it is loaded.
    This can slow down program start-up considerably.

    See compileall.py for a script/module that uses this module to
    byte-compile all installed files (or all files in selected
    directories).
    """
    with tokenize.open(file) as f:
        try:
            # source mtime goes into the .pyc header for staleness checks
            timestamp = int(os.fstat(f.fileno()).st_mtime)
        except AttributeError:
            # platforms without fstat() fall back to a stat() by name
            timestamp = int(os.stat(file).st_mtime)
        codestring = f.read()
    try:
        codeobject = builtins.compile(codestring, dfile or file, 'exec',
                                      optimize=optimize)
    except Exception as err:
        py_exc = PyCompileError(err.__class__, err, dfile or file)
        if doraise:
            raise py_exc
        else:
            sys.stderr.write(py_exc.msg + '\n')
            return
    if cfile is None:
        # derive the PEP 3147 __pycache__ path from the source path
        if optimize >= 0:
            cfile = imp.cache_from_source(file, debug_override=not optimize)
        else:
            cfile = imp.cache_from_source(file)
    try:
        os.makedirs(os.path.dirname(cfile))
    except OSError as error:
        # the cache directory already existing is fine; anything else is not
        if error.errno != errno.EEXIST:
            raise
    with open(cfile, 'wb') as fc:
        # write a zero placeholder where the magic number belongs; the real
        # MAGIC is written last so a partially written file is never treated
        # as a valid .pyc
        fc.write(b'\0\0\0\0')
        wr_long(fc, timestamp)
        marshal.dump(codeobject, fc)
        fc.flush()
        fc.seek(0, 0)
        fc.write(MAGIC)
    return cfile
def main(args=None):
    """Compile several source files.

    The files named in 'args' (or on the command line, if 'args' is
    not specified) are compiled and the resulting bytecode is cached
    in the normal manner.  This function does not search a directory
    structure to locate source files; it only compiles files named
    explicitly.  If '-' is the only parameter in args, the list of
    files is taken from standard input.

    :param args: optional list of file names; defaults to sys.argv[1:].
    :return: 0 on success, 1 if at least one file failed to compile.
    """
    if args is None:
        args = sys.argv[1:]
    rv = 0

    if args == ['-']:
        # read file names from stdin, one per line, until EOF
        filenames = (line.rstrip('\n') for line in iter(sys.stdin.readline, ''))
    else:
        filenames = args
    for filename in filenames:
        try:
            compile(filename, doraise=True)
        except PyCompileError as error:
            # return value to indicate at least one failure
            rv = 1
            sys.stderr.write("%s\n" % error.msg)
        except IOError as error:
            # previously only the stdin branch caught IOError, so an unreadable
            # file named on the command line aborted the whole run; report it
            # and keep going instead
            rv = 1
            sys.stderr.write("%s\n" % error)
    return rv
if __name__ == "__main__":
    sys.exit(main())
#ifndef NUMPY_SRC_COMMON_NPYSORT_QUICKSORT_HPP
#define NUMPY_SRC_COMMON_NPYSORT_QUICKSORT_HPP
#include "heapsort.hpp"
#include "common.hpp"
namespace np::sort {

// pushing largest partition has upper bound of log2(n) space
// we store two pointers each time
constexpr size_t kQuickStack = sizeof(intptr_t) * 8 * 2;
// partitions at or below this size are finished off with insertion sort
constexpr ptrdiff_t kQuickSmall = 15;

// NUMERIC SORTS

/*
 * Introsort-style quicksort: median-of-three pivot selection with an
 * explicit partition stack, insertion sort for small partitions, and a
 * heapsort fallback once the depth budget (2 * log2(num)) is exhausted,
 * which bounds the worst case at O(n log n).  Sorts the `num` elements
 * starting at `start` in place, comparing with LessThan.
 */
template <typename T>
inline void Quick(T *start, SSize num)
{
    T vp;                    // pivot value
    T *pl = start;           // left edge of the current partition
    T *pr = pl + num - 1;    // right edge of the current partition
    T *stack[kQuickStack];   // deferred partitions, stored as (left, right) pairs
    T **sptr = stack;
    T *pm, *pi, *pj, *pk;
    int depth[kQuickStack];  // saved depth budget for each deferred partition
    int *psdepth = depth;
    int cdepth = BitScanReverse(static_cast<std::make_unsigned_t<SSize>>(num)) * 2;
    for (;;) {
        if (NPY_UNLIKELY(cdepth < 0)) {
            // depth budget spent (pathological pivots): heapsort this partition
            Heap(pl, pr - pl + 1);
            goto stack_pop;
        }
        while ((pr - pl) > kQuickSmall) {
            // quicksort partition: order (first, middle, last) so *pm is the median
            pm = pl + ((pr - pl) >> 1);
            if (LessThan(*pm, *pl)) {
                std::swap(*pm, *pl);
            }
            if (LessThan(*pr, *pm)) {
                std::swap(*pr, *pm);
            }
            if (LessThan(*pm, *pl)) {
                std::swap(*pm, *pl);
            }
            vp = *pm;
            pi = pl;
            pj = pr - 1;
            // park the pivot just before the right edge during partitioning
            std::swap(*pm, *pj);
            for (;;) {
                do {
                    ++pi;
                } while (LessThan(*pi, vp));
                do {
                    --pj;
                } while (LessThan(vp, *pj));
                if (pi >= pj) {
                    break;
                }
                std::swap(*pi, *pj);
            }
            pk = pr - 1;
            // move the pivot into its final sorted position
            std::swap(*pi, *pk);
            // push largest partition on stack, keep working on the smaller one
            if (pi - pl < pr - pi) {
                *sptr++ = pi + 1;
                *sptr++ = pr;
                pr = pi - 1;
            }
            else {
                *sptr++ = pl;
                *sptr++ = pi - 1;
                pl = pi + 1;
            }
            *psdepth++ = --cdepth;
        }
        /* insertion sort for the remaining small partition */
        for (pi = pl + 1; pi <= pr; ++pi) {
            vp = *pi;
            pj = pi;
            pk = pi - 1;
            while (pj > pl && LessThan(vp, *pk)) {
                *pj-- = *pk--;
            }
            *pj = vp;
        }
    stack_pop:
        if (sptr == stack) {
            break;
        }
        pr = *(--sptr);
        pl = *(--sptr);
        cdepth = *(--psdepth);
    }
}
} // np::sort
#endif // NUMPY_SRC_COMMON_NPYSORT_QUICKSORT_HPP
# -*- coding: utf8 -*-
from PyQt5.QtWidgets import QPushButton, QWidget
from PyQt5.QtWidgets import QComboBox, QLabel, QLineEdit, QDoubleSpinBox
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QFormLayout
from PyQt5.QtWidgets import QSpinBox
from PyQt5.QtCore import Qt
from PDielec.Constants import support_matrix_db
from PDielec.Constants import avogadro_si
from PDielec.Utilities import Debug
class ScenarioTab(QWidget):
def __init__(self, parent, debug=False):
    """Build one scenario tab: default settings plus all of its widgets.

    :param parent: the notebook that owns this tab; used to reach the main
        tab's file reader and the global recalculation flags.
    :param bool debug: enable debug printing for this tab.
    """
    # NOTE(review): super(QWidget, self) skips ScenarioTab in the MRO —
    # presumably intentional PyQt idiom here, but confirm against the other tabs.
    super(QWidget, self).__init__(parent)
    global debugger
    debugger = Debug(debug,'ScenarioTab:')
    self.dirty = True
    self.settings = {}
    self.notebook = parent
    self.notebook.plottingCalculationRequired = True
    self.notebook.fittingCalculationRequired = True
    # default support matrix and its database-derived properties
    matrix = 'ptfe'
    self.settings['Matrix'] = matrix
    self.settings['Matrix density'] = support_matrix_db[matrix][0]
    self.settings['Matrix permittivity'] = support_matrix_db[matrix][1]
    self.settings['Bubble radius'] = 30.0
    self.settings['Bubble volume fraction'] = 0.0
    self.settings['Mass fraction'] = 0.1
    self.settings['Volume fraction'] = 0.1
    self.settings['Particle size(mu)'] = 0.0001
    self.settings['Particle size distribution sigma(mu)'] = 0.0
    self.settings['Ellipsoid a/b'] = 1.0
    self.settings['Unique direction - h'] = 0
    self.settings['Unique direction - k'] = 0
    self.settings['Unique direction - l'] = 1
    self.settings['Mass or volume fraction'] = 'volume'
    self.settings['ATR material refractive index'] = 4.0
    self.settings['ATR theta'] = 45.0
    self.settings['ATR S polarisation fraction'] = 0.5
    # get the reader from the main tab
    self.notebook = parent
    self.reader = self.notebook.mainTab.reader
    self.settings['Effective medium method'] = 'Maxwell-Garnett'
    # self.methods = ['Maxwell-Garnett', 'Bruggeman', 'Averaged Permittivity', 'Mie', 'Anisotropic-Mie']
    self.methods = ['Maxwell-Garnett', 'Bruggeman', 'Averaged Permittivity', 'Mie']
    self.settings['Particle shape'] = 'Sphere'
    self.shapes = ['Sphere', 'Needle', 'Plate', 'Ellipsoid']
    self.scenarioIndex = None
    # Create a scenario tab
    vbox = QVBoxLayout()
    form = QFormLayout()
    #
    # Support matrix
    #
    self.matrix_cb = QComboBox(self)
    self.matrix_cb.setToolTip('Define the permittivity and density of the support matrix')
    self.matrix_cb.addItems(support_matrix_db)
    index = self.matrix_cb.findText(self.settings['Matrix'], Qt.MatchFixedString)
    if index >=0:
        self.matrix_cb.setCurrentIndex(index)
    else:
        print('support matrix index was not 0',matrix)
    self.matrix_cb.activated.connect(self.on_matrix_cb_activated)
    label = QLabel('Support matrix',self)
    label.setToolTip('Define the permittivity and density of the support matrix')
    form.addRow(label, self.matrix_cb)
    #
    # Support matrix density
    #
    self.density_sb = QDoubleSpinBox(self)
    self.density_sb.setRange(0.001, 100.0)
    self.density_sb.setSingleStep(0.01)
    self.density_sb.setDecimals(3)
    self.density_sb.setToolTip('Define the support matrix density. \nThis makes changes to the support density and permittivity')
    self.density_sb.setValue(self.settings['Matrix density'])
    self.density_sb.valueChanged.connect(self.on_density_sb_changed)
    label = QLabel('Support density', self)
    label.setToolTip('Define the support matrix density. \nThis makes changes to the support density and permittivity')
    form.addRow(label, self.density_sb)
    #
    # Support matrix permittivity
    #
    self.permittivity_sb = QDoubleSpinBox(self)
    self.permittivity_sb.setRange(0.001, 100.0)
    self.permittivity_sb.setSingleStep(0.01)
    self.permittivity_sb.setDecimals(3)
    self.permittivity_sb.setToolTip('Define the support matrix permittivity')
    self.permittivity_sb.setValue(self.settings['Matrix permittivity'])
    self.permittivity_sb.valueChanged.connect(self.on_permittivity_sb_changed)
    label = QLabel('Support permittivity', self)
    label.setToolTip('Define the support matrix permittivity')
    form.addRow(label, self.permittivity_sb)
    #
    # Bubble volume fraction
    #
    self.bubble_vf_sb = QDoubleSpinBox(self)
    # range is capped so dielectric + air voids never exceed 100%
    self.bubble_vf_sb.setRange(0.0, 100.0*(1.0-self.settings['Volume fraction']))
    self.bubble_vf_sb.setSingleStep(1.0)
    self.bubble_vf_sb.setDecimals(1)
    self.bubble_vf_sb.setToolTip('Define the % volume fraction of air bubble inclusions in the matrix')
    self.bubble_vf_sb.setValue(100*self.settings['Bubble volume fraction'])
    self.bubble_vf_sb.valueChanged.connect(self.on_bubble_vf_sb_changed)
    label = QLabel('% Air void volume fraction', self)
    label.setToolTip('Define the % volume fraction of air bubble inclusions in the matrix')
    form.addRow(label, self.bubble_vf_sb)
    #
    # Bubble radius in microns
    #
    self.bubble_radius_sb = QDoubleSpinBox(self)
    self.bubble_radius_sb.setRange(0.001, 1000.0)
    self.bubble_radius_sb.setSingleStep(1.0)
    self.bubble_radius_sb.setDecimals(3)
    self.bubble_radius_sb.setToolTip('Define the air bubble radius')
    self.bubble_radius_sb.setValue(self.settings['Bubble radius'])
    self.bubble_radius_sb.valueChanged.connect(self.on_bubble_radius_sb_changed)
    label = QLabel('Air void radius (μm)', self)
    label.setToolTip('Define the air void radius')
    form.addRow(label, self.bubble_radius_sb)
    #
    # Mass fraction of dielectric medium
    #
    self.mf_sb = QDoubleSpinBox(self)
    self.mf_sb.setRange(0.000001, 100.0)
    self.mf_sb.setSingleStep(0.1)
    self.mf_sb.setDecimals(6)
    self.mf_sb.setToolTip('The percentage mass fraction of the dielectric medium. \nNote that volume and mass fraction are linked')
    self.mf_sb.setValue(100.0*self.settings['Mass fraction'])
    self.mf_sb.valueChanged.connect(self.on_mf_sb_changed)
    label = QLabel('% Mass fraction of dielectric', self)
    label.setToolTip('The percentage mass fraction of the dielectric medium. \nNote that volume and mass fraction are linked')
    form.addRow(label, self.mf_sb)
    #
    # Volume fraction of dielectric medium
    #
    self.vf_sb = QDoubleSpinBox(self)
    self.vf_sb.setRange(0.000001, 100.0*(1.0-self.settings['Bubble volume fraction']))
    self.vf_sb.setSingleStep(0.1)
    self.vf_sb.setDecimals(6)
    self.vf_sb.setToolTip('The percentage volume fraction of the dielectric medium. \nNote that volume and mass fraction are linked')
    self.vf_sb.valueChanged.connect(self.on_vf_sb_changed)
    self.vf_sb.setValue(100.0*self.settings['Volume fraction'])
    label = QLabel('% Volume fraction of dielectric', self)
    label.setToolTip('The percentage volume fraction of the dielectric medium. \nNote that volume and mass fraction are linked')
    form.addRow(label, self.vf_sb)
    #
    # Calculation method
    #
    self.methods_cb = QComboBox(self)
    self.methods_cb.setToolTip('Choose the calculation method for the effective medium theory')
    self.methods_cb.addItems(self.methods)
    index = self.methods_cb.findText(self.settings['Effective medium method'], Qt.MatchFixedString)
    if index >=0:
        self.methods_cb.setCurrentIndex(index)
    else:
        print('Method index was not 0',self.settings['Effective medium method'])
    self.methods_cb.activated.connect(self.on_methods_cb_activated)
    label = QLabel('Method',self)
    label.setToolTip('Choose the calculation method for the effective medium theory')
    form.addRow(label, self.methods_cb)
    #
    # Particle size option
    #
    self.size_sb = QDoubleSpinBox(self)
    self.size_sb.setRange(0.000001, 1000.0)
    self.size_sb.setSingleStep(0.1)
    self.size_sb.setDecimals(6)
    self.size_sb.setToolTip('Define the particle radius of the sphere in μm.')
    self.size_sb.setValue(self.settings['Particle size(mu)'])
    self.size_sb.valueChanged.connect(self.on_size_sb_changed)
    label = QLabel('Particle radius (μm)',self)
    label.setToolTip('Define the particle radius of the sphere in μm.')
    form.addRow(label, self.size_sb)
    #
    # Particle sigma option
    #
    self.sigma_sb = QDoubleSpinBox(self)
    self.sigma_sb.setRange(0.0, 1000.0)
    self.sigma_sb.setSingleStep(0.1)
    self.sigma_sb.setDecimals(6)
    self.sigma_sb.setToolTip('Define the particle size distribution as a lognormal distribution with the given sigma. \nOnly applicable for the Mie method')
    self.sigma_sb.setValue(self.settings['Particle size distribution sigma(mu)'])
    self.sigma_sb.valueChanged.connect(self.on_sigma_sb_changed)
    label = QLabel('Particle sigma (μm)',self)
    label.setToolTip('Define the particle size distribition as a lognormal with the given sigma. \nOnly applicable for the Mie method')
    form.addRow(label, self.sigma_sb)
    #
    # Crystallite shape
    #
    self.shape_cb = QComboBox(self)
    self.shape_cb.setToolTip('Choose a particle shape. \nFor the Mie methods only sphere is allowed. \nFor shapes other than sphere there is a unique direction. \nFor ellipsoidal and needle like this is a direction [abc]. \nFor a plate the perpendicular to a crystal face (hkl) is used to define the unique direction')
    self.shape_cb.addItems(self.shapes)
    index = self.shape_cb.findText(self.settings['Particle shape'], Qt.MatchFixedString)
    if index >=0:
        self.shape_cb.setCurrentIndex(index)
    else:
        print('Method index was not 0',self.settings['Particle shape'])
    self.shape_cb.activated.connect(self.on_shape_cb_activated)
    label = QLabel('Particle shape',self)
    label.setToolTip('Choose a particle shape. \nFor the Mie methods only sphere is allowed. \nFor shapes other than sphere there is a unique direction. \nFor ellipsoidal and needle like this is a direction [abc]. \nFor a plate the perpendicular to a crystal face (hkl) is used to define the unique direction')
    form.addRow(label, self.shape_cb)
    #
    # Particle shape information
    # unique direction (hkl) or [abc]
    self.h_sb = QSpinBox(self)
    self.h_sb.setToolTip('Define the h dimension of the unique direction')
    self.h_sb.setRange(-20,20)
    self.h_sb.setValue(self.settings['Unique direction - h'])
    self.h_sb.valueChanged.connect(self.on_h_sb_changed)
    self.k_sb = QSpinBox(self)
    self.k_sb.setToolTip('Define the k dimension of the unique direction')
    self.k_sb.setRange(-20,20)
    self.k_sb.setValue(self.settings['Unique direction - k'])
    self.k_sb.valueChanged.connect(self.on_k_sb_changed)
    self.l_sb = QSpinBox(self)
    self.l_sb.setToolTip('Define the l dimension of the unique direction')
    self.l_sb.setRange(-20,20)
    self.l_sb.setValue(self.settings['Unique direction - l'])
    self.l_sb.valueChanged.connect(self.on_l_sb_changed)
    hbox = QHBoxLayout()
    hbox.addWidget(self.h_sb)
    hbox.addWidget(self.k_sb)
    hbox.addWidget(self.l_sb)
    self.hkl_label = QLabel('Unique direction [abc]',self)
    self.hkl_label.setToolTip('Define the unique direction by [abc] or (hkl). \n[abc] is used by needles and ellipsoids. It defines the unique direction in crystallographic units. \n(hkl) is used by plates it defines a surface and the unique direction is perpendicular to it.')
    form.addRow(self.hkl_label, hbox)
    #
    # a over b ratio for ellipse
    #
    self.aoverb_sb = QDoubleSpinBox(self)
    self.aoverb_sb.setRange(0.0, 1000.0)
    self.aoverb_sb.setSingleStep(0.1)
    self.aoverb_sb.setDecimals(6)
    self.aoverb_sb.setToolTip('Define the ellipsoid a/b ratio or eccentricity. \nOnly applicable for the ellipsoid shapes \na/b < 1: oblate ellipsoid \na/b > 1: prolate ellipsoid')
    self.aoverb_sb.setValue(self.settings['Ellipsoid a/b'])
    self.aoverb_sb.valueChanged.connect(self.on_aoverb_sb_changed)
    label = QLabel('Ellipsoid a/b eccentricty',self)
    label.setToolTip('Define the ellipsoid a/b ratio or eccentricity. \nOnly applicable for the ellipsoid shapes \na/b < 1: oblate ellipsoid \na/b > 1: prolate ellipsoid')
    form.addRow(label, self.aoverb_sb)
    #
    # Add ATR options
    # Refractive Index
    self.atr_index_sb = QDoubleSpinBox(self)
    self.atr_index_sb.setRange(0.001, 100.0)
    self.atr_index_sb.setSingleStep(0.01)
    self.atr_index_sb.setDecimals(3)
    self.atr_index_sb.setToolTip('Define the ATR material refractive index')
    self.atr_index_sb.setValue(self.settings['ATR material refractive index'])
    self.atr_index_sb.valueChanged.connect(self.on_atr_index_sb_changed)
    label = QLabel('ATR material refractive index', self)
    label.setToolTip('Define the ATR material refractive index')
    form.addRow(label, self.atr_index_sb)
    # Incident angle in degreees
    self.atr_incident_ang_sb = QDoubleSpinBox(self)
    self.atr_incident_ang_sb.setRange(0.0, 180.0)
    self.atr_incident_ang_sb.setSingleStep(0.1)
    self.atr_incident_ang_sb.setDecimals(1)
    self.atr_incident_ang_sb.setToolTip('Define the ATR incident angle')
    self.atr_incident_ang_sb.setValue(self.settings['ATR theta'])
    self.atr_incident_ang_sb.valueChanged.connect(self.on_atr_incident_ang_sb_changed)
    label = QLabel('ATR incident angle', self)
    label.setToolTip('Define the ATR incident angle')
    form.addRow(label, self.atr_incident_ang_sb)
    # S polarisation fraction
    self.atr_spolfrac_sb = QDoubleSpinBox(self)
    self.atr_spolfrac_sb.setRange(0.0, 1.0)
    self.atr_spolfrac_sb.setSingleStep(0.01)
    self.atr_spolfrac_sb.setDecimals(3)
    self.atr_spolfrac_sb.setToolTip('Define the ATR S polarisation fraction, the rest is P polarisation')
    self.atr_spolfrac_sb.setValue(self.settings['ATR S polarisation fraction'])
    self.atr_spolfrac_sb.valueChanged.connect(self.on_atr_spolfrac_sb_changed)
    label = QLabel('ATR S polarisation fraction', self)
    label.setToolTip('Define the S polarisation fraction, the rest is P polarisation')
    form.addRow(label, self.atr_spolfrac_sb)
    #
    # Add a legend option
    #
    self.legend_le = QLineEdit(self)
    self.legend_le.setToolTip('The legend will be used to describe the results in the plot')
    self.legend_le.setText('Scenario legend')
    self.legend_le.textChanged.connect(self.on_legend_le_changed)
    label = QLabel('Scenario legend',self)
    label.setToolTip('The legend will be used to describe the results in the plot')
    form.addRow(label, self.legend_le)
    #
    # Final buttons
    #
    hbox = QHBoxLayout()
    self.pushButton1 = QPushButton('Add another scenario')
    self.pushButton1.setToolTip('Use another scenario to calculate the effect of changing the material on the absorption and permittivity')
    self.pushButton1.clicked.connect(self.pushButton1Clicked)
    hbox.addWidget(self.pushButton1)
    self.pushButton3 = QPushButton('Delete this scenario')
    self.pushButton3.setToolTip('Delete the current scenario')
    self.pushButton3.clicked.connect(self.pushButton3Clicked)
    hbox.addWidget(self.pushButton3)
    form.addRow(hbox)
    vbox.addLayout(form)
    # finalise the layout
    self.setLayout(vbox)
    # sort out greying of boxes
    self.change_greyed_out()
def pushButton1Clicked(self):
    """Handle the 'Add another scenario' button: clone this scenario."""
    debugger.print('Button 1 pressed')
    current = self.scenarioIndex
    self.notebook.addScenario(copyFromIndex=current)
def pushButton3Clicked(self):
    """Handle the 'Delete this scenario' button."""
    debugger.print('Button 3 pressed')
    current = self.scenarioIndex
    self.notebook.deleteScenario(current)
def crystal_density(self):
    """Return the crystal density computed from the reader's masses and
    unit-cell volume.

    The masses are summed and divided by the cell volume; the 1.0e-24
    factor converts the volume units used by the reader to cm^3 so the
    result is in g/cm^3 — TODO confirm units against the reader.

    :return: the crystal density, or 1.0 when no output file has been read.
    :rtype: float
    """
    if not self.reader:
        return 1.0
    volume = self.reader.volume
    # idiomatic sum() replaces the manual accumulation loop
    mass = sum(self.reader.masses)
    density = mass / (avogadro_si * volume * 1.0e-24)
    return density
def on_h_sb_changed(self, value):
    """Store the h component of the unique direction and flag recalculation."""
    debugger.print('on_h_sb_changed', value)
    self.dirty = True
    notebook = self.notebook
    notebook.plottingCalculationRequired = True
    notebook.fittingCalculationRequired = True
    self.settings['Unique direction - h'] = value
def on_k_sb_changed(self, value):
    """Store the k component of the unique direction and flag recalculation."""
    debugger.print('on_k_sb_changed', value)
    self.dirty = True
    notebook = self.notebook
    notebook.plottingCalculationRequired = True
    notebook.fittingCalculationRequired = True
    self.settings['Unique direction - k'] = value
def on_l_sb_changed(self, value):
    """Store the l component of the unique direction and flag recalculation."""
    debugger.print('on_l_sb_changed', value)
    self.dirty = True
    notebook = self.notebook
    notebook.plottingCalculationRequired = True
    notebook.fittingCalculationRequired = True
    self.settings['Unique direction - l'] = value
def on_shape_cb_activated(self, index):
    """Record the newly selected particle shape and update widget greying."""
    debugger.print('on shape cb activated', index)
    self.dirty = True
    notebook = self.notebook
    notebook.plottingCalculationRequired = True
    notebook.fittingCalculationRequired = True
    shape = self.shapes[index]
    self.settings['Particle shape'] = shape
    if shape == 'Sphere':
        # a sphere has no unique direction, so zero all three components
        for key in ('Unique direction - h', 'Unique direction - k', 'Unique direction - l'):
            self.settings[key] = 0
    self.change_greyed_out()
def on_methods_cb_activated(self, index):
    """Record the effective-medium method and reset settings the new
    method does not support, then update widget greying."""
    debugger.print('on methods cb activated', index)
    self.dirty = True
    notebook = self.notebook
    notebook.plottingCalculationRequired = True
    notebook.fittingCalculationRequired = True
    method = self.methods[index]
    self.settings['Effective medium method'] = method
    if method in ('Mie', 'Anisotropic-Mie'):
        # Mie theory is only implemented for spherical particles
        self.settings['Particle shape'] = 'Sphere'
    elif method in ('Maxwell-Garnett', 'Bruggeman'):
        # the analytic mixing rules use no size distribution
        self.settings['Particle size distribution sigma(mu)'] = 0.0
    elif method == 'Averaged Permittivity':
        # size is irrelevant here; restore the defaults
        self.settings['Particle size(mu)'] = 0.0001
        self.settings['Particle size distribution sigma(mu)'] = 0.0
    self.change_greyed_out()
def on_mf_sb_changed(self, value):
    """Record a user-entered mass fraction (%) and recompute the linked
    volume fraction."""
    debugger.print('on mass fraction line edit changed', value)
    self.dirty = True
    notebook = self.notebook
    notebook.plottingCalculationRequired = True
    notebook.fittingCalculationRequired = True
    # the mass fraction now takes precedence over the volume fraction
    self.settings['Mass or volume fraction'] = 'mass'
    self.settings['Mass fraction'] = value / 100.0
    self.update_vf_sb()
def update_vf_sb(self):
    """Recompute the dielectric volume fraction from the current mass
    fraction and refresh the volume-fraction spin box.

    Uses the crystal and matrix densities and scales by the air-void
    (bubble) volume fraction.  Signals on the spin box are blocked while
    its value is set so on_vf_sb_changed is not re-entered.
    """
    mf1 = self.settings['Mass fraction']
    mf2 = 1.0 - mf1
    rho1 = self.crystal_density()
    rho2 = self.settings['Matrix density']
    # volume fraction from the mass fractions and densities, reduced by
    # the volume already taken up by air bubbles
    vf1 = ( 1.0 - self.settings['Bubble volume fraction'] ) * (mf1/mf2)*(rho2/rho1) / ( 1 + (mf1/mf2)*(rho2/rho1))
    # vf1 = 1.0 / ( 1.0 + mf2/mf1 * (rho1/rho2) )
    self.settings['Volume fraction'] = vf1
    self.vf_sb.blockSignals(True)
    self.vf_sb.setValue(100.0*vf1)
    self.vf_sb.blockSignals(False)
    # keep the two ranges consistent: dielectric + voids cannot exceed 100%
    self.bubble_vf_sb.setRange(0.0, 100.0*(1.0-self.settings['Volume fraction']))
    self.vf_sb.setRange(0.0, 100.0*(1.0-self.settings['Bubble volume fraction']))
    debugger.print('Update_vf_sb')
    debugger.print('rho 1', rho1)
    debugger.print('rho 2', rho2)
    debugger.print('vf 1 ', vf1)
def on_aoverb_sb_changed(self, value):
    """Store the ellipsoid a/b eccentricity and flag recalculation."""
    debugger.print('on_aoverb_le_changed', value)
    self.dirty = True
    notebook = self.notebook
    notebook.plottingCalculationRequired = True
    notebook.fittingCalculationRequired = True
    self.settings['Ellipsoid a/b'] = value
def on_legend_le_changed(self, text):
    """Store the user-supplied plot legend for this scenario."""
    debugger.print('on legend change', text)
    self.dirty = True
    self.settings['Legend'] = text
def on_sigma_sb_changed(self, value):
    """Store the lognormal size-distribution sigma and flag recalculation."""
    debugger.print('on sigma line edit changed', value)
    self.dirty = True
    notebook = self.notebook
    notebook.plottingCalculationRequired = True
    notebook.fittingCalculationRequired = True
    self.settings['Particle size distribution sigma(mu)'] = value
def on_size_sb_changed(self, value):
    """Store the particle radius (μm) and flag recalculation."""
    debugger.print('on size line edit changed', value)
    self.dirty = True
    notebook = self.notebook
    notebook.plottingCalculationRequired = True
    notebook.fittingCalculationRequired = True
    self.settings['Particle size(mu)'] = value
def on_vf_sb_changed(self, value):
    """Record a user-entered volume fraction (%) and recompute the linked
    mass fraction."""
    debugger.print('on volume fraction line edit changed', value)
    self.dirty = True
    notebook = self.notebook
    notebook.plottingCalculationRequired = True
    notebook.fittingCalculationRequired = True
    # the volume fraction now takes precedence over the mass fraction
    self.settings['Mass or volume fraction'] = 'volume'
    self.settings['Volume fraction'] = value / 100.0
    self.update_mf_sb()
def update_mf_sb(self):
    """Recompute the dielectric mass fraction from the current volume
    fraction and refresh the mass-fraction spin box.

    The matrix volume fraction excludes the air-void (bubble) volume.
    Signals on the spin box are blocked while its value is set so
    on_mf_sb_changed is not re-entered.
    """
    vf1 = self.settings['Volume fraction']
    vf2 = 1.0 - vf1 - self.settings['Bubble volume fraction']
    rho1 = self.crystal_density()
    rho2 = self.settings['Matrix density']
    # mf1 = 1.0 / ( 1.0 + (vf2/vf1) * (rho2/rho1) )
    mf1 = rho1*vf1 / ( rho1*vf1 + rho2*vf2 )
    self.settings['Mass fraction'] = mf1
    self.mf_sb.blockSignals(True)
    self.mf_sb.setValue(100.0*mf1)
    self.mf_sb.blockSignals(False)
    debugger.print('Update_mf_sb')
    debugger.print('rho 1', rho1)
    debugger.print('rho 2', rho2)
    debugger.print('mf 1 ', mf1)
def on_matrix_cb_activated(self, index):
    """Handle selection of a new support-matrix material.

    Looks up the density and permittivity of the chosen material in the
    support-matrix database, updates the related spin boxes, and rebalances
    the linked mass/volume fractions.
    """
    debugger.print('on matrix combobox activated', index)
    debugger.print('on matrix combobox activated', self.matrix_cb.currentText())
    self.dirty = True
    self.notebook.plottingCalculationRequired = True
    self.notebook.fittingCalculationRequired = True
    matrix = self.matrix_cb.currentText()
    # block signals while the widgets are updated programmatically so the
    # change handlers are not re-entered
    self.matrix_cb.blockSignals(True)
    self.density_sb.blockSignals(True)
    self.permittivity_sb.blockSignals(True)
    self.settings['Matrix'] = matrix
    self.settings['Matrix density'] = support_matrix_db[matrix][0]
    self.settings['Matrix permittivity'] = support_matrix_db[matrix][1]
    self.density_sb.setValue(self.settings['Matrix density'])
    self.permittivity_sb.setValue(self.settings['Matrix permittivity'])
    # volume fraction takes precedence
    if self.settings['Mass or volume fraction'] == 'volume':
        self.update_mf_sb()
        self.update_vf_sb()
    else:
        self.update_vf_sb()
        self.update_mf_sb()
    self.matrix_cb.blockSignals(False)
    self.density_sb.blockSignals(False)
    self.permittivity_sb.blockSignals(False)
def on_density_sb_changed(self, value):
    """Store a user-edited support density and rebalance the linked
    mass/volume fractions."""
    self.settings['Matrix density'] = value
    # whichever fraction the user set last (volume or mass) takes precedence
    if self.settings['Mass or volume fraction'] != 'volume':
        self.update_vf_sb()
        self.update_mf_sb()
    else:
        self.update_mf_sb()
        self.update_vf_sb()
    debugger.print('on density line edit changed', value)
    self.dirty = True
    notebook = self.notebook
    notebook.plottingCalculationRequired = True
    notebook.fittingCalculationRequired = True
def on_bubble_vf_sb_changed(self, value):
    """Record the air-void volume fraction (%) and rebalance the linked
    mass/volume fractions."""
    self.settings['Bubble volume fraction'] = value / 100.0
    # refresh whichever fraction is derived from the one the user set last
    if self.settings['Mass or volume fraction'] != 'volume':
        self.update_vf_sb()
    else:
        self.update_mf_sb()
    debugger.print('on bubble volume fraction changed', value)
    self.dirty = True
    notebook = self.notebook
    notebook.plottingCalculationRequired = True
    notebook.fittingCalculationRequired = True
def on_bubble_radius_sb_changed(self,value):
self.settings['Bubble radius'] = value
debugger.print('on permittivity line edit changed', value)
self.dirty = True
self.notebook.plottingCalculationRequired = True
self.notebook.fittingCalculationRequired = True
def on_permittivity_sb_changed(self,value):
self.settings['Matrix permittivity'] = value
debugger.print('on permittivity line edit changed', value)
self.dirty = True
self.notebook.plottingCalculationRequired = True
self.notebook.fittingCalculationRequired = True
def on_atr_index_sb_changed(self,value):
self.settings['ATR material refractive index'] = value
debugger.print('on atr index line edit changed', value)
self.dirty = True
self.notebook.plottingCalculationRequired = True
self.notebook.fittingCalculationRequired = True
def on_atr_incident_ang_sb_changed(self,value):
self.settings['ATR theta'] = value
debugger.print('on atr incident angle line edit changed', value)
self.dirty = True
self.notebook.plottingCalculationRequired = True
self.notebook.fittingCalculationRequired = True
def on_atr_spolfrac_sb_changed(self,value):
self.settings['ATR S polarisation fraction'] = value
debugger.print('on atr spolfraction line edit changed', value)
self.dirty = True
self.notebook.plottingCalculationRequired = True
self.notebook.fittingCalculationRequired = True
def set_reader(self,reader):
self.dirty = True
self.notebook.plottingCalculationRequired = True
self.notebook.fittingCalculationRequired = True
self.reader = reader
    def change_greyed_out(self):
        """Enable/disable widgets according to the effective medium method and shape.

        Mie methods force the particle shape to 'Sphere' and enable the size
        distribution; 'Averaged Permittivity' disables size, sigma and shape
        selection entirely; Maxwell-Garnett/Bruggeman allow any shape but no
        size distribution.  The second half enables the hkl/a-over-b inputs
        only for shapes that use them.
        """
        # Have a look through the settings and see if we need to grey anything out
        method = self.settings['Effective medium method']
        if method == 'Mie' or method == 'Anisotropic-Mie':
            self.size_sb.setEnabled(True)
            self.sigma_sb.setEnabled(True)
            # Disable every shape entry first, then re-enable only 'Sphere'
            for i,shape in enumerate(self.shapes):
                self.shape_cb.model().item(i).setEnabled(False)
            self.settings['Particle shape'] = 'Sphere'
            self.shape_cb.setEnabled(True)
            index = self.shape_cb.findText(self.settings['Particle shape'], Qt.MatchFixedString)
            if index >=0:
                self.shape_cb.model().item(index).setEnabled(True)
                self.shape_cb.setCurrentIndex(index)
            else:
                print('Method index was not 0',self.settings['Particle shape'])
        elif method == 'Averaged Permittivity':
            # Size and size distribution are irrelevant for this method
            self.size_sb.setEnabled(False)
            self.sigma_sb.setEnabled(False)
            self.settings['Particle shape'] = 'Sphere'
            index = self.shape_cb.findText(self.settings['Particle shape'], Qt.MatchFixedString)
            if index >=0:
                self.shape_cb.model().item(index).setEnabled(True)
                self.shape_cb.setCurrentIndex(index)
            # Shape cannot be chosen at all for averaged permittivity
            self.shape_cb.setEnabled(False)
            for i,shape in enumerate(self.shapes):
                self.shape_cb.model().item(i).setEnabled(False)
        elif method == 'Maxwell-Garnett' or method == 'Bruggeman':
            # Size is used, but there is no size distribution; any shape is allowed
            self.size_sb.setEnabled(True)
            self.sigma_sb.setEnabled(False)
            self.shape_cb.setEnabled(True)
            for i,shape in enumerate(self.shapes):
                self.shape_cb.model().item(i).setEnabled(True)
        else:
            # Any other method: no size handling, but shape remains selectable
            self.size_sb.setEnabled(False)
            self.sigma_sb.setEnabled(False)
            self.shape_cb.setEnabled(True)
            for i,shape in enumerate(self.shapes):
                self.shape_cb.model().item(i).setEnabled(True)
        # deal with shapes
        # Only 'Ellipsoid' uses the a/b ratio; 'Sphere' needs no unique direction
        if self.settings['Particle shape'] == 'Ellipsoid':
            self.h_sb.setEnabled(True)
            self.k_sb.setEnabled(True)
            self.l_sb.setEnabled(True)
            self.hkl_label.setText('Unique direction [abc]')
            self.aoverb_sb.setEnabled(True)
        elif self.settings['Particle shape'] == 'Plate':
            self.h_sb.setEnabled(True)
            self.k_sb.setEnabled(True)
            self.l_sb.setEnabled(True)
            self.hkl_label.setText('Unique direction (hkl)')
            self.aoverb_sb.setEnabled(False)
        elif self.settings['Particle shape'] == 'Needle':
            self.h_sb.setEnabled(True)
            self.k_sb.setEnabled(True)
            self.l_sb.setEnabled(True)
            self.hkl_label.setText('Unique direction [abc]')
            self.aoverb_sb.setEnabled(False)
        elif self.settings['Particle shape'] == 'Sphere':
            self.h_sb.setEnabled(False)
            self.k_sb.setEnabled(False)
            self.l_sb.setEnabled(False)
            self.aoverb_sb.setEnabled(False)
        else:
            print('ScenarioTab: Shape not recognised', self.settings['Particle shape'])
def setScenarioIndex(self,index):
self.scenarioIndex = index
text = self.legend_le.text()
if text == 'Scenario legend':
self.legend_le.setText('Scenario '+str(index + 1))
return
def print_settings(self):
print('#')
print('# Scenario tab')
print('#')
print('tab = self.notebook.scenarios')
for key in self.settings:
print(key, self.settings[key])
    def refresh(self,force=False):
        """Re-populate every widget on the tab from self.settings.

        Does nothing unless the tab is dirty or force is True.  All child
        widget signals are blocked for the duration so that the setValue /
        setCurrentIndex calls do not re-trigger the change handlers.
        """
        if not self.dirty and not force:
            debugger.print('refresh aborted', self.dirty,force)
            return
        debugger.print('refresh', force)
        # Tell the main notebook that we need to recalculate any plot
        self.notebook.plottingCalculationRequired = True
        self.notebook.fittingCalculationRequired = True
        # First see if we can get the reader from the mainTab
        self.reader = self.notebook.mainTab.reader
        #
        # Block signals during refresh
        #
        for w in self.findChildren(QWidget):
            w.blockSignals(True)
        # use the settings values to initialise the widgets
        index = self.matrix_cb.findText(self.settings['Matrix'], Qt.MatchFixedString)
        self.matrix_cb.setCurrentIndex(index)
        self.density_sb.setValue(self.settings['Matrix density'])
        self.permittivity_sb.setValue(self.settings['Matrix permittivity'])
        # Stored as a fraction, displayed as a percentage
        self.bubble_vf_sb.setValue(100*self.settings['Bubble volume fraction'])
        self.bubble_radius_sb.setValue(self.settings['Bubble radius'])
        if self.settings['Mass or volume fraction'] == 'volume':
            # volume fraction takes precedence
            self.update_mf_sb()
            self.update_vf_sb()
        else:
            # mass fraction takes precedence
            self.update_vf_sb()
            self.update_mf_sb()
        #
        index = self.methods_cb.findText(self.settings['Effective medium method'], Qt.MatchFixedString)
        self.methods_cb.setCurrentIndex(index)
        self.size_sb.setValue(self.settings['Particle size(mu)'])
        self.sigma_sb.setValue(self.settings['Particle size distribution sigma(mu)'])
        index = self.shape_cb.findText(self.settings['Particle shape'], Qt.MatchFixedString)
        self.shape_cb.setCurrentIndex(index)
        self.h_sb.setValue(self.settings['Unique direction - h'])
        self.k_sb.setValue(self.settings['Unique direction - k'])
        self.l_sb.setValue(self.settings['Unique direction - l'])
        self.aoverb_sb.setValue(self.settings['Ellipsoid a/b'])
        self.legend_le.setText(self.settings['Legend'])
        # Grey out whatever does not apply to the current method/shape
        self.change_greyed_out()
        #
        # Unblock signals after refresh
        #
        for w in self.findChildren(QWidget):
            w.blockSignals(False)
        self.dirty = False
        return
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Temporal memory implementation.
This is the Python implementation and is used as the base class for the C++
implementation in :class:`~nupic.algorithms.backtracking_tm.BacktrackingTMCPP`.
"""
import copy
import cPickle as pickle
import itertools
try:
import capnp
except ImportError:
capnp = None
import numpy
if capnp:
from nupic.algorithms.backtracking_tm_capnp import (
SegmentProto, SegmentUpdateProto, BacktrackingTMProto)
from nupic.bindings.math import Random
from nupic.bindings.algorithms import getSegmentActivityLevel, isSegmentActive
from nupic.math import GetNTAReal
from nupic.serializable import Serializable
from nupic.support.console_printer import ConsolePrinterMixin
# Default verbosity while running unit tests (0 = silent)
VERBOSITY = 0
# The current TM version used to track the checkpoint state; checked on
# deserialization in __setstate__ and read().
TM_VERSION = 1
# The numpy equivalent to the floating point type used by NTA
dtype = GetNTAReal()
class BacktrackingTM(ConsolePrinterMixin, Serializable):
"""
Class implementing the temporal memory algorithm as described in
`BAMI <https://numenta.com/biological-and-machine-intelligence/>`_. The
implementation here attempts to closely match the pseudocode in the
documentation. This implementation does contain several additional bells and
whistles such as a column confidence measure.
:param numberOfCols: (int) Number of mini-columns in the region. This values
needs to be the same as the number of columns in the SP, if one is
used.
:param cellsPerColumn: (int) The number of cells per mini-column.
:param initialPerm: (float) Initial permanence for newly created synapses.
:param connectedPerm: TODO: document
:param minThreshold: (int) Minimum number of active synapses for a segment to
be considered during search for the best-matching segments.
:param newSynapseCount: (int) The max number of synapses added to a segment
during learning.
:param permanenceInc: (float) Active synapses get their permanence counts
incremented by this value.
:param permanenceDec: (float) All other synapses get their permanence counts
decremented by this value.
:param permanenceMax: TODO: document
:param maxAge: (int) Number of iterations before global decay takes effect.
Also the global decay execution interval. After global decay starts, it
         will run again every ``maxAge`` iterations. If ``maxAge==1``,
global decay is applied to every iteration to every segment.
.. note:: Using ``maxAge > 1`` can significantly speed up the TM when
global decay is used.
:param globalDecay: (float) Value to decrease permanences when the global
decay process runs. Global decay will remove synapses if their
permanence value reaches 0. It will also remove segments when they no
longer have synapses.
.. note:: Global decay is applied after ``maxAge`` iterations, after
which it will run every ``maxAge`` iterations.
:param activationThreshold: (int) Number of synapses that must be active to
activate a segment.
:param doPooling: (bool) If True, pooling is enabled. False is the default.
:param segUpdateValidDuration: TODO: document
:param burnIn: (int) Used for evaluating the prediction score. Default is 2.
:param collectStats: (bool) If True, collect training / inference stats.
Default is False.
:param seed: (int) Random number generator seed. The seed affects the random
aspects of initialization like the initial permanence values. A fixed
value ensures a reproducible result.
:param verbosity: (int) Controls the verbosity of the TM diagnostic output:
- verbosity == 0: silent
- verbosity in [1..6]: increasing levels of verbosity
:param pamLength: (int) Number of time steps to remain in "Pay Attention Mode"
after we detect we've reached the end of a learned sequence. Setting
this to 0 disables PAM mode. When we are in PAM mode, we do not burst
unpredicted columns during learning, which in turn prevents us from
falling into a previously learned sequence for a while (until we run
through another 'pamLength' steps).
The advantage of PAM mode is that it requires fewer presentations to
learn a set of sequences which share elements. The disadvantage of PAM
         mode is that if a learned sequence is immediately followed by a set
of elements that should be learned as a 2nd sequence, the first
``pamLength`` elements of that sequence will not be learned as part of
that 2nd sequence.
:param maxInfBacktrack: (int) How many previous inputs to keep in a buffer for
inference backtracking.
:param maxLrnBacktrack: (int) How many previous inputs to keep in a buffer for
learning backtracking.
:param maxSeqLength: (int) If not 0, we will never learn more than
``maxSeqLength`` inputs in a row without starting over at start cells.
This sets an upper bound on the length of learned sequences and thus is
another means (besides ``maxAge`` and ``globalDecay``) by which to
limit how much the TM tries to learn.
:param maxSegmentsPerCell: (int) The maximum number of segments allowed on a
cell. This is used to turn on "fixed size CLA" mode. When in effect,
``globalDecay`` is not applicable and must be set to 0 and ``maxAge``
must be set to 0. When this is used (> 0), ``maxSynapsesPerSegment``
must also be > 0.
:param maxSynapsesPerSegment: (int) The maximum number of synapses allowed in
a segment. This is used to turn on "fixed size CLA" mode. When in
effect, ``globalDecay`` is not applicable and must be set to 0, and
``maxAge`` must be set to 0. When this is used (> 0),
``maxSegmentsPerCell`` must also be > 0.
:param outputType: (string) Can be one of the following (default ``normal``):
- ``normal``: output the OR of the active and predicted state.
- ``activeState``: output only the active state.
- ``activeState1CellPerCol``: output only the active state, and at most
1 cell/column. If more than 1 cell is active in a column, the one
with the highest confidence is sent up.
"""
  def __init__(self,
               numberOfCols=500,
               cellsPerColumn=10,
               initialPerm=0.11,
               connectedPerm=0.50,
               minThreshold=8,
               newSynapseCount=15,
               permanenceInc=0.10,
               permanenceDec=0.10,
               permanenceMax=1.0,
               globalDecay=0.10,
               activationThreshold=12,
               doPooling=False,
               segUpdateValidDuration=5,
               burnIn=2,
               collectStats=False,
               seed=42,
               verbosity=VERBOSITY,
               checkSynapseConsistency=False, # for cpp only -- ignored
               pamLength=1,
               maxInfBacktrack=10,
               maxLrnBacktrack=5,
               maxAge=100000,
               maxSeqLength=32,
               maxSegmentsPerCell=-1,
               maxSynapsesPerSegment=-1,
               outputType='normal',
               ):
    """Construct a temporal memory instance.

    See the class docstring for a description of each parameter.
    """
    ConsolePrinterMixin.__init__(self, verbosity)
    # Check arguments
    assert pamLength > 0, "This implementation must have pamLength > 0"
    # Fixed size CLA mode?  Requires bounded segments/synapses and no
    # global decay / aging (they are incompatible with fixed-size mode).
    if maxSegmentsPerCell != -1 or maxSynapsesPerSegment != -1:
      assert (maxSegmentsPerCell > 0 and maxSynapsesPerSegment > 0)
      assert (globalDecay == 0.0)
      assert (maxAge == 0)
      assert maxSynapsesPerSegment >= newSynapseCount, ("TM requires that "
          "maxSynapsesPerSegment >= newSynapseCount. (Currently %s >= %s)" % (
          maxSynapsesPerSegment, newSynapseCount))
    # Seed random number generator
    if seed >= 0:
      self._random = Random(seed)
    else:
      self._random = Random(numpy.random.randint(256))
    # Store creation parameters
    self.numberOfCols = numberOfCols
    self.cellsPerColumn = cellsPerColumn
    self._numberOfCells = numberOfCols * cellsPerColumn
    self.initialPerm = numpy.float32(initialPerm)
    self.connectedPerm = numpy.float32(connectedPerm)
    self.minThreshold = minThreshold
    self.newSynapseCount = newSynapseCount
    self.permanenceInc = numpy.float32(permanenceInc)
    self.permanenceDec = numpy.float32(permanenceDec)
    self.permanenceMax = numpy.float32(permanenceMax)
    self.globalDecay = numpy.float32(globalDecay)
    self.activationThreshold = activationThreshold
    ## Allows to turn off pooling
    self.doPooling = doPooling
    self.segUpdateValidDuration = segUpdateValidDuration
    ## Used for evaluating the prediction score
    self.burnIn = burnIn
    ## If true, collect training/inference stats
    self.collectStats = collectStats
    self.verbosity = verbosity
    self.pamLength = pamLength
    self.maxAge = maxAge
    self.maxInfBacktrack = maxInfBacktrack
    self.maxLrnBacktrack = maxLrnBacktrack
    self.maxSeqLength = maxSeqLength
    self.maxSegmentsPerCell = maxSegmentsPerCell
    self.maxSynapsesPerSegment = maxSynapsesPerSegment
    assert outputType in ('normal', 'activeState', 'activeState1CellPerCol')
    self.outputType = outputType
    # No point having larger expiration if we are not doing pooling
    if not doPooling:
      self.segUpdateValidDuration = 1
    # Create data structures
    self.activeColumns = [] # list of indices of active columns
    ## Cells are indexed by column and index in the column
    # Every self.cells[column][index] contains a list of segments
    # Each segment is a structure of class Segment
    self.cells = []
    for c in xrange(self.numberOfCols):
      self.cells.append([])
      for _ in xrange(self.cellsPerColumn):
        self.cells[c].append([])
    self.lrnIterationIdx = 0
    self.iterationIdx = 0
    ## unique segment id, so we can put segments in hashes
    self.segID = 0
    self.currentOutput = None # for checkPrediction
    ## pamCounter gets reset to pamLength whenever we detect that the learning
    # state is making good predictions (at least half the columns predicted).
    # Whenever we do not make a good prediction, we decrement pamCounter.
    # When pamCounter reaches 0, we start the learn state over again at start
    # cells.
    self.pamCounter = self.pamLength
    ## If True, the TM will compute a signature for each sequence
    self.collectSequenceStats = False
    ## This gets set when we receive a reset and cleared on the first compute
    # following a reset.
    self.resetCalled = False
    ## We keep track of the average input density here
    self.avgInputDensity = None
    ## Keeps track of the length of the sequence currently being learned.
    self.learnedSeqLength = 0
    ## Keeps track of the moving average of all learned sequence length.
    self.avgLearnedSeqLength = 0.0
    # Set attributes initialized later on.
    self._prevLrnPatterns = None
    self._prevInfPatterns = None
    self.segmentUpdates = None
    # Set attributes that are initialized in _initEphemerals.
    self._stats = None
    self.cellConfidence = None
    self.colConfidence = None
    self.lrnActiveState = None
    self.infActiveState = None
    self.lrnPredictedState = None
    self.infPredictedState = None
    self._internalStats = None
    # All other members are ephemeral - don't need to be saved when we save
    # state. So they get separated out into _initEphemerals, which also
    # gets called when we are being restored from a saved state (via
    # __setstate__)
    self._initEphemerals()
def _getEphemeralMembers(self):
"""
List of our member variables that we don't need to be saved.
"""
return []
  def _initEphemerals(self):
    """
    Initialize all ephemeral members after being restored to a pickled state.
    """
    ## We store the lists of segments updates, per cell, so that they can be
    # applied later during learning, when the cell gets bottom-up activation.
    # We store one list per cell. The lists are identified with a hash key which
    # is a tuple (column index, cell index).
    self.segmentUpdates = {}
    # Allocate and reset all stats
    self.resetStats()
    # NOTE: We don't use the same backtrack buffer for inference and learning
    # because learning has a different metric for determining if an input from
    # the past is potentially useful again for backtracking.
    #
    # Our inference backtrack buffer. This keeps track of up to
    # maxInfBacktrack of previous input. Each entry is a list of active column
    # inputs.
    self._prevInfPatterns = []
    # Our learning backtrack buffer. This keeps track of up to maxLrnBacktrack
    # of previous input. Each entry is a list of active column inputs
    self._prevLrnPatterns = []
    # Keep integers rather than bools. Float?
    # Per-timestep cell state arrays, shape (numberOfCols, cellsPerColumn).
    # Keys are time offsets ("t", "t-1") plus backtracking scratch buffers
    # ("backup", "candidate") for the inference states.
    stateShape = (self.numberOfCols, self.cellsPerColumn)
    self.lrnActiveState = {}
    self.lrnActiveState["t"] = numpy.zeros(stateShape, dtype="int8")
    self.lrnActiveState["t-1"] = numpy.zeros(stateShape, dtype="int8")
    self.lrnPredictedState = {}
    self.lrnPredictedState["t"] = numpy.zeros(stateShape, dtype="int8")
    self.lrnPredictedState["t-1"] = numpy.zeros(stateShape, dtype="int8")
    self.infActiveState = {}
    self.infActiveState["t"] = numpy.zeros(stateShape, dtype="int8")
    self.infActiveState["t-1"] = numpy.zeros(stateShape, dtype="int8")
    self.infActiveState["backup"] = numpy.zeros(stateShape, dtype="int8")
    self.infActiveState["candidate"] = numpy.zeros(stateShape, dtype="int8")
    self.infPredictedState = {}
    self.infPredictedState["t"] = numpy.zeros(stateShape, dtype="int8")
    self.infPredictedState["t-1"] = numpy.zeros(stateShape, dtype="int8")
    self.infPredictedState["backup"] = numpy.zeros(stateShape, dtype="int8")
    self.infPredictedState["candidate"] = numpy.zeros(stateShape, dtype="int8")
    # Confidence values are floats; cellConfidence is per cell,
    # colConfidence is per column.
    self.cellConfidence = {}
    self.cellConfidence["t"] = numpy.zeros(stateShape, dtype="float32")
    self.cellConfidence["t-1"] = numpy.zeros(stateShape, dtype="float32")
    self.cellConfidence["candidate"] = numpy.zeros(stateShape, dtype="float32")
    self.colConfidence = {}
    self.colConfidence["t"] = numpy.zeros(self.numberOfCols, dtype="float32")
    self.colConfidence["t-1"] = numpy.zeros(self.numberOfCols, dtype="float32")
    self.colConfidence["candidate"] = numpy.zeros(self.numberOfCols,
                                                  dtype="float32")
def __getstate__(self):
""" @internal
Return serializable state. This function will return a version of the
__dict__ with all "ephemeral" members stripped out. "Ephemeral" members
are defined as those that do not need to be (nor should be) stored
in any kind of persistent file (e.g., NuPIC network XML file.)
"""
state = self.__dict__.copy()
for ephemeralMemberName in self._getEphemeralMembers():
state.pop(ephemeralMemberName, None)
state['_random'] = self._getRandomState()
state['version'] = TM_VERSION
return state
def __setstate__(self, state):
""" @internal
Set the state of ourself from a serialized state.
"""
self._setRandomState(state['_random'])
del state['_random']
version = state.pop('version')
assert version == TM_VERSION
self.__dict__.update(state)
  @staticmethod
  def getSchema():
    """Return the capnp schema class used by write()/read() for serialization."""
    return BacktrackingTMProto
  def write(self, proto):
    """Populate serialization proto instance.
    :param proto: (BacktrackingTMProto) the proto instance to populate
    """
    proto.version = TM_VERSION
    self._random.write(proto.random)
    proto.numberOfCols = self.numberOfCols
    proto.cellsPerColumn = self.cellsPerColumn
    # numpy.float32 values are converted to plain floats for the proto
    proto.initialPerm = float(self.initialPerm)
    proto.connectedPerm = float(self.connectedPerm)
    proto.minThreshold = self.minThreshold
    proto.newSynapseCount = self.newSynapseCount
    proto.permanenceInc = float(self.permanenceInc)
    proto.permanenceDec = float(self.permanenceDec)
    proto.permanenceMax = float(self.permanenceMax)
    proto.globalDecay = float(self.globalDecay)
    proto.activationThreshold = self.activationThreshold
    proto.doPooling = self.doPooling
    proto.segUpdateValidDuration = self.segUpdateValidDuration
    proto.burnIn = self.burnIn
    proto.collectStats = self.collectStats
    proto.verbosity = self.verbosity
    proto.pamLength = self.pamLength
    proto.maxAge = self.maxAge
    proto.maxInfBacktrack = self.maxInfBacktrack
    proto.maxLrnBacktrack = self.maxLrnBacktrack
    proto.maxSeqLength = self.maxSeqLength
    proto.maxSegmentsPerCell = self.maxSegmentsPerCell
    proto.maxSynapsesPerSegment = self.maxSynapsesPerSegment
    proto.outputType = self.outputType
    proto.activeColumns = self.activeColumns
    # Serialize the nested cells structure: column -> cell -> segments
    cellListProto = proto.init("cells", len(self.cells))
    for i, columnSegments in enumerate(self.cells):
      columnSegmentsProto = cellListProto.init(i, len(columnSegments))
      for j, cellSegments in enumerate(columnSegments):
        cellSegmentsProto = columnSegmentsProto.init(j, len(cellSegments))
        for k, segment in enumerate(cellSegments):
          segment.write(cellSegmentsProto[k])
    proto.lrnIterationIdx = self.lrnIterationIdx
    proto.iterationIdx = self.iterationIdx
    proto.segID = self.segID
    # currentOutput may be None before the first compute; skip it then
    if self.currentOutput is not None:
      proto.currentOutput = self.currentOutput.tolist()
    proto.pamCounter = self.pamCounter
    proto.collectSequenceStats = self.collectSequenceStats
    proto.resetCalled = self.resetCalled
    # In case of None, use negative value as placeholder for serialization
    proto.avgInputDensity = self.avgInputDensity or -1.0
    proto.learnedSeqLength = self.learnedSeqLength
    proto.avgLearnedSeqLength = self.avgLearnedSeqLength
    proto.prevLrnPatterns = self._prevLrnPatterns
    proto.prevInfPatterns = self._prevInfPatterns
    # Flatten the {(col, cell): [(lrnIterationIdx, SegmentUpdate), ...]} map
    segmentUpdatesListProto = proto.init("segmentUpdates",
                                         len(self.segmentUpdates))
    for i, (key, updates) in enumerate(self.segmentUpdates.iteritems()):
      cellSegmentUpdatesProto = segmentUpdatesListProto[i]
      cellSegmentUpdatesProto.columnIdx = key[0]
      cellSegmentUpdatesProto.cellIdx = key[1]
      segmentUpdatesProto = cellSegmentUpdatesProto.init("segmentUpdates",
                                                         len(updates))
      for j, (lrnIterationIdx, segmentUpdate) in enumerate(updates):
        segmentUpdateWrapperProto = segmentUpdatesProto[j]
        segmentUpdateWrapperProto.lrnIterationIdx = lrnIterationIdx
        segmentUpdate.write(segmentUpdateWrapperProto.segmentUpdate)
    # self.cellConfidence
    proto.cellConfidenceT = self.cellConfidence["t"].tolist()
    proto.cellConfidenceT1 = self.cellConfidence["t-1"].tolist()
    proto.cellConfidenceCandidate = self.cellConfidence["candidate"].tolist()
    # self.colConfidence
    proto.colConfidenceT = self.colConfidence["t"].tolist()
    proto.colConfidenceT1 = self.colConfidence["t-1"].tolist()
    proto.colConfidenceCandidate = self.colConfidence["candidate"].tolist()
    # self.lrnActiveState
    proto.lrnActiveStateT = self.lrnActiveState["t"].tolist()
    proto.lrnActiveStateT1 = self.lrnActiveState["t-1"].tolist()
    # self.infActiveState
    proto.infActiveStateT = self.infActiveState["t"].tolist()
    proto.infActiveStateT1 = self.infActiveState["t-1"].tolist()
    proto.infActiveStateBackup = self.infActiveState["backup"].tolist()
    proto.infActiveStateCandidate = self.infActiveState["candidate"].tolist()
    # self.lrnPredictedState
    proto.lrnPredictedStateT = self.lrnPredictedState["t"].tolist()
    proto.lrnPredictedStateT1 = self.lrnPredictedState["t-1"].tolist()
    # self.infPredictedState
    proto.infPredictedStateT = self.infPredictedState["t"].tolist()
    proto.infPredictedStateT1 = self.infPredictedState["t-1"].tolist()
    proto.infPredictedStateBackup = self.infPredictedState["backup"].tolist()
    proto.infPredictedStateCandidate = self.infPredictedState["candidate"].tolist()
    proto.consolePrinterVerbosity = self.consolePrinterVerbosity
@classmethod
def read(cls, proto):
"""Deserialize from proto instance.
:param proto: (BacktrackingTMProto) the proto instance to read from
"""
assert proto.version == TM_VERSION
obj = object.__new__(cls)
obj._random = Random()
obj._random.read(proto.random)
obj.numberOfCols = int(proto.numberOfCols)
obj.cellsPerColumn = int(proto.cellsPerColumn)
obj._numberOfCells = obj.numberOfCols * obj.cellsPerColumn
obj.initialPerm = numpy.float32(proto.initialPerm)
obj.connectedPerm = numpy.float32(proto.connectedPerm)
obj.minThreshold = int(proto.minThreshold)
obj.newSynapseCount = int(proto.newSynapseCount)
obj.permanenceInc = numpy.float32(proto.permanenceInc)
obj.permanenceDec = numpy.float32(proto.permanenceDec)
obj.permanenceMax = numpy.float32(proto.permanenceMax)
obj.globalDecay = numpy.float32(proto.globalDecay)
obj.activationThreshold = int(proto.activationThreshold)
obj.doPooling = proto.doPooling
obj.segUpdateValidDuration = int(proto.segUpdateValidDuration)
obj.burnIn = int(proto.burnIn)
obj.collectStats = proto.collectStats
obj.verbosity = int(proto.verbosity)
obj.pamLength = int(proto.pamLength)
obj.maxAge = int(proto.maxAge)
obj.maxInfBacktrack = int(proto.maxInfBacktrack)
obj.maxLrnBacktrack = int(proto.maxLrnBacktrack)
obj.maxSeqLength = int(proto.maxSeqLength)
obj.maxSegmentsPerCell = proto.maxSegmentsPerCell
obj.maxSynapsesPerSegment = proto.maxSynapsesPerSegment
obj.outputType = proto.outputType
obj.activeColumns = [int(col) for col in proto.activeColumns]
obj.cells = [[] for _ in xrange(len(proto.cells))]
for columnSegments, columnSegmentsProto in zip(obj.cells, proto.cells):
columnSegments.extend([[] for _ in xrange(len(columnSegmentsProto))])
for cellSegments, cellSegmentsProto in zip(columnSegments,
columnSegmentsProto):
for segmentProto in cellSegmentsProto:
segment = Segment.read(segmentProto, obj)
cellSegments.append(segment)
obj.lrnIterationIdx = int(proto.lrnIterationIdx)
obj.iterationIdx = int(proto.iterationIdx)
obj.segID = int(proto.segID)
obj.pamCounter = int(proto.pamCounter)
obj.collectSequenceStats = proto.collectSequenceStats
obj.resetCalled = proto.resetCalled
avgInputDensity = proto.avgInputDensity
if avgInputDensity < 0.0:
# Negative value placeholder indicates None
obj.avgInputDensity = None
else:
obj.avgInputDensity = avgInputDensity
obj.learnedSeqLength = int(proto.learnedSeqLength)
obj.avgLearnedSeqLength = proto.avgLearnedSeqLength
# Initialize various structures
obj._initEphemerals()
obj.currentOutput = numpy.array(proto.currentOutput, dtype='float32')
for pattern in proto.prevLrnPatterns:
obj.prevLrnPatterns.append([v for v in pattern])
for pattern in proto.prevInfPatterns:
obj.prevInfPatterns.append([v for v in pattern])
for cellWrapperProto in proto.segmentUpdates:
key = (cellWrapperProto.columnIdx, cellWrapperProto.cellIdx)
value = []
for updateWrapperProto in cellWrapperProto.segmentUpdates:
segmentUpdate = SegmentUpdate.read(updateWrapperProto.segmentUpdate, obj)
value.append((int(updateWrapperProto.lrnIterationIdx), segmentUpdate))
obj.segmentUpdates[key] = value
# cellConfidence
numpy.copyto(obj.cellConfidence["t"], proto.cellConfidenceT)
numpy.copyto(obj.cellConfidence["t-1"], proto.cellConfidenceT1)
numpy.copyto(obj.cellConfidence["candidate"],
proto.cellConfidenceCandidate)
# colConfidence
numpy.copyto(obj.colConfidence["t"], proto.colConfidenceT)
numpy.copyto(obj.colConfidence["t-1"], proto.colConfidenceT1)
numpy.copyto(obj.colConfidence["candidate"], proto.colConfidenceCandidate)
# lrnActiveState
numpy.copyto(obj.lrnActiveState["t"], proto.lrnActiveStateT)
numpy.copyto(obj.lrnActiveState["t-1"], proto.lrnActiveStateT1)
# infActiveState
numpy.copyto(obj.infActiveState["t"], proto.infActiveStateT)
numpy.copyto(obj.infActiveState["t-1"], proto.infActiveStateT1)
numpy.copyto(obj.infActiveState["backup"], proto.infActiveStateBackup)
numpy.copyto(obj.infActiveState["candidate"],
proto.infActiveStateCandidate)
# lrnPredictedState
numpy.copyto(obj.lrnPredictedState["t"], proto.lrnPredictedStateT)
numpy.copyto(obj.lrnPredictedState["t-1"], proto.lrnPredictedStateT1)
# infPredictedState
numpy.copyto(obj.infPredictedState["t"], proto.infPredictedStateT)
numpy.copyto(obj.infPredictedState["t-1"], proto.infPredictedStateT1)
numpy.copyto(obj.infPredictedState["backup"],
proto.infPredictedStateBackup)
numpy.copyto(obj.infPredictedState["candidate"],
proto.infPredictedStateCandidate)
obj.consolePrinterVerbosity = int(proto.consolePrinterVerbosity)
return obj
  def __getattr__(self, name):
    """ @internal
    Patch __getattr__ so that we can catch the first access to 'cells' and load.
    This function is only called when we try to access an attribute that doesn't
    exist. We purposely make sure that "self.cells" doesn't exist after
    unpickling so that we'll hit this, then we can load it on the first access.
    If this is called at any other time, it will raise an AttributeError.
    That's because:
    - If 'name' is "cells", after the first call, self._realCells won't exist
      so we'll get an implicit AttributeError.
    - If 'name' isn't "cells", I'd expect our super wouldn't have __getattr__,
      so we'll raise our own Attribute error. If the super did get __getattr__,
      we'll just return what it gives us.
    """
    try:
      # object does not define __getattr__, so in practice this lookup itself
      # raises AttributeError and we fall through to the custom message below.
      return super(BacktrackingTM, self).__getattr__(name)
    except AttributeError:
      raise AttributeError("'TM' object has no attribute '%s'" % name)
  def __del__(self):
    # Intentional no-op: this class holds no resources requiring explicit
    # cleanup, but the destructor is defined so subclasses can override it.
    pass
def __ne__(self, tm):
return not self == tm
def __eq__(self, tm):
return not self.diff(tm)
  def diff(self, tm):
    """Return the differences between this TM's serialized state and tm's.

    Performs an iterative deep comparison of the two __getstate__ dicts.
    :param tm: (BacktrackingTM) the instance to compare against
    :returns: list of (keyPath, selfValue, otherValue) tuples; empty when the
        two instances are equivalent
    """
    diff = []
    # Worklist of (key path, value from self, value from tm) to compare
    toCheck = [((), self.__getstate__(), tm.__getstate__())]
    while toCheck:
      keys, a, b = toCheck.pop()
      if type(a) != type(b):
        diff.append((keys, a, b))
      elif isinstance(a, dict):
        keys1 = set(a.keys())
        keys2 = set(b.keys())
        # If there are missing keys, add them to the diff.
        if keys1 != keys2:
          for k in keys1 - keys2:
            diff.append((keys + (k,), a[k], None))
          for k in keys2 - keys1:
            diff.append((keys + (k,), None, b[k]))
        # For matching keys, add the values to the list of things to check
        for k in keys1.intersection(keys2):
          toCheck.append((keys + (k,), a[k], b[k]))
      elif isinstance(a, list) or isinstance(a, tuple):
        if len(a) != len(b):
          diff.append((keys + ('len',), len(a), len(b)))
        else:
          for i in xrange(len(a)):
            toCheck.append((keys + (i,), a[i], b[i]))
      elif isinstance(a, numpy.ndarray):
        if len(a) != len(b):
          diff.append((keys + ('len',), len(a), len(b)))
        elif not numpy.array_equal(a, b):
          diff.append((keys, a, b))
      elif isinstance(a, Random):
        # Compare RNGs by their captured state, not identity
        if a.getState() != b.getState():
          diff.append((keys, a.getState(), b.getState()))
      elif (a.__class__.__name__ == 'Cells4' and
            b.__class__.__name__ == 'Cells4'):
        # C++ cell structures are not compared here
        continue
      else:
        try:
          _ = a != b
        except ValueError:
          # Surface types whose != is ambiguous (e.g. unexpected arrays)
          raise ValueError(type(a))
        if a != b:
          diff.append((keys, a, b))
    return diff
def getLearnActiveStateT(self):
  """
  :returns: the learn active state matrix for time ``t``.
  """
  states = self.lrnActiveState
  return states['t']
def saveToFile(self, filePath):
  """
  No-op in this base class. Implemented in
  :meth:`nupic.algorithms.backtracking_tm_cpp.BacktrackingTMCPP.saveToFile`.
  """
def loadFromFile(self, filePath):
  """
  No-op in this base class. Implemented in
  :meth:`nupic.algorithms.backtracking_tm_cpp.BacktrackingTMCPP.loadFromFile`.
  """
def _getRandomState(self):
""" @internal
Return the random number state.
This is used during unit testing to generate repeatable results.
"""
return pickle.dumps(self._random)
def _setRandomState(self, state):
""" @internal Set the random number state.
This is used during unit testing to generate repeatable results.
"""
self._random = pickle.loads(state)
def reset(self,):
  """
  Reset the state of all cells.

  This is normally used between sequences while training. All internal states
  are reset to 0.
  """
  if self.verbosity >= 3:
    print "\n==== RESET ====="
  # Zero both time steps of every learn/inference state matrix.
  self.lrnActiveState['t-1'].fill(0)
  self.lrnActiveState['t'].fill(0)
  self.lrnPredictedState['t-1'].fill(0)
  self.lrnPredictedState['t'].fill(0)
  self.infActiveState['t-1'].fill(0)
  self.infActiveState['t'].fill(0)
  self.infPredictedState['t-1'].fill(0)
  self.infPredictedState['t'].fill(0)
  self.cellConfidence['t-1'].fill(0)
  self.cellConfidence['t'].fill(0)
  # Flush the segment update queue
  self.segmentUpdates = {}
  self._internalStats['nInfersSinceReset'] = 0
  #To be removed
  self._internalStats['curPredictionScore'] = 0
  #New prediction score
  self._internalStats['curPredictionScore2'] = 0
  self._internalStats['curFalseNegativeScore'] = 0
  self._internalStats['curFalsePositiveScore'] = 0
  self._internalStats['curMissing'] = 0
  self._internalStats['curExtra'] = 0
  # When a reset occurs, set prevSequenceSignature to the signature of the
  # just-completed sequence and start accumulating histogram for the next
  # sequence.
  self._internalStats['prevSequenceSignature'] = None
  if self.collectSequenceStats:
    if self._internalStats['confHistogram'].sum() > 0:
      sig = self._internalStats['confHistogram'].copy()
      # NOTE(review): ndarray.reshape returns a *new* array and the result is
      # discarded here, so `sig` is stored with its original 2-D shape.
      # Looks like a latent no-op -- confirm whether a flattened signature
      # was intended before changing.
      sig.reshape(self.numberOfCols * self.cellsPerColumn)
      self._internalStats['prevSequenceSignature'] = sig
    self._internalStats['confHistogram'].fill(0)
  # Flag read by the learn/infer code paths to detect a sequence boundary.
  self.resetCalled = True
  # Clear out input history
  self._prevInfPatterns = []
  self._prevLrnPatterns = []
def resetStats(self):
  """
  Reset the learning and inference stats. This will usually be called by
  user code at the start of each inference run (for a particular data set).
  """
  self._stats = {}
  # All scalar counters start at zero.
  counterNames = [
      'nInfersSinceReset', 'nPredictions',
      # New prediction score counters
      'curPredictionScore', 'curPredictionScore2', 'predictionScoreTotal2',
      'curFalseNegativeScore', 'falseNegativeScoreTotal',
      'curFalsePositiveScore', 'falsePositiveScoreTotal',
      'pctExtraTotal', 'pctMissingTotal',
      'curMissing', 'curExtra', 'totalMissing', 'totalExtra',
  ]
  self._internalStats = dict((name, 0) for name in counterNames)
  # Sequence signature statistics. Note that we don't reset the sequence
  # signature list itself.
  self._internalStats['prevSequenceSignature'] = None
  if self.collectSequenceStats:
    self._internalStats['confHistogram'] = numpy.zeros(
        (self.numberOfCols, self.cellsPerColumn), dtype="float32")
def getStats(self):
  """
  Return the current learning and inference stats. This returns a dict
  containing all the learning and inference stats we have collected since the
  last :meth:`resetStats` call. If :class:`BacktrackingTM` ``collectStats``
  parameter is False, then None is returned.

  :returns: (dict) with keys including ``nPredictions``, ``curMissing``,
      ``curExtra``, ``totalMissing``, ``totalExtra``, the current and
      averaged prediction scores, and ``prevSequenceSignature`` (None when
      ``collectSequenceStats`` is False).
  """
  if not self.collectStats:
    return None

  internal = self._internalStats
  # Copy the raw counters straight across.
  for key in ('nPredictions', 'curMissing', 'curExtra',
              'totalMissing', 'totalExtra',
              'curPredictionScore2', 'curFalseNegativeScore',
              'curFalsePositiveScore', 'prevSequenceSignature'):
    self._stats[key] = internal[key]

  # Guard the averages against division by zero before any prediction.
  nPredictions = max(1, self._stats['nPredictions'])

  # Averages over all predictions made since the last resetStats().
  self._stats['predictionScoreAvg2'] = (
      internal['predictionScoreTotal2'] / nPredictions)
  self._stats['falseNegativeAvg'] = (
      internal['falseNegativeScoreTotal'] / nPredictions)
  self._stats['falsePositiveAvg'] = (
      internal['falsePositiveScoreTotal'] / nPredictions)
  self._stats['pctExtraAvg'] = internal['pctExtraTotal'] / nPredictions
  self._stats['pctMissingAvg'] = internal['pctMissingTotal'] / nPredictions

  return self._stats
def _updateStatsInferEnd(self, stats, bottomUpNZ, predictedState,
                         colConfidence):
  """
  Called at the end of learning and inference, this routine will update
  a number of stats in our _internalStats dictionary, including our computed
  prediction score.

  :param stats internal stats dictionary
  :param bottomUpNZ list of the active bottom-up inputs
  :param predictedState The columns we predicted on the last time step (should
         match the current bottomUpNZ in the best case)
  :param colConfidence Column confidences we determined on the last time step
  """
  # Return if not collecting stats
  if not self.collectStats:
    return
  stats['nInfersSinceReset'] += 1

  # Compute the prediction score, how well the prediction from the last
  # time step predicted the current bottom-up input
  (numExtra2, numMissing2, confidences2) = self._checkPrediction(
      patternNZs=[bottomUpNZ], output=predictedState,
      colConfidence=colConfidence)
  # confidences2 has one entry per pattern; we passed exactly one pattern.
  predictionScore, positivePredictionScore, negativePredictionScore = (
      confidences2[0])

  # Store the stats that don't depend on burn-in
  stats['curPredictionScore2'] = float(predictionScore)
  stats['curFalseNegativeScore'] = 1.0 - float(positivePredictionScore)
  stats['curFalsePositiveScore'] = float(negativePredictionScore)

  stats['curMissing'] = numMissing2
  stats['curExtra'] = numExtra2

  # If we are passed the burn-in period, update the accumulated stats
  # Here's what various burn-in values mean:
  #   0: try to predict the first element of each sequence and all subsequent
  #   1: try to predict the second element of each sequence and all subsequent
  #   etc.
  if stats['nInfersSinceReset'] <= self.burnIn:
    return

  # Burn-in related stats
  stats['nPredictions'] += 1
  # Avoid division by zero for an empty bottom-up pattern.
  numExpected = max(1.0, float(len(bottomUpNZ)))

  stats['totalMissing'] += numMissing2
  stats['totalExtra'] += numExtra2
  stats['pctExtraTotal'] += 100.0 * numExtra2 / numExpected
  stats['pctMissingTotal'] += 100.0 * numMissing2 / numExpected
  stats['predictionScoreTotal2'] += float(predictionScore)
  stats['falseNegativeScoreTotal'] += 1.0 - float(positivePredictionScore)
  stats['falsePositiveScoreTotal'] += float(negativePredictionScore)

  if self.collectSequenceStats:
    # Collect cell confidences for every cell that correctly predicted current
    # bottom up input. Normalize confidence across each column
    cc = self.cellConfidence['t-1'] * self.infActiveState['t']
    sconf = cc.sum(axis=1)
    for c in range(self.numberOfCols):
      if sconf[c] > 0:
        cc[c, :] /= sconf[c]

    # Update cell confidence histogram: add column-normalized confidence
    # scores to the histogram
    self._internalStats['confHistogram'] += cc
def printState(self, aState):
  """
  Print an integer array that is the same shape as activeState.

  :param aState: 2-D array indexed as [column, cellIdx] (same shape as
      activeState); printed one row per cell index.
  """
  def formatRow(var, i):
    # Build one output row: the i'th cell of every column, with an extra
    # space after every 10 columns for readability.
    s = ''
    for c in range(self.numberOfCols):
      if c > 0 and c % 10 == 0:
        s += ' '
      s += str(var[c, i])
    s += ' '
    return s

  for i in xrange(self.cellsPerColumn):
    print formatRow(aState, i)
def printConfidence(self, aState, maxCols = 20):
  """
  Print a floating point array that is the same shape as activeState.

  :param aState: 2-D float array indexed as [column, cellIdx]
  :param maxCols: (int) print at most this many columns
  """
  def formatFPRow(var, i):
    # One output row: the i'th cell of up to maxCols columns, formatted
    # '%5.3f', with an extra space after every 10 columns.
    s = ''
    for c in range(min(maxCols, self.numberOfCols)):
      if c > 0 and c % 10 == 0:
        s += ' '
      s += ' %5.3f' % var[c, i]
    s += ' '
    return s

  for i in xrange(self.cellsPerColumn):
    print formatFPRow(aState, i)
def printColConfidence(self, aState, maxCols = 20):
  """
  Print up to maxCols number from a flat floating point array.

  :param aState: 1-D float array indexed by column
  :param maxCols: (int) print at most this many columns
  """
  def formatFPRow(var):
    # Single output row of '%5.3f'-formatted values, extra space every
    # 10 columns.
    s = ''
    for c in range(min(maxCols, self.numberOfCols)):
      if c > 0 and c % 10 == 0:
        s += ' '
      s += ' %5.3f' % var[c]
    s += ' '
    return s

  print formatFPRow(aState)
def printStates(self, printPrevious = True, printLearnState = True):
  """
  Print the inference (and optionally learn) active and predicted state
  matrices.

  :param printPrevious: (bool) when True, print the t-1 matrix beside t
  :param printLearnState: (bool) when True, also print the learn matrices
  """
  def formatRow(var, i):
    # One output row: the i'th cell of every column, extra space every
    # 10 columns.
    s = ''
    for c in range(self.numberOfCols):
      if c > 0 and c % 10 == 0:
        s += ' '
      s += str(var[c, i])
    s += ' '
    return s

  print "\nInference Active state"
  for i in xrange(self.cellsPerColumn):
    if printPrevious:
      print formatRow(self.infActiveState['t-1'], i),
    print formatRow(self.infActiveState['t'], i)

  print "Inference Predicted state"
  for i in xrange(self.cellsPerColumn):
    if printPrevious:
      print formatRow(self.infPredictedState['t-1'], i),
    print formatRow(self.infPredictedState['t'], i)

  if printLearnState:
    print "\nLearn Active state"
    for i in xrange(self.cellsPerColumn):
      if printPrevious:
        print formatRow(self.lrnActiveState['t-1'], i),
      print formatRow(self.lrnActiveState['t'], i)

    print "Learn Predicted state"
    for i in xrange(self.cellsPerColumn):
      if printPrevious:
        print formatRow(self.lrnPredictedState['t-1'], i),
      print formatRow(self.lrnPredictedState['t'], i)
def printOutput(self, y):
  """
  Print an output array as integers, one row per cell index.

  :param y: 2-D array indexed as [column, cellIdx]
  """
  print "Output"
  for i in xrange(self.cellsPerColumn):
    for c in xrange(self.numberOfCols):
      print int(y[c, i]),
    print
def printInput(self, x):
  """
  Print an input array as integers on a single line.

  :param x: 1-D array indexed by column
  """
  print "Input"
  for c in xrange(self.numberOfCols):
    print int(x[c]),
  print
def printParameters(self):
  """
  Print the parameter settings for the TM.
  """
  # Topology and activation/matching thresholds.
  print "numberOfCols=", self.numberOfCols
  print "cellsPerColumn=", self.cellsPerColumn
  print "minThreshold=", self.minThreshold
  print "newSynapseCount=", self.newSynapseCount
  print "activationThreshold=", self.activationThreshold
  print
  # Synapse permanence dynamics.
  print "initialPerm=", self.initialPerm
  print "connectedPerm=", self.connectedPerm
  print "permanenceInc=", self.permanenceInc
  print "permanenceDec=", self.permanenceDec
  print "permanenceMax=", self.permanenceMax
  print "globalDecay=", self.globalDecay
  print
  # Sequence-learning behavior.
  print "doPooling=", self.doPooling
  print "segUpdateValidDuration=", self.segUpdateValidDuration
  print "pamLength=", self.pamLength
def printActiveIndices(self, state, andValues=False):
  """
  Print the list of ``[column, cellIdx]`` indices for each of the active cells
  in state.

  :param state: 1-D (per-column) or 2-D (column x cell) array to scan
  :param andValues: (bool) when True, also print the value stored at each
      active position
  """
  if len(state.shape) == 2:
    (cols, cellIdxs) = state.nonzero()
  else:
    # 1-D input: every active column is reported as cell index 0.
    cols = state.nonzero()[0]
    cellIdxs = numpy.zeros(len(cols))

  if len(cols) == 0:
    print "NONE"
    return

  # Group output by column; nonzero() returns indices in row-major order,
  # so entries for the same column are contiguous.
  prevCol = -1
  for (col, cellIdx) in zip(cols, cellIdxs):
    if col != prevCol:
      if prevCol != -1:
        print "] ",
      print "Col %d: [" % (col),
      prevCol = col

    if andValues:
      if len(state.shape) == 2:
        value = state[col, cellIdx]
      else:
        value = state[col]
      print "%d: %s," % (cellIdx, value),
    else:
      print "%d," % (cellIdx),
  print "]"
def printComputeEnd(self, output, learn=False):
  """
  Called at the end of inference to print out various diagnostic
  information based on the current verbosity level.

  :param output: 1-D output array; reshaped to (numberOfCols, cellsPerColumn)
      for display at verbosity 1-2
  :param learn: (bool) whether this compute pass included learning (printed
      for reference only)
  """
  if self.verbosity >= 3:
    # Full dump: summary scores, then each state matrix's active indices
    # (and the raw matrices at verbosity >= 6).
    print "----- computeEnd summary: "
    print "learn:", learn
    # A column bursts when all of its cells are active, so the min over
    # cells is 1 only for bursting columns.
    print "numBurstingCols: %s, " % (
        self.infActiveState['t'].min(axis=1).sum()),
    print "curPredScore2: %s, " % (
        self._internalStats['curPredictionScore2']),
    print "curFalsePosScore: %s, " % (
        self._internalStats['curFalsePositiveScore']),
    print "1-curFalseNegScore: %s, " % (
        1 - self._internalStats['curFalseNegativeScore'])
    print "numSegments: ", self.getNumSegments(),
    print "avgLearnedSeqLength: ", self.avgLearnedSeqLength

    print "----- infActiveState (%d on) ------" % (
        self.infActiveState['t'].sum())
    self.printActiveIndices(self.infActiveState['t'])
    if self.verbosity >= 6:
      self.printState(self.infActiveState['t'])

    print "----- infPredictedState (%d on)-----" % (
        self.infPredictedState['t'].sum())
    self.printActiveIndices(self.infPredictedState['t'])
    if self.verbosity >= 6:
      self.printState(self.infPredictedState['t'])

    print "----- lrnActiveState (%d on) ------" % (
        self.lrnActiveState['t'].sum())
    self.printActiveIndices(self.lrnActiveState['t'])
    if self.verbosity >= 6:
      self.printState(self.lrnActiveState['t'])

    print "----- lrnPredictedState (%d on)-----" % (
        self.lrnPredictedState['t'].sum())
    self.printActiveIndices(self.lrnPredictedState['t'])
    if self.verbosity >= 6:
      self.printState(self.lrnPredictedState['t'])

    print "----- cellConfidence -----"
    self.printActiveIndices(self.cellConfidence['t'], andValues=True)
    if self.verbosity >= 6:
      self.printConfidence(self.cellConfidence['t'])
    print "----- colConfidence -----"
    self.printActiveIndices(self.colConfidence['t'], andValues=True)
    print "----- cellConfidence[t-1] for currently active cells -----"
    cc = self.cellConfidence['t-1'] * self.infActiveState['t']
    self.printActiveIndices(cc, andValues=True)

    if self.verbosity == 4:
      print "Cells, predicted segments only:"
      self.printCells(predictedOnly=True)
    elif self.verbosity >= 5:
      print "Cells, all segments:"
      self.printCells(predictedOnly=False)
    print

  elif self.verbosity >= 1:
    # Terse dump: just the active output indices.
    print "TM: learn:", learn
    print "TM: active outputs(%d):" % len(output.nonzero()[0]),
    self.printActiveIndices(output.reshape(self.numberOfCols,
                                           self.cellsPerColumn))
def printSegmentUpdates(self):
  """
  Print the queued segment updates, one line per (column, cell) key.
  """
  print "=== SEGMENT UPDATES ===, Num = ", len(self.segmentUpdates)
  for key, updateList in self.segmentUpdates.iteritems():
    c, i = key[0], key[1]
    print c, i, updateList
def printCell(self, c, i, onlyActiveSegments=False):
  """
  Print the segments of one cell; cells with no segments print nothing.

  :param c: (int) column index
  :param i: (int) cell index within the column
  :param onlyActiveSegments: (bool) when True, only segments active against
      the current inference state are printed (active ones are marked '*')
  """
  if len(self.cells[c][i]) > 0:
    print "Column", c, "Cell", i, ":",
    print len(self.cells[c][i]), "segment(s)"
    for j, s in enumerate(self.cells[c][i]):
      isActive = self._isSegmentActive(s, self.infActiveState['t'])
      if not onlyActiveSegments or isActive:
        isActiveStr = "*" if isActive else " "
        print " %sSeg #%-3d" % (isActiveStr, j),
        s.debugPrint()
def printCells(self, predictedOnly=False):
  """
  Print every cell's segments via :meth:`printCell`.

  :param predictedOnly: (bool) when True, restrict the dump to cells that are
      currently in the inference predicted state
  """
  if predictedOnly:
    print "--- PREDICTED CELLS ---"
  else:
    print "--- ALL CELLS ---"
  print "Activation threshold=", self.activationThreshold,
  print "min threshold=", self.minThreshold,
  print "connected perm=", self.connectedPerm

  for c in xrange(self.numberOfCols):
    for i in xrange(self.cellsPerColumn):
      if not predictedOnly or self.infPredictedState['t'][c, i]:
        self.printCell(c, i, predictedOnly)
def getNumSegmentsInCell(self, c, i):
  """
  :param c: (int) column index
  :param i: (int) cell index within column
  :returns: (int) the number of segments on cell (c, i)
  """
  # Doc fix: the previous docstring incorrectly said this returns the number
  # of *synapses*; it returns the segment count for the cell.
  return len(self.cells[c][i])
def getNumSynapses(self):
  """
  :returns: (int) the total number of synapses, taken from the second element
      of :meth:`getSegmentInfo`'s result.
  """
  return self.getSegmentInfo()[1]
def getNumSynapsesPerSegmentAvg(self):
  """
  :returns: (float) the average number of synapses per segment; the divisor
      is clamped to at least 1 so an empty TM yields the synapse count.
  """
  segmentCount = max(1, self.getNumSegments())
  return float(self.getNumSynapses()) / segmentCount
def getNumSegments(self):
  """
  :returns: (int) the total number of segments, taken from the first element
      of :meth:`getSegmentInfo`'s result.
  """
  return self.getSegmentInfo()[0]
def getNumCells(self):
  """
  :returns: (int) the total number of cells (columns times cells per column).
  """
  cols, perCol = self.numberOfCols, self.cellsPerColumn
  return cols * perCol
def getSegmentOnCell(self, c, i, segIdx):
  """
  :param c: (int) column index
  :param i: (int) cell index in column
  :param segIdx: (int) segment index to match
  :returns: (list) representing the segment on cell (c, i) with index
      ``segIdx``:
      ::

        [ [segmentID, sequenceSegmentFlag, positiveActivations,
           totalActivations, lastActiveIteration,
           lastPosDutyCycle, lastPosDutyCycleIteration],
          [col1, idx1, perm1],
          [col2, idx2, perm2], ...
        ]
  """
  segment = self.cells[c][i][segIdx]
  header = [segment.segID, segment.isSequenceSeg,
            segment.positiveActivations, segment.totalActivations,
            segment.lastActiveIteration, segment._lastPosDutyCycle,
            segment._lastPosDutyCycleIteration]
  return [header] + segment.syns
class _SegmentUpdate(object):
"""
Class used to carry instructions for updating a segment.
"""
def __init__(self, c, i, seg=None, activeSynapses=[]):
self.columnIdx = c
self.cellIdx = i
self.segment = seg # The segment object itself, not an index (can be None)
self.activeSynapses = activeSynapses
self.sequenceSegment = False
self.phase1Flag = False
# Set true if segment only reaches activationThreshold when including
# not fully connected synapses.
self.weaklyPredicting = False
def write(self, proto):
proto.columnIdx = self.columnIdx
proto.cellIdx = self.cellIdx
self.segment.write(proto.segment)
activeSynapsesProto = proto.init("activeSynapses", len(self.activeSynapses))
for i, idx in enumerate(self.activeSynapses):
activeSynapsesProto[i] = idx
proto.sequenceSegment = self.sequenceSegment
proto.phase1Flag = self.phase1Flag
proto.weaklyPredicting = self.weaklyPredicting
@classmethod
def read(cls, proto, tm):
obj = object.__new__(cls)
obj.columnIdx = proto.columnIdx
obj.cellIdx = proto.cellIdx
obj.segment.read(proto.segment, tm)
obj.activeSynapses = [syn for syn in proto.activeSynapses]
obj.sequenceSegment = proto.sequenceSegment
obj.phase1Flag = proto.phase1Flag
obj.weaklyPredicting = proto.weaklyPredicting
return obj
def __eq__(self, other):
if set(self.__dict__.keys()) != set(other.__dict__.keys()):
return False
for k in self.__dict__:
if self.__dict__[k] != other.__dict__[k]:
return False
return True
def __ne__(self, other):
return not self == other
# Just for debugging
def __str__(self):
return ("Seg update: cell=[%d,%d]" % (self.columnIdx, self.cellIdx) +
", seq seg=" + str(self.sequenceSegment) +
", seg=" + str(self.segment) +
", synapses=" + str(self.activeSynapses))
def _addToSegmentUpdates(self, c, i, segUpdate):
"""
Store a dated potential segment update. The "date" (iteration index) is used
later to determine whether the update is too old and should be forgotten.
This is controlled by parameter ``segUpdateValidDuration``.
:param c: TODO: document
:param i: TODO: document
:param segUpdate: TODO: document
"""
# Sometimes we might be passed an empty update
if segUpdate is None or len(segUpdate.activeSynapses) == 0:
return
key = (c, i) # key = (column index, cell index in column)
# TODO: scan list of updates for that cell and consolidate?
# But watch out for dates!
if self.segmentUpdates.has_key(key):
self.segmentUpdates[key] += [(self.lrnIterationIdx, segUpdate)]
else:
self.segmentUpdates[key] = [(self.lrnIterationIdx, segUpdate)]
def _removeSegmentUpdate(self, updateInfo):
"""
Remove a segment update (called when seg update expires or is processed)
:param updateInfo: (tuple) (creationDate, SegmentUpdate)
"""
# An updateInfo contains (creationDate, SegmentUpdate)
(creationDate, segUpdate) = updateInfo
# Key is stored in segUpdate itself...
key = (segUpdate.columnIdx, segUpdate.cellIdx)
self.segmentUpdates[key].remove(updateInfo)
def _computeOutput(self):
  """
  Computes output for both learning and inference. In both cases, the
  output is the boolean OR of ``activeState`` and ``predictedState`` at ``t``.
  Stores ``currentOutput`` for ``checkPrediction``.

  :returns: flattened 1-D float32 array of the current output
  """
  # TODO: This operation can be sped up by:
  #  1.) Pre-allocating space for the currentOutput
  #  2.) Making predictedState and activeState of type 'float32' up front
  #  3.) Using logical_or(self.predictedState['t'], self.activeState['t'],
  #      self.currentOutput)

  if self.outputType == 'activeState1CellPerCol':
    # Fire only the most confident cell in columns that have 2 or more
    # active cells
    mostActiveCellPerCol = self.cellConfidence['t'].argmax(axis=1)
    self.currentOutput = numpy.zeros(self.infActiveState['t'].shape,
                                     dtype='float32')
    # Turn on the most confident cell in each column. Note here that
    # Columns refers to TM columns, even though each TM column is a row
    # in the numpy array.
    numCols = self.currentOutput.shape[0]
    # Fancy indexing: one (row, col) pair per TM column.
    self.currentOutput[(xrange(numCols), mostActiveCellPerCol)] = 1
    # Don't turn on anything in columns which are not active at all
    activeCols = self.infActiveState['t'].max(axis=1)
    inactiveCols = numpy.where(activeCols==0)[0]
    self.currentOutput[inactiveCols, :] = 0

  elif self.outputType == 'activeState':
    # NOTE: currentOutput aliases infActiveState['t'] here (no copy is made),
    # unlike the other two branches which allocate fresh arrays.
    self.currentOutput = self.infActiveState['t']

  elif self.outputType == 'normal':
    self.currentOutput = numpy.logical_or(self.infPredictedState['t'],
                                          self.infActiveState['t'])

  else:
    raise RuntimeError("Unimplemented outputType")

  return self.currentOutput.reshape(-1).astype('float32')
def _getActiveState(self):
"""
Return the current active state. This is called by the node to
obtain the sequence output of the TM.
:returns: TODO: document
"""
# TODO: This operation can be sped up by making activeState of
# type 'float32' up front.
return self.infActiveState['t'].reshape(-1).astype('float32')
def getPredictedState(self):
  """
  :returns: numpy array of predicted cells, representing the current predicted
      state. ``predictedCells[c][i]`` represents the state of the i'th cell
      in the c'th column.
  """
  predictedCells = self.infPredictedState['t']
  return predictedCells
def predict(self, nSteps):
  """
  This function gives the future predictions for <nSteps> timesteps starting
  from the current TM state. The TM is returned to its original state at the
  end before returning.

  1. We save the TM state.
  2. Loop for nSteps

     a. Turn-on with lateral support from the current active cells
     b. Set the predicted cells as the next step's active cells. This step
        in learn and infer methods use input here to correct the predictions.
        We don't use any input here.

  3. Revert back the TM state to the time before prediction

  :param nSteps: (int) The number of future time steps to be predicted
  :returns: all the future predictions - a numpy array of type "float32" and
      shape (nSteps, numberOfCols). The ith row gives the tm prediction for
      each column at a future timestep (t+i+1).
  """
  # Save the TM dynamic state, we will use to revert back in the end
  pristineTPDynamicState = self._getTPDynamicState()

  assert (nSteps>0)

  # multiStepColumnPredictions holds all the future prediction.
  multiStepColumnPredictions = numpy.zeros((nSteps, self.numberOfCols),
                                           dtype="float32")

  # This is a (nSteps-1)+half loop. Phase 2 in both learn and infer methods
  # already predicts for timestep (t+1). We use that prediction for free and
  # save the half-a-loop of work.
  step = 0
  while True:
    # We get the prediction for the columns in the next time step from
    # the topDownCompute method. It internally uses confidences.
    multiStepColumnPredictions[step, :] = self.topDownCompute()

    # Cleanest way in python to handle one and half loops
    if step == nSteps-1:
      break
    step += 1

    # Copy t-1 into t
    self.infActiveState['t-1'][:, :] = self.infActiveState['t'][:, :]
    self.infPredictedState['t-1'][:, :] = self.infPredictedState['t'][:, :]
    self.cellConfidence['t-1'][:, :] = self.cellConfidence['t'][:, :]

    # Predicted state at "t-1" becomes the active state at "t"
    self.infActiveState['t'][:, :] = self.infPredictedState['t-1'][:, :]

    # Predicted state and confidence are set in phase2.
    self.infPredictedState['t'].fill(0)
    self.cellConfidence['t'].fill(0.0)
    self._inferPhase2()

  # Revert the dynamic state to the saved state
  self._setTPDynamicState(pristineTPDynamicState)

  return multiStepColumnPredictions
def _getTPDynamicStateVariableNames(self):
"""
Any newly added dynamic states in the TM should be added to this list.
Parameters:
--------------------------------------------
retval: The list of names of TM dynamic state variables.
"""
return ["infActiveState",
"infPredictedState",
"lrnActiveState",
"lrnPredictedState",
"cellConfidence",
"colConfidence",
]
def _getTPDynamicState(self,):
"""
Parameters:
--------------------------------------------
retval: A dict with all the dynamic state variable names as keys and
their values at this instant as values.
"""
tpDynamicState = dict()
for variableName in self._getTPDynamicStateVariableNames():
tpDynamicState[variableName] = copy.deepcopy(self.__dict__[variableName])
return tpDynamicState
def _setTPDynamicState(self, tpDynamicState):
"""
Set all the dynamic state variables from the <tpDynamicState> dict.
<tpDynamicState> dict has all the dynamic state variable names as keys and
their values at this instant as values.
We set the dynamic state variables in the tm object with these items.
"""
for variableName in self._getTPDynamicStateVariableNames():
self.__dict__[variableName] = tpDynamicState.pop(variableName)
def _updateAvgLearnedSeqLength(self, prevSeqLength):
"""Update our moving average of learned sequence length."""
if self.lrnIterationIdx < 100:
alpha = 0.5
else:
alpha = 0.1
self.avgLearnedSeqLength = ((1.0 - alpha) * self.avgLearnedSeqLength +
(alpha * prevSeqLength))
def getAvgLearnedSeqLength(self):
  """
  :returns: (float) moving average of learned sequence length
  """
  average = self.avgLearnedSeqLength
  return average
def _inferBacktrack(self, activeColumns):
"""
This "backtracks" our inference state, trying to see if we can lock onto
the current set of inputs by assuming the sequence started up to N steps
ago on start cells.
This will adjust @ref infActiveState['t'] if it does manage to lock on to a
sequence that started earlier. It will also compute infPredictedState['t']
based on the possibly updated @ref infActiveState['t'], so there is no need to
call inferPhase2() after calling inferBacktrack().
This looks at:
- ``infActiveState['t']``
This updates/modifies:
- ``infActiveState['t']``
- ``infPredictedState['t']``
- ``colConfidence['t']``
- ``cellConfidence['t']``
How it works:
This method gets called from :meth:`updateInferenceState` when we detect
either of the following two conditions:
#. The current bottom-up input had too many un-expected columns
#. We fail to generate a sufficient number of predicted columns for the
next time step.
Either of these two conditions indicate that we have fallen out of a
learned sequence.
Rather than simply "giving up" and bursting on the unexpected input
columns, a better approach is to see if perhaps we are in a sequence that
started a few steps ago. The real world analogy is that you are driving
along and suddenly hit a dead-end, you will typically go back a few turns
ago and pick up again from a familiar intersection.
This back-tracking goes hand in hand with our learning methodology, which
always tries to learn again from start cells after it loses context. This
results in a network that has learned multiple, overlapping paths through
the input data, each starting at different points. The lower the global
decay and the more repeatability in the data, the longer each of these
paths will end up being.
The goal of this function is to find out which starting point in the past
leads to the current input with the most context as possible. This gives us
the best chance of predicting accurately going forward. Consider the
following example, where you have learned the following sub-sequences which
have the given frequencies:
::
? - Q - C - D - E 10X seq 0
? - B - C - D - F 1X seq 1
? - B - C - H - I 2X seq 2
? - B - C - D - F 3X seq 3
? - Z - A - B - C - D - J 2X seq 4
? - Z - A - B - C - H - I 1X seq 5
? - Y - A - B - C - D - F 3X seq 6
----------------------------------------
W - X - Z - A - B - C - D <= input history
^
current time step
Suppose, in the current time step, the input pattern is D and you have not
predicted D, so you need to backtrack. Suppose we can backtrack up to 6
steps in the past, which path should we choose? From the table above, we can
see that the correct answer is to assume we are in seq 4. How do we
implement the backtrack to give us this right answer? The current
implementation takes the following approach:
#. Start from the farthest point in the past.
#. For each starting point S, calculate the confidence of the current
input, conf(startingPoint=S), assuming we followed that sequence.
Note that we must have learned at least one sequence that starts at
point S.
#. If conf(startingPoint=S) is significantly different from
conf(startingPoint=S-1), then choose S-1 as the starting point.
The assumption here is that starting point S-1 is the starting point of
a learned sub-sequence that includes the current input in it's path and
that started the longest ago. It thus has the most context and will be
the best predictor going forward.
From the statistics in the above table, we can compute what the confidences
will be for each possible starting point:
::
startingPoint confidence of D
-----------------------------------------
B (t-2) 4/6 = 0.667 (seq 1,3)/(seq 1,2,3)
Z (t-4) 2/3 = 0.667 (seq 4)/(seq 4,5)
First of all, we do not compute any confidences at starting points t-1, t-3,
t-5, t-6 because there are no learned sequences that start at those points.
Notice here that Z is the starting point of the longest sub-sequence leading
up to the current input. Event though starting at t-2 and starting at t-4
give the same confidence value, we choose the sequence starting at t-4
because it gives the most context, and it mirrors the way that learning
extends sequences.
:param activeColumns: (list) of active column indices
"""
# How much input history have we accumulated?
# The current input is always at the end of self._prevInfPatterns (at
# index -1), but it is also evaluated as a potential starting point by
# turning on it's start cells and seeing if it generates sufficient
# predictions going forward.
numPrevPatterns = len(self._prevInfPatterns)
if numPrevPatterns <= 0:
return
# This is an easy to use label for the current time step
currentTimeStepsOffset = numPrevPatterns - 1
# Save our current active state in case we fail to find a place to restart
# todo: save infActiveState['t-1'], infPredictedState['t-1']?
self.infActiveState['backup'][:, :] = self.infActiveState['t'][:, :]
# Save our t-1 predicted state because we will write over it as as evaluate
# each potential starting point.
self.infPredictedState['backup'][:, :] = self.infPredictedState['t-1'][:, :]
# We will record which previous input patterns did not generate predictions
# up to the current time step and remove all the ones at the head of the
# input history queue so that we don't waste time evaluating them again at
# a later time step.
badPatterns = []
# Let's go back in time and replay the recent inputs from start cells and
# see if we can lock onto this current set of inputs that way.
#
# Start the farthest back and work our way forward. For each starting point,
# See if firing on start cells at that point would predict the current
# input as well as generate sufficient predictions for the next time step.
#
# We want to pick the point closest to the current time step that gives us
# the relevant confidence. Think of this example, where we are at D and need
# to
# A - B - C - D
# decide if we should backtrack to C, B, or A. Suppose B-C-D is a high order
# sequence and A is unrelated to it. If we backtrock to B would we get a
# certain confidence of D, but if went went farther back, to A, the
# confidence wouldn't change, since A has no impact on the B-C-D series.
#
# So, our strategy will be to pick the "B" point, since choosing the A point
# does not impact our confidences going forward at all.
inSequence = False
candConfidence = None
candStartOffset = None
for startOffset in range(0, numPrevPatterns):
# If we have a candidate already in the past, don't bother falling back
# to start cells on the current input.
if startOffset == currentTimeStepsOffset and candConfidence is not None:
break
if self.verbosity >= 3:
print (
"Trying to lock-on using startCell state from %d steps ago:" % (
numPrevPatterns - 1 - startOffset),
self._prevInfPatterns[startOffset])
# Play through starting from starting point 'startOffset'
inSequence = False
for offset in range(startOffset, numPrevPatterns):
# If we are about to set the active columns for the current time step
# based on what we predicted, capture and save the total confidence of
# predicting the current input
if offset == currentTimeStepsOffset:
totalConfidence = self.colConfidence['t'][activeColumns].sum()
# Compute activeState[t] given bottom-up and predictedState[t-1]
self.infPredictedState['t-1'][:, :] = self.infPredictedState['t'][:, :]
inSequence = self._inferPhase1(self._prevInfPatterns[offset],
useStartCells = (offset == startOffset))
if not inSequence:
break
# Compute predictedState['t'] given activeState['t']
if self.verbosity >= 3:
print (" backtrack: computing predictions from ",
self._prevInfPatterns[offset])
inSequence = self._inferPhase2()
if not inSequence:
break
# If starting from startOffset got lost along the way, mark it as an
# invalid start point.
if not inSequence:
badPatterns.append(startOffset)
continue
# If we got to here, startOffset is a candidate starting point.
# Save this state as a candidate state. It will become the chosen state if
# we detect a change in confidences starting at a later startOffset
candConfidence = totalConfidence
candStartOffset = startOffset
if self.verbosity >= 3 and startOffset != currentTimeStepsOffset:
print (" # Prediction confidence of current input after starting %d "
"steps ago:" % (numPrevPatterns - 1 - startOffset),
totalConfidence)
if candStartOffset == currentTimeStepsOffset: # no more to try
break
self.infActiveState['candidate'][:, :] = self.infActiveState['t'][:, :]
self.infPredictedState['candidate'][:, :] = (
self.infPredictedState['t'][:, :])
self.cellConfidence['candidate'][:, :] = self.cellConfidence['t'][:, :]
self.colConfidence['candidate'][:] = self.colConfidence['t'][:]
break
# If we failed to lock on at any starting point, fall back to the original
# active state that we had on entry
if candStartOffset is None:
if self.verbosity >= 3:
print "Failed to lock on. Falling back to bursting all unpredicted."
self.infActiveState['t'][:, :] = self.infActiveState['backup'][:, :]
self._inferPhase2()
else:
if self.verbosity >= 3:
print ("Locked on to current input by using start cells from %d "
" steps ago:" % (numPrevPatterns - 1 - candStartOffset),
self._prevInfPatterns[candStartOffset])
# Install the candidate state, if it wasn't the last one we evaluated.
if candStartOffset != currentTimeStepsOffset:
self.infActiveState['t'][:, :] = self.infActiveState['candidate'][:, :]
self.infPredictedState['t'][:, :] = (
self.infPredictedState['candidate'][:, :])
self.cellConfidence['t'][:, :] = self.cellConfidence['candidate'][:, :]
self.colConfidence['t'][:] = self.colConfidence['candidate'][:]
# Remove any useless patterns at the head of the previous input pattern
# queue.
for i in range(numPrevPatterns):
if (i in badPatterns or
(candStartOffset is not None and i <= candStartOffset)):
if self.verbosity >= 3:
print ("Removing useless pattern from history:",
self._prevInfPatterns[0])
self._prevInfPatterns.pop(0)
else:
break
# Restore the original predicted state.
self.infPredictedState['t-1'][:, :] = self.infPredictedState['backup'][:, :]
def _inferPhase1(self, activeColumns, useStartCells):
"""
Update the inference active state from the last set of predictions
and the current bottom-up.
This looks at:
- ``infPredictedState['t-1']``
This modifies:
- ``infActiveState['t']``
:param activeColumns: (list) active bottom-ups
:param useStartCells: (bool) If true, ignore previous predictions and simply
turn on the start cells in the active columns
:returns: (bool) True if the current input was sufficiently predicted, OR if
we started over on startCells. False indicates that the current input
was NOT predicted, and we are now bursting on most columns.
"""
# Init to zeros to start
self.infActiveState['t'].fill(0)
# Phase 1 - turn on predicted cells in each column receiving bottom-up
# If we are following a reset, activate only the start cell in each
# column that has bottom-up
numPredictedColumns = 0
if useStartCells:
for c in activeColumns:
self.infActiveState['t'][c, 0] = 1
# else, turn on any predicted cells in each column. If there are none, then
# turn on all cells (burst the column)
else:
for c in activeColumns:
predictingCells = numpy.where(self.infPredictedState['t-1'][c] == 1)[0]
numPredictingCells = len(predictingCells)
if numPredictingCells > 0:
self.infActiveState['t'][c, predictingCells] = 1
numPredictedColumns += 1
else:
self.infActiveState['t'][c, :] = 1 # whole column bursts
# Did we predict this input well enough?
if useStartCells or numPredictedColumns >= 0.50 * len(activeColumns):
return True
else:
return False
def _inferPhase2(self):
    """
    Phase 2 for the inference state. This computes the predicted state, then
    checks to ensure that the predicted state is not over-saturated, i.e.
    look too close like a burst. This indicates that there were so many
    separate paths learned from the current input columns to the predicted
    input columns that bursting on the current input columns is most likely
    generated mix and match errors on cells in the predicted columns. If
    we detect this situation, we instead turn on only the start cells in the
    current active columns and re-generate the predicted state from those.

    This looks at:
    - ``infActiveState['t']``

    This modifies:
    - ``infPredictedState['t']``
    - ``colConfidence['t']``
    - ``cellConfidence['t']``

    :returns: (bool) True if we have a decent guess as to the next input.
        Returning False from here indicates to the caller that we have
        reached the end of a learned sequence.
    """
    # Init to zeros to start
    self.infPredictedState['t'].fill(0)
    self.cellConfidence['t'].fill(0)
    self.colConfidence['t'].fill(0)

    # Phase 2 - Compute new predicted state and update cell and column
    # confidences
    for c in xrange(self.numberOfCols):
        # For each cell in the column
        for i in xrange(self.cellsPerColumn):
            # For each segment in the cell
            for s in self.cells[c][i]:
                # See if it has the min number of active synapses. Note that
                # this activity level counts ALL synapses, not just the
                # connected ones.
                numActiveSyns = self._getSegmentActivityLevel(
                    s, self.infActiveState['t'], connectedSynapsesOnly=False)
                if numActiveSyns < self.activationThreshold:
                    continue

                # Incorporate the confidence into the owner cell and column.
                # The segment's duty cycle acts as its confidence weight.
                if self.verbosity >= 6:
                    print "incorporating DC from cell[%d,%d]: " % (c, i),
                    s.debugPrint()
                dc = s.dutyCycle()
                self.cellConfidence['t'][c, i] += dc
                self.colConfidence['t'][c] += dc

                # If we reach threshold on the connected synapses, predict it
                # If not active, skip over it
                if self._isSegmentActive(s, self.infActiveState['t']):
                    self.infPredictedState['t'][c, i] = 1

    # Normalize column and cell confidences so that (when any segment
    # contributed) the column confidences sum to 1.
    sumConfidences = self.colConfidence['t'].sum()
    if sumConfidences > 0:
        self.colConfidence['t'] /= sumConfidences
        self.cellConfidence['t'] /= sumConfidences

    # Are we predicting the required minimum number of columns? The average
    # input density serves as the expected column count for a valid input.
    numPredictedCols = self.infPredictedState['t'].max(axis=1).sum()
    if numPredictedCols >= 0.5 * self.avgInputDensity:
        return True
    else:
        return False
def _updateInferenceState(self, activeColumns):
    """
    Update the inference state. Called from :meth:`compute` on every iteration.

    :param activeColumns: (list) active column indices.
    """
    # Roll the 't' inference states back into 't-1'. Each phase below
    # zeroes/initializes whatever 't' state it owns.
    self.infActiveState['t-1'][:, :] = self.infActiveState['t'][:, :]
    self.infPredictedState['t-1'][:, :] = self.infPredictedState['t'][:, :]
    self.cellConfidence['t-1'][:, :] = self.cellConfidence['t'][:, :]
    self.colConfidence['t-1'][:] = self.colConfidence['t'][:]

    # Record this input in the inference history, keeping the queue bounded.
    if self.maxInfBacktrack > 0:
        if len(self._prevInfPatterns) > self.maxInfBacktrack:
            self._prevInfPatterns.pop(0)
        self._prevInfPatterns.append(activeColumns)

    # Phase 1: derive the active state from last step's predictions plus the
    # current bottom-up input (start cells if a reset just occurred).
    if not self._inferPhase1(activeColumns, self.resetCalled):
        # Too much of the input was unpredicted; replay recent inputs from
        # start cells and try to lock on at an earlier timestep instead.
        if self.verbosity >= 3:
            print ("Too much unpredicted input, re-tracing back to try and lock on "
                   "at an earlier timestep.")
        # _inferBacktrack() will call _inferPhase2() for us.
        self._inferBacktrack(activeColumns)
        return

    # Phase 2: compute the predicted cells and the cell/column confidences.
    if not self._inferPhase2():
        # Predictions going forward are too weak; backtrack here as well.
        if self.verbosity >= 3:
            print ("Not enough predictions going forward, "
                   "re-tracing back to try and lock on at an earlier timestep.")
        # _inferBacktrack() will call _inferPhase2() for us.
        self._inferBacktrack(activeColumns)
def _learnBacktrackFrom(self, startOffset, readOnly=True):
    """
    A utility method called from _learnBacktrack. This will backtrack
    starting from the given startOffset in our prevLrnPatterns queue.

    It returns True if the backtrack was successful and we managed to get
    predictions all the way up to the current time step.

    If readOnly, then no segments are updated or modified, otherwise, all
    segment updates that belong to the given path are applied.

    This updates/modifies:
    - lrnActiveState['t']

    This trashes:
    - lrnPredictedState['t']
    - lrnPredictedState['t-1']
    - lrnActiveState['t-1']

    :param startOffset: Start offset within the prevLrnPatterns input history
    :param readOnly: (bool) If True, this is a pure replay -- no segment
        updates are queued or applied
    :return: True if we managed to lock on to a sequence that started
        earlier. If False, we lost predictions somewhere along the way
        leading up to the current time.
    """
    # How much input history have we accumulated?
    # The current input is always at the end of self._prevLrnPatterns (at
    # index -1).
    numPrevPatterns = len(self._prevLrnPatterns)

    # This is an easy to use label for the current time step
    currentTimeStepsOffset = numPrevPatterns - 1

    # Clear out any old segment updates. learnPhase2() adds to the segment
    # updates if we're not readOnly
    if not readOnly:
        self.segmentUpdates = {}

    # Status message
    if self.verbosity >= 3:
        if readOnly:
            print (
                "Trying to lock-on using startCell state from %d steps ago:" % (
                    numPrevPatterns - 1 - startOffset),
                self._prevLrnPatterns[startOffset])
        else:
            print (
                "Locking on using startCell state from %d steps ago:" % (
                    numPrevPatterns - 1 - startOffset),
                self._prevLrnPatterns[startOffset])

    # Play through up to the current time step
    inSequence = True
    for offset in range(startOffset, numPrevPatterns):

        # Copy predicted and active states into t-1
        self.lrnPredictedState['t-1'][:, :] = self.lrnPredictedState['t'][:, :]
        self.lrnActiveState['t-1'][:, :] = self.lrnActiveState['t'][:, :]

        # Get the input pattern
        inputColumns = self._prevLrnPatterns[offset]

        # Apply segment updates from the last set of predictions
        if not readOnly:
            self._processSegmentUpdates(inputColumns)

        # Phase 1:
        # Compute activeState[t] given bottom-up and predictedState[t-1].
        # On the starting step we force start cells on instead of using
        # predictions.
        if offset == startOffset:
            self.lrnActiveState['t'].fill(0)
            for c in inputColumns:
                self.lrnActiveState['t'][c, 0] = 1
            inSequence = True
        else:
            # Uses lrnActiveState['t-1'] and lrnPredictedState['t-1']
            # computes lrnActiveState['t']
            inSequence = self._learnPhase1(inputColumns, readOnly=readOnly)

        # Break out immediately if we fell out of sequence or reached the current
        # time step
        if not inSequence or offset == currentTimeStepsOffset:
            break

        # Phase 2:
        # Computes predictedState['t'] given activeState['t'] and also queues
        # up active segments into self.segmentUpdates, unless this is readOnly
        if self.verbosity >= 3:
            print " backtrack: computing predictions from ", inputColumns
        self._learnPhase2(readOnly=readOnly)

    # Return whether or not this starting point was valid
    return inSequence
def _learnBacktrack(self):
"""
This "backtracks" our learning state, trying to see if we can lock onto
the current set of inputs by assuming the sequence started up to N steps
ago on start cells.
This will adjust @ref lrnActiveState['t'] if it does manage to lock on to a
sequence that started earlier.
:returns: >0 if we managed to lock on to a sequence that started
earlier. The value returned is how many steps in the
past we locked on.
If 0 is returned, the caller needs to change active
state to start on start cells.
How it works:
-------------------------------------------------------------------
This method gets called from updateLearningState when we detect either of
the following two conditions:
#. Our PAM counter (@ref pamCounter) expired
#. We reached the max allowed learned sequence length
Either of these two conditions indicate that we want to start over on start
cells.
Rather than start over on start cells on the current input, we can
accelerate learning by backtracking a few steps ago and seeing if perhaps
a sequence we already at least partially know already started.
This updates/modifies:
- @ref lrnActiveState['t']
This trashes:
- @ref lrnActiveState['t-1']
- @ref lrnPredictedState['t']
- @ref lrnPredictedState['t-1']
"""
# How much input history have we accumulated?
# The current input is always at the end of self._prevInfPatterns (at
# index -1), and is not a valid startingOffset to evaluate.
numPrevPatterns = len(self._prevLrnPatterns) - 1
if numPrevPatterns <= 0:
if self.verbosity >= 3:
print "lrnBacktrack: No available history to backtrack from"
return False
# We will record which previous input patterns did not generate predictions
# up to the current time step and remove all the ones at the head of the
# input history queue so that we don't waste time evaluating them again at
# a later time step.
badPatterns = []
# Let's go back in time and replay the recent inputs from start cells and
# see if we can lock onto this current set of inputs that way.
#
# Start the farthest back and work our way forward. For each starting point,
# See if firing on start cells at that point would predict the current
# input.
#
# We want to pick the point farthest in the past that has continuity
# up to the current time step
inSequence = False
for startOffset in range(0, numPrevPatterns):
# Can we backtrack from startOffset?
inSequence = self._learnBacktrackFrom(startOffset, readOnly=True)
# Done playing through the sequence from starting point startOffset
# Break out as soon as we find a good path
if inSequence:
break
# Take this bad starting point out of our input history so we don't
# try it again later.
badPatterns.append(startOffset)
# If we failed to lock on at any starting point, return failure. The caller
# will start over again on start cells
if not inSequence:
if self.verbosity >= 3:
print ("Failed to lock on. Falling back to start cells on current "
"time step.")
# Nothing in our input history was a valid starting point, so get rid
# of it so we don't try any of them again at a later iteration
self._prevLrnPatterns = []
return False
# We did find a valid starting point in the past. Now, we need to
# re-enforce all segments that became active when following this path.
if self.verbosity >= 3:
print ("Discovered path to current input by using start cells from %d "
"steps ago:" % (numPrevPatterns - startOffset),
self._prevLrnPatterns[startOffset])
self._learnBacktrackFrom(startOffset, readOnly=False)
# Remove any useless patterns at the head of the input pattern history
# queue.
for i in range(numPrevPatterns):
if i in badPatterns or i <= startOffset:
if self.verbosity >= 3:
print ("Removing useless pattern from history:",
self._prevLrnPatterns[0])
self._prevLrnPatterns.pop(0)
else:
break
return numPrevPatterns - startOffset
def _learnPhase1(self, activeColumns, readOnly=False):
    """
    Compute the learning active state given the predicted state and
    the bottom-up input.

    :param activeColumns: (list) active bottom-up column indices
    :param readOnly: (bool) True if being called from backtracking logic.
        This tells us not to increment any segment duty cycles or queue up
        any updates.
    :returns: (bool) True if the current input was sufficiently predicted, OR
        if we started over on startCells. False indicates that the current
        input was NOT predicted well enough to consider it as "inSequence".

    This looks at:
    - lrnActiveState['t-1']
    - lrnPredictedState['t-1']

    This modifies:
    - lrnActiveState['t']
    - lrnActiveState['t-1']
    """
    # Save previous active state and start out on a clean slate
    self.lrnActiveState['t'].fill(0)

    # For each column, turn on the predicted cell. There will always be at most
    # one predicted cell per column
    numUnpredictedColumns = 0
    for c in activeColumns:
        predictingCells = numpy.where(self.lrnPredictedState['t-1'][c] == 1)[0]
        numPredictedCells = len(predictingCells)
        assert numPredictedCells <= 1

        # If we have a predicted cell, turn it on. The segment's posActivation
        # count will have already been incremented by processSegmentUpdates
        if numPredictedCells == 1:
            i = predictingCells[0]
            self.lrnActiveState['t'][c, i] = 1
            continue

        numUnpredictedColumns += 1
        # When backtracking read-only we only need the active state, not any
        # learning side effects, so skip the reinforcement below.
        if readOnly:
            continue

        # If no predicted cell, pick the closest matching one to reinforce, or
        # if none exists, create a new segment on a cell in that column
        i, s, numActive = self._getBestMatchingCell(
            c, self.lrnActiveState['t-1'], self.minThreshold)
        if s is not None and s.isSequenceSegment():
            if self.verbosity >= 4:
                print "Learn branch 0, found segment match. Learning on col=", c
            self.lrnActiveState['t'][c, i] = 1
            segUpdate = self._getSegmentActiveSynapses(
                c, i, s, self.lrnActiveState['t-1'], newSynapses = True)
            s.totalActivations += 1
            # This will update the permanences, posActivationsCount, and the
            # lastActiveIteration (age).
            trimSegment = self._adaptSegment(segUpdate)
            if trimSegment:
                self._trimSegmentsInCell(c, i, [s], minPermanence = 0.00001,
                                         minNumSyns = 0)
        # If no close match exists, create a new one
        else:
            # Choose a cell in this column to add a new segment to
            i = self._getCellForNewSegment(c)
            if (self.verbosity >= 4):
                print "Learn branch 1, no match. Learning on col=", c,
                print ", newCellIdxInCol=", i
            self.lrnActiveState['t'][c, i] = 1
            segUpdate = self._getSegmentActiveSynapses(
                c, i, None, self.lrnActiveState['t-1'], newSynapses=True)
            segUpdate.sequenceSegment = True # Make it a sequence segment
            self._adaptSegment(segUpdate)  # No need to check whether perm reached 0

    # Determine if we are out of sequence or not: "in sequence" means fewer
    # than half of the bottom-up columns were unpredicted.
    numBottomUpColumns = len(activeColumns)
    if numUnpredictedColumns < numBottomUpColumns / 2:
        return True   # in sequence
    else:
        return False  # out of sequence
def _learnPhase2(self, readOnly=False):
    """
    Compute the predicted segments given the current set of active cells.

    :param readOnly: (bool) True if being called from backtracking logic.
        This tells us not to increment any segment duty cycles or queue up
        any updates.

    This computes the lrnPredictedState['t'] and queues up any segments that
    became active (and the list of active synapses for each segment) into
    the segmentUpdates queue.

    This looks at:
    - lrnActiveState['t']

    This modifies:
    - lrnPredictedState['t']
    - segmentUpdates
    """
    # Clear out predicted state to start with
    self.lrnPredictedState['t'].fill(0)

    # Compute new predicted state. When computing predictions for
    # phase 2, we predict at most one cell per column (the one with the best
    # matching segment).
    for c in xrange(self.numberOfCols):
        # Is there a cell predicted to turn on in this column?
        i, s, numActive = self._getBestMatchingCell(
            c, self.lrnActiveState['t'], minThreshold = self.activationThreshold)
        if i is None:
            continue

        # Turn on the predicted state for the best matching cell and queue
        # the pertinent segment up for an update, which will get processed if
        # the cell receives bottom up in the future.
        self.lrnPredictedState['t'][c, i] = 1
        if readOnly:
            continue

        # Queue up this segment for updating; grow new synapses only when the
        # segment has fewer than newSynapseCount active ones.
        segUpdate = self._getSegmentActiveSynapses(
            c, i, s, activeState=self.lrnActiveState['t'],
            newSynapses=(numActive < self.newSynapseCount))

        s.totalActivations += 1    # increment totalActivations
        self._addToSegmentUpdates(c, i, segUpdate)

        if self.doPooling:
            # creates a new pooling segment if no best matching segment found
            # sum(all synapses) >= minThreshold, "weak" activation
            predSegment = self._getBestMatchingSegment(c, i,
                                                       self.lrnActiveState['t-1'])
            segUpdate = self._getSegmentActiveSynapses(c, i, predSegment,
                                                       self.lrnActiveState['t-1'],
                                                       newSynapses=True)
            self._addToSegmentUpdates(c, i, segUpdate)
def _updateLearningState(self, activeColumns):
    """
    Update the learning state. Called from :meth:`compute` on every iteration.

    :param activeColumns: (list) active column indices
    """
    # Copy predicted and active states into t-1
    self.lrnPredictedState['t-1'][:, :] = self.lrnPredictedState['t'][:, :]
    self.lrnActiveState['t-1'][:, :] = self.lrnActiveState['t'][:, :]

    # Update our learning input history, keeping the queue bounded by
    # maxLrnBacktrack entries plus the current input at the end.
    if self.maxLrnBacktrack > 0:
        if len(self._prevLrnPatterns) > self.maxLrnBacktrack:
            self._prevLrnPatterns.pop(0)
        self._prevLrnPatterns.append(activeColumns)
        if self.verbosity >= 4:
            print "Previous learn patterns: \n"
            print self._prevLrnPatterns

    # Process queued up segment updates, now that we have bottom-up, we
    # can update the permanences on the cells that we predicted to turn on
    # and did receive bottom-up
    self._processSegmentUpdates(activeColumns)

    # Decrement the PAM counter if it is running and increment our learned
    # sequence length
    if self.pamCounter > 0:
        self.pamCounter -= 1
    self.learnedSeqLength += 1

    # Phase 1 - turn on the predicted cell in each column that received
    # bottom-up. If there was no predicted cell, pick one to learn to.
    if not self.resetCalled:
        # Uses lrnActiveState['t-1'] and lrnPredictedState['t-1']
        # computes lrnActiveState['t']
        inSequence = self._learnPhase1(activeColumns)

        # Reset our PAM counter if we are in sequence
        if inSequence:
            self.pamCounter = self.pamLength

    # Print status of PAM counter, learned sequence length
    if self.verbosity >= 3:
        print "pamCounter = ", self.pamCounter, "seqLength = ", \
            self.learnedSeqLength

    # Start over on start cells if any of the following occur:
    #  1.) A reset was just called
    #  2.) We have been too long out of sequence (the pamCounter has expired)
    #  3.) We have reached maximum allowed sequence length.
    #
    # Note that, unless we are following a reset, we also just learned or
    # re-enforced connections to the current set of active columns because
    # this input is still a valid prediction to learn.
    #
    # It is especially helpful to learn the connections to this input when
    # you have a maxSeqLength constraint in place. Otherwise, you will have
    # no continuity at all between sub-sequences of length maxSeqLength.
    if (self.resetCalled or self.pamCounter == 0 or
            (self.maxSeqLength != 0 and
             self.learnedSeqLength >= self.maxSeqLength)):
        if self.verbosity >= 3:
            if self.resetCalled:
                print "Starting over:", activeColumns, "(reset was called)"
            elif self.pamCounter == 0:
                print "Starting over:", activeColumns, "(PAM counter expired)"
            else:
                print "Starting over:", activeColumns, "(reached maxSeqLength)"

        # Update average learned sequence length - this is a diagnostic statistic.
        # When the PAM counter expired, the last pamLength steps were out of
        # sequence, so subtract them out of the measured length.
        if self.pamCounter == 0:
            seqLength = self.learnedSeqLength - self.pamLength
        else:
            seqLength = self.learnedSeqLength
        if self.verbosity >= 3:
            print " learned sequence length was:", seqLength
        self._updateAvgLearnedSeqLength(seqLength)

        # Backtrack to an earlier starting point, if we find one
        backSteps = 0
        if not self.resetCalled:
            backSteps = self._learnBacktrack()

        # Start over in the current time step if reset was called, or we couldn't
        # backtrack.
        if self.resetCalled or backSteps is None or backSteps == 0:
            backSteps = 0
            self.lrnActiveState['t'].fill(0)
            for c in activeColumns:
                self.lrnActiveState['t'][c, 0] = 1

            # Remove any old input history patterns
            self._prevLrnPatterns = []

        # Reset PAM counter
        self.pamCounter = self.pamLength
        self.learnedSeqLength = backSteps

        # Clear out any old segment updates from prior sequences
        self.segmentUpdates = {}

    # Phase 2 - Compute new predicted state. When computing predictions for
    # phase 2, we predict at most one cell per column (the one with the best
    # matching segment).
    self._learnPhase2()
def compute(self, bottomUpInput, enableLearn, enableInference=None):
    """
    Handle one compute, possibly learning.

    .. note:: It is an error to have both ``enableLearn`` and
       ``enableInference`` set to False

    .. note:: By default, we don't compute the inference output when learning
       because it slows things down, but you can override this by passing
       in True for ``enableInference``.

    :param bottomUpInput: The bottom-up input as numpy list, typically from a
        spatial pooler.
    :param enableLearn: (bool) If true, perform learning
    :param enableInference: (bool) If None, default behavior is to disable the
        inference output when ``enableLearn`` is on. If true, compute the
        inference output. If false, do not compute the inference output.
    :returns: the TM output, as produced by ``self._computeOutput()``
    """
    # As a speed optimization for now (until we need online learning), skip
    # computing the inference output while learning
    if enableInference is None:
        if enableLearn:
            enableInference = False
        else:
            enableInference = True

    assert (enableLearn or enableInference)

    # Get the list of columns that have bottom-up
    activeColumns = bottomUpInput.nonzero()[0]
    if enableLearn:
        self.lrnIterationIdx += 1
    self.iterationIdx += 1

    if self.verbosity >= 3:
        print "\n==== PY Iteration: %d =====" % (self.iterationIdx)
        print "Active cols:", activeColumns

    # Update segment duty cycles if we are crossing a "tier"
    # We determine if it's time to update the segment duty cycles. Since the
    # duty cycle calculation is a moving average based on a tiered alpha, it is
    # important that we update all segments on each tier boundary
    if enableLearn:
        if self.lrnIterationIdx in Segment.dutyCycleTiers:
            for c, i in itertools.product(xrange(self.numberOfCols),
                                          xrange(self.cellsPerColumn)):
                for segment in self.cells[c][i]:
                    segment.dutyCycle()

    # Update the average input density (exponential moving average with
    # alpha = 0.01; seeded with the first input's density).
    if self.avgInputDensity is None:
        self.avgInputDensity = len(activeColumns)
    else:
        self.avgInputDensity = (0.99 * self.avgInputDensity +
                                0.01 * len(activeColumns))

    # First, update the inference state
    # As a speed optimization for now (until we need online learning), skip
    # computing the inference output while learning
    if enableInference:
        self._updateInferenceState(activeColumns)

    # Next, update the learning state
    if enableLearn:
        self._updateLearningState(activeColumns)

        # Apply global decay, and remove synapses and/or segments.
        # Synapses are removed if their permanence value is <= 0.
        # Segments are removed when they don't have synapses anymore.
        # Removal of synapses can trigger removal of whole segments!
        # todo: isolate the synapse/segment retraction logic so that
        # it can be called in adaptSegments, in the case where we
        # do global decay only episodically.
        if self.globalDecay > 0.0 and ((self.lrnIterationIdx % self.maxAge) == 0):
            for c, i in itertools.product(xrange(self.numberOfCols),
                                          xrange(self.cellsPerColumn)):
                segsToDel = [] # collect and remove outside the loop
                for segment in self.cells[c][i]:
                    age = self.lrnIterationIdx - segment.lastActiveIteration
                    if age <= self.maxAge:
                        continue
                    synsToDel = [] # collect and remove outside the loop
                    for synapse in segment.syns:
                        synapse[2] = synapse[2] - self.globalDecay # decrease permanence
                        if synapse[2] <= 0:
                            synsToDel.append(synapse) # add to list to delete
                    # 1 for sequenceSegment flag
                    if len(synsToDel) == segment.getNumSynapses():
                        segsToDel.append(segment) # will remove the whole segment
                    elif len(synsToDel) > 0:
                        for syn in synsToDel: # remove some synapses on segment
                            segment.syns.remove(syn)
                for seg in segsToDel: # remove some segments of this cell
                    self._cleanUpdatesList(c, i, seg)
                    self.cells[c][i].remove(seg)

    # Update the prediction score stats
    # Learning always includes inference
    if self.collectStats:
        if enableInference:
            predictedState = self.infPredictedState['t-1']
        else:
            predictedState = self.lrnPredictedState['t-1']
        self._updateStatsInferEnd(self._internalStats,
                                  activeColumns,
                                  predictedState,
                                  self.colConfidence['t-1'])

    # Finally return the TM output
    output = self._computeOutput()

    # Print diagnostic information based on the current verbosity level
    self.printComputeEnd(output, learn=enableLearn)

    self.resetCalled = False
    return output
def infer(self, bottomUpInput):
    """
    Run one inference-only compute step (no learning).

    Convenience wrapper around :meth:`compute` with ``enableLearn=False``.

    :param bottomUpInput: The bottom-up input as numpy list, typically from a
        spatial pooler.
    :return: the TM output of :meth:`compute`
    """
    return self.compute(bottomUpInput, enableLearn=False)
def learn(self, bottomUpInput, enableInference=None):
    """
    Run one compute step with learning enabled.

    Convenience wrapper around :meth:`compute` with ``enableLearn=True``.

    :param bottomUpInput: The bottom-up input as numpy list, typically from a
        spatial pooler.
    :param enableInference: (bool or None) passed through to :meth:`compute`;
        when None, compute skips the inference output while learning.
    :return: the TM output of :meth:`compute`
    """
    return self.compute(bottomUpInput, enableLearn=True,
                        enableInference=enableInference)
def _columnConfidences(self):
    """
    Returns the stored column confidences from the last compute.

    :returns: ``colConfidence['t']``, the per-column confidence scores
        produced by the most recent inference step
    """
    return self.colConfidence['t']
def topDownCompute(self):
    """
    For now, we will assume there is no one above us and that bottomUpOut is
    simply the output that corresponds to our currently stored column
    confidences.

    :returns: the same thing as :meth:`_columnConfidences`
    """
    # Simply return the column confidences
    return self._columnConfidences()
def _trimSegmentsInCell(self, colIdx, cellIdx, segList, minPermanence,
minNumSyns):
"""
This method goes through a list of segments for a given cell and
deletes all synapses whose permanence is less than minPermanence and deletes
any segments that have less than minNumSyns synapses remaining.
:param colIdx Column index
:param cellIdx Cell index within the column
:param segList List of segment references
:param minPermanence Any syn whose permamence is 0 or < minPermanence will
be deleted.
:param minNumSyns Any segment with less than minNumSyns synapses remaining
in it will be deleted.
:returns: tuple (numSegsRemoved, numSynsRemoved)
"""
# Fill in defaults
if minPermanence is None:
minPermanence = self.connectedPerm
if minNumSyns is None:
minNumSyns = self.activationThreshold
# Loop through all segments
nSegsRemoved, nSynsRemoved = 0, 0
segsToDel = [] # collect and remove segments outside the loop
for segment in segList:
# List if synapses to delete
synsToDel = [syn for syn in segment.syns if syn[2] < minPermanence]
if len(synsToDel) == len(segment.syns):
segsToDel.append(segment) # will remove the whole segment
else:
if len(synsToDel) > 0:
for syn in synsToDel: # remove some synapses on segment
segment.syns.remove(syn)
nSynsRemoved += 1
if len(segment.syns) < minNumSyns:
segsToDel.append(segment)
# Remove segments that don't have enough synapses and also take them
# out of the segment update list, if they are in there
nSegsRemoved += len(segsToDel)
for seg in segsToDel: # remove some segments of this cell
self._cleanUpdatesList(colIdx, cellIdx, seg)
self.cells[colIdx][cellIdx].remove(seg)
nSynsRemoved += len(seg.syns)
return nSegsRemoved, nSynsRemoved
def trimSegments(self, minPermanence=None, minNumSyns=None):
"""
This method deletes all synapses whose permanence is less than
minPermanence and deletes any segments that have less than
minNumSyns synapses remaining.
:param minPermanence: (float) Any syn whose permanence is 0 or <
``minPermanence`` will be deleted. If None is passed in, then
``self.connectedPerm`` is used.
:param minNumSyns: (int) Any segment with less than ``minNumSyns`` synapses
remaining in it will be deleted. If None is passed in, then
``self.activationThreshold`` is used.
:returns: (tuple) ``numSegsRemoved``, ``numSynsRemoved``
"""
# Fill in defaults
if minPermanence is None:
minPermanence = self.connectedPerm
if minNumSyns is None:
minNumSyns = self.activationThreshold
# Loop through all cells
totalSegsRemoved, totalSynsRemoved = 0, 0
for c, i in itertools.product(xrange(self.numberOfCols),
xrange(self.cellsPerColumn)):
(segsRemoved, synsRemoved) = self._trimSegmentsInCell(
colIdx=c, cellIdx=i, segList=self.cells[c][i],
minPermanence=minPermanence, minNumSyns=minNumSyns)
totalSegsRemoved += segsRemoved
totalSynsRemoved += synsRemoved
# Print all cells if verbosity says to
if self.verbosity >= 5:
print "Cells, all segments:"
self.printCells(predictedOnly=False)
return totalSegsRemoved, totalSynsRemoved
def _cleanUpdatesList(self, col, cellIdx, seg):
  """
  Drop every queued segment update that refers to segment ``seg`` owned by
  cell (col, cellIdx).

  NOTE: logically, we need to do this when we delete segments, so that if
  an update refers to a segment that was just deleted, we also remove
  that update from the update list. However, I haven't seen it trigger
  in any of the unit tests yet, so it might mean that it's not needed
  and that situation doesn't occur, by construction.
  """
  # TODO: check if the situation described in the docstring above actually
  # occurs.
  for key, updateList in self.segmentUpdates.iteritems():
    if key[0] == col and key[1] == cellIdx:
      # Entries are (createDate, segUpdate) pairs; match on the segment.
      for update in updateList:
        if update[1].segment == seg:
          self._removeSegmentUpdate(update)
def finishLearning(self):
  """
  Called when learning has been completed. This method just calls
  :meth:`trimSegments` and then clears out caches.
  """
  # Keep weakly formed synapses around because they contain confidence scores
  # for paths out of learned sequences and produce a better prediction than
  # chance.
  self.trimSegments(minPermanence=0.0001)

  # Update all cached duty cycles for better performance right after loading
  # in the trained network.
  for col in xrange(self.numberOfCols):
    for cell in xrange(self.cellsPerColumn):
      for segment in self.cells[col][cell]:
        segment.dutyCycle()

  # For error checking purposes, make sure no start cell has incoming
  # connections
  if self.cellsPerColumn > 1:
    for col in xrange(self.numberOfCols):
      assert self.getNumSegmentsInCell(col, 0) == 0
def _checkPrediction(self, patternNZs, output=None, colConfidence=None,
                     details=False):
  """
  This function produces goodness-of-match scores for a set of input patterns,
  by checking for their presence in the current and predicted output of the
  TM. Returns a global count of the number of extra and missing bits, the
  confidence scores for each input pattern, and (if requested) the
  bits in each input pattern that were not present in the TM's prediction.

  :param patternNZs a list of input patterns that we want to check for. Each
                element is a list of the non-zeros in that pattern.
  :param output The output of the TM. If not specified, then use the
                TM's current output. This can be specified if you are
                trying to check the prediction metric for an output from
                the past.
  :param colConfidence The column confidences. If not specified, then use the
                TM's current self.colConfidence. This can be specified if you
                are trying to check the prediction metrics for an output
                from the past.
  :param details if True, also include details of missing bits per pattern.

  :returns: list containing:

            [
              totalExtras,
              totalMissing,
              [conf_1, conf_2, ...],
              [missing1, missing2, ...]
            ]

  @retval totalExtras a global count of the number of 'extras', i.e. bits that
                      are on in the current output but not in the or of all the
                      passed in patterns
  @retval totalMissing a global count of all the missing bits, i.e. the bits
                       that are on in the or of the patterns, but not in the
                       current output
  @retval conf_i the confidence score for the i'th pattern in patternNZs
                 This consists of 3 items as a tuple:
                 (predictionScore, posPredictionScore, negPredictionScore)
  @retval missing_i the bits in the i'th pattern that were missing
                    in the output. This list is only returned if details is
                    True.
  """
  # TODO: Add option to check predictedState only.

  # Get the non-zeros in each pattern
  numPatterns = len(patternNZs)

  # Compute the union of all the expected patterns
  orAll = set()
  orAll = orAll.union(*patternNZs)

  # Get the list of active columns in the output. ``output`` is a 2D
  # (column, cell) array; a column is active if any of its cells is on.
  if output is None:
    assert self.currentOutput is not None
    output = self.currentOutput
  output = set(output.sum(axis=1).nonzero()[0])

  # Compute the total extra and missing in the output
  totalExtras = len(output.difference(orAll))
  totalMissing = len(orAll.difference(output))

  # Get the percent confidence level per column by summing the confidence
  # levels of the cells in the column. During training, each segment's
  # confidence number is computed as a running average of how often it
  # correctly predicted bottom-up activity on that column. A cell's
  # confidence number is taken from the first active segment found in the
  # cell. Note that confidence will only be non-zero for predicted columns.
  if colConfidence is None:
    colConfidence = self.colConfidence['t']

  # Assign confidences to each pattern
  confidences = []
  for i in xrange(numPatterns):
    # Sum of the column confidences for this pattern
    positivePredictionSum = colConfidence[patternNZs[i]].sum()
    # How many columns in this pattern
    positiveColumnCount = len(patternNZs[i])

    # Sum of all the column confidences
    totalPredictionSum = colConfidence.sum()
    # Total number of columns
    totalColumnCount = len(colConfidence)

    negativePredictionSum = totalPredictionSum - positivePredictionSum
    negativeColumnCount = totalColumnCount - positiveColumnCount

    # Use the summed confidence as the positive score for this pattern
    # (guard against an empty pattern). NOTE(review): despite the counts
    # computed above, no division takes place -- the score is the raw sum,
    # not a per-column average.
    if positiveColumnCount != 0:
      positivePredictionScore = positivePredictionSum
    else:
      positivePredictionScore = 0.0

    # Same for the confidence mass falling outside this pattern
    if negativeColumnCount != 0:
      negativePredictionScore = negativePredictionSum
    else:
      negativePredictionScore = 0.0

    # Scale the positive and negative prediction scores so that they sum to
    # 1.0
    currentSum = negativePredictionScore + positivePredictionScore
    if currentSum > 0:
      positivePredictionScore *= 1.0/currentSum
      negativePredictionScore *= 1.0/currentSum

    # Final score in [-1, 1]: positive mass minus negative mass
    predictionScore = positivePredictionScore - negativePredictionScore

    confidences.append((predictionScore,
                        positivePredictionScore,
                        negativePredictionScore))

  # Include detail? (bits in each pattern that were missing from the output)
  if details:
    missingPatternBits = [set(pattern).difference(output)
                          for pattern in patternNZs]

    return (totalExtras, totalMissing, confidences, missingPatternBits)
  else:
    return (totalExtras, totalMissing, confidences)
def _isSegmentActive(self, seg, activeState):
  """
  A segment is active if it has >= activationThreshold connected
  synapses that are active due to activeState.

  Notes: studied various cutoffs, none of which seem to be worthwhile
  list comprehension didn't help either

  :param seg the Segment whose synapses are examined
  :param activeState 2D (column, cell) array of cell activity; a synapse is
         active when its source cell is on in this array
  :returns: whether the segment is active -- delegated entirely to the C
            implementation below; presumably a bool, TODO confirm
  """
  # Computing in C - *much* faster
  return isSegmentActive(seg.syns, activeState,
                         self.connectedPerm, self.activationThreshold)
def _getSegmentActivityLevel(self, seg, activeState,
                             connectedSynapsesOnly=False):
  """
  This routine computes the activity level of a segment given activeState.
  It can tally up only connected synapses (permanence >= connectedPerm), or
  all the synapses of the segment, at either t or t-1.

  :param seg the Segment whose synapses are counted
  :param activeState 2D (column, cell) array of cell activity; a synapse
         counts as active when its source cell is on in this array
  :param connectedSynapsesOnly if True, only synapses with permanence >=
         ``self.connectedPerm`` are counted
  :returns: the number of active synapses on the segment -- delegated to the
            C implementation; presumably an int, TODO confirm
  """
  # Computing in C - *much* faster
  return getSegmentActivityLevel(seg.syns, activeState, connectedSynapsesOnly,
                                 self.connectedPerm)
def _getBestMatchingCell(self, c, activeState, minThreshold):
  """
  Find the cell in column ``c`` whose strongest segment has the most active
  synapses, requiring at least ``minThreshold`` of them.

  :param c: which column to look at
  :param activeState: the active cells
  :param minThreshold: minimum number of synapses required
  :returns: tuple (cellIdx, segment, numActiveSynapses), or
            (None, None, None) when no cell meets the threshold
  """
  bestActivity = minThreshold
  bestSegIdx = -1
  bestCell = -1

  for cellIdx in xrange(self.cellsPerColumn):
    # Find the strongest segment within this cell
    topActivity, topSegIdx = 0, 0
    for segIdx, segment in enumerate(self.cells[c][cellIdx]):
      activity = self._getSegmentActivityLevel(segment, activeState)
      if activity > topActivity:
        topActivity, topSegIdx = activity, segIdx

    # >= means a later cell wins ties with an earlier one
    if topActivity >= bestActivity:
      bestActivity = topActivity
      bestSegIdx = topSegIdx
      bestCell = cellIdx

  if bestCell == -1:
    return (None, None, None)
  return (bestCell, self.cells[c][bestCell][bestSegIdx], bestActivity)
def _getBestMatchingSegment(self, c, i, activeState):
"""
For the given cell, find the segment with the largest number of active
synapses. This routine is aggressive in finding the best match. The
permanence value of synapses is allowed to be below connectedPerm. The number
of active synapses is allowed to be below activationThreshold, but must be
above minThreshold. The routine returns the segment index. If no segments are
found, then an index of -1 is returned.
:param c TODO: document
:param i TODO: document
:param activeState TODO: document
"""
maxActivity, which = self.minThreshold, -1
for j, s in enumerate(self.cells[c][i]):
activity = self._getSegmentActivityLevel(s, activeState,
connectedSynapsesOnly=False)
if activity >= maxActivity:
maxActivity, which = activity, j
if which == -1:
return None
else:
return self.cells[c][i][which]
def _getCellForNewSegment(self, colIdx):
  """
  Return the index of a cell in this column which is a good candidate
  for adding a new segment.

  When we have fixed size resources in effect, we insure that we pick a
  cell which does not already have the max number of allowed segments. If
  none exists, we choose the least used segment in the column to re-allocate.

  :param colIdx which column to look at
  :returns: cell index
  """
  # Not fixed size CLA, just choose a cell randomly
  if self.maxSegmentsPerCell < 0:
    if self.cellsPerColumn > 1:
      # Don't ever choose the start cell (cell # 0) in each column
      i = self._random.getUInt32(self.cellsPerColumn-1) + 1
    else:
      i = 0
    return i

  # Fixed size CLA, choose from among the cells that are below the maximum
  # number of segments.
  # NOTE: It is important NOT to always pick the cell with the fewest number
  # of segments. The reason is that if we always do that, we are more likely
  # to run into situations where we choose the same set of cell indices to
  # represent an 'A' in both context 1 and context 2. This is because the
  # cell indices we choose in each column of a pattern will advance in
  # lockstep (i.e. we pick cell indices of 1, then cell indices of 2, etc.).
  candidateCellIdxs = []
  if self.cellsPerColumn == 1:
    minIdx = 0
    maxIdx = 0
  else:
    minIdx = 1 # Don't include startCell in the mix
    maxIdx = self.cellsPerColumn-1
  for i in xrange(minIdx, maxIdx+1):
    numSegs = len(self.cells[colIdx][i])
    if numSegs < self.maxSegmentsPerCell:
      candidateCellIdxs.append(i)

  # If we found one, return with it. Note we need to use _random to maintain
  # correspondence with CPP code.
  if len(candidateCellIdxs) > 0:
    #candidateCellIdx = random.choice(candidateCellIdxs)
    candidateCellIdx = (
        candidateCellIdxs[self._random.getUInt32(len(candidateCellIdxs))])
    if self.verbosity >= 5:
      print "Cell [%d,%d] chosen for new segment, # of segs is %d" % (
          colIdx, candidateCellIdx, len(self.cells[colIdx][candidateCellIdx]))
    return candidateCellIdx

  # All cells in the column are full, find a segment to free up
  # NOTE(review): if every segment's duty cycle were >= 1.0, candidateSegment
  # would remain None and the code below would fail; presumably duty cycles
  # stay below 1.0 by construction -- TODO confirm.
  candidateSegment = None
  candidateSegmentDC = 1.0
  # For each cell in this column
  for i in xrange(minIdx, maxIdx+1):
    # For each segment in this cell
    for s in self.cells[colIdx][i]:
      dc = s.dutyCycle()
      if dc < candidateSegmentDC:
        candidateCellIdx = i
        candidateSegmentDC = dc
        candidateSegment = s

  # Free up the least used segment
  if self.verbosity >= 5:
    print ("Deleting segment #%d for cell[%d,%d] to make room for new "
           "segment" % (candidateSegment.segID, colIdx, candidateCellIdx))
    candidateSegment.debugPrint()
  self._cleanUpdatesList(colIdx, candidateCellIdx, candidateSegment)
  self.cells[colIdx][candidateCellIdx].remove(candidateSegment)
  return candidateCellIdx
def _getSegmentActiveSynapses(self, c, i, s, activeState, newSynapses=False):
  """
  Build a segmentUpdate structure describing proposed changes to segment
  ``s`` of cell (c, i). The active-synapse list holds the indices of existing
  synapses whose originating cell has activeState output 1 at this time step
  (empty when ``s`` is None, since the segment doesn't exist yet). When
  ``newSynapses`` is True, up to newSynapseCount - len(activeSynapses)
  additional synapses are drawn from cells with learnState 1.

  :param c: column index of the owner cell
  :param i: cell index within the column
  :param s: the segment to update, or None when a new segment is proposed
  :param activeState: cell activity used to pick active synapses
  :param newSynapses: whether to top up with newly chosen source cells
  """
  if s is not None:
    # Integer indices of existing synapses whose source cell is active.
    activeSynapses = [synIdx for synIdx, syn in enumerate(s.syns)
                      if activeState[syn[0], syn[1]]]
  else:
    # s is None when we are about to create a brand new segment.
    activeSynapses = []

  if newSynapses: # add a few more synapses
    nSynapsesToAdd = self.newSynapseCount - len(activeSynapses)
    # These are appended as (colIdx, cellIdx) pairs, unlike the integer
    # indices collected above.
    activeSynapses += self._chooseCellsToLearnFrom(c, i, s, nSynapsesToAdd,
                                                   activeState)

  # activeSynapses may still be empty; addToSegmentUpdates handles that.
  # The list mixes two element types:
  # - integers: indices of synapses already on the segment, to reinforce
  # - pairs: source (colIdx, cellIdx) of new synapses to create
  return BacktrackingTM._SegmentUpdate(c, i, s, activeSynapses)
def _chooseCellsToLearnFrom(self, c, i, s, n, activeState):
"""
Choose n random cells to learn from.
This function is called several times while learning with timeStep = t-1, so
we cache the set of candidates for that case. It's also called once with
timeStep = t, and we cache that set of candidates.
:returns: tuple (column index, cell index).
"""
if n <= 0:
return []
tmpCandidates = numpy.where(activeState == 1)
# Candidates can be empty at this point, in which case we return
# an empty segment list. adaptSegments will do nothing when getting
# that list.
if len(tmpCandidates[0]) == 0:
return []
if s is None: # new segment
cands = [syn for syn in zip(tmpCandidates[0], tmpCandidates[1])]
else:
# We exclude any synapse that is already in this segment.
synapsesAlreadyInSegment = set((syn[0], syn[1]) for syn in s.syns)
cands = [syn for syn in zip(tmpCandidates[0], tmpCandidates[1])
if (syn[0], syn[1]) not in synapsesAlreadyInSegment]
# If we have no more candidates than requested, return all of them,
# no shuffle necessary.
if len(cands) <= n:
return cands
if n == 1: # so that we don't shuffle if only one is needed
idx = self._random.getUInt32(len(cands))
return [cands[idx]] # col and cell idx in col
# If we need more than one candidate
indices = numpy.array([j for j in range(len(cands))], dtype='uint32')
tmp = numpy.zeros(min(n, len(indices)), dtype='uint32')
self._random.sample(indices, tmp)
return sorted([cands[j] for j in tmp])
def _processSegmentUpdates(self, activeColumns):
  """
  Go through the list of accumulated segment updates and process them
  as follows:

  if the segment update is too old, remove the update
  else if the cell received bottom-up, update its permanences
  else if it's still being predicted, leave it in the queue
  else remove it.

  :param activeColumns columns that received bottom-up input this time step;
         membership is tested with ``in``
  """
  # The segmentUpdates dict has keys which are the column,cellIdx of the
  # owner cell. The values are lists of segment updates for that cell
  removeKeys = []
  trimSegments = []
  for key, updateList in self.segmentUpdates.iteritems():
    # Get the column number and cell index of the owner cell
    c, i = key[0], key[1]

    # If the cell received bottom-up, update its segments
    if c in activeColumns:
      action = 'update'

    # If not, either keep it around if it's still predicted, or remove it
    else:
      # If it is still predicted, and we are pooling, keep it around
      if self.doPooling and self.lrnPredictedState['t'][c, i] == 1:
        action = 'keep'
      else:
        action = 'remove'

    # Process each segment for this cell. Each segment entry contains
    # [creationDate, SegmentInfo]
    updateListKeep = []
    if action != 'remove':
      for (createDate, segUpdate) in updateList:

        if self.verbosity >= 4:
          print "_nLrnIterations =", self.lrnIterationIdx,
          print segUpdate

        # If this segment has expired. Ignore this update (and hence remove it
        # from list)
        if self.lrnIterationIdx - createDate > self.segUpdateValidDuration:
          continue

        if action == 'update':
          # _adaptSegment returns True when some synapse hit permanence 0,
          # making the segment a trimming candidate (handled below).
          trimSegment = self._adaptSegment(segUpdate)
          if trimSegment:
            trimSegments.append((segUpdate.columnIdx, segUpdate.cellIdx,
                                 segUpdate.segment))
        else:
          # Keep segments that haven't expired yet (the cell is still being
          # predicted)
          updateListKeep.append((createDate, segUpdate))

    # Replacing a value for an existing key is safe while iterating because
    # the set of keys does not change; actual deletions happen after the loop.
    self.segmentUpdates[key] = updateListKeep
    if len(updateListKeep) == 0:
      removeKeys.append(key)

  # Clean out empty segment updates
  for key in removeKeys:
    self.segmentUpdates.pop(key)

  # Trim segments that had synapses go to 0
  for (c, i, segment) in trimSegments:
    self._trimSegmentsInCell(c, i, [segment], minPermanence = 0.00001,
                             minNumSyns = 0)
def _adaptSegment(self, segUpdate):
  """
  This function applies segment update information to a segment in a
  cell.

  Synapses on the active list get their permanence counts incremented by
  permanenceInc. All other synapses get their permanence counts decremented
  by permanenceDec.

  We also increment the positiveActivations count of the segment.

  :param segUpdate SegmentUpdate instance
  :returns: True if some synapses were decremented to 0 and the segment is a
            candidate for trimming
  """
  # This will be set to True if detect that any synapses were decremented to
  # 0
  trimSegment = False

  # segUpdate.segment is None when creating a new segment
  c, i, segment = segUpdate.columnIdx, segUpdate.cellIdx, segUpdate.segment

  # update.activeSynapses can be empty.
  # If not, it can contain either or both integers and tuples.
  # The integers are indices of synapses to update.
  # The tuples represent new synapses to create (src col, src cell in col).
  # We pre-process to separate these various element types.
  # synToCreate is not empty only if positiveReinforcement is True.
  # NOTE: the synapse indices start at *1* to skip the segment flags.
  activeSynapses = segUpdate.activeSynapses
  synToUpdate = set([syn for syn in activeSynapses if type(syn) == int])

  # Modify an existing segment
  if segment is not None:
    if self.verbosity >= 4:
      print "Reinforcing segment #%d for cell[%d,%d]" % (segment.segID, c, i)
      print "  before:",
      segment.debugPrint()

    # Mark it as recently useful
    segment.lastActiveIteration = self.lrnIterationIdx

    # Update frequency and positiveActivations
    segment.positiveActivations += 1 # positiveActivations += 1
    segment.dutyCycle(active=True)

    # First, decrement synapses that are not active
    # s is a synapse *index*, with index 0 in the segment being the tuple
    # (segId, sequence segment flag). See below, creation of segments.
    lastSynIndex = len(segment.syns) - 1
    inactiveSynIndices = [s for s in xrange(0, lastSynIndex+1) \
                          if s not in synToUpdate]
    trimSegment = segment.updateSynapses(inactiveSynIndices,
                                         -self.permanenceDec)

    # Now, increment active synapses. Indices beyond the current synapse
    # count are filtered out here.
    activeSynIndices = [syn for syn in synToUpdate if syn <= lastSynIndex]
    segment.updateSynapses(activeSynIndices, self.permanenceInc)

    # Finally, create new synapses if needed
    # syn is now a tuple (src col, src cell)
    synsToAdd = [syn for syn in activeSynapses if type(syn) != int]
    # If we have fixed resources, get rid of some old syns if necessary.
    # NOTE: inactiveSynIndices was computed BEFORE the updates above and is
    # reused here so that only non-reinforced synapses are freed first.
    if self.maxSynapsesPerSegment > 0 \
        and len(synsToAdd) + len(segment.syns) > self.maxSynapsesPerSegment:
      numToFree = (len(segment.syns) + len(synsToAdd) -
                   self.maxSynapsesPerSegment)
      segment.freeNSynapses(numToFree, inactiveSynIndices, self.verbosity)
    for newSyn in synsToAdd:
      segment.addSynapse(newSyn[0], newSyn[1], self.initialPerm)

    if self.verbosity >= 4:
      print "   after:",
      segment.debugPrint()

  # Create a new segment
  else:
    # (segID, sequenceSegment flag, frequency, positiveActivations,
    #  totalActivations, lastActiveIteration)
    newSegment = Segment(tm=self, isSequenceSeg=segUpdate.sequenceSegment)

    # numpy.float32 important so that we can match with C++
    for synapse in activeSynapses:
      newSegment.addSynapse(synapse[0], synapse[1], self.initialPerm)

    if self.verbosity >= 3:
      print "New segment #%d for cell[%d,%d]" % (self.segID-1, c, i),
      newSegment.debugPrint()

    self.cells[c][i].append(newSegment)

  return trimSegment
def getSegmentInfo(self, collectActiveData = False):
  """Returns information about the distribution of segments, synapses and
  permanence values in the current TM. If requested, also returns information
  regarding the number of currently active segments and synapses.

  :returns: tuple described below:

          ::

            (
              nSegments,
              nSynapses,
              nActiveSegs,
              nActiveSynapses,
              distSegSizes,
              distNSegsPerCell,
              distPermValues,
              distAges
            )

  - ``nSegments``: (int) total number of segments
  - ``nSynapses``: (int) total number of synapses
  - ``nActiveSegs``: (int) total number of active segments (0 if
    ``collectActiveData`` is False)
  - ``nActiveSynapses``: (int) total number of active synapses 0 if
    ``collectActiveData`` is False
  - ``distSegSizes``: (dict) where d[n] = number of segments with n synapses
  - ``distNSegsPerCell``: (dict) where d[n] = number of cells with n segments
  - ``distPermValues``: (dict) where d[p] = number of synapses with perm = p/10
  - ``distAges``: (list) of tuples (``ageRange``, ``numSegments``)
  """
  nSegments = nSynapses = 0
  nActiveSegs = nActiveSynapses = 0
  distSegSizes = {}
  distNSegsPerCell = {}
  distPermValues = {}   # histogram over int(permanence * 10)

  # Pre-build the age histogram buckets as ["lo-hi", count] entries.
  numAgeBuckets = 20
  ageBucketSize = int((self.lrnIterationIdx+20) / 20)
  distAges = [['%d-%d' % (b*ageBucketSize, (b+1)*ageBucketSize-1), 0]
              for b in range(numAgeBuckets)]

  for c in xrange(self.numberOfCols):
    for i in xrange(self.cellsPerColumn):
      segments = self.cells[c][i]
      if len(segments) > 0:
        nSegmentsThisCell = len(segments)
        nSegments += nSegmentsThisCell
        distNSegsPerCell[nSegmentsThisCell] = (
            distNSegsPerCell.get(nSegmentsThisCell, 0) + 1)
        for seg in segments:
          nSynapsesThisSeg = seg.getNumSynapses()
          nSynapses += nSynapsesThisSeg
          distSegSizes[nSynapsesThisSeg] = (
              distSegSizes.get(nSynapsesThisSeg, 0) + 1)

          # Accumulate permanence value histogram
          for syn in seg.syns:
            p = int(syn[2]*10)
            distPermValues[p] = distPermValues.get(p, 0) + 1

          # Accumulate segment age histogram
          age = self.lrnIterationIdx - seg.lastActiveIteration
          distAges[int(age/ageBucketSize)][1] += 1

          # Get active synapse statistics if requested
          if collectActiveData:
            if self._isSegmentActive(seg, self.infActiveState['t']):
              nActiveSegs += 1
            for syn in seg.syns:
              if self.activeState['t'][syn[0]][syn[1]] == 1:
                nActiveSynapses += 1

  return (nSegments, nSynapses, nActiveSegs, nActiveSynapses,
          distSegSizes, distNSegsPerCell, distPermValues, distAges)
class Segment(object):
"""
The Segment class is a container for all of the segment variables and
the synapses it owns.
"""
## These are iteration count tiers used when computing segment duty cycle.
dutyCycleTiers = [0, 100, 320, 1000,
3200, 10000, 32000, 100000,
320000]
## This is the alpha used in each tier. dutyCycleAlphas[n] is used when
# `iterationIdx > dutyCycleTiers[n]`.
dutyCycleAlphas = [None, 0.0032, 0.0010, 0.00032,
0.00010, 0.000032, 0.00001, 0.0000032,
0.0000010]
def __init__(self, tm, isSequenceSeg):
self.tm = tm
self.segID = tm.segID
tm.segID += 1
self.isSequenceSeg = isSequenceSeg
self.lastActiveIteration = tm.lrnIterationIdx
self.positiveActivations = 1
self.totalActivations = 1
# These are internal variables used to compute the positive activations
# duty cycle.
# Callers should use dutyCycle()
self._lastPosDutyCycle = 1.0 / tm.lrnIterationIdx
self._lastPosDutyCycleIteration = tm.lrnIterationIdx
# Each synapse is a tuple (srcCellCol, srcCellIdx, permanence)
self.syns = []
def __str__(self):
return str((self.segID, self.isSequenceSeg, self.lastActiveIteration,
self.positiveActivations, self.totalActivations, self._lastPosDutyCycle, self._lastPosDutyCycleIteration, self.syns))
def __ne__(self, s):
return not self == s
def __eq__(self, s):
if (self.segID != s.segID or
self.isSequenceSeg != s.isSequenceSeg or
self.lastActiveIteration != s.lastActiveIteration or
self.positiveActivations != s.positiveActivations or
self.totalActivations != s.totalActivations or
self._lastPosDutyCycle != s._lastPosDutyCycle or
self._lastPosDutyCycleIteration != s._lastPosDutyCycleIteration):
return False
if len(self.syns) != len(s.syns):
return False
for syn1, syn2 in zip(self.syns, s.syns):
if syn1[0] != syn2[0] or syn1[1] != syn2[1]:
return False
if abs(syn1[2] - syn2[2]) > 0.000001:
return False
return True
def write(self, proto):
proto.segID = self.segID
proto.isSequenceSeg = self.isSequenceSeg
proto.lastActiveIteration = self.lastActiveIteration
proto.positiveActivations = self.positiveActivations
proto.totalActivations = self.totalActivations
proto.lastPosDutyCycle = self._lastPosDutyCycle
proto.lastPosDutyCycleIteration = self._lastPosDutyCycleIteration
synapseListProto = proto.init("synapses", len(self.syns))
for i, syn in enumerate(self.syns):
synProto = synapseListProto[i]
synProto.srcCellCol = syn[0]
synProto.srcCellIdx = syn[1]
synProto.permanence = float(syn[2])
@classmethod
def read(cls, proto, tm):
obj = object.__new__(cls)
obj.tm = tm
obj.segID = int(proto.segID)
obj.isSequenceSeg = proto.isSequenceSeg
obj.lastActiveIteration = int(proto.lastActiveIteration)
obj.positiveActivations = int(proto.positiveActivations)
obj.totalActivations = int(proto.totalActivations)
obj._lastPosDutyCycle = proto.lastPosDutyCycle
obj._lastPosDutyCycleIteration = int(proto.lastPosDutyCycleIteration)
obj.syns = []
for synProto in proto.synapses:
obj.addSynapse(synProto.srcCellCol, synProto.srcCellIdx,
synProto.permanence)
return obj
def dutyCycle(self, active=False, readOnly=False):
"""Compute/update and return the positive activations duty cycle of
this segment. This is a measure of how often this segment is
providing good predictions.
:param active True if segment just provided a good prediction
:param readOnly If True, compute the updated duty cycle, but don't change
the cached value. This is used by debugging print statements.
:returns: The duty cycle, a measure of how often this segment is
providing good predictions.
**NOTE:** This method relies on different schemes to compute the duty cycle
based on how much history we have. In order to support this tiered
approach **IT MUST BE CALLED ON EVERY SEGMENT AT EACH DUTY CYCLE TIER**
(@ref dutyCycleTiers).
When we don't have a lot of history yet (first tier), we simply return
number of positive activations / total number of iterations
After a certain number of iterations have accumulated, it converts into
a moving average calculation, which is updated only when requested
since it can be a bit expensive to compute on every iteration (it uses
the pow() function).
The duty cycle is computed as follows:
dc[t] = (1-alpha) * dc[t-1] + alpha * value[t]
If the value[t] has been 0 for a number of steps in a row, you can apply
all of the updates at once using:
dc[t] = (1-alpha)^(t-lastT) * dc[lastT]
We use the alphas and tiers as defined in @ref dutyCycleAlphas and
@ref dutyCycleTiers.
"""
# For tier #0, compute it from total number of positive activations seen
if self.tm.lrnIterationIdx <= self.dutyCycleTiers[1]:
dutyCycle = float(self.positiveActivations) \
/ self.tm.lrnIterationIdx
if not readOnly:
self._lastPosDutyCycleIteration = self.tm.lrnIterationIdx
self._lastPosDutyCycle = dutyCycle
return dutyCycle
# How old is our update?
age = self.tm.lrnIterationIdx - self._lastPosDutyCycleIteration
# If it's already up to date, we can returned our cached value.
if age == 0 and not active:
return self._lastPosDutyCycle
# Figure out which alpha we're using
for tierIdx in range(len(self.dutyCycleTiers)-1, 0, -1):
if self.tm.lrnIterationIdx > self.dutyCycleTiers[tierIdx]:
alpha = self.dutyCycleAlphas[tierIdx]
break
# Update duty cycle
dutyCycle = pow(1.0-alpha, age) * self._lastPosDutyCycle
if active:
dutyCycle += alpha
# Update cached values if not read-only
if not readOnly:
self._lastPosDutyCycleIteration = self.tm.lrnIterationIdx
self._lastPosDutyCycle = dutyCycle
return dutyCycle
def debugPrint(self):
"""Print segment information for verbose messaging and debugging.
This uses the following format:
ID:54413 True 0.64801 (24/36) 101 [9,1]0.75 [10,1]0.75 [11,1]0.75
where:
54413 - is the unique segment id
True - is sequence segment
0.64801 - moving average duty cycle
(24/36) - (numPositiveActivations / numTotalActivations)
101 - age, number of iterations since last activated
[9,1]0.75 - synapse from column 9, cell #1, strength 0.75
[10,1]0.75 - synapse from column 10, cell #1, strength 0.75
[11,1]0.75 - synapse from column 11, cell #1, strength 0.75
"""
# Segment ID
print "ID:%-5d" % (self.segID),
# Sequence segment or pooling segment
if self.isSequenceSeg:
print "True",
else:
print "False",
# Duty cycle
print "%9.7f" % (self.dutyCycle(readOnly=True)),
# numPositive/totalActivations
print "(%4d/%-4d)" % (self.positiveActivations,
self.totalActivations),
# Age
print "%4d" % (self.tm.lrnIterationIdx - self.lastActiveIteration),
# Print each synapses on this segment as: srcCellCol/srcCellIdx/perm
# if the permanence is above connected, put [] around the synapse info
# For aid in comparing to the C++ implementation, print them in sorted
# order
sortedSyns = sorted(self.syns)
for _, synapse in enumerate(sortedSyns):
print "[%d,%d]%4.2f" % (synapse[0], synapse[1], synapse[2]),
print
def isSequenceSegment(self):
return self.isSequenceSeg
def getNumSynapses(self):
return len(self.syns)
def freeNSynapses(self, numToFree, inactiveSynapseIndices, verbosity= 0):
"""Free up some synapses in this segment. We always free up inactive
synapses (lowest permanence freed up first) before we start to free up
active ones.
:param numToFree number of synapses to free up
:param inactiveSynapseIndices list of the inactive synapse indices.
"""
# Make sure numToFree isn't larger than the total number of syns we have
assert (numToFree <= len(self.syns))
if (verbosity >= 4):
print "\nIn PY freeNSynapses with numToFree =", numToFree,
print "inactiveSynapseIndices =",
for i in inactiveSynapseIndices:
print self.syns[i][0:2],
print
# Remove the lowest perm inactive synapses first
if len(inactiveSynapseIndices) > 0:
perms = numpy.array([self.syns[i][2] for i in inactiveSynapseIndices])
candidates = numpy.array(inactiveSynapseIndices)[
perms.argsort()[0:numToFree]]
candidates = list(candidates)
else:
candidates = []
# Do we need more? if so, remove the lowest perm active synapses too
if len(candidates) < numToFree:
activeSynIndices = [i for i in xrange(len(self.syns))
if i not in inactiveSynapseIndices]
perms = numpy.array([self.syns[i][2] for i in activeSynIndices])
moreToFree = numToFree - len(candidates)
moreCandidates = numpy.array(activeSynIndices)[
perms.argsort()[0:moreToFree]]
candidates += list(moreCandidates)
if verbosity >= 4:
print "Deleting %d synapses from segment to make room for new ones:" % (
len(candidates)), candidates
print "BEFORE:",
self.debugPrint()
# Free up all the candidates now
synsToDelete = [self.syns[i] for i in candidates]
for syn in synsToDelete:
self.syns.remove(syn)
if verbosity >= 4:
print "AFTER:",
self.debugPrint()
def addSynapse(self, srcCellCol, srcCellIdx, perm):
  """Append a new synapse to this segment.

  :param srcCellCol source cell column
  :param srcCellIdx source cell index within the column
  :param perm initial permanence, stored as numpy.float32
  """
  # Normalize types up front: integer cell coordinates, float32 permanence.
  entry = [int(srcCellCol), int(srcCellIdx), numpy.float32(perm)]
  self.syns.append(entry)
def updateSynapses(self, synapses, delta):
  """Adjust the permanence of the given synapses by ``delta``.

  Increments are clipped at ``self.tm.permanenceMax``; decrements (and a
  zero delta) are floored at 0.

  :param synapses: list of synapse indices within this segment to update
  :param delta: amount added to each synapse's permanence
  :returns: True if any synapse's permanence reached 0
  """
  hitZero = False
  if delta > 0:
    cap = self.tm.permanenceMax
    for idx in synapses:
      updated = self.syns[idx][2] + delta
      # Cap synapse permanence at the model-wide maximum.
      self.syns[idx][2] = cap if updated > cap else updated
  else:
    for idx in synapses:
      updated = self.syns[idx][2] + delta
      if updated <= 0:
        # Floor at 0 in case there is no global decay.
        self.syns[idx][2] = 0
        hitZero = True
      else:
        self.syns[idx][2] = updated
  return hitZero
# This is necessary for unpickling objects that have instances of the nested
# class since the loading process looks for the class at the top level of the
# module. (pickle stores "module.SegmentUpdate", so the nested class must be
# re-exported under that top-level name.)
SegmentUpdate = BacktrackingTM._SegmentUpdate
# encoding: utf-8
import sys
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from askbot.utils.console import ProgressBar
class Migration(DataMigration):
def forwards(self, orm):
    """Repoint Award generic FKs and Repute exercise FKs at Post rows."""
    # ContentType for Post model should be created no later than in migration 0092
    ct_post = orm['contenttypes.ContentType'].objects.get(app_label='askbot', model='post')
    message = "Connecting award objects to posts"
    num_awards = orm.Award.objects.count()
    for aw in ProgressBar(orm.Award.objects.iterator(), num_awards, message):
        ct = aw.content_type
        # Only rewrite awards still pointing at the legacy content types.
        if ct.app_label == 'askbot' and ct.model in ('exercise', 'problem', 'comment'):
            aw.content_type = ct_post
            try:
                # Map the legacy object id onto the Post row via its
                # self_<model> back-reference.
                aw.object_id = orm.Post.objects.get(**{'self_%s__id' % str(ct.model): aw.object_id}).id
            except orm.Post.DoesNotExist:
                # NOTE(review): an orphaned award is skipped without saving, so
                # its legacy content_type stays in the DB — confirm intended.
                continue
            aw.save()
    ###
    message = "Connecting repute objects to posts"
    num_reputes = orm.Repute.objects.count()
    for rp in ProgressBar(orm.Repute.objects.iterator(), num_reputes, message):
        if rp.exercise:
            rp.exercise_post = orm.Post.objects.get(self_exercise__id=rp.exercise.id)
            rp.save()
def backwards(self, orm):
    """Intentionally a no-op: the forward repointing is not reversed."""
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'exercise_post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousproblem': {
'Meta': {'object_name': 'AnonymousProblem'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_problems'", 'to': "orm['askbot.Exercise']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousexercise': {
'Meta': {'object_name': 'AnonymousExercise'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.problem': {
'Meta': {'object_name': 'Problem', 'db_table': "u'problem'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'problems'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'problems'", 'to': "orm['askbot.Exercise']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'askbot.comment': {
'Meta': {'ordering': "('-added_at',)", 'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'offensive_flag_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoriteexercise': {
'Meta': {'object_name': 'FavoriteExercise', 'db_table': "u'favorite_exercise'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Thread']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_exercises'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.post': {
'Meta': {'object_name': 'Post'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_posts'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'post_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'self_problem': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Problem']"}),
'self_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Comment']"}),
'self_exercise': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Exercise']"}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['askbot.Thread']"}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
# "Post-processing" - added manually to add support for URL mapping
'old_exercise_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': True, 'blank': True, 'default': None, 'unique': 'True'}),
'old_problem_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': True, 'blank': True, 'default': None, 'unique': 'True'}),
'old_comment_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': True, 'blank': True, 'default': None, 'unique': 'True'}),
},
'askbot.postrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('problem', 'revision'), ('exercise', 'revision'))", 'object_name': 'PostRevision'},
'problem': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Problem']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'postrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Exercise']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'revision_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.exercise': {
'Meta': {'object_name': 'Exercise', 'db_table': "u'exercise'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exercises'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exercises'", 'unique': 'True', 'to': "orm['askbot.Thread']"}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.exerciseview': {
'Meta': {'object_name': 'ExerciseView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Exercise']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exercise_views'", 'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Exercise']", 'null': 'True', 'blank': 'True'}),
'exercise_post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.thread': {
'Meta': {'object_name': 'Thread'},
'accepted_problem': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Problem']", 'null': 'True', 'blank': 'True'}),
'problem_accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'problem_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'unused_favorite_threads'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteExercise']", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_threads'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'unused_last_active_in_threads'", 'to': "orm['auth.User']"}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'threads'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('user', 'voted_post'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'voted_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'post_votes'", 'to': "orm['askbot.Post']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'exercises_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot'] | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from datetime import datetime

from django.contrib.auth.models import User
from django.db import DatabaseError

from desktop.conf import LDAP

from models import UserProfile, get_profile
from views import import_ldap_users
import ldap_access
LOG = logging.getLogger(__name__)
class LdapSynchronizationMiddleware(object):
  """
  Synchronize against LDAP authority.

  For an authenticated user whose profile was created externally (LDAP),
  re-imports that user and their groups from LDAP at most once per session.
  """
  # Session flag marking that the sync already ran for this session.
  USER_CACHE_NAME = 'ldap_use_group_sync_cache'

  def process_request(self, request):
    user = request.user
    if not user or not user.is_authenticated():
      return
    # Only users created via external (LDAP) import are synchronized.
    if not User.objects.filter(username=user.username, userprofile__creation_method=str(UserProfile.CreationMethod.EXTERNAL)).exists():
      LOG.warn("User %s is not an Ldap user" % user.username)
      return
    # Cache should be cleared when user logs out.
    if self.USER_CACHE_NAME not in request.session:
      if LDAP.LDAP_SERVERS.get():
        # Multiple servers configured: sync against the first one.
        connection = ldap_access.get_connection_from_server(next(LDAP.LDAP_SERVERS.__iter__()))
      else:
        connection = ldap_access.get_connection_from_server()
      import_ldap_users(connection, user.username, sync_groups=True, import_by_dn=False)
      request.session[self.USER_CACHE_NAME] = True
      request.session.modified = True
class UpdateLastActivityMiddleware(object):
  """
  Middleware to track the last activity of a user.

  Stamps the authenticated user's profile with the current time on every
  request; a failed save is logged and never breaks the request.
  """

  def process_request(self, request):
    user = request.user
    if not user or not user.is_authenticated():
      return
    profile = get_profile(user)
    profile.last_activity = datetime.now()
    try:
      profile.save()
    except DatabaseError:
      # Bug fix: the previous code referenced two undefined names here —
      # an unimported DatabaseError and a lowercase "log" — so any save
      # failure raised NameError instead of being logged. DatabaseError is
      # now imported from django.db and the module logger LOG is used.
      LOG.exception('Error saving profile information')
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrappers for gsutil, for basic interaction with Google Cloud Storage."""
import cStringIO
import hashlib
import logging
import os
import subprocess
import sys
import tarfile
import urllib2
from telemetry.core import util
# Cloud Storage bucket names used by telemetry.
PUBLIC_BUCKET = 'chromium-telemetry'
INTERNAL_BUCKET = 'chrome-telemetry'

# Where to fetch the gsutil tool from, and where to unpack it locally.
_GSUTIL_URL = 'http://storage.googleapis.com/pub/gsutil.tar.gz'
_DOWNLOAD_PATH = os.path.join(util.GetTelemetryDir(), 'third_party', 'gsutil')
class CloudStorageError(Exception):
  """Base class for errors raised while talking to Google Cloud Storage."""

  @staticmethod
  def _GetConfigInstructions(gsutil_path):
    # Keep the template in one literal so the path is substituted in one place.
    template = ('To configure your credentials:\n'
                ' 1. Run "%s config" and follow its instructions.\n'
                ' 2. If you have a @google.com account, use that one.\n'
                ' 3. Leave the project-id field blank.')
    return template % gsutil_path
class PermissionError(CloudStorageError):
  """Raised when gsutil reports an authorization failure (HTTP 403)."""

  def __init__(self, gsutil_path):
    message = ('Attempted to access a file from Cloud Storage but you don\'t '
               'have permission. ' + self._GetConfigInstructions(gsutil_path))
    super(PermissionError, self).__init__(message)
class CredentialsError(CloudStorageError):
  """Raised when gsutil has no configured credentials at all."""

  def __init__(self, gsutil_path):
    message = ('Attempted to access a file from Cloud Storage but you have no '
               'configured credentials. ' + self._GetConfigInstructions(gsutil_path))
    super(CredentialsError, self).__init__(message)
# Raised when the requested object or URI does not exist in the bucket.
class NotFoundError(CloudStorageError):
  pass
def _DownloadGsutil():
  """Download the gsutil tarball and unpack it next to _DOWNLOAD_PATH.

  Returns:
    Path to the extracted gsutil executable.
  """
  logging.info('Downloading gsutil')
  response = urllib2.urlopen(_GSUTIL_URL)
  # NOTE(review): extractall() trusts member paths inside the archive; the
  # URL is Google-controlled, but this is worth keeping in mind.
  with tarfile.open(fileobj=cStringIO.StringIO(response.read())) as tar_file:
    tar_file.extractall(os.path.dirname(_DOWNLOAD_PATH))
  logging.info('Downloaded gsutil to %s' % _DOWNLOAD_PATH)
  return os.path.join(_DOWNLOAD_PATH, 'gsutil')
def _FindGsutil():
  """Return the gsutil executable path. If we can't find it, download it.

  Searches the download location and every PATH entry, first for a
  depot_tools-style layout and then for a plain install.
  """
  search_paths = [_DOWNLOAD_PATH] + os.environ['PATH'].split(os.pathsep)
  candidate_layouts = (
      ('third_party', 'gsutil', 'gsutil'),  # depot_tools installation
      ('gsutil',),                          # plain gsutil installation
  )
  for layout in candidate_layouts:
    for base in search_paths:
      candidate = os.path.join(base, *layout)
      if os.path.isfile(candidate):
        return candidate
  # Failed to find it anywhere; download a private copy.
  return _DownloadGsutil()
def _RunCommand(args):
  """Run gsutil with the given argument list and return its stdout.

  Raises:
    CredentialsError: gsutil has no configured credentials.
    PermissionError: the configured credentials lack access (HTTP 403).
    NotFoundError: the URI or object does not exist.
    CloudStorageError: any other gsutil failure.
  """
  gsutil_path = _FindGsutil()
  # Run via sys.executable so this works even if gsutil isn't executable.
  gsutil = subprocess.Popen([sys.executable, gsutil_path] + args,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  stdout, stderr = gsutil.communicate()
  if gsutil.returncode:
    # Map known gsutil failure messages onto typed exceptions.
    if stderr.startswith('You are attempting to access protected data with '
                         'no configured credentials.'):
      raise CredentialsError(gsutil_path)
    if 'status=403' in stderr:
      raise PermissionError(gsutil_path)
    if stderr.startswith('InvalidUriError') or 'No such object' in stderr:
      raise NotFoundError(stderr)
    raise CloudStorageError(stderr)
  return stdout
def List(bucket):
  """Return the final path component of every object in gs://<bucket>."""
  listing = _RunCommand(['ls', 'gs://%s' % bucket])
  names = []
  for line in listing.splitlines():
    names.append(line.rsplit('/', 1)[-1])
  return names
def Delete(bucket, remote_path):
  """Remove gs://<bucket>/<remote_path> from Cloud Storage."""
  target = 'gs://%s/%s' % (bucket, remote_path)
  logging.info('Deleting %s' % target)
  _RunCommand(['rm', target])
def Get(bucket, remote_path, local_path):
  """Download gs://<bucket>/<remote_path> into local_path."""
  source = 'gs://%s/%s' % (bucket, remote_path)
  logging.info('Downloading %s to %s' % (source, local_path))
  _RunCommand(['cp', source, local_path])
def Insert(bucket, remote_path, local_path):
  """Upload local_path to gs://<bucket>/<remote_path>."""
  destination = 'gs://%s/%s' % (bucket, remote_path)
  logging.info('Uploading %s to %s' % (local_path, destination))
  _RunCommand(['cp', local_path, destination])
def GetIfChanged(bucket, file_path):
  """Gets the file at file_path if it has a hash file that doesn't match.

  If the file is not in Cloud Storage, log a warning instead of raising an
  exception. We assume that the user just hasn't uploaded the file yet.

  Args:
    bucket: Cloud Storage bucket to fetch from.
    file_path: local destination; '<file_path>.sha1' must hold the hash.

  Returns:
    True if the binary was changed.
  """
  hash_path = file_path + '.sha1'
  if not os.path.exists(hash_path):
    # No .sha1 stub means the file isn't managed via Cloud Storage.
    return False
  with open(hash_path, 'rb') as f:
    expected_hash = f.read(1024).rstrip()
  if os.path.exists(file_path) and GetHash(file_path) == expected_hash:
    return False
  try:
    # Objects in the bucket are keyed by their content hash.
    Get(bucket, expected_hash, file_path)
  except NotFoundError:
    logging.warning('Unable to update file %s from Cloud Storage.' % file_path)
  return True
def GetHash(file_path):
  """Calculates and returns the hash of the file at file_path."""
  sha1 = hashlib.sha1()
  with open(file_path, 'rb') as f:
    while True:
      # Read in 1mb chunks, so it doesn't all have to be loaded into memory.
      chunk = f.read(1024*1024)
      if not chunk:
        break
      sha1.update(chunk)
  return sha1.hexdigest() | unknown | codeparrot/codeparrot-clean | |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_switch_interface
short_description: Configure software switch interfaces by grouping physical and WiFi interfaces in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and switch_interface category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
system_switch_interface:
description:
- Configure software switch interfaces by grouping physical and WiFi interfaces.
default: null
type: dict
suboptions:
intra_switch_policy:
description:
- Allow any traffic between switch interfaces or require firewall policies to allow traffic between switch interfaces.
type: str
choices:
- implicit
- explicit
member:
description:
- Names of the interfaces that belong to the virtual switch.
type: list
suboptions:
interface_name:
description:
- Physical interface name. Source system.interface.name.
type: str
name:
description:
- Interface name (name cannot be in use by any other interfaces, VLANs, or inter-VDOM links).
required: true
type: str
span:
description:
- Enable/disable port spanning. Port spanning echoes traffic received by the software switch to the span destination port.
type: str
choices:
- disable
- enable
span_dest_port:
description:
- SPAN destination port name. All traffic on the SPAN source ports is echoed to the SPAN destination port. Source system.interface.name.
type: str
span_direction:
description:
- "The direction in which the SPAN port operates, either: rx, tx, or both."
type: str
choices:
- rx
- tx
- both
span_source_port:
description:
- Physical interface name. Port spanning echoes all traffic on the SPAN source ports to the SPAN destination port.
type: list
suboptions:
interface_name:
description:
- Physical interface name. Source system.interface.name.
type: str
type:
description:
- "Type of switch based on functionality: switch for normal functionality, or hub to duplicate packets to all port members."
type: str
choices:
- switch
- hub
vdom:
description:
- VDOM that the software switch belongs to. Source system.vdom.name.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure software switch interfaces by grouping physical and WiFi interfaces.
fortios_system_switch_interface:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_switch_interface:
intra_switch_policy: "implicit"
member:
-
interface_name: "<your_own_value> (source system.interface.name)"
name: "default_name_6"
span: "disable"
span_dest_port: "<your_own_value> (source system.interface.name)"
span_direction: "rx"
span_source_port:
-
interface_name: "<your_own_value> (source system.interface.name)"
type: "switch"
vdom: "<your_own_value> (source system.vdom.name)"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Authenticate the legacy fortiosapi handle *fos* with play credentials.

    HTTPS is enabled unless the play explicitly sets ``https: false``;
    certificate verification follows ``ssl_verify``.
    """
    fos.debug('on')
    # A missing 'https' key defaults to HTTPS on, matching the module default.
    if data.get('https', True):
        fos.https('on')
    else:
        fos.https('off')
    fos.login(data['host'], data['username'], data['password'],
              verify=data['ssl_verify'])
def filter_system_switch_interface_data(json):
    """Project *json* onto the options system/switch-interface accepts.

    Unknown keys and keys whose value is None are dropped.
    """
    option_list = ['intra_switch_policy', 'member', 'name',
                   'span', 'span_dest_port', 'span_direction',
                   'span_source_port', 'type', 'vdom']
    return dict((attr, json[attr]) for attr in option_list
                if attr in json and json[attr] is not None)
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from snake_case to hyphen-case.

    FortiOS expects hyphenated attribute names while Ansible options use
    underscores. Dicts are rebuilt with translated keys, list elements are
    converted in place, and scalars pass through untouched.

    Bug fix: list elements were previously converted into a discarded
    local (``elem = underscore_to_hyphen(elem)``), so dicts nested inside
    lists kept their underscore keys. Store the converted value back
    into the list by index instead.
    """
    if isinstance(data, list):
        for index, elem in enumerate(data):
            data[index] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data
    return data
def system_switch_interface(data, fos):
    """Create/update ('present') or delete ('absent') a switch-interface.

    Returns the raw FortiOS API response dict from fos.set/fos.delete.
    """
    vdom = data['vdom']
    state = data['state']
    system_switch_interface_data = data['system_switch_interface']
    # Drop unset options and translate names to the hyphenated FortiOS schema.
    filtered_data = underscore_to_hyphen(filter_system_switch_interface_data(system_switch_interface_data))
    if state == "present":
        return fos.set('system',
                       'switch-interface',
                       data=filtered_data,
                       vdom=vdom)
    elif state == "absent":
        return fos.delete('system',
                          'switch-interface',
                          mkey=filtered_data['name'],
                          vdom=vdom)
def is_successful_status(status):
    """A call succeeded if FortiOS reports success, or a DELETE came back
    404 (the object was already gone)."""
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
    """Dispatch to the system feature handler and normalize the result.

    Returns a (is_error, changed, response) tuple for the module exit.
    """
    if data['system_switch_interface']:
        resp = system_switch_interface(data, fos)
    # NOTE(review): if 'system_switch_interface' is empty/None, `resp` is
    # never bound and the return below raises NameError — confirm callers
    # always supply the option dict.
    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
    """Module entry point: declare the argument spec, pick a transport
    (httpapi connection vs. legacy fortiosapi login), and apply the change."""
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "system_switch_interface": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "intra_switch_policy": {"required": False, "type": "str",
                                        "choices": ["implicit", "explicit"]},
                "member": {"required": False, "type": "list",
                           "options": {
                               "interface_name": {"required": False, "type": "str"}
                           }},
                "name": {"required": True, "type": "str"},
                "span": {"required": False, "type": "str",
                         "choices": ["disable", "enable"]},
                "span_dest_port": {"required": False, "type": "str"},
                "span_direction": {"required": False, "type": "str",
                                   "choices": ["rx", "tx", "both"]},
                "span_source_port": {"required": False, "type": "list",
                                     "options": {
                                         "interface_name": {"required": False, "type": "str"}
                                     }},
                "type": {"required": False, "type": "str",
                         "choices": ["switch", "hub"]},
                "vdom": {"required": False, "type": "str"}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None
    if not legacy_mode:
        # Preferred path: reuse Ansible's persistent httpapi connection.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)
            is_error, has_changed, result = fortios_system(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy path: direct fortiosapi session using host/username/password.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(module.params, fos)
        is_error, has_changed, result = fortios_system(module.params, fos)
        fos.logout()
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
"""sdist tests"""
import locale
import os
import shutil
import sys
import tempfile
import unittest
import unicodedata
import re
from setuptools.tests import environment, test_svn
from setuptools.tests.py26compat import skipIf
from setuptools.compat import StringIO, unicode
from setuptools.tests.py26compat import skipIf
from setuptools.command.sdist import sdist, walk_revctrl
from setuptools.command.egg_info import manifest_maker
from setuptools.dist import Distribution
from setuptools import svn_utils
# Minimal distribution metadata shared by every test below.
SETUP_ATTRS = {
    'name': 'sdist_test',
    'version': '0.0',
    'packages': ['sdist_test'],
    'package_data': {'sdist_test': ['*.txt']}
}
# setup.py content written into each temporary project directory.
SETUP_PY = """\
from setuptools import setup
setup(**%r)
""" % SETUP_ATTRS
# A non-UTF-8 (Latin-1) filename used to exercise encoding edge cases.
if sys.version_info >= (3,):
    LATIN1_FILENAME = 'smörbröd.py'.encode('latin-1')
else:
    LATIN1_FILENAME = 'sm\xf6rbr\xf6d.py'
# Cannot use context manager because of Python 2.4
def quiet():
    """Redirect stdout/stderr into throwaway buffers; undo with unquiet()."""
    global old_stdout, old_stderr
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    sys.stdout = StringIO()
    sys.stderr = StringIO()
def unquiet():
    """Restore the real stdout/stderr streams saved by quiet()."""
    sys.stdout = old_stdout
    sys.stderr = old_stderr
# Fake byte literals for Python <= 2.5
def b(s, encoding='utf-8'):
    """Fake byte literal: encode on Python 3, return unchanged on Python 2."""
    if sys.version_info < (3,):
        return s
    return s.encode(encoding)
# Convert to POSIX path
def posix(path):
    """Convert an OS-native path (str or bytes) to forward-slash form."""
    if isinstance(path, str) or sys.version_info < (3,):
        return path.replace(os.sep, '/')
    # Python 3 bytes path: replace the encoded separator.
    return path.replace(os.sep.encode('ascii'), b('/'))
# HFS Plus uses decomposed UTF-8
def decompose(path):
    """Return *path* in NFD form (HFS Plus stores decomposed UTF-8)."""
    if isinstance(path, unicode):
        return unicodedata.normalize('NFD', path)
    try:
        decoded = path.decode('utf-8')
    except UnicodeError:
        return path  # Not UTF-8; leave the raw bytes alone.
    return unicodedata.normalize('NFD', decoded).encode('utf-8')
class TestSdistTest(unittest.TestCase):
    """End-to-end checks of sdist/manifest handling on a scratch project,
    with heavy emphasis on UTF-8 vs. Latin-1 filename behavior (#303)."""
    def setUp(self):
        # Build a throwaway project (setup.py + package dir) and chdir into it.
        self.temp_dir = tempfile.mkdtemp()
        f = open(os.path.join(self.temp_dir, 'setup.py'), 'w')
        f.write(SETUP_PY)
        f.close()
        # Set up the rest of the test package
        test_pkg = os.path.join(self.temp_dir, 'sdist_test')
        os.mkdir(test_pkg)
        # *.rst was not included in package_data, so c.rst should not be
        # automatically added to the manifest when not under version control
        for fname in ['__init__.py', 'a.txt', 'b.txt', 'c.rst']:
            # Just touch the files; their contents are irrelevant
            open(os.path.join(test_pkg, fname), 'w').close()
        self.old_cwd = os.getcwd()
        os.chdir(self.temp_dir)
    def tearDown(self):
        # Leave and delete the scratch directory.
        os.chdir(self.old_cwd)
        shutil.rmtree(self.temp_dir)
    def test_package_data_in_sdist(self):
        """Regression test for pull request #4: ensures that files listed in
        package_data are included in the manifest even if they're not added to
        version control.
        """
        dist = Distribution(SETUP_ATTRS)
        dist.script_name = 'setup.py'
        cmd = sdist(dist)
        cmd.ensure_finalized()
        # squelch output
        quiet()
        try:
            cmd.run()
        finally:
            unquiet()
        manifest = cmd.filelist.files
        self.assertTrue(os.path.join('sdist_test', 'a.txt') in manifest)
        self.assertTrue(os.path.join('sdist_test', 'b.txt') in manifest)
        self.assertTrue(os.path.join('sdist_test', 'c.rst') not in manifest)
    def test_manifest_is_written_with_utf8_encoding(self):
        # Test for #303.
        dist = Distribution(SETUP_ATTRS)
        dist.script_name = 'setup.py'
        mm = manifest_maker(dist)
        mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
        os.mkdir('sdist_test.egg-info')
        # UTF-8 filename
        filename = os.path.join('sdist_test', 'smörbröd.py')
        # Add UTF-8 filename and write manifest
        quiet()
        try:
            mm.run()
            mm.filelist.files.append(filename)
            mm.write_manifest()
        finally:
            unquiet()
        manifest = open(mm.manifest, 'rbU')
        contents = manifest.read()
        manifest.close()
        # The manifest should be UTF-8 encoded
        try:
            u_contents = contents.decode('UTF-8')
        except UnicodeDecodeError:
            e = sys.exc_info()[1]
            self.fail(e)
        # The manifest should contain the UTF-8 filename
        if sys.version_info >= (3,):
            self.assertTrue(posix(filename) in u_contents)
        else:
            self.assertTrue(posix(filename) in contents)
    # Python 3 only
    if sys.version_info >= (3,):
        def test_write_manifest_allows_utf8_filenames(self):
            # Test for #303.
            dist = Distribution(SETUP_ATTRS)
            dist.script_name = 'setup.py'
            mm = manifest_maker(dist)
            mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
            os.mkdir('sdist_test.egg-info')
            # UTF-8 filename
            filename = os.path.join(b('sdist_test'), b('smörbröd.py'))
            # Add filename and write manifest
            quiet()
            try:
                mm.run()
                u_filename = filename.decode('utf-8')
                mm.filelist.files.append(u_filename)
                # Re-write manifest
                mm.write_manifest()
            finally:
                unquiet()
            manifest = open(mm.manifest, 'rbU')
            contents = manifest.read()
            manifest.close()
            # The manifest should be UTF-8 encoded
            try:
                contents.decode('UTF-8')
            except UnicodeDecodeError:
                e = sys.exc_info()[1]
                self.fail(e)
            # The manifest should contain the UTF-8 filename
            self.assertTrue(posix(filename) in contents)
            # The filelist should have been updated as well
            self.assertTrue(u_filename in mm.filelist.files)
        def test_write_manifest_skips_non_utf8_filenames(self):
            # Test for #303.
            dist = Distribution(SETUP_ATTRS)
            dist.script_name = 'setup.py'
            mm = manifest_maker(dist)
            mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
            os.mkdir('sdist_test.egg-info')
            # Latin-1 filename
            filename = os.path.join(b('sdist_test'), LATIN1_FILENAME)
            # Add filename with surrogates and write manifest
            quiet()
            try:
                mm.run()
                u_filename = filename.decode('utf-8', 'surrogateescape')
                mm.filelist.files.append(u_filename)
                # Re-write manifest
                mm.write_manifest()
            finally:
                unquiet()
            manifest = open(mm.manifest, 'rbU')
            contents = manifest.read()
            manifest.close()
            # The manifest should be UTF-8 encoded
            try:
                contents.decode('UTF-8')
            except UnicodeDecodeError:
                e = sys.exc_info()[1]
                self.fail(e)
            # The Latin-1 filename should have been skipped
            self.assertFalse(posix(filename) in contents)
            # The filelist should have been updated as well
            self.assertFalse(u_filename in mm.filelist.files)
    def test_manifest_is_read_with_utf8_encoding(self):
        # Test for #303.
        dist = Distribution(SETUP_ATTRS)
        dist.script_name = 'setup.py'
        cmd = sdist(dist)
        cmd.ensure_finalized()
        # Create manifest
        quiet()
        try:
            cmd.run()
        finally:
            unquiet()
        # Add UTF-8 filename to manifest
        filename = os.path.join(b('sdist_test'), b('smörbröd.py'))
        cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
        manifest = open(cmd.manifest, 'ab')
        manifest.write(b('\n')+filename)
        manifest.close()
        # The file must exist to be included in the filelist
        open(filename, 'w').close()
        # Re-read manifest
        cmd.filelist.files = []
        quiet()
        try:
            cmd.read_manifest()
        finally:
            unquiet()
        # The filelist should contain the UTF-8 filename
        if sys.version_info >= (3,):
            filename = filename.decode('utf-8')
        self.assertTrue(filename in cmd.filelist.files)
    # Python 3 only
    if sys.version_info >= (3,):
        def test_read_manifest_skips_non_utf8_filenames(self):
            # Test for #303.
            dist = Distribution(SETUP_ATTRS)
            dist.script_name = 'setup.py'
            cmd = sdist(dist)
            cmd.ensure_finalized()
            # Create manifest
            quiet()
            try:
                cmd.run()
            finally:
                unquiet()
            # Add Latin-1 filename to manifest
            filename = os.path.join(b('sdist_test'), LATIN1_FILENAME)
            cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
            manifest = open(cmd.manifest, 'ab')
            manifest.write(b('\n')+filename)
            manifest.close()
            # The file must exist to be included in the filelist
            open(filename, 'w').close()
            # Re-read manifest
            cmd.filelist.files = []
            quiet()
            try:
                try:
                    cmd.read_manifest()
                except UnicodeDecodeError:
                    e = sys.exc_info()[1]
                    self.fail(e)
            finally:
                unquiet()
            # The Latin-1 filename should have been skipped
            filename = filename.decode('latin-1')
            self.assertFalse(filename in cmd.filelist.files)
    @skipIf(sys.version_info >= (3,) and locale.getpreferredencoding() != 'UTF-8',
            'Unittest fails if locale is not utf-8 but the manifests is recorded correctly')
    def test_sdist_with_utf8_encoded_filename(self):
        # Test for #303.
        dist = Distribution(SETUP_ATTRS)
        dist.script_name = 'setup.py'
        cmd = sdist(dist)
        cmd.ensure_finalized()
        # UTF-8 filename
        filename = os.path.join(b('sdist_test'), b('smörbröd.py'))
        open(filename, 'w').close()
        quiet()
        try:
            cmd.run()
        finally:
            unquiet()
        if sys.platform == 'darwin':
            filename = decompose(filename)
        if sys.version_info >= (3,):
            fs_enc = sys.getfilesystemencoding()
            if sys.platform == 'win32':
                if fs_enc == 'cp1252':
                    # Python 3 mangles the UTF-8 filename
                    filename = filename.decode('cp1252')
                    self.assertTrue(filename in cmd.filelist.files)
                else:
                    filename = filename.decode('mbcs')
                    self.assertTrue(filename in cmd.filelist.files)
            else:
                filename = filename.decode('utf-8')
                self.assertTrue(filename in cmd.filelist.files)
        else:
            self.assertTrue(filename in cmd.filelist.files)
    def test_sdist_with_latin1_encoded_filename(self):
        # Test for #303.
        dist = Distribution(SETUP_ATTRS)
        dist.script_name = 'setup.py'
        cmd = sdist(dist)
        cmd.ensure_finalized()
        # Latin-1 filename
        filename = os.path.join(b('sdist_test'), LATIN1_FILENAME)
        open(filename, 'w').close()
        self.assertTrue(os.path.isfile(filename))
        quiet()
        try:
            cmd.run()
        finally:
            unquiet()
        if sys.version_info >= (3,):
            #not all windows systems have a default FS encoding of cp1252
            if sys.platform == 'win32':
                # Latin-1 is similar to Windows-1252 however
                # on mbcs filesys it is not in latin-1 encoding
                fs_enc = sys.getfilesystemencoding()
                if fs_enc == 'mbcs':
                    filename = filename.decode('mbcs')
                else:
                    filename = filename.decode('latin-1')
                self.assertTrue(filename in cmd.filelist.files)
            else:
                # The Latin-1 filename should have been skipped
                filename = filename.decode('latin-1')
                self.assertFalse(filename in cmd.filelist.files)
        else:
            # No conversion takes place under Python 2 and the file
            # is included. We shall keep it that way for BBB.
            self.assertTrue(filename in cmd.filelist.files)
class TestDummyOutput(environment.ZippedEnvironment):
    """Run `setup.py sdist` on a canned ('dummy') project and check every
    line of its output against a whitelist of expected message patterns."""
    def setUp(self):
        self.datafile = os.path.join('setuptools', 'tests',
                                     'svn_data', "dummy.zip")
        self.dataname = "dummy"
        super(TestDummyOutput, self).setUp()
    def _run(self):
        # Execute sdist in a subprocess and capture its combined output.
        code, data = environment.run_setup_py(["sdist"],
                                              pypath=self.old_cwd,
                                              data_stream=0)
        if code:
            info = "DIR: " + os.path.abspath('.')
            info += "\n  SDIST RETURNED: %i\n\n" % code
            info += data
            raise AssertionError(info)
        datalines = data.splitlines()
        # Regex patterns for every output line sdist is allowed to emit.
        possible = (
            "running sdist",
            "running egg_info",
            "creating dummy\.egg-info",
            "writing dummy\.egg-info",
            "writing top-level names to dummy\.egg-info",
            "writing dependency_links to dummy\.egg-info",
            "writing manifest file 'dummy\.egg-info",
            "reading manifest file 'dummy\.egg-info",
            "reading manifest template 'MANIFEST\.in'",
            "writing manifest file 'dummy\.egg-info",
            "creating dummy-0.1.1",
            "making hard links in dummy-0\.1\.1",
            "copying files to dummy-0\.1\.1",
            "copying \S+ -> dummy-0\.1\.1",
            "copying dummy",
            "copying dummy\.egg-info",
            "hard linking \S+ -> dummy-0\.1\.1",
            "hard linking dummy",
            "hard linking dummy\.egg-info",
            "Writing dummy-0\.1\.1",
            "creating dist",
            "creating 'dist",
            "Creating tar archive",
            "running check",
            "adding 'dummy-0\.1\.1",
            "tar .+ dist/dummy-0\.1\.1\.tar dummy-0\.1\.1",
            "gzip .+ dist/dummy-0\.1\.1\.tar",
            "removing 'dummy-0\.1\.1' \\(and everything under it\\)",
        )
        print("    DIR: " + os.path.abspath('.'))
        for line in datalines:
            found = False
            for pattern in possible:
                if re.match(pattern, line):
                    print("    READ: " + line)
                    found = True
                    break
            if not found:
                # Any unrecognized output line fails the test.
                raise AssertionError("Unexpexected: %s\n-in-\n%s"
                                     % (line, data))
        return data
    def test_sources(self):
        self._run()
class TestSvn(environment.ZippedEnvironment):
    """Check walk_revctrl() against a zipped SVN working copy matching the
    locally installed SVN version."""
    def setUp(self):
        version = svn_utils.SvnInfo.get_svn_version()
        if not version:  # None or Empty
            return
        # Keep only (major, minor) of the detected SVN version.
        self.base_version = tuple([int(x) for x in version.split('.')][:2])
        if not self.base_version:
            raise ValueError('No SVN tools installed')
        elif self.base_version < (1, 3):
            raise ValueError('Insufficient SVN Version %s' % version)
        elif self.base_version >= (1, 9):
            #trying the latest version
            self.base_version = (1, 8)
        # Pick the fixture zip matching the SVN working-copy format.
        self.dataname = "svn%i%i_example" % self.base_version
        self.datafile = os.path.join('setuptools', 'tests',
                                     'svn_data', self.dataname + ".zip")
        super(TestSvn, self).setUp()
    @skipIf(not test_svn._svn_check, "No SVN to text, in the first place")
    def test_walksvn(self):
        # Fixture directory names changed between SVN versions.
        if self.base_version >= (1, 6):
            folder2 = 'third party2'
            folder3 = 'third party3'
        else:
            folder2 = 'third_party2'
            folder3 = 'third_party3'
        #TODO is this right
        expected = set([
            os.path.join('a file'),
            os.path.join(folder2, 'Changes.txt'),
            os.path.join(folder2, 'MD5SUMS'),
            os.path.join(folder2, 'README.txt'),
            os.path.join(folder3, 'Changes.txt'),
            os.path.join(folder3, 'MD5SUMS'),
            os.path.join(folder3, 'README.txt'),
            os.path.join(folder3, 'TODO.txt'),
            os.path.join(folder3, 'fin'),
            os.path.join('third_party', 'README.txt'),
            os.path.join('folder', folder2, 'Changes.txt'),
            os.path.join('folder', folder2, 'MD5SUMS'),
            os.path.join('folder', folder2, 'WatashiNiYomimasu.txt'),
            os.path.join('folder', folder3, 'Changes.txt'),
            os.path.join('folder', folder3, 'fin'),
            os.path.join('folder', folder3, 'MD5SUMS'),
            os.path.join('folder', folder3, 'oops'),
            os.path.join('folder', folder3, 'WatashiNiYomimasu.txt'),
            os.path.join('folder', folder3, 'ZuMachen.txt'),
            os.path.join('folder', 'third_party', 'WatashiNiYomimasu.txt'),
            os.path.join('folder', 'lalala.txt'),
            os.path.join('folder', 'quest.txt'),
            # The example will have a deleted file
            # (or should) but shouldn't return it
        ])
        self.assertEqual(set(x for x in walk_revctrl()), expected)
# Aggregate every TestCase in this module for `setup.py test`.
def test_suite():
    return unittest.defaultTestLoader.loadTestsFromName(__name__) | unknown | codeparrot/codeparrot-clean | |
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_serverautoscalepolicy
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of ServerAutoScalePolicy Avi RESTful Object
description:
- This module is used to configure ServerAutoScalePolicy object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
description:
description:
- User defined description for the object.
intelligent_autoscale:
description:
- Use avi intelligent autoscale algorithm where autoscale is performed by comparing load on the pool against estimated capacity of all the servers.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
intelligent_scalein_margin:
description:
- Maximum extra capacity as percentage of load used by the intelligent scheme.
- Scalein is triggered when available capacity is more than this margin.
- Allowed values are 1-99.
- Default value when not specified in API or module is interpreted by Avi Controller as 40.
intelligent_scaleout_margin:
description:
- Minimum extra capacity as percentage of load used by the intelligent scheme.
- Scaleout is triggered when available capacity is less than this margin.
- Allowed values are 1-99.
- Default value when not specified in API or module is interpreted by Avi Controller as 20.
max_scalein_adjustment_step:
description:
- Maximum number of servers to scalein simultaneously.
- The actual number of servers to scalein is chosen such that target number of servers is always more than or equal to the min_size.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
max_scaleout_adjustment_step:
description:
- Maximum number of servers to scaleout simultaneously.
- The actual number of servers to scaleout is chosen such that target number of servers is always less than or equal to the max_size.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
max_size:
description:
- Maximum number of servers after scaleout.
- Allowed values are 0-400.
min_size:
description:
- No scale-in happens once number of operationally up servers reach min_servers.
- Allowed values are 0-400.
name:
description:
- Name of the object.
required: true
scalein_alertconfig_refs:
description:
- Trigger scalein when alerts due to any of these alert configurations are raised.
- It is a reference to an object of type alertconfig.
scalein_cooldown:
description:
- Cooldown period during which no new scalein is triggered to allow previous scalein to successfully complete.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
- Units(SEC).
scaleout_alertconfig_refs:
description:
- Trigger scaleout when alerts due to any of these alert configurations are raised.
- It is a reference to an object of type alertconfig.
scaleout_cooldown:
description:
- Cooldown period during which no new scaleout is triggered to allow previous scaleout to successfully complete.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
- Units(SEC).
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
use_predicted_load:
description:
- Use predicted load rather than current load.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create ServerAutoScalePolicy object
avi_serverautoscalepolicy:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_serverautoscalepolicy
"""
RETURN = '''
obj:
description: ServerAutoScalePolicy (api/serverautoscalepolicy) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
    # The Avi helpers live in the Avi SDK support package; if the SDK is not
    # installed the ImportError is swallowed here and reported cleanly from
    # main() via module.fail_json() instead of crashing at import time.
    from ansible.module_utils.network.avi.avi import (
        avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
    HAS_AVI = False
def main():
    """
    Ansible entry point: create, update or delete an Avi
    ServerAutoScalePolicy object through the shared Avi API helper.
    """
    # Module-specific parameters (one entry per ServerAutoScalePolicy field).
    argument_specs = {
        'state': dict(default='present',
                      choices=['absent', 'present']),
        'avi_api_update_method': dict(default='put',
                                      choices=['put', 'patch']),
        'avi_api_patch_op': dict(choices=['add', 'replace', 'delete']),
        'description': dict(type='str'),
        'intelligent_autoscale': dict(type='bool'),
        'intelligent_scalein_margin': dict(type='int'),
        'intelligent_scaleout_margin': dict(type='int'),
        'max_scalein_adjustment_step': dict(type='int'),
        'max_scaleout_adjustment_step': dict(type='int'),
        'max_size': dict(type='int'),
        'min_size': dict(type='int'),
        'name': dict(type='str', required=True),
        'scalein_alertconfig_refs': dict(type='list'),
        'scalein_cooldown': dict(type='int'),
        'scaleout_alertconfig_refs': dict(type='list'),
        'scaleout_cooldown': dict(type='int'),
        'tenant_ref': dict(type='str'),
        'url': dict(type='str'),
        'use_predicted_load': dict(type='bool'),
        'uuid': dict(type='str'),
    }
    # Merge the common Avi connection options (controller, credentials, ...)
    # on top, preserving the original precedence on any key collision.
    argument_specs.update(avi_common_argument_spec())

    module = AnsibleModule(argument_spec=argument_specs,
                           supports_check_mode=True)
    if not HAS_AVI:
        # Fail early with install guidance when the Avi SDK is missing.
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # Delegate all CRUD handling for this object type to the shared helper.
    return avi_ansible_api(module, 'serverautoscalepolicy',
                           set([]))
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
package usergroup
import (
"github.com/moby/sys/user"
)
// Default locations of the subordinate user/group ID range files,
// as described in subuid(5) and subgid(5).
const (
	subuidFileName = "/etc/subuid"
	subgidFileName = "/etc/subgid"
)
func parseSubuid(username string) ([]user.SubID, error) {
return user.ParseSubIDFileFilter(subuidFileName, func(sid user.SubID) bool {
return sid.Name == username
})
}
func parseSubgid(username string) ([]user.SubID, error) {
return user.ParseSubIDFileFilter(subgidFileName, func(sid user.SubID) bool {
return sid.Name == username
})
} | go | github | https://github.com/moby/moby | daemon/internal/usergroup/parser.go |
#ifndef JEMALLOC_INTERNAL_ATOMIC_C11_H
#define JEMALLOC_INTERNAL_ATOMIC_C11_H
#include <stdatomic.h>
/* Static initializer for atomics declared with the generated types. */
#define ATOMIC_INIT(...) ATOMIC_VAR_INIT(__VA_ARGS__)

/*
 * Map jemalloc's backend-neutral memory-order names directly onto the
 * C11 memory_order enumeration from <stdatomic.h>.
 */
#define atomic_memory_order_t memory_order
#define atomic_memory_order_relaxed memory_order_relaxed
#define atomic_memory_order_acquire memory_order_acquire
#define atomic_memory_order_release memory_order_release
#define atomic_memory_order_acq_rel memory_order_acq_rel
#define atomic_memory_order_seq_cst memory_order_seq_cst

/* Thread fence taking one of the mapped memory orders above. */
#define atomic_fence atomic_thread_fence
/*
 * Generate the atomic type atomic_<short_type>_t and the basic operations
 * (load, store, exchange, weak/strong compare-exchange), implemented
 * directly on the C11 _explicit intrinsics.  The lg_size parameter is
 * unused here; it only exists to keep the generator signature identical
 * to the non-C11 atomic backends.
 */
#define JEMALLOC_GENERATE_ATOMICS(type, short_type,			\
    /* unused */ lg_size)						\
typedef _Atomic(type) atomic_##short_type##_t;				\
									\
ATOMIC_INLINE type							\
atomic_load_##short_type(const atomic_##short_type##_t *a,		\
    atomic_memory_order_t mo) {						\
	/*								\
	 * A strict interpretation of the C standard prevents		\
	 * atomic_load from taking a const argument, but it's		\
	 * convenient for our purposes. This cast is a workaround.	\
	 */								\
	atomic_##short_type##_t* a_nonconst =				\
	    (atomic_##short_type##_t*)a;				\
	return atomic_load_explicit(a_nonconst, mo);			\
}									\
									\
ATOMIC_INLINE void							\
atomic_store_##short_type(atomic_##short_type##_t *a,			\
    type val, atomic_memory_order_t mo) {				\
	atomic_store_explicit(a, val, mo);				\
}									\
									\
ATOMIC_INLINE type							\
atomic_exchange_##short_type(atomic_##short_type##_t *a, type val,	\
    atomic_memory_order_t mo) {						\
	return atomic_exchange_explicit(a, val, mo);			\
}									\
									\
ATOMIC_INLINE bool							\
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a,	\
    type *expected, type desired, atomic_memory_order_t success_mo,	\
    atomic_memory_order_t failure_mo) {					\
	return atomic_compare_exchange_weak_explicit(a, expected,	\
	    desired, success_mo, failure_mo);				\
}									\
									\
ATOMIC_INLINE bool							\
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a,	\
    type *expected, type desired, atomic_memory_order_t success_mo,	\
    atomic_memory_order_t failure_mo) {					\
	return atomic_compare_exchange_strong_explicit(a, expected,	\
	    desired, success_mo, failure_mo);				\
}
/*
 * Integral types have some special operations available that non-integral ones
 * lack: the fetch-and-modify arithmetic/bitwise operations below.  Expands to
 * everything JEMALLOC_GENERATE_ATOMICS produces, plus fetch_add/sub/and/or/xor.
 */
#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type,			\
    /* unused */ lg_size)						\
JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size)	\
									\
ATOMIC_INLINE type							\
atomic_fetch_add_##short_type(atomic_##short_type##_t *a,		\
    type val, atomic_memory_order_t mo) {				\
	return atomic_fetch_add_explicit(a, val, mo);			\
}									\
									\
ATOMIC_INLINE type							\
atomic_fetch_sub_##short_type(atomic_##short_type##_t *a,		\
    type val, atomic_memory_order_t mo) {				\
	return atomic_fetch_sub_explicit(a, val, mo);			\
}									\
ATOMIC_INLINE type							\
atomic_fetch_and_##short_type(atomic_##short_type##_t *a,		\
    type val, atomic_memory_order_t mo) {				\
	return atomic_fetch_and_explicit(a, val, mo);			\
}									\
ATOMIC_INLINE type							\
atomic_fetch_or_##short_type(atomic_##short_type##_t *a,		\
    type val, atomic_memory_order_t mo) {				\
	return atomic_fetch_or_explicit(a, val, mo);			\
}									\
ATOMIC_INLINE type							\
atomic_fetch_xor_##short_type(atomic_##short_type##_t *a,		\
    type val, atomic_memory_order_t mo) {				\
	return atomic_fetch_xor_explicit(a, val, mo);			\
}
#endif /* JEMALLOC_INTERNAL_ATOMIC_C11_H */ | c | github | https://github.com/redis/redis | deps/jemalloc/include/jemalloc/internal/atomic_c11.h |
# -*- coding: utf-8 -*-
""" S3 Logging Facility
@copyright: (c) 2015 Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
import sys
from gluon import current
# =============================================================================
class S3Log(object):
    """
        Simple global logging facility, called like:

            current.log.error("Something went wrong", value="Example")

        gives:

            2014-02-16 11:58:41 S3LOG ERROR: Something went wrong: Example

        Configurable in 000_config.py (set up in models/00_db.py)

        - to include caller details (file name, line number, function name):

            2014-02-16 11:58:23 (applications/eden/modules/s3/s3rest.py 477 __init__)
            ERROR: Something went wrong: Example

        - to write to console (sys.stderr), to a log file, or both.

        Configuration see modules/s3cfg.py.
    """

    def __init__(self):
        """
            Constructor
        """

        settings = current.deployment_settings

        log_level = settings.get_log_level()
        if log_level is None:
            # Logging disabled in deployment settings:
            # replace all severity methods with the no-op
            self.critical = \
            self.error = \
            self.warning = \
            self.info = \
            self.debug = self.ignore
            self.log_level = 100
        else:
            try:
                level = getattr(logging, log_level.upper())
            except AttributeError:
                raise SyntaxError("Invalid settings.log.level: %s" % log_level)
            self.log_level = level
            # Bind only those severity methods that are at or above the
            # configured minimum level; all others become no-ops
            self.critical = self._critical \
                            if level <= logging.CRITICAL else self.ignore
            self.error = self._error \
                         if level <= logging.ERROR else self.ignore
            self.warning = self._warning \
                           if level <= logging.WARNING else self.ignore
            self.info = self._info \
                        if level <= logging.INFO else self.ignore
            self.debug = self._debug \
                         if level <= logging.DEBUG else self.ignore

        self.configure_logger()

    # -------------------------------------------------------------------------
    @classmethod
    def setup(cls):
        """
            Set up current.log (only once per request cycle)
        """

        if hasattr(current, "log"):
            # Already set up
            return
        current.log = cls()
        return

    # -------------------------------------------------------------------------
    def configure_logger(self):
        """
            Configure output handlers
        """

        # NOTE(review): current.log is assigned only after cls() returns in
        # setup(), so during initial setup this check is False and the
        # handlers get configured; on any later re-instantiation it is True
        # and the existing handler configuration is kept
        if hasattr(current, "log"):
            return

        settings = current.deployment_settings

        console = settings.get_log_console()
        logfile = settings.get_log_logfile()
        if not console and not logfile:
            # No point to log without output channel
            self.critical = \
            self.error = \
            self.warning = \
            self.info = \
            self.debug = self.ignore
            return

        logger = logging.getLogger(__name__)
        logger.propagate = False
        logger.setLevel(self.log_level)
        # Discard any handlers left over from a previous configuration
        logger.handlers = []

        # Message format, incl. the custom %(caller)s field filled by _log()
        m_format = "%(asctime)s %(caller)s %(levelname)s: %(message)s"
        d_format = "%Y-%m-%d %H:%M:%S"
        formatter = logging.Formatter(m_format, d_format)

        # Set up console handler
        if console:
            console_handler = logging.StreamHandler(sys.stderr)
            console_handler.setFormatter(formatter)
            console_handler.setLevel(self.log_level)
            logger.addHandler(console_handler)

        # Set up log file handler (rotating at 1MB, keeping 3 backups)
        if logfile:
            from logging.handlers import RotatingFileHandler
            MAXBYTES = 1048576
            logfile_handler = RotatingFileHandler(logfile,
                                                  maxBytes = MAXBYTES,
                                                  backupCount = 3)
            logfile_handler.setFormatter(formatter)
            logfile_handler.setLevel(self.log_level)
            logger.addHandler(logfile_handler)
        return

    # -------------------------------------------------------------------------
    @staticmethod
    def ignore(message, value=None):
        """
            Dummy to ignore messages below minimum severity level
        """

        return

    # -------------------------------------------------------------------------
    @staticmethod
    def recorder():
        """
            Return a recording facility for log messages
        """

        return S3LogRecorder()

    # -------------------------------------------------------------------------
    @staticmethod
    def _log(severity, message, value=None):
        """
            Log a message

            @param severity: the severity of the message
            @param message: the message
            @param value: message suffix (optional)
        """

        logger = logging.getLogger(__name__)
        logger.propagate = False

        msg = "%s: %s" % (message, value) if value else message

        # Default caller marker; replaced with file name, line number and
        # function name of the original caller if so configured
        extra = {"caller": "S3LOG"}
        if current.deployment_settings.get_log_caller_info():
            caller = logger.findCaller()
            if caller:
                extra = {"caller": "(%s %s %s)" % caller}
        logger.log(severity, msg, extra=extra)
        return

    # -------------------------------------------------------------------------
    @classmethod
    def _critical(cls, message, value=None):
        """
            Log a critical message (highest severity level),
            called via current.log.critical()

            @param message: the message
            @param value: message suffix (optional)
        """

        cls._log(logging.CRITICAL, message, value=value)

    # -------------------------------------------------------------------------
    @classmethod
    def _error(cls, message, value=None):
        """
            Log an error message,
            called via current.log.error()

            @param message: the message
            @param value: message suffix (optional)
        """

        cls._log(logging.ERROR, message, value=value)

    # -------------------------------------------------------------------------
    @classmethod
    def _warning(cls, message, value=None):
        """
            Log a warning message,
            called via current.log.warning()

            @param message: the message
            @param value: message suffix (optional)
        """

        cls._log(logging.WARNING, message, value=value)

    # -------------------------------------------------------------------------
    @classmethod
    def _info(cls, message, value=None):
        """
            Log an general info message,
            called via current.log.info()

            @param message: the message
            @param value: message suffix (optional)
        """

        cls._log(logging.INFO, message, value=value)

    # -------------------------------------------------------------------------
    @classmethod
    def _debug(cls, message, value=None):
        """
            Log a detailed debug message (lowest severity level),
            called via current.log.debug()

            @param message: the message
            @param value: message suffix (optional)
        """

        cls._log(logging.DEBUG, message, value=value)
# =============================================================================
class S3LogRecorder(object):
    """
        S3Log recorder, simple facility to record log messages for tests

        Start:
            recorder = current.log.recorder()

        Read out messages:
            messages = recorder.read()

        Stop recording:
            recorder.stop()

        Re-start recording:
            recorder.listen()

        Clear messages buffer:
            recorder.clear()
    """

    def __init__(self):

        # handler: the StreamHandler attached while recording, else None
        self.handler = None
        # strbuf: in-memory buffer receiving the recorded messages, else None
        self.strbuf = None

        self.listen()

    # -------------------------------------------------------------------------
    def listen(self):
        """ Start recording S3Log messages """

        if self.handler is not None:
            # Already recording
            return

        strbuf = self.strbuf
        if strbuf is None:
            # Prefer the C implementation on Python 2, fall back to the
            # pure-Python version, finally to io.StringIO on Python 3
            # (fix: was a bare except and had no Python-3 fallback)
            try:
                from cStringIO import StringIO
            except ImportError:
                try:
                    from StringIO import StringIO
                except ImportError:
                    from io import StringIO
            strbuf = StringIO()

        handler = logging.StreamHandler(strbuf)

        logger = logging.getLogger(__name__)
        logger.addHandler(handler)

        self.handler = handler
        self.strbuf = strbuf
        return

    # -------------------------------------------------------------------------
    def read(self):
        """ Read out recorded S3Log messages (recording continues) """

        strbuf = self.strbuf
        if strbuf is None:
            return ""
        handler = self.handler
        if handler is not None:
            # Make sure buffered records reach the string buffer
            handler.flush()
        return strbuf.getvalue()

    # -------------------------------------------------------------------------
    def stop(self):
        """ Stop recording S3Log messages (and return the messages) """

        handler = self.handler
        if handler is not None:
            logger = logging.getLogger(__name__)
            logger.removeHandler(handler)
            handler.close()
            self.handler = None
        strbuf = self.strbuf
        if strbuf is not None:
            return strbuf.getvalue()
        else:
            return ""

    # -------------------------------------------------------------------------
    def clear(self):
        """ Clear the messages buffer (recording state is retained) """

        if self.handler is not None:
            # Temporarily detach the handler so it does not write into
            # the buffer while we replace it
            on = True
            self.stop()
        else:
            on = False
        strbuf = self.strbuf
        if strbuf is not None:
            strbuf.close()
            self.strbuf = None
        if on:
            # Resume recording into a fresh buffer
            self.listen()
# END ========================================================================= | unknown | codeparrot/codeparrot-clean | ||
import pytest
from thefuck.rules.fab_command_not_found import match, get_new_command
from thefuck.types import Command
output = '''
Warning: Command(s) not found:
extenson
deloyp
Available commands:
update_config
prepare_extension
Template A string class for supporting $-substitutions.
deploy
glob Return a list of paths matching a pathname pattern.
install_web
set_version
'''
@pytest.mark.parametrize('command', [
    Command('fab extenson', output),
    Command('fab deloyp', output),
    Command('fab extenson deloyp', output)])
def test_match(command):
    # Rule must trigger for fab runs whose output reports unknown commands
    assert match(command)
@pytest.mark.parametrize('command', [
    Command('gulp extenson', output),
    Command('fab deloyp', '')])
def test_not_match(command):
    # No match for non-fab scripts, or for fab runs without any output
    assert not match(command)
@pytest.mark.parametrize('script, result', [
    ('fab extenson', 'fab prepare_extension'),
    ('fab extenson:version=2016',
     'fab prepare_extension:version=2016'),
    ('fab extenson:version=2016 install_web set_version:val=0.5.0',
     'fab prepare_extension:version=2016 install_web set_version:val=0.5.0'),
    ('fab extenson:version=2016 deloyp:beta=true -H the.fuck',
     'fab prepare_extension:version=2016 deploy:beta=true -H the.fuck'),
])
def test_get_new_command(script, result):
    """Each misspelled task name is replaced; args and flags are kept."""
    assert get_new_command(Command(script, output)) == result
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An estimator is a rule for calculating an estimate of a given quantity.
# Estimators
* **Estimators** are used to train and evaluate TensorFlow models.
They support regression and classification problems.
* **Classifiers** are functions that have discrete outcomes.
* **Regressors** are functions that predict continuous values.
## Choosing the correct estimator
* For **Regression** problems use one of the following:
* `LinearRegressor`: Uses linear model.
* `DNNRegressor`: Uses DNN.
* `DNNLinearCombinedRegressor`: Uses Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `Estimator`: Use when you need a custom model.
* For **Classification** problems use one of the following:
* `LinearClassifier`: Multiclass classifier using Linear model.
* `DNNClassifier`: Multiclass classifier using DNN.
* `DNNLinearCombinedClassifier`: Multiclass classifier using Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `SVM`: Binary classifier using linear SVMs.
* `LogisticRegressor`: Use when you need custom model for binary
classification.
* `Estimator`: Use when you need custom model for N class classification.
## Pre-canned Estimators
Pre-canned estimators are machine learning estimators premade for general
purpose problems. If you need more customization, you can always write your
own custom estimator as described in the section below.
Pre-canned estimators are tested and optimized for speed and quality.
### Define the feature columns
Here are some possible types of feature columns used as inputs to a pre-canned
estimator.
Feature columns may vary based on the estimator used. So you can see which
feature columns are fed to each estimator in the below section.
```python
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
```
### Create the pre-canned estimator
DNNClassifier, DNNRegressor, and DNNLinearCombinedClassifier are all pretty
similar to each other in how you use them. You can easily plug in an
optimizer and/or regularization to those estimators.
#### DNNClassifier
A classifier for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNClassifier(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNRegressor
A regressor for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNLinearCombinedClassifier
A classifier for TensorFlow Linear and DNN joined training models.
* Wide and deep model
* Multi class (2 by default)
```python
my_linear_features = [crossed_feature_a_x_b]
my_deep_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNLinearCombinedClassifier(
# Common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# Wide settings
linear_feature_columns=my_linear_features,
linear_optimizer=tf.train.FtrlOptimizer(...),
# Deep settings
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
```
#### LinearClassifier
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearClassifier(
feature_columns=my_features,
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### LinearRegressor
Train a linear regression model to predict a label value given observation of
feature values.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearRegressor(
feature_columns=my_features)
```
### LogisticRegressor
Logistic regression estimator for binary classification.
```python
# See tf.contrib.learn.Estimator(...) for details on model_fn structure
def my_model_fn(...):
pass
estimator = LogisticRegressor(model_fn=my_model_fn)
# Input builders
def input_fn_train:
pass
estimator.fit(input_fn=input_fn_train)
estimator.predict(x=x)
```
#### SVM - Support Vector Machine
Support Vector Machine (SVM) model for binary classification.
Currently only linear SVMs are supported.
```python
my_features = [real_feature, sparse_feature_a]
estimator = SVM(
example_id_column='example_id',
feature_columns=my_features,
l2_regularization=10.0)
```
#### DynamicRnnEstimator
An `Estimator` that uses a recurrent neural network with dynamic unrolling.
```python
problem_type = ProblemType.CLASSIFICATION # or REGRESSION
prediction_type = PredictionType.SINGLE_VALUE # or MULTIPLE_VALUE
estimator = DynamicRnnEstimator(problem_type,
prediction_type,
my_feature_columns)
```
### Use the estimator
There are two main functions for using estimators, one of which is for
training, and one of which is for evaluation.
You can specify different data sources for each one in order to use different
datasets for train and eval.
```python
# Input builders
def input_fn_train: # returns x, Y
...
estimator.fit(input_fn=input_fn_train)
def input_fn_eval: # returns x, Y
...
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
## Creating Custom Estimator
To create a custom `Estimator`, provide a function to `Estimator`'s
constructor that builds your model (`model_fn`, below):
```python
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
model_dir=model_dir) # Where the model's data (e.g., checkpoints)
# are saved.
```
Here is a skeleton of this function, with descriptions of its arguments and
return values in the accompanying tables:
```python
def model_fn(features, targets, mode, params):
# Logic to do the following:
# 1. Configure the model via TensorFlow operations
# 2. Define the loss function for training/evaluation
# 3. Define the training operation/optimizer
# 4. Generate predictions
return predictions, loss, train_op
```
You may use `mode` and check against
`tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}` to parameterize `model_fn`.
In the Further Reading section below, there is an end-to-end TensorFlow
tutorial for building a custom estimator.
## Additional Estimators
There is an additional estimator under
`tensorflow.contrib.factorization.python.ops`:
* Gaussian mixture model (GMM) clustering
## Further reading
For further reading, there are several tutorials with relevant topics,
including:
* [Overview of linear models](../../../tutorials/linear/overview.md)
* [Linear model tutorial](../../../tutorials/wide/index.md)
* [Wide and deep learning tutorial](../../../tutorials/wide_and_deep/index.md)
* [Custom estimator tutorial](../../../tutorials/estimators/index.md)
* [Building input functions](../../../tutorials/input_fn/index.md)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.constants import ProblemType
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.dynamic_rnn_estimator import DynamicRnnEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import GraphRewriteSpec
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
from tensorflow.contrib.learn.python.learn.estimators.head import binary_svm_head
from tensorflow.contrib.learn.python.learn.estimators.head import Head
from tensorflow.contrib.learn.python.learn.estimators.head import loss_only_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_class_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_label_head
from tensorflow.contrib.learn.python.learn.estimators.head import no_op_train_fn
from tensorflow.contrib.learn.python.learn.estimators.head import poisson_regression_head
from tensorflow.contrib.learn.python.learn.estimators.head import regression_head
from tensorflow.contrib.learn.python.learn.estimators.kmeans import KMeansClustering
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearEstimator
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey
from tensorflow.contrib.learn.python.learn.estimators.rnn_common import PredictionType
from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import Environment
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import TaskType
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import action_runner
from telemetry.page import page_test
from telemetry.timeline.model import TimelineModel
from telemetry.timeline import tracing_config
from telemetry.value import trace
from telemetry.web_perf import smooth_gesture_util
from telemetry.web_perf import timeline_interaction_record as tir_module
# Label of the interaction record that wraps all page actions when the
# controller auto-issues one.
RUN_SMOOTH_ACTIONS = 'RunSmoothAllActions'


class TimelineController(object):
  """Captures a Chrome trace around page actions and derives the timeline
  model and smoothness interaction records from it.

  Lifecycle: SetUp() -> Start() -> (page actions) -> Stop() -> CleanUp().
  """

  def __init__(self, enable_auto_issuing_record=True):
    super(TimelineController, self).__init__()
    # Tracing category filter string; must be assigned by the caller
    # before SetUp() is invoked.
    self.trace_categories = None
    self._model = None
    self._renderer_process = None
    self._smooth_records = []
    self._interaction = None
    # Whether to wrap all actions in an automatic RUN_SMOOTH_ACTIONS record.
    self._enable_auto_issuing_record = enable_auto_issuing_record

  def SetUp(self, page, tab):
    """Starts gathering timeline data.

    """
    # Resets these member variables in case this object is reused.
    self._model = None
    self._renderer_process = None
    if not tab.browser.platform.tracing_controller.IsChromeTracingSupported():
      raise Exception('Not supported')
    config = tracing_config.TracingConfig()
    config.tracing_category_filter.AddFilterString(self.trace_categories)
    # Synthetic delays requested by the page are injected via the filter.
    for delay in page.GetSyntheticDelayCategories():
      config.tracing_category_filter.AddSyntheticDelay(delay)
    config.enable_chrome_trace = True
    tab.browser.platform.tracing_controller.StartTracing(config)

  def Start(self, tab):
    # Start the smooth marker for all actions.
    runner = action_runner.ActionRunner(tab)
    if self._enable_auto_issuing_record:
      self._interaction = runner.CreateInteraction(
          RUN_SMOOTH_ACTIONS)
      self._interaction.Begin()

  def Stop(self, tab, results):
    # End the smooth marker for all actions.
    if self._enable_auto_issuing_record:
      self._interaction.End()
    # Stop tracing and attach the trace data to the results.
    timeline_data = tab.browser.platform.tracing_controller.StopTracing()
    results.AddValue(trace.TraceValue(
        results.current_page, timeline_data))
    self._model = TimelineModel(timeline_data)
    self._renderer_process = self._model.GetRendererProcessFromTabId(tab.id)
    renderer_thread = self.model.GetRendererThreadFromTabId(tab.id)

    # Collect all interaction records issued on the renderer thread,
    # separating the controller's own wrapper record from page-issued ones.
    run_smooth_actions_record = None
    self._smooth_records = []
    for event in renderer_thread.async_slices:
      if not tir_module.IsTimelineInteractionRecord(event.name):
        continue
      r = tir_module.TimelineInteractionRecord.FromAsyncEvent(event)
      if r.label == RUN_SMOOTH_ACTIONS:
        assert run_smooth_actions_record is None, (
            'TimelineController cannot issue more than 1 %s record' %
            RUN_SMOOTH_ACTIONS)
        run_smooth_actions_record = r
      else:
        # Page-issued record; widen it to cover any synthetic gesture.
        self._smooth_records.append(
            smooth_gesture_util.GetAdjustedInteractionIfContainGesture(
                self.model, r))

    # If there is no other smooth records, we make measurements on time range
    # marked by timeline_controller itself.
    # TODO(nednguyen): when crbug.com/239179 is marked fixed, makes sure that
    # page sets are responsible for issuing the markers themselves.
    if len(self._smooth_records) == 0 and run_smooth_actions_record:
      self._smooth_records = [run_smooth_actions_record]

    if len(self._smooth_records) == 0:
      raise page_test.Failure('No interaction record was created.')

  def CleanUp(self, platform):
    # Make sure tracing does not leak across stories on failure paths.
    if platform.tracing_controller.is_tracing_running:
      platform.tracing_controller.StopTracing()

  @property
  def model(self):
    # TimelineModel built in Stop(); None before that.
    return self._model

  @property
  def renderer_process(self):
    # Renderer process of the measured tab; None before Stop().
    return self._renderer_process

  @property
  def smooth_records(self):
    # Interaction records to measure; populated in Stop().
    return self._smooth_records
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_socialprofiles
# Purpose: Obtains social media profiles of any identified human names.
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 12/04/2014
# Copyright: (c) Steve Micallef 2014
# Licence: GPL
# -------------------------------------------------------------------------------
import random
import re
import time
import urllib
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
# Per-site search configuration:
#   site name -> [search query template ({0} = the human name, URL-encoded
#                 by handleEvent), regex extracting profile URLs from the
#                 search-engine result page]
sites = {
    # Search string to use, domain name the profile will sit on within
    # those search results.
    "Facebook": ['+intitle:%22{0}%22%20+site:facebook.com',
                 '"(https?://[a-z\.]*facebook.[a-z\.]+/[^\"<> ]+)"'],
    "Google+": ['+intitle:%22{0}%22%20+site:plus.google.com',
               '"(https?://plus.google.[a-z\.]+/\d+[^\"<>\/ ]+)"'],
    "LinkedIn": ['+intitle:%22{0}%22%20+site:linkedin.com',
                 '"(https?://[a-z\.]*linkedin.[a-z\.]+/[^\"<> ]+)"']
}
class sfp_socialprofiles(SpiderFootPlugin):
    """Social Media Profiles:Identify the social media profiles for human names identified."""
    # NOTE: the "Name:Description" docstring format above appears to be parsed
    # by the SpiderFoot framework; do not reformat it.
    # Default options
    opts = {
        'pages': 1,
        'method': "yahoo",
        'tighten': True
    }
    # Option descriptions
    optdescs = {
        'pages': "Number of search engine pages of identified profiles to iterate through.",
        'tighten': "Tighten results by expecting to find the keyword of the target domain mentioned in the social media profile page results?",
        'method': "Search engine to use: google, yahoo or bing."
    }
    # Target-domain keywords, computed lazily on the first event and cached
    # for the lifetime of the module instance (used by the 'tighten' filter).
    keywords = None
    # Names already looked up. Mutable class attribute, but setup() rebinds
    # it to a fresh list per scan, so instances do not share state in practice.
    results = list()
    def setup(self, sfc, userOpts=dict()):
        # Initialise with the SpiderFoot context and overlay user options
        # on top of the defaults.
        self.sf = sfc
        self.results = list()
        for opt in userOpts.keys():
            self.opts[opt] = userOpts[opt]
    # What events is this module interested in for input
    def watchedEvents(self):
        return ["HUMAN_NAME"]
    # What events this module produces
    # This is to support the end user in selecting modules based on events
    # produced.
    def producedEvents(self):
        return ["SOCIAL_MEDIA", "SEARCH_ENGINE_WEB_CONTENT"]
    def yahooCleaner(self, string):
        # re.sub() callback: Yahoo wraps result URLs in a redirector
        # (".../RU=<encoded-url>/RK=..."); decode the captured URL and
        # re-quote it so the site regexes can match it like a normal link.
        ret = "\"" + urllib.unquote(string.group(1)) + "\""
        return ret
    # Handle events sent to this module
    def handleEvent(self, event):
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data
        self.currentEventSrc = event
        self.sf.debug("Received event, " + eventName + ", from " + srcModuleName)
        # Don't look up stuff twice
        if eventData in self.results:
            self.sf.debug("Skipping " + eventData + " as already mapped.")
            return None
        else:
            self.results.append(eventData)
        # Lazily derive the target's domain keywords from its names and the
        # known internet TLD list; reused for all later events.
        if self.keywords is None:
            self.keywords = self.sf.domainKeywords(self.getTarget().getNames(),
                                                   self.opts['_internettlds'])
        for site in sites.keys():
            # Build the engine query from the site template and the name.
            s = unicode(sites[site][0]).format(eventData)
            searchStr = s.replace(" ", "%20")
            searchDom = sites[site][1]
            # NOTE(review): 'results' is only assigned in one of the three
            # branches below; an unrecognised 'method' value would raise a
            # NameError on the first site (or reuse the previous site's
            # results on later iterations) -- confirm 'method' is validated
            # upstream.
            if self.opts['method'].lower() == "google":
                results = self.sf.googleIterate(searchStr, dict(limit=self.opts['pages'],
                                                                useragent=self.opts['_useragent'],
                                                                timeout=self.opts['_fetchtimeout']))
            if self.opts['method'].lower() == "yahoo":
                results = self.sf.yahooIterate(searchStr, dict(limit=self.opts['pages'],
                                                               useragent=self.opts['_useragent'],
                                                               timeout=self.opts['_fetchtimeout']))
            if self.opts['method'].lower() == "bing":
                results = self.sf.bingIterate(searchStr, dict(limit=self.opts['pages'],
                                                              useragent=self.opts['_useragent'],
                                                              timeout=self.opts['_fetchtimeout']))
            if results is None:
                self.sf.info("No data returned from " + self.opts['method'] + ".")
                # NOTE(review): returning here skips the remaining sites for
                # this name as well, not just the current one.
                return None
            if self.checkForStop():
                return None
            # Random pause between engine queries to look less bot-like.
            pauseSecs = random.randint(4, 15)
            self.sf.debug("Pausing for " + str(pauseSecs))
            time.sleep(pauseSecs)
            for key in results.keys():
                instances = list()
                # Yahoo requires some additional parsing
                if self.opts['method'].lower() == "yahoo":
                    res = re.sub("RU=(.[^\/]+)\/RK=", self.yahooCleaner,
                                 results[key], 0)
                else:
                    res = results[key]
                # Extract candidate profile URLs for this site.
                matches = re.findall(searchDom, res, re.IGNORECASE)
                # NOTE(review): re.findall() never returns None, so this
                # check is always true; an empty list simply skips the loop.
                if matches is not None:
                    for match in matches:
                        # De-duplicate within this page of results.
                        if match in instances:
                            continue
                        else:
                            instances.append(match)
                        if self.checkForStop():
                            return None
                        # Fetch the profile page if we are checking
                        # for a firm relationship.
                        if self.opts['tighten']:
                            pres = self.sf.fetchUrl(match, timeout=self.opts['_fetchtimeout'],
                                                    useragent=self.opts['_useragent'])
                            if pres['content'] is None:
                                continue
                            else:
                                # Require at least one target keyword in the
                                # page, delimited by non-name characters,
                                # before reporting the profile.
                                found = False
                                for kw in self.keywords:
                                    if re.search("[^a-zA-Z\-\_]" + kw + "[^a-zA-Z\-\_]", pres['content'], re.IGNORECASE):
                                        found = True
                                if not found:
                                    continue
                        self.sf.info("Social Media Profile found at " + site + ": " + match)
                        evt = SpiderFootEvent("SOCIAL_MEDIA", match,
                                              self.__name__, event)
                        self.notifyListeners(evt)
                # Submit the bing results for analysis
                evt = SpiderFootEvent("SEARCH_ENGINE_WEB_CONTENT", res,
                                      self.__name__, event)
                self.notifyListeners(evt)
# End of sfp_socialprofiles class | unknown | codeparrot/codeparrot-clean | ||
"""Let's Encrypt compatibility test interfaces"""
import zope.interface
import letsencrypt.interfaces
# pylint: disable=no-self-argument,no-method-argument
class IPluginProxy(zope.interface.Interface):
    """Wraps a Let's Encrypt plugin"""
    http_port = zope.interface.Attribute(
        "The port to connect to on localhost for HTTP traffic")
    https_port = zope.interface.Attribute(
        "The port to connect to on localhost for HTTPS traffic")
    # zope.interface declarations: methods are specified without an implicit
    # self/cls receiver (the pylint suppressions at module level cover this).
    def add_parser_arguments(cls, parser):
        """Adds command line arguments needed by the parser"""
    def __init__(args):
        """Initializes the plugin with the given command line args"""
    def cleanup_from_tests():
        """Performs any necessary cleanup from running plugin tests.
        This is guaranteed to be called before the program exits.
        """
    def has_more_configs():
        """Returns True if there are more configs to test"""
    def load_config():
        """Loads the next config and returns its name"""
    def get_testable_domain_names():
        """Returns the domain names that can be used in testing"""
class IAuthenticatorProxy(IPluginProxy, letsencrypt.interfaces.IAuthenticator):
    """Wraps a Let's Encrypt authenticator for compatibility testing."""
class IInstallerProxy(IPluginProxy, letsencrypt.interfaces.IInstaller):
    """Wraps a Let's Encrypt installer for compatibility testing."""
    def get_all_names_answer():
        """Returns all names that should be found by the installer"""
class IConfiguratorProxy(IAuthenticatorProxy, IInstallerProxy):
    """Wraps a Let's Encrypt configurator (authenticator + installer)."""
"""
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid :ref:`calibration <calibration>` changes
predicted probabilities for a 3-class classification problem. Illustrated is
the standard 2-simplex, where the three corners correspond to the three
classes. Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
"""
# %%
# Data
# ----
# Below, we generate a classification dataset with 2000 samples, 2 features
# and 3 target classes. We then split the data as follows:
#
# * train: 600 samples (for training the classifier)
# * valid: 400 samples (for calibrating predicted probabilities)
# * test: 1000 samples
#
# Note that we also create `X_train_valid` and `y_train_valid`, which consists
# of both the train and valid subsets. This is used when we only want to train
# the classifier but not calibrate the predicted probabilities.
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from sklearn.datasets import make_blobs
# Fix the RNG so the generated blobs and the splits are reproducible.
np.random.seed(0)
X, y = make_blobs(
    n_samples=2000, n_features=2, centers=3, random_state=42, cluster_std=5.0
)
# Split: 600 train / 400 valid (calibration) / 1000 test.
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:1000], y[600:1000]
X_train_valid, y_train_valid = X[:1000], y[:1000]
X_test, y_test = X[1000:], y[1000:]
# %%
# Fitting and calibration
# -----------------------
#
# First, we will train a :class:`~sklearn.ensemble.RandomForestClassifier`
# with 25 base estimators (trees) on the concatenated train and validation
# data (1000 samples). This is the uncalibrated classifier.
from sklearn.ensemble import RandomForestClassifier
# Uncalibrated reference model, trained on train+valid (1000 samples).
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
# %%
# To train the calibrated classifier, we start with the same
# :class:`~sklearn.ensemble.RandomForestClassifier` but train it using only
# the train data subset (600 samples) then calibrate, with `method='sigmoid'`,
# using the valid data subset (400 samples) in a 2-stage process.
from sklearn.calibration import CalibratedClassifierCV
from sklearn.frozen import FrozenEstimator
# Calibrated model: fit the forest on the train split only, freeze it, then
# let CalibratedClassifierCV fit only the sigmoid calibrators on the
# held-out valid split.
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
cal_clf = CalibratedClassifierCV(FrozenEstimator(clf), method="sigmoid")
cal_clf.fit(X_valid, y_valid)
# %%
# Compare probabilities
# ---------------------
# Below we plot a 2-simplex with arrows showing the change in predicted
# probabilities of the test samples.
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
colors = ["r", "g", "b"]  # arrow colour per true class (1, 2, 3)
clf_probs = clf.predict_proba(X_test)
cal_clf_probs = cal_clf.predict_proba(X_test)
# Plot arrows
# Each arrow runs from the uncalibrated to the calibrated prediction, in
# (P(class 1), P(class 2)) coordinates; P(class 3) is implicit.
for i in range(clf_probs.shape[0]):
    plt.arrow(
        clf_probs[i, 0],
        clf_probs[i, 1],
        cal_clf_probs[i, 0] - clf_probs[i, 0],
        cal_clf_probs[i, 1] - clf_probs[i, 1],
        color=colors[y_test[i]],
        head_width=1e-2,
    )
# Plot perfect predictions, at each vertex
plt.plot([1.0], [0.0], "ro", ms=20, label="Class 1")
plt.plot([0.0], [1.0], "go", ms=20, label="Class 2")
plt.plot([0.0], [0.0], "bo", ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], "k", label="Simplex")
# Annotate points 6 points around the simplex, and mid point inside simplex
plt.annotate(
    r"($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)",
    xy=(1.0 / 3, 1.0 / 3),
    xytext=(1.0 / 3, 0.23),
    xycoords="data",
    arrowprops=dict(facecolor="black", shrink=0.05),
    horizontalalignment="center",
    verticalalignment="center",
)
plt.plot([1.0 / 3], [1.0 / 3], "ko", ms=5)
plt.annotate(
    r"($\frac{1}{2}$, $0$, $\frac{1}{2}$)",
    xy=(0.5, 0.0),
    xytext=(0.5, 0.1),
    xycoords="data",
    arrowprops=dict(facecolor="black", shrink=0.05),
    horizontalalignment="center",
    verticalalignment="center",
)
plt.annotate(
    r"($0$, $\frac{1}{2}$, $\frac{1}{2}$)",
    xy=(0.0, 0.5),
    xytext=(0.1, 0.5),
    xycoords="data",
    arrowprops=dict(facecolor="black", shrink=0.05),
    horizontalalignment="center",
    verticalalignment="center",
)
plt.annotate(
    r"($\frac{1}{2}$, $\frac{1}{2}$, $0$)",
    xy=(0.5, 0.5),
    xytext=(0.6, 0.6),
    xycoords="data",
    arrowprops=dict(facecolor="black", shrink=0.05),
    horizontalalignment="center",
    verticalalignment="center",
)
plt.annotate(
    r"($0$, $0$, $1$)",
    xy=(0, 0),
    xytext=(0.1, 0.1),
    xycoords="data",
    arrowprops=dict(facecolor="black", shrink=0.05),
    horizontalalignment="center",
    verticalalignment="center",
)
plt.annotate(
    r"($1$, $0$, $0$)",
    xy=(1, 0),
    xytext=(1, 0.1),
    xycoords="data",
    arrowprops=dict(facecolor="black", shrink=0.05),
    horizontalalignment="center",
    verticalalignment="center",
)
plt.annotate(
    r"($0$, $1$, $0$)",
    xy=(0, 1),
    xytext=(0.1, 1),
    xycoords="data",
    arrowprops=dict(facecolor="black", shrink=0.05),
    horizontalalignment="center",
    verticalalignment="center",
)
# Add grid
# Light iso-probability lines subdividing the simplex at 0.1 steps.
plt.grid(False)
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
    plt.plot([0, x], [x, 0], "k", alpha=0.2)
    plt.plot([0, 0 + (1 - x) / 2], [x, x + (1 - x) / 2], "k", alpha=0.2)
    plt.plot([x, x + (1 - x) / 2], [0, 0 + (1 - x) / 2], "k", alpha=0.2)
plt.title("Change of predicted probabilities on test samples after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
_ = plt.legend(loc="best")
# %%
# In the figure above, each vertex of the simplex represents
# a perfectly predicted class (e.g., 1, 0, 0). The mid point
# inside the simplex represents predicting the three classes with equal
# probability (i.e., 1/3, 1/3, 1/3). Each arrow starts at the
# uncalibrated probabilities and ends with the arrow head at the calibrated
# probability. The color of the arrow represents the true class of that test
# sample.
#
# The uncalibrated classifier is overly confident in its predictions and
# incurs a large :ref:`log loss <log_loss>`. The calibrated classifier incurs
# a lower :ref:`log loss <log_loss>` due to two factors. First, notice in the
# figure above that the arrows generally point away from the edges of the
# simplex, where the probability of one class is 0. Second, a large proportion
# of the arrows point towards the true class, e.g., green arrows (samples where
# the true class is 'green') generally point towards the green vertex. This
# results in fewer over-confident, 0 predicted probabilities and at the same
# time an increase in the predicted probabilities of the correct class.
# Thus, the calibrated classifier produces more accurate predicted probabilities
# that incur a lower :ref:`log loss <log_loss>`
#
# We can show this objectively by comparing the :ref:`log loss <log_loss>` of
# the uncalibrated and calibrated classifiers on the predictions of the 1000
# test samples. Note that an alternative would have been to increase the number
# of base estimators (trees) of the
# :class:`~sklearn.ensemble.RandomForestClassifier` which would have resulted
# in a similar decrease in :ref:`log loss <log_loss>`.
from sklearn.metrics import log_loss
# Lower log-loss indicates better probabilistic predictions on the test set.
loss = log_loss(y_test, clf_probs)
cal_loss = log_loss(y_test, cal_clf_probs)
print("Log-loss of:")
print(f" - uncalibrated classifier: {loss:.3f}")
print(f" - calibrated classifier: {cal_loss:.3f}")
# %%
# We can also assess calibration with the Brier score for probabilistics predictions
# (lower is better, possible range is [0, 2]):
from sklearn.metrics import brier_score_loss
# Brier score on the same test predictions (lower is better; see the note
# above on its [0, 2] range for this 3-class problem).
loss = brier_score_loss(y_test, clf_probs)
cal_loss = brier_score_loss(y_test, cal_clf_probs)
# Consistency fix: end the heading with a colon, matching "Log-loss of:".
print("Brier score of:")
print(f" - uncalibrated classifier: {loss:.3f}")
print(f" - calibrated classifier: {cal_loss:.3f}")
# %%
# According to the Brier score, the calibrated classifier is not better than
# the original model.
#
# Finally we generate a grid of possible uncalibrated probabilities over
# the 2-simplex, compute the corresponding calibrated probabilities and
# plot arrows for each. The arrows are colored according to the highest
# uncalibrated probability. This illustrates the learned calibration map:
plt.figure(figsize=(10, 10))
# Generate grid of probability values
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
# Keep only the grid points that lie inside the unit simplex (p2 >= 0).
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
# Use the three class-wise calibrators to compute calibrated probabilities
calibrated_classifier = cal_clf.calibrated_classifiers_[0]
prediction = np.vstack(
    [
        calibrator.predict(this_p)
        for calibrator, this_p in zip(calibrated_classifier.calibrators, p.T)
    ]
).T
# Re-normalize the calibrated predictions to make sure they stay inside the
# simplex. This same renormalization step is performed internally by the
# predict method of CalibratedClassifierCV on multiclass problems.
prediction /= prediction.sum(axis=1)[:, None]
# Plot changes in predicted probabilities induced by the calibrators
for i in range(prediction.shape[0]):
    plt.arrow(
        p[i, 0],
        p[i, 1],
        prediction[i, 0] - p[i, 0],
        prediction[i, 1] - p[i, 1],
        head_width=1e-2,
        color=colors[np.argmax(p[i])],
    )
# Plot the boundaries of the unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], "k", label="Simplex")
plt.grid(False)
# Same iso-probability grid as the first figure.
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
    plt.plot([0, x], [x, 0], "k", alpha=0.2)
    plt.plot([0, 0 + (1 - x) / 2], [x, x + (1 - x) / 2], "k", alpha=0.2)
    plt.plot([x, x + (1 - x) / 2], [0, 0 + (1 - x) / 2], "k", alpha=0.2)
plt.title("Learned sigmoid calibration map")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
# %%
# One can observe that, on average, the calibrator is pushing highly confident
# predictions away from the boundaries of the simplex while simultaneously
# moving uncertain predictions towards one of three modes, one for each class.
# We can also observe that the mapping is not symmetric. Furthermore some
# arrows seem to cross class assignment boundaries which is not necessarily
# what one would expect from a calibration map as it means that some predicted
# classes will change after calibration.
#
# All in all, the One-vs-Rest multiclass-calibration strategy implemented in
# `CalibratedClassifierCV` should not be trusted blindly. | python | github | https://github.com/scikit-learn/scikit-learn | examples/calibration/plot_calibration_multiclass.py |
it("should allow to import an async module twice", async function () {
	// Await the async module a second time and check its default export.
	const { default: value } = await require("./main");
	expect(value).toBe("hello world, hello world");
});
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
# Minimum CheckM completeness (%) for a genome to pass the default filter.
DEFAULT_CHECKM_COMPLETENESS = 50.0
# Maximum CheckM contamination (%) tolerated by the default filter.
DEFAULT_CHECKM_CONTAMINATION = 10.0
# Minimum composite quality score required by default.
DEFAULT_QUALITY_THRESHOLD = 50.0
# Weight applied to contamination in the composite quality score
# (presumably quality = completeness - weight * contamination; confirm
# against the consumer of these constants).
DEFAULT_QUALITY_WEIGHT = 5
# DEPRECATED WITH NEW METHOD
#AAI_CLUSTERING_THRESHOLD = 0.995
# Amino-acid identity threshold used when clustering genomes.
AAI_CLUSTERING_THRESHOLD = 0.85
# FastANI (average nucleotide identity) threshold for species clustering.
FASTANI_CLUSTERING_THRESHOLD = 0.95
# Minimum score for assigning a domain (Bacteria/Archaea) -- TODO confirm units.
DEFAULT_DOMAIN_THRESHOLD = 10.0
# PARAMETERS FOR EXCEPTION LIST CREATION
# Filter one: lower completeness bar with relaxed contamination/quality.
EXCEPTION_FILTER_ONE_CHECKM_COMPLETENESS = 50.0
EXCEPTION_FILTER_ONE_CHECKM_CONTAMINATION = 15.0
EXCEPTION_FILTER_ONE_QUALITY_THRESHOLD = 30.0
# Filter two: stricter completeness bar with default contamination/quality.
EXCEPTION_FILTER_TWO_CHECKM_COMPLETENESS = 80.0
EXCEPTION_FILTER_TWO_CHECKM_CONTAMINATION = 10.0
EXCEPTION_FILTER_TWO_QUALITY_THRESHOLD = 50.0
# mssql/pyodbc.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+pyodbc
:name: PyODBC
:dbapi: pyodbc
:connectstring: mssql+pyodbc://<username>:<password>@<dsnname>
:url: http://pypi.python.org/pypi/pyodbc/
Additional Connection Examples
-------------------------------
Examples of pyodbc connection string URLs:
* ``mssql+pyodbc://mydsn`` - connects using the specified DSN named ``mydsn``.
The connection string that is created will appear like::
dsn=mydsn;Trusted_Connection=Yes
* ``mssql+pyodbc://user:pass@mydsn`` - connects using the DSN named
``mydsn`` passing in the ``UID`` and ``PWD`` information. The
connection string that is created will appear like::
dsn=mydsn;UID=user;PWD=pass
* ``mssql+pyodbc://user:pass@mydsn/?LANGUAGE=us_english`` - connects
using the DSN named ``mydsn`` passing in the ``UID`` and ``PWD``
information, plus the additional connection configuration option
``LANGUAGE``. The connection string that is created will appear
like::
dsn=mydsn;UID=user;PWD=pass;LANGUAGE=us_english
* ``mssql+pyodbc://user:pass@host/db`` - connects using a connection
that would appear like::
DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass
* ``mssql+pyodbc://user:pass@host:123/db`` - connects using a connection
string which includes the port
information using the comma syntax. This will create the following
connection string::
DRIVER={SQL Server};Server=host,123;Database=db;UID=user;PWD=pass
* ``mssql+pyodbc://user:pass@host/db?port=123`` - connects using a connection
string that includes the port
information as a separate ``port`` keyword. This will create the
following connection string::
DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass;port=123
* ``mssql+pyodbc://user:pass@host/db?driver=MyDriver`` - connects using a
connection string that includes a custom ODBC driver name. This will create
the following connection string::
DRIVER={MyDriver};Server=host;Database=db;UID=user;PWD=pass
If you require a connection string that is outside the options
presented above, use the ``odbc_connect`` keyword to pass in a
urlencoded connection string. What gets passed in will be urldecoded
and passed directly.
For example::
mssql+pyodbc:///?odbc_connect=dsn%3Dmydsn%3BDatabase%3Ddb
would create the following connection string::
dsn=mydsn;Database=db
Encoding your connection string can be easily accomplished through
the python shell. For example::
>>> import urllib
>>> urllib.quote_plus('dsn=mydsn;Database=db')
'dsn%3Dmydsn%3BDatabase%3Ddb'
Unicode Binds
-------------
The current state of PyODBC on a unix backend with FreeTDS and/or
EasySoft is poor regarding unicode; different OS platforms and versions of
UnixODBC versus IODBC versus FreeTDS/EasySoft versus PyODBC itself
dramatically alter how strings are received. The PyODBC dialect attempts to
use all the information it knows to determine whether or not a Python unicode
literal can be passed directly to the PyODBC driver or not; while SQLAlchemy
can encode these to bytestrings first, some users have reported that PyODBC
mis-handles bytestrings for certain encodings and requires a Python unicode
object, while the author has observed widespread cases where a Python unicode
is completely misinterpreted by PyODBC, particularly when dealing with
the information schema tables used in table reflection, and the value
must first be encoded to a bytestring.
It is for this reason that whether or not unicode literals for bound
parameters be sent to PyODBC can be controlled using the
``supports_unicode_binds`` parameter to ``create_engine()``. When
left at its default of ``None``, the PyODBC dialect will use its
best guess as to whether or not the driver deals with unicode literals
well. When ``False``, unicode literals will be encoded first, and when
``True`` unicode literals will be passed straight through. This is an interim
flag that hopefully should not be needed when the unicode situation stabilizes
for unix + PyODBC.
.. versionadded:: 0.7.7
``supports_unicode_binds`` parameter to ``create_engine()``\ .
"""
from .base import MSExecutionContext, MSDialect
from ...connectors.pyodbc import PyODBCConnector
from ... import types as sqltypes, util
import decimal
class _ms_numeric_pyodbc(object):
    """Turns Decimals with adjusted() < 0 or > 7 into strings.
    The routines here are needed for older pyodbc versions
    as well as current mxODBC versions.
    """
    def bind_processor(self, dialect):
        # Wrap the parent type's bind processor; only dialects flagged with
        # _need_decimal_fix (pyodbc < 2.1.8) need the string workaround.
        super_process = super(_ms_numeric_pyodbc, self).\
            bind_processor(dialect)
        if not dialect._need_decimal_fix:
            return super_process
        def process(value):
            # Convert only Decimal values whose exponent falls outside the
            # range the driver handles; everything else is delegated.
            if self.asdecimal and \
                    isinstance(value, decimal.Decimal):
                adjusted = value.adjusted()
                if adjusted < 0:
                    return self._small_dec_to_string(value)
                elif adjusted > 7:
                    return self._large_dec_to_string(value)
            if super_process:
                return super_process(value)
            else:
                return value
        return process
    # these routines needed for older versions of pyodbc.
    # as of 2.1.8 this logic is integrated.
    def _small_dec_to_string(self, value):
        # Render e.g. Decimal('1.23E-4') as '0.000123' -- fixed-point form
        # without an exponent, which old drivers could not parse.
        return "%s0.%s%s" % (
            (value < 0 and '-' or ''),
            '0' * (abs(value.adjusted()) - 1),
            "".join([str(nint) for nint in value.as_tuple()[1]]))
    def _large_dec_to_string(self, value):
        _int = value.as_tuple()[1]
        if 'E' in str(value):
            # Exponential form: expand the trailing zeros explicitly.
            result = "%s%s%s" % (
                (value < 0 and '-' or ''),
                "".join([str(s) for s in _int]),
                "0" * (value.adjusted() - (len(_int) - 1)))
        else:
            if (len(_int) - 1) > value.adjusted():
                # Digits extend past the decimal point: split around it.
                result = "%s%s.%s" % (
                    (value < 0 and '-' or ''),
                    "".join(
                        [str(s) for s in _int][0:value.adjusted() + 1]),
                    "".join(
                        [str(s) for s in _int][value.adjusted() + 1:]))
            else:
                result = "%s%s" % (
                    (value < 0 and '-' or ''),
                    "".join(
                        [str(s) for s in _int][0:value.adjusted() + 1]))
        return result
class _MSNumeric_pyodbc(_ms_numeric_pyodbc, sqltypes.Numeric):
    """NUMERIC type with the old-pyodbc Decimal workaround mixed in."""
    pass
class _MSFloat_pyodbc(_ms_numeric_pyodbc, sqltypes.Float):
    """FLOAT type with the old-pyodbc Decimal workaround mixed in."""
    pass
class MSExecutionContext_pyodbc(MSExecutionContext):
    # True when "; select scope_identity()" was appended to the current
    # INSERT statement by pre_exec().
    _embedded_scope_identity = False
    def pre_exec(self):
        """where appropriate, issue "select scope_identity()" in the same
        statement.
        Background on why "scope_identity()" is preferable to "@@identity":
        http://msdn.microsoft.com/en-us/library/ms190315.aspx
        Background on why we attempt to embed "scope_identity()" into the same
        statement as the INSERT:
        http://code.google.com/p/pyodbc/wiki/FAQs#How_do_I_retrieve_autogenerated/identity_values?
        """
        super(MSExecutionContext_pyodbc, self).pre_exec()
        # don't embed the scope_identity select into an
        # "INSERT .. DEFAULT VALUES"
        if self._select_lastrowid and \
                self.dialect.use_scope_identity and \
                len(self.parameters[0]):
            self._embedded_scope_identity = True
            self.statement += "; select scope_identity()"
    def post_exec(self):
        if self._embedded_scope_identity:
            # Fetch the last inserted id from the manipulated statement
            # We may have to skip over a number of result sets with
            # no data (due to triggers, etc.)
            while True:
                try:
                    # fetchall() ensures the cursor is consumed
                    # without closing it (FreeTDS particularly)
                    row = self.cursor.fetchall()[0]
                    break
                except self.dialect.dbapi.Error as e:
                    # no way around this - nextset() consumes the previous set
                    # so we need to just keep flipping
                    # NOTE(review): 'e' is unused, and if every result set
                    # raises, this loops until nextset() itself fails --
                    # confirm the driver guarantees eventual termination.
                    self.cursor.nextset()
            self._lastrowid = int(row[0])
        else:
            super(MSExecutionContext_pyodbc, self).post_exec()
class MSDialect_pyodbc(PyODBCConnector, MSDialect):
    """SQL Server dialect for the pyodbc DBAPI."""
    execution_ctx_cls = MSExecutionContext_pyodbc
    pyodbc_driver_name = 'SQL Server'
    # Swap in the Decimal-workaround numeric types (see _ms_numeric_pyodbc).
    colspecs = util.update_copy(
        MSDialect.colspecs,
        {
            sqltypes.Numeric: _MSNumeric_pyodbc,
            sqltypes.Float: _MSFloat_pyodbc
        }
    )
    def __init__(self, description_encoding=None, **params):
        # Bug fix: the original tested "'description_encoding' in params",
        # but that keyword is consumed by the named parameter above and so
        # never appears in **params -- a caller-supplied value was silently
        # discarded.  Apply the argument directly; passing None (the default)
        # keeps the class-level behavior unchanged.
        if description_encoding is not None:
            self.description_encoding = description_encoding
        super(MSDialect_pyodbc, self).__init__(**params)
        # scope_identity() retrieval requires cursor.nextset() support.
        self.use_scope_identity = self.use_scope_identity and \
            self.dbapi and \
            hasattr(self.dbapi.Cursor, 'nextset')
        # pyodbc < 2.1.8 mishandles very small/large Decimals; see
        # _ms_numeric_pyodbc for the workaround this flag enables.
        self._need_decimal_fix = self.dbapi and \
            self._dbapi_version() < (2, 1, 8)
dialect = MSDialect_pyodbc
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.expression.spel.support;
import java.beans.PropertyEditor;
import org.springframework.beans.BeansException;
import org.springframework.beans.SimpleTypeConverter;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.BeanFactoryAware;
import org.springframework.beans.factory.config.ConfigurableBeanFactory;
import org.springframework.core.convert.ConversionService;
import org.springframework.core.convert.TypeDescriptor;
import org.springframework.core.convert.support.DefaultConversionService;
import org.springframework.expression.TypeConverter;
import org.springframework.util.ClassUtils;
/**
 * Copied from Spring Integration for purposes of reproducing
 * {@link Spr7538Tests}.
 *
 * <p>Bridges a {@link ConversionService} with JavaBeans
 * {@link PropertyEditor}-based conversion: the ConversionService is tried
 * first, and String-involving conversions fall back to the delegate's
 * editors. Kept as-is intentionally (it reproduces upstream behavior).
 */
class BeanFactoryTypeConverter implements TypeConverter, BeanFactoryAware {
	// Fallback converter used for String <-> Object conversions via
	// PropertyEditors when the ConversionService cannot convert.
	private SimpleTypeConverter delegate = new SimpleTypeConverter();
	// Shared default ConversionService, created lazily on first construction.
	private static ConversionService defaultConversionService;
	private ConversionService conversionService;
	public BeanFactoryTypeConverter() {
		// NOTE(review): synchronizing on 'this' does not guard the static
		// field against concurrent construction of *different* instances;
		// confirm single-threaded use or synchronize on the class instead.
		synchronized (this) {
			if (defaultConversionService == null) {
				defaultConversionService = new DefaultConversionService();
			}
		}
		this.conversionService = defaultConversionService;
	}
	public BeanFactoryTypeConverter(ConversionService conversionService) {
		this.conversionService = conversionService;
	}
	public void setConversionService(ConversionService conversionService) {
		this.conversionService = conversionService;
	}
	@Override
	public void setBeanFactory(BeanFactory beanFactory) throws BeansException {
		// Reuse the bean factory's own SimpleTypeConverter (and thus its
		// registered custom editors) when one is available.
		if (beanFactory instanceof ConfigurableBeanFactory cbf &&
				cbf.getTypeConverter() instanceof SimpleTypeConverter simpleTypeConverter) {
			this.delegate = simpleTypeConverter;
		}
	}
	public boolean canConvert(Class<?> sourceType, Class<?> targetType) {
		if (conversionService.canConvert(sourceType, targetType)) {
			return true;
		}
		if (!String.class.isAssignableFrom(sourceType) && !String.class.isAssignableFrom(targetType)) {
			// PropertyEditor cannot convert non-Strings
			return false;
		}
		// Exactly one side is a String: check for an editor on the other side.
		if (!String.class.isAssignableFrom(sourceType)) {
			return delegate.findCustomEditor(sourceType, null) != null || delegate.getDefaultEditor(sourceType) != null;
		}
		return delegate.findCustomEditor(targetType, null) != null || delegate.getDefaultEditor(targetType) != null;
	}
	@Override
	public boolean canConvert(TypeDescriptor sourceTypeDescriptor, TypeDescriptor targetTypeDescriptor) {
		if (conversionService.canConvert(sourceTypeDescriptor, targetTypeDescriptor)) {
			return true;
		}
		Class<?> sourceType = sourceTypeDescriptor.getObjectType();
		Class<?> targetType = targetTypeDescriptor.getObjectType();
		return canConvert(sourceType, targetType);
	}
	@Override
	public Object convertValue(Object value, TypeDescriptor sourceType, TypeDescriptor targetType) {
		if (ClassUtils.isVoidType(targetType.getType())) {
			return null;
		}
		if (conversionService.canConvert(sourceType, targetType)) {
			return conversionService.convert(value, sourceType, targetType);
		}
		if (!String.class.isAssignableFrom(sourceType.getType())) {
			// NOTE(review): findCustomEditor may return null here (the
			// canConvert overloads guard against this, convertValue does
			// not), which would raise a NullPointerException.
			PropertyEditor editor = delegate.findCustomEditor(sourceType.getType(), null);
			editor.setValue(value);
			return editor.getAsText();
		}
		return delegate.convertIfNecessary(value, targetType.getType());
	}
}
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.tensor_forest.ops.finished_nodes_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow # pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.python.ops import training_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class FinishedNodesTest(test_util.TensorFlowTestCase):
  """Unit tests for the tensor_forest finished_nodes training op."""
  def setUp(self):
    # Leaf node ids under consideration.
    self.leaves = [1, 3, 4]
    # Maps node id -> accumulator slot (-1 means no accumulator assigned).
    self.node_map = [-1, -1, -1, 0, 1, -1]
    # Per-accumulator split counts fed to the op; presumably a leaf is
    # "finished" once its counts reach num_split_after_samples -- confirm
    # against the finished_nodes op implementation.
    self.pcw_total_splits = [[6, 3, 3], [11, 4, 7], [0, 0, 0], [0, 0, 0],
                             [0, 0, 0]]
    self.ops = training_ops.Load()
  def testSimple(self):
    with self.test_session():
      finished = self.ops.finished_nodes(self.leaves, self.node_map,
                                         self.pcw_total_splits,
                                         num_split_after_samples=10)
      # Only leaf 4 (accumulator 1) is reported as finished.
      self.assertAllEqual([4], finished.eval())
  def testNoAccumulators(self):
    # With no node mapped to an accumulator, nothing can finish.
    with self.test_session():
      finished = self.ops.finished_nodes(self.leaves, [-1] * 6,
                                         self.pcw_total_splits,
                                         num_split_after_samples=10)
      self.assertAllEqual([], finished.eval())
  def testBadInput(self):
    # A 2-D leaf tensor must be rejected by the op's shape validation.
    with self.test_session():
      with self.assertRaisesOpError(
          'leaf_tensor should be one-dimensional'):
        finished = self.ops.finished_nodes([self.leaves], self.node_map,
                                           self.pcw_total_splits,
                                           num_split_after_samples=10)
        self.assertAllEqual([], finished.eval())
if __name__ == '__main__':
googletest.main() | unknown | codeparrot/codeparrot-clean | ||
<?php
namespace Illuminate\Tests\Database;
use Illuminate\Database\Capsule\Manager as DB;
use Illuminate\Database\Eloquent\Factories\Factory;
use Illuminate\Database\Eloquent\Factories\HasFactory;
use Illuminate\Database\Eloquent\Model;
use Illuminate\Database\Eloquent\Model as Eloquent;
use Illuminate\Database\Eloquent\Relations\BelongsTo;
use Illuminate\Database\Eloquent\Relations\HasOne;
use PHPUnit\Framework\TestCase;
class DatabaseEloquentInverseRelationHasOneTest extends TestCase
{
    /**
     * Setup the database schema.
     *
     * Boots Eloquent on an in-memory SQLite connection so each test runs
     * against a fresh, isolated database, then creates the test tables.
     *
     * @return void
     */
    protected function setUp(): void
    {
        $db = new DB;
        $db->addConnection([
            'driver' => 'sqlite',
            'database' => ':memory:',
        ]);
        $db->bootEloquent();
        $db->setAsGlobal();
        $this->createSchema();
    }
protected function createSchema()
{
$this->schema()->create('test_parent', function ($table) {
$table->increments('id');
$table->timestamps();
});
$this->schema()->create('test_child', function ($table) {
$table->increments('id');
$table->foreignId('parent_id')->unique();
$table->timestamps();
});
}
/**
* Tear down the database schema.
*
* @return void
*/
protected function tearDown(): void
{
$this->schema()->drop('test_parent');
$this->schema()->drop('test_child');
parent::tearDown();
}
public function testHasOneInverseRelationIsProperlySetToParentWhenLazyLoaded()
{
HasOneInverseChildModel::factory(5)->create();
$models = HasOneInverseParentModel::all();
foreach ($models as $parent) {
$this->assertFalse($parent->relationLoaded('child'));
$child = $parent->child;
$this->assertTrue($child->relationLoaded('parent'));
$this->assertSame($parent, $child->parent);
}
}
public function testHasOneInverseRelationIsProperlySetToParentWhenEagerLoaded()
{
HasOneInverseChildModel::factory(5)->create();
$models = HasOneInverseParentModel::with('child')->get();
foreach ($models as $parent) {
$child = $parent->child;
$this->assertTrue($child->relationLoaded('parent'));
$this->assertSame($parent, $child->parent);
}
}
public function testHasOneInverseRelationIsProperlySetToParentWhenMaking()
{
$parent = HasOneInverseParentModel::create();
$child = $parent->child()->make();
$this->assertTrue($child->relationLoaded('parent'));
$this->assertSame($parent, $child->parent);
}
public function testHasOneInverseRelationIsProperlySetToParentWhenCreating()
{
$parent = HasOneInverseParentModel::create();
$child = $parent->child()->create();
$this->assertTrue($child->relationLoaded('parent'));
$this->assertSame($parent, $child->parent);
}
public function testHasOneInverseRelationIsProperlySetToParentWhenCreatingQuietly()
{
$parent = HasOneInverseParentModel::create();
$child = $parent->child()->createQuietly();
$this->assertTrue($child->relationLoaded('parent'));
$this->assertSame($parent, $child->parent);
}
public function testHasOneInverseRelationIsProperlySetToParentWhenForceCreating()
{
$parent = HasOneInverseParentModel::create();
$child = $parent->child()->forceCreate();
$this->assertTrue($child->relationLoaded('parent'));
$this->assertSame($parent, $child->parent);
}
public function testHasOneInverseRelationIsProperlySetToParentWhenSaving()
{
$parent = HasOneInverseParentModel::create();
$child = HasOneInverseChildModel::make();
$this->assertFalse($child->relationLoaded('parent'));
$parent->child()->save($child);
$this->assertTrue($child->relationLoaded('parent'));
$this->assertSame($parent, $child->parent);
}
public function testHasOneInverseRelationIsProperlySetToParentWhenSavingQuietly()
{
$parent = HasOneInverseParentModel::create();
$child = HasOneInverseChildModel::make();
$this->assertFalse($child->relationLoaded('parent'));
$parent->child()->saveQuietly($child);
$this->assertTrue($child->relationLoaded('parent'));
$this->assertSame($parent, $child->parent);
}
public function testHasOneInverseRelationIsProperlySetToParentWhenUpdating()
{
$parent = HasOneInverseParentModel::create();
$child = HasOneInverseChildModel::factory()->create();
$this->assertTrue($parent->isNot($child->parent));
$parent->child()->save($child);
$this->assertTrue($parent->is($child->parent));
$this->assertSame($parent, $child->parent);
}
/**
* Helpers...
*/
/**
* Get a database connection instance.
*
* @return \Illuminate\Database\Connection
*/
protected function connection($connection = 'default')
{
return Eloquent::getConnectionResolver()->connection($connection);
}
/**
* Get a schema builder instance.
*
* @return \Illuminate\Database\Schema\Builder
*/
protected function schema($connection = 'default')
{
return $this->connection($connection)->getSchemaBuilder();
}
}
/** Parent side of the tested relation; backed by the "test_parent" table. */
class HasOneInverseParentModel extends Model
{
    use HasFactory;
    protected $table = 'test_parent';
    protected $fillable = ['id'];
    protected static function newFactory()
    {
        return new HasOneInverseParentModelFactory();
    }
    // inverse('parent') is the feature under test: it makes Eloquent hydrate
    // the child's "parent" relation with this instance whenever the child is
    // retrieved or persisted through this relation.
    public function child(): HasOne
    {
        return $this->hasOne(HasOneInverseChildModel::class, 'parent_id')->inverse('parent');
    }
}
/** Factory for the parent model; no attributes beyond the defaults. */
class HasOneInverseParentModelFactory extends Factory
{
    protected $model = HasOneInverseParentModel::class;
    public function definition()
    {
        return [];
    }
}
/** Child side of the tested relation; backed by the "test_child" table. */
class HasOneInverseChildModel extends Model
{
    use HasFactory;
    protected $table = 'test_child';
    protected $fillable = ['id', 'parent_id'];
    protected static function newFactory()
    {
        return new HasOneInverseChildModelFactory();
    }
    // The BelongsTo relation that HasOne::inverse('parent') hydrates.
    public function parent(): BelongsTo
    {
        return $this->belongsTo(HasOneInverseParentModel::class, 'parent_id');
    }
}
/** Factory for the child model; creates a fresh parent for every child. */
class HasOneInverseChildModelFactory extends Factory
{
    protected $model = HasOneInverseChildModel::class;
    public function definition()
    {
        return [
            // Nested factory: each created child gets its own new parent row.
            'parent_id' => HasOneInverseParentModel::factory(),
        ];
    }
}
/*
* Copyright (c) 2007 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockitousage.bugs.injection;
import static org.junit.Assert.*;
import java.lang.reflect.Field;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnitRunner;
@RunWith(MockitoJUnitRunner.class)
public class InjectionByTypeShouldFirstLookForExactTypeThenAncestorTest {
private static final Object REFERENCE = new Object();
@Mock private Bean mockedBean;
@InjectMocks private Service illegalInjectionExample = new Service();
@InjectMocks
private ServiceWithReversedOrder reversedOrderService = new ServiceWithReversedOrder();
@InjectMocks private WithNullObjectField withNullObjectField = new WithNullObjectField();
@Test
public void just_for_information_fields_are_read_in_declaration_order_see_Service() {
Field[] declaredFields = Service.class.getDeclaredFields();
assertEquals("mockShouldNotGoInHere", declaredFields[0].getName());
assertEquals("mockShouldGoInHere", declaredFields[1].getName());
}
@Test
public void mock_should_be_injected_once_and_in_the_best_matching_type() {
assertSame(REFERENCE, illegalInjectionExample.mockShouldNotGoInHere);
assertSame(mockedBean, illegalInjectionExample.mockShouldGoInHere);
}
@Test
public void should_match_be_consistent_regardless_of_order() {
assertSame(REFERENCE, reversedOrderService.mockShouldNotGoInHere);
assertSame(mockedBean, reversedOrderService.mockShouldGoInHere);
}
@Test
public void should_inject_the_mock_only_once_and_in_the_correct_type() {
assertNull(withNullObjectField.keepMeNull);
assertSame(mockedBean, withNullObjectField.injectMePlease);
}
public static class Bean {}
public static class Service {
public final Object mockShouldNotGoInHere = REFERENCE;
public Bean mockShouldGoInHere;
}
public static class ServiceWithReversedOrder {
public Bean mockShouldGoInHere;
public final Object mockShouldNotGoInHere = REFERENCE;
}
class WithNullObjectField {
Bean injectMePlease;
Object keepMeNull = null;
}
} | java | github | https://github.com/mockito/mockito | mockito-core/src/test/java/org/mockitousage/bugs/injection/InjectionByTypeShouldFirstLookForExactTypeThenAncestorTest.java |
/** Action a walk callback can return to steer traversal. */
export const enum WalkAction {
  // Continue walking the tree. Default behavior.
  Continue,
  // Skip walking into the current node.
  Skip,
  // Stop walking the tree entirely.
  Stop,
}
// Minimal shape a node must expose to be traversable: an `each` method that
// visits every direct child together with its index.
interface Walkable<T> {
  each(cb: (node: T, index: number) => void): void
}
// Custom walk implementation where we can skip going into nodes when we don't
// need to process them.
//
// Returns `false` when the walk was stopped via WalkAction.Stop, otherwise
// `undefined`; the `false` return propagates the stop through the recursion.
export function walk<T>(
  rule: Walkable<T>,
  cb: (rule: T, idx: number, parent: Walkable<T>) => void | WalkAction,
): undefined | false {
  let result: undefined | false = undefined
  rule.each?.((node, idx) => {
    // A callback returning nothing means "keep going".
    let action = cb(node, idx, rule) ?? WalkAction.Continue
    if (action === WalkAction.Stop) {
      result = false
      // NOTE(review): returning `false` from the callback presumably makes the
      // underlying `each` stop iterating (PostCSS-container semantics); the
      // local Walkable interface does not guarantee this — confirm.
      return result
    }
    if (action !== WalkAction.Skip) {
      // Recurse into children; a nested Stop surfaces here as `false` and is
      // returned to the each-callback to halt this level too.
      result = walk(node as Walkable<T>, cb)
      return result
    }
  })
  return result
}
// Depth first walk reversal implementation.
export function walkDepth<T>(rule: Walkable<T>, cb: (rule: T) => void) {
rule?.each?.((node) => {
walkDepth(node as Walkable<T>, cb)
cb(node)
})
} | typescript | github | https://github.com/tailwindlabs/tailwindcss | packages/@tailwindcss-upgrade/src/utils/walk.ts |
# coding=utf-8
###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case import Case
import binascii
class Case6_3_1(Case):
   # invalid exactly on byte 12 (\xa0)
   # Valid UTF-8 prefix: "κόσμε" encoded as UTF-8.
   PAYLOAD1 = '\xce\xba\xe1\xbd\xb9\xcf\x83\xce\xbc\xce\xb5'
   # \xed\xa0\x80 encodes U+D800, a UTF-16 surrogate — illegal in UTF-8.
   PAYLOAD2 = '\xed\xa0\x80'
   # Plain ASCII "edited".
   PAYLOAD3 = '\x65\x64\x69\x74\x65\x64'
   PAYLOAD = PAYLOAD1 + PAYLOAD2 + PAYLOAD3
   DESCRIPTION = """Send invalid UTF-8 text message unfragmented.<br><br>MESSAGE:<br>%s""" % binascii.b2a_hex(PAYLOAD)
   EXPECTATION = """The connection is failed immediately, since the payload is not valid UTF-8."""
   def onOpen(self):
      # No echoed messages are expected on success.
      self.expected[Case.OK] = []
      # The peer must fail the connection itself (closedByMe False) with the
      # "invalid payload" close code; a clean close is not required.
      self.expectedClose = {"closedByMe": False,
                            "closeCode": [self.p.CLOSE_STATUS_CODE_INVALID_PAYLOAD],
                            "requireClean": False,
                            "closedByWrongEndpointIsFatal": True}
      # Send as a TEXT frame so the peer is obliged to validate UTF-8.
      self.p.sendMessage(self.PAYLOAD, isBinary = False)
from rest_framework import generics, permissions as drf_permissions
from rest_framework.exceptions import NotFound
from modularodm import Q
from framework.auth.core import User
from framework.auth.oauth_scopes import CoreScopes
from website.models import NodeLog, Node
from api.nodes.permissions import (
ContributorOrPublic,
)
from api.base.filters import ODMFilterMixin
from api.base.utils import get_user_auth, get_object_or_error
from api.base import permissions as base_permissions
from api.nodes.serializers import NodeSerializer
from api.users.serializers import UserSerializer
from api.logs.serializers import NodeLogSerializer
from api.base.views import JSONAPIBaseView
class LogMixin(object):
    """
    Mixin with convenience method get_log
    """
    def get_log(self):
        """Load the NodeLog named by the ``log_id`` URL kwarg.

        Raises NotFound when no such log exists; raises the view's
        permission error (401/403) when the user may not view it.
        """
        log = NodeLog.load(self.kwargs.get('log_id'))
        if not log:
            raise NotFound(
                detail='No log matching that log_id could be found.'
            )
        self.check_log_permission(log)
        return log
    def check_log_permission(self, log):
        """
        Cycles through nodes on log backrefs. If user can view any of the nodes pertaining to the log, this means
        the log itself can be viewed.
        """
        auth_user = get_user_auth(self.request)
        log_nodes = []
        for node_id in log._backrefs['logged']['node']['logs']:
            node = get_object_or_error(Node, node_id, display_name='node')
            log_nodes.append(node)
            # Any one viewable node is enough to grant access to the log.
            if node.can_view(auth_user):
                return
        if not log_nodes:
            # A log with no associated nodes cannot be permission-checked;
            # previously this fell through to log_nodes[0] and raised a bare
            # IndexError (HTTP 500) instead of a proper API error.
            raise NotFound(
                detail='No log matching that log_id could be found.'
            )
        self.check_object_permissions(self.request, log_nodes[0])  # will raise 401 or 403, as appropriate
class LogNodeList(JSONAPIBaseView, generics.ListAPIView, LogMixin, ODMFilterMixin):
    """List of nodes that a given log is associated with. *Read-only*.
    Paginated list of nodes that the user contributes to. Each resource contains the full representation of the node,
    meaning additional requests to an individual node's detail view are not necessary. If the user id in the path is the
    same as the logged-in user, all nodes will be visible. Otherwise, you will only be able to see the other user's
    publicly-visible nodes. The special user id `me` can be used to represent the currently logged-in user.
    ##Node Attributes
    <!--- Copied Attributes from NodeDetail -->
    OSF Node entities have the "nodes" `type`.
        name           type               description
        ---------------------------------------------------------------------------------
        title          string             title of project or component
        description    string             description of the node
        category       string             node category, must be one of the allowed values
        date_created   iso8601 timestamp  timestamp that the node was created
        date_modified  iso8601 timestamp  timestamp when the node was last updated
        tags           array of strings   list of tags that describe the node
        registration   boolean            has this project been registered?
        collection     boolean            is this node a collection of other nodes?
        dashboard      boolean            is this node visible on the user dashboard?
        public         boolean            has this node been made publicly-visible?
    ##Links
    See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
    ##Actions
    *None*.
    ##Query Params
    + `page=<Int>` -- page number of results to view, default 1
    + `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
    <!--- Copied Query Params from NodeList -->
    Nodes may be filtered by their `title`, `category`, `description`, `public`, `registration`, or `tags`. `title`,
    `description`, and `category` are string fields and will be filtered using simple substring matching. `public` and
    `registration` are booleans, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note
    that quoting `true` or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.
    #This Request/Response
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        ContributorOrPublic
    )
    # Read-only endpoint: no write scope is ever granted.
    required_read_scopes = [CoreScopes.NODE_LOG_READ]
    required_write_scopes = [CoreScopes.NULL]
    serializer_class = NodeSerializer
    view_category = 'logs'
    view_name = 'log-nodes'
    # NOTE(review): DRF's ListAPIView does not consume an `order` attribute;
    # presumably intended as `ordering` or read by a project mixin — confirm.
    order = ('-date', )
    def get_queryset(self):
        # Only nodes the requesting user is allowed to view are returned.
        log = self.get_log()
        auth_user = get_user_auth(self.request)
        return [
            node for node in log.node__logged
            if node.can_view(auth_user)
        ]
class NodeLogDetail(JSONAPIBaseView, generics.RetrieveAPIView, LogMixin):
    """List of nodes that a given log is associated with. *Read-only*.
    Paginated list of nodes that the user contributes to. Each resource contains the full representation of the node,
    meaning additional requests to an individual node's detail view are not necessary. If the user id in the path is the
    same as the logged-in user, all nodes will be visible. Otherwise, you will only be able to see the other user's
    publicly-visible nodes. The special user id `me` can be used to represent the currently logged-in user.
    Note that if an anonymous view_only key is being used, the user relationship will not be exposed.
    ##Node Attributes
    <!--- Copied Attributes from NodeDetail -->
    OSF Node entities have the "nodes" `type`.
        name           type               description
        ---------------------------------------------------------------------------------
        title          string             title of project or component
        description    string             description of the node
        category       string             node category, must be one of the allowed values
        date_created   iso8601 timestamp  timestamp that the node was created
        date_modified  iso8601 timestamp  timestamp when the node was last updated
        tags           array of strings   list of tags that describe the node
        registration   boolean            has this project been registered?
        collection     boolean            is this node a collection of other nodes?
        dashboard      boolean            is this node visible on the user dashboard?
        public         boolean            has this node been made publicly-visible?
    ##Links
    See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
    ##Actions
    *None*.
    ##Query Params
    + `page=<Int>` -- page number of results to view, default 1
    + `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
    <!--- Copied Query Params from NodeList -->
    Nodes may be filtered by their `title`, `category`, `description`, `public`, `registration`, or `tags`. `title`,
    `description`, and `category` are string fields and will be filtered using simple substring matching. `public` and
    `registration` are booleans, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note
    that quoting `true` or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.
    #This Request/Response
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        ContributorOrPublic
    )
    # Read-only endpoint: no write scope is ever granted.
    required_read_scopes = [CoreScopes.NODE_LOG_READ]
    required_write_scopes = [CoreScopes.NULL]
    serializer_class = NodeLogSerializer
    view_category = 'logs'
    view_name = 'log-detail'
    # overrides RetrieveUpdateDestroyAPIView
    def get_object(self):
        # Delegates loading + permission checking to LogMixin.get_log.
        log = self.get_log()
        return log
    # overrides RetrieveUpdateDestroyAPIView
    def perform_destroy(self, instance):
        # Logs are immutable through the API: destroy is a deliberate no-op
        # (consistent with the NULL write scope above).
        pass
class NodeLogContributors(JSONAPIBaseView, generics.ListAPIView, ODMFilterMixin, LogMixin):
    """List of contributors that a given log is associated with. *Read-only*.
    Paginated list of users that were associated with a contributor log action. For example, if a log action was `contributor_added`,
    the new contributors' names would be found at this endpoint. If the relevant log had nothing to do with contributors,
    an empty list would be returned. Each resource contains the full representation of the user, meaning additional requests
    to an individual user's detail view are not necessary.
    ##User Attributes
    <!--- Copied Attributes from UserDetail -->
    OSF User entities have the "users" `type`.
        name               type               description
        ----------------------------------------------------------------------------------------
        full_name          string             full name of the user; used for display
        given_name         string             given name of the user; for bibliographic citations
        middle_names       string             middle name of user; for bibliographic citations
        family_name        string             family name of user; for bibliographic citations
        suffix             string             suffix of user's name for bibliographic citations
        date_registered    iso8601 timestamp  timestamp when the user's account was created
    ##Links
    See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
    ##Actions
    *None*.
    <!--- Copied Query Params from UserList -->
    ##Query Params
    + `page=<Int>` -- page number of results to view, default 1
    + `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
    Users may be filtered by their `id`, `full_name`, `given_name`, `middle_names`, or `family_name`.
    + `profile_image_size=<Int>` -- Modifies `/links/profile_image_url` of the user entities so that it points to
    the user's profile image scaled to the given size in pixels. If left blank, the size depends on the image provider.
    #This Request/Response
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        ContributorOrPublic
    )
    # Read-only endpoint: no write scope is ever granted.
    required_read_scopes = [CoreScopes.USERS_READ]
    required_write_scopes = [CoreScopes.NULL]
    serializer_class = UserSerializer
    view_category = 'logs'
    view_name = 'log-contributors'
    # overrides ListAPIView
    def get_queryset(self):
        log = self.get_log()
        # Only contributor-related log actions carry a 'contributors' param;
        # for every other action this is None and the endpoint returns [].
        associated_contrib_ids = log.params.get('contributors')
        if associated_contrib_ids is None:
            return []
        associated_users = User.find(Q('_id', 'in', associated_contrib_ids))
        return associated_users
#!/usr/bin/env python3
#
# sleep_sort.py - A unique approach to sorting numbers :-)
# Usage: sleep_sort.py 6 1 9 7
# 1
# 6
# 7
# 9
#
# Copyright (C) 2019 Michael Davies <michael@the-davies.net>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.#
#
import os
import sys
import time
def my_child(sec):
    """Sleep for *sec* seconds, then report the value on stdout."""
    time.sleep(sec)
    sys.stdout.write('%s\n' % sec)
def sleep_sort(arr):
    """Print the numbers in *arr* in ascending order via "sleep sort".

    One child process is forked per value; each child sleeps for its value
    (in seconds) and then prints it, so smaller values appear first.
    Values must be non-negative — time.sleep() rejects negative durations.
    Returns None; exits the process if a child cannot be forked.
    """
    for i in arr:
        try:
            pid = os.fork()
        except OSError:
            exit("Could not create a child process")
        if pid == 0:
            my_child(i)
            # Flush before _exit: os._exit skips interpreter cleanup, so
            # buffered output would otherwise be lost.
            sys.stdout.flush()
            # Use os._exit in the forked child; plain exit() would run the
            # parent's atexit handlers and stdio teardown a second time.
            os._exit(0)
    # Reap every child so no zombie processes are left behind.
    for _ in arr:
        os.waitpid(0, 0)
if __name__ == '__main__':
    if (len(sys.argv) == 1):
        # No numbers given: print usage and exit with an error status.
        progname = os.path.basename(__file__)
        sys.exit('Usage: %s <array of numbers to sort>' % progname)
    else:
        # Every CLI argument is parsed as an integer to sort.
        arr = [int(x) for x in sys.argv[1:]]
        sleep_sort(arr)
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Yun, Liu<yunx.liu@intel.com>
import unittest
import os
import shutil
import json
from xml.etree import ElementTree
import sys
sys.path.append("../")
import comm
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
    """End-to-end tests for the crosswalk-app CLI (version, check, create,
    build, manifest and platforms subcommands).

    Fix: the deprecated ``assertEquals``/``assertNotEquals`` aliases (removed
    in Python 3.12) are replaced by ``assertEqual``/``assertNotEqual``.
    """
    def test_tools_version(self):
        # `crosswalk-app version` must match the version in package.json.
        comm.setUp()
        os.chdir(comm.XwalkPath)
        cmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app version"
        (return_code, output) = comm.getstatusoutput(cmd)
        with open(comm.PackTools + "../package.json") as json_file:
            data = json.load(json_file)
        self.assertEqual(
            data['version'].strip(os.linesep),
            output[0].strip(os.linesep))
    def test_build_debug(self):
        # create → build → run round-trip succeeds for a debug build.
        comm.setUp()
        comm.create(self)
        os.chdir('org.xwalk.test')
        buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
        comm.build(self, buildcmd)
        comm.run(self)
        comm.clear("org.xwalk.test")
    def test_check_host_without_platforms(self):
        comm.setUp()
        os.chdir(comm.XwalkPath)
        cmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app check"
        (return_code, output) = comm.getstatusoutput(cmd)
        self.assertEqual(return_code, 0)
    def test_check_host_android(self):
        comm.setUp()
        os.chdir(comm.XwalkPath)
        cmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app check android"
        (return_code, output) = comm.getstatusoutput(cmd)
        self.assertEqual(return_code, 0)
        self.assertNotIn("ERROR:", output[0])
    def test_target_create(self):
        # With the Android SDK platforms dir hidden, create must fail.
        comm.setUp()
        if comm.SHELL_FLAG == "False":
            cmd = "where android"
        else:
            cmd = "which android"
        (return_code, androidpath) = comm.getstatusoutput(cmd)
        targetversionpath = os.path.dirname(os.path.dirname(androidpath[0]))
        os.rename(targetversionpath + "/platforms/", targetversionpath + "/backup/")
        comm.clear("org.xwalk.test")
        os.chdir(comm.XwalkPath)
        createcmd = comm.HOST_PREFIX + comm.PackTools + \
            "crosswalk-app create org.xwalk.test" + comm.MODE + " --android-crosswalk=" + \
            comm.crosswalkzip
        (return_create_code, output) = comm.getstatusoutput(createcmd)
        # Restore the SDK platforms dir before asserting.
        os.rename(targetversionpath + "/backup/", targetversionpath + "/platforms/")
        comm.clear("org.xwalk.test")
        self.assertNotEqual(return_create_code, 0)
    def test_target_build(self):
        # With the Android SDK platforms dir hidden, build must fail with a
        # "project target" error.
        comm.setUp()
        comm.clear("org.xwalk.test")
        os.chdir(comm.XwalkPath)
        comm.create(self)
        if comm.SHELL_FLAG == "False":
            cmd = "where android"
        else:
            cmd = "which android"
        (return_code, androidpath) = comm.getstatusoutput(cmd)
        targetversionpath = os.path.dirname(os.path.dirname(androidpath[0]))
        os.rename(targetversionpath + "/platforms/", targetversionpath + "/backup/")
        os.chdir(comm.XwalkPath + "org.xwalk.test")
        buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
        (return_build_code, buildstatus) = comm.getstatusoutput(buildcmd)
        os.rename(targetversionpath + "/backup/", targetversionpath + "/platforms/")
        comm.clear("org.xwalk.test")
        self.assertNotEqual(return_build_code, 0)
        self.assertIn("project target", buildstatus[0])
    def test_create_with_platform_android(self):
        comm.setUp()
        os.chdir(comm.XwalkPath)
        comm.clear("org.xwalk.test")
        cmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app create org.xwalk.test" + comm.MODE + " --platform=android --android-crosswalk=" + comm.crosswalkzip
        packstatus = os.system(cmd)
        self.assertEqual(packstatus, 0)
        os.chdir('org.xwalk.test')
        buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
        comm.build(self, buildcmd)
        comm.run(self)
        comm.clear("org.xwalk.test")
    def test_init_manifest_androidPlatforms(self):
        # `manifest --platform=android` records android as a target platform.
        comm.setUp()
        os.chdir(comm.XwalkPath)
        comm.clear("org.xwalk.test")
        os.mkdir("org.xwalk.test")
        cmd = comm.HOST_PREFIX + comm.PackTools + \
            "crosswalk-app manifest " + \
            comm.XwalkPath + "org.xwalk.test --platform=android"
        os.system(cmd)
        with open(comm.ConstPath + "/../tools/org.xwalk.test/manifest.json") as json_file:
            data = json.load(json_file)
        comm.clear("org.xwalk.test")
        self.assertEqual(data['xwalk_target_platforms'][0].strip(os.linesep), "android")
    def test_init_manifest_packageid(self):
        # The package id can be set and subsequently updated in the manifest.
        comm.setUp()
        os.chdir(comm.XwalkPath)
        comm.clear("org.xwalk.test")
        os.mkdir("org.xwalk.test")
        cmd = comm.HOST_PREFIX + comm.PackTools + \
            "crosswalk-app manifest " + \
            comm.XwalkPath + "org.xwalk.test --platform=android --package-id=org.xwalk.test"
        os.system(cmd)
        with open(comm.ConstPath + "/../tools/org.xwalk.test/manifest.json") as json_file:
            data = json.load(json_file)
        updatecmd = comm.HOST_PREFIX + comm.PackTools + \
            "crosswalk-app manifest " + \
            comm.XwalkPath + "org.xwalk.test --platform=android --package-id=org.test.foo"
        os.system(updatecmd)
        with open(comm.ConstPath + "/../tools/org.xwalk.test/manifest.json") as json_file_update:
            updatedata = json.load(json_file_update)
        comm.clear("org.xwalk.test")
        self.assertEqual(data['xwalk_package_id'].strip(os.linesep), "org.xwalk.test")
        self.assertEqual(updatedata['xwalk_package_id'].strip(os.linesep), "org.test.foo")
    def test_list_target_platforms(self):
        comm.setUp()
        os.chdir(comm.XwalkPath)
        cmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app platforms"
        status = os.popen(cmd).readlines()
        # First listed platform is marked with " * "; strip marker and newline.
        self.assertEqual("android", status[0].strip(" * " + os.linesep))
if __name__ == '__main__':
    unittest.main()
from __future__ import annotations
import logging
import re
import warnings
from typing import TYPE_CHECKING
from scrapy import Request, Spider, signals
from scrapy.exceptions import IgnoreRequest
from scrapy.utils.decorators import _warn_spider_arg
from scrapy.utils.httpobj import urlparse_cached
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
from scrapy.statscollectors import StatsCollector
logger = logging.getLogger(__name__)
class OffsiteMiddleware:
    """Downloader middleware that drops requests whose host falls outside the
    spider's ``allowed_domains`` by raising :exc:`IgnoreRequest`."""
    crawler: Crawler
    def __init__(self, stats: StatsCollector):
        self.stats = stats
        # Offsite domains already reported, so each is logged only once.
        self.domains_seen: set[str] = set()
    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        """Build the middleware and wire it into the crawler's signals."""
        assert crawler.stats
        o = cls(crawler.stats)
        crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(o.request_scheduled, signal=signals.request_scheduled)
        o.crawler = crawler
        return o
    def spider_opened(self, spider: Spider) -> None:
        # Compile the allowed-domains filter once per spider run.
        self.host_regex: re.Pattern[str] = self.get_host_regex(spider)
    def request_scheduled(self, request: Request, spider: Spider) -> None:
        # Also filter requests injected directly into the scheduler.
        self.process_request(request)
    @_warn_spider_arg
    def process_request(self, request: Request, spider: Spider | None = None) -> None:
        """Raise IgnoreRequest for offsite requests, unless the request opts
        out via ``dont_filter`` or the ``allow_offsite`` meta key."""
        assert self.crawler.spider
        if (
            request.dont_filter
            or request.meta.get("allow_offsite")
            or self.should_follow(request, self.crawler.spider)
        ):
            return
        domain = urlparse_cached(request).hostname
        if domain and domain not in self.domains_seen:
            # Log and count each distinct offsite domain only once.
            self.domains_seen.add(domain)
            logger.debug(
                "Filtered offsite request to %(domain)r: %(request)s",
                {"domain": domain, "request": request},
                extra={"spider": self.crawler.spider},
            )
            self.stats.inc_value("offsite/domains")
        self.stats.inc_value("offsite/filtered")
        raise IgnoreRequest
    def should_follow(self, request: Request, spider: Spider) -> bool:
        """Return True when the request's host matches the allowed pattern."""
        regex = self.host_regex
        # hostname can be None for wrong urls (like javascript links)
        host = urlparse_cached(request).hostname or ""
        return bool(regex.search(host))
    def get_host_regex(self, spider: Spider) -> re.Pattern[str]:
        """Override this method to implement a different offsite policy"""
        allowed_domains = getattr(spider, "allowed_domains", None)
        if not allowed_domains:
            return re.compile("")  # allow all by default
        url_pattern = re.compile(r"^https?://.*$")
        port_pattern = re.compile(r":\d+$")
        domains = []
        for domain in allowed_domains:
            # None entries are silently skipped; URL or port-qualified
            # entries are skipped with a warning.
            if domain is None:
                continue
            if url_pattern.match(domain):
                message = (
                    "allowed_domains accepts only domains, not URLs. "
                    f"Ignoring URL entry {domain} in allowed_domains."
                )
                warnings.warn(message)
            elif port_pattern.search(domain):
                message = (
                    "allowed_domains accepts only domains without ports. "
                    f"Ignoring entry {domain} in allowed_domains."
                )
                warnings.warn(message)
            else:
                domains.append(re.escape(domain))
        # Matches each allowed domain and any of its subdomains exactly.
        regex = rf"^(.*\.)?({'|'.join(domains)})$"
        return re.compile(regex)
# Compatibility shim: re-export the stdlib zlib module's public API and
# mirror its docstring, without leaving a module binding in the namespace.
from zlib import *
import zlib as _zlib
__doc__ = _zlib.__doc__
del _zlib
"""
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
    """Base class for CV iterators where train_mask = ~test_mask

    Implementations must define `_iter_test_masks` or `_iter_test_indices`.

    Parameters
    ----------
    n : int
        Total number of elements in dataset.
    """

    def __init__(self, n):
        # Reject non-integral n (e.g. 10.5) while still accepting
        # integer-valued floats such as 10.0.
        if abs(n - int(n)) >= np.finfo('f').eps:
            raise ValueError("n must be an integer")
        self.n = int(n)

    def __iter__(self):
        # Yield (train_indices, test_indices) pairs; the train set is the
        # complement of each test mask.
        ind = np.arange(self.n)
        for test_index in self._iter_test_masks():
            train_index = np.logical_not(test_index)
            train_index = ind[train_index]
            test_index = ind[test_index]
            yield train_index, test_index

    # Since subclasses must implement either _iter_test_masks or
    # _iter_test_indices, neither can be abstract.
    def _iter_test_masks(self):
        """Generates boolean masks corresponding to test sets.

        By default, delegates to _iter_test_indices()
        """
        for test_index in self._iter_test_indices():
            test_mask = self._empty_mask()
            test_mask[test_index] = True
            yield test_mask

    def _iter_test_indices(self):
        """Generates integer indices corresponding to test sets."""
        raise NotImplementedError

    def _empty_mask(self):
        # Fix: the np.bool alias was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin bool is the supported boolean dtype spelling.
        return np.zeros(self.n, dtype=bool)
class LeaveOneOut(_PartitionIterator):
    """Leave-One-Out cross validation iterator.

    Each of the ``n`` samples is used exactly once as a singleton test
    set while the remaining ``n - 1`` samples form the training set.

    ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and to
    ``LeavePOut(n, p=1)``.  Because the number of splits equals the
    number of samples, this strategy is expensive on large datasets;
    prefer KFold, StratifiedKFold or ShuffleSplit there.

    Parameters
    ----------
    n : int
        Total number of elements in dataset.

    See also
    --------
    LeaveOneLabelOut for splitting the data according to explicit,
    domain-specific stratification of the dataset.
    """

    def _iter_test_indices(self):
        # One singleton test set per sample index.
        for sample_idx in range(self.n):
            yield sample_idx

    def __repr__(self):
        return '{0}.{1}(n={2:d})'.format(
            self.__class__.__module__,
            self.__class__.__name__,
            self.n,
        )

    def __len__(self):
        # Exactly one fold per sample.
        return self.n
class LeavePOut(_PartitionIterator):
    """Leave-P-Out cross validation iterator

    Tests on every distinct subset of ``p`` samples while the remaining
    ``n - p`` samples form the training set.  Unlike
    ``KFold(n, n_folds=n // p)``, the test sets overlap.  The number of
    iterations grows combinatorially with ``n``, so for large datasets
    prefer KFold, StratifiedKFold or ShuffleSplit.

    Parameters
    ----------
    n : int
        Total number of elements in dataset.
    p : int
        Size of the test sets.
    """

    def __init__(self, n, p):
        super(LeavePOut, self).__init__(n)
        self.p = p

    def _iter_test_indices(self):
        # Every size-p combination of sample indices is one test set.
        for subset in combinations(range(self.n), self.p):
            yield np.array(subset)

    def __repr__(self):
        return '{0}.{1}(n={2:d}, p={3:d})'.format(
            self.__class__.__module__,
            self.__class__.__name__,
            self.n,
            self.p,
        )

    def __len__(self):
        # n choose p; kept as the same float expression as before so the
        # numeric result is bit-for-bit identical.
        return int(factorial(self.n) / factorial(self.n - self.p)
                   / factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
    """Base class to validate KFold approaches"""

    @abstractmethod
    def __init__(self, n, n_folds, shuffle, random_state):
        super(_BaseKFold, self).__init__(n)

        # n_folds may arrive as a float; accept it only when integral.
        if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
            raise ValueError("n_folds must be an integer")
        n_folds = int(n_folds)
        self.n_folds = n_folds

        if n_folds <= 1:
            raise ValueError(
                "k-fold cross validation requires at least one"
                " train / test split by setting n_folds=2 or more,"
                " got n_folds={0}.".format(n_folds))
        if n_folds > self.n:
            raise ValueError(
                ("Cannot have number of folds n_folds={0} greater"
                 " than the number of samples: {1}.").format(n_folds, n))

        if not isinstance(shuffle, bool):
            raise TypeError("shuffle must be True or False;"
                            " got {0}".format(shuffle))
        self.shuffle = shuffle
        self.random_state = random_state
class KFold(_BaseKFold):
    """K-Folds cross validation iterator.

    Provides train/test indices to split data in train test sets. Split
    dataset into k consecutive folds (without shuffling).

    Each fold is then used a validation set once while the k - 1 remaining
    fold form the training set.

    Parameters
    ----------
    n : int
        Total number of elements.

    n_folds : int, default=3
        Number of folds. Must be at least 2.

    shuffle : boolean, optional
        Whether to shuffle the data before splitting into batches.

    random_state : None, int or RandomState
        Pseudo-random number generator state used for random
        sampling. If None, use default numpy RNG for shuffling

    Notes
    -----
    The first n % n_folds folds have size n // n_folds + 1, other folds have
    size n // n_folds.

    See also
    --------
    StratifiedKFold: take label information into account to avoid building
    folds with imbalanced class distributions (for binary or multiclass
    classification tasks).
    """

    def __init__(self, n, n_folds=3, shuffle=False,
                 random_state=None):
        super(KFold, self).__init__(n, n_folds, shuffle, random_state)
        self.idxs = np.arange(n)
        if shuffle:
            rng = check_random_state(self.random_state)
            rng.shuffle(self.idxs)

    def _iter_test_indices(self):
        n = self.n
        n_folds = self.n_folds
        # The first n % n_folds folds get one extra sample each so that
        # all n samples are covered.  ``np.int`` was an alias for the
        # builtin ``int`` and was removed in NumPy >= 1.24; using ``int``
        # gives the identical dtype.
        fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=int)
        fold_sizes[:n % n_folds] += 1
        current = 0
        for fold_size in fold_sizes:
            start, stop = current, current + fold_size
            yield self.idxs[start:stop]
            current = stop

    def __repr__(self):
        return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.n,
            self.n_folds,
            self.shuffle,
            self.random_state,
        )

    def __len__(self):
        return self.n_folds
class StratifiedKFold(_BaseKFold):
    """Stratified K-Folds cross validation iterator

    Provides train/test indices to split data in train test sets.

    This cross-validation object is a variation of KFold that
    returns stratified folds. The folds are made by preserving
    the percentage of samples for each class.

    Parameters
    ----------
    y : array-like, [n_samples]
        Samples to split in K folds.

    n_folds : int, default=3
        Number of folds. Must be at least 2.

    shuffle : boolean, optional
        Whether to shuffle each stratification of the data before splitting
        into batches.

    random_state : None, int or RandomState
        Pseudo-random number generator state used for random
        sampling. If None, use default numpy RNG for shuffling

    Notes
    -----
    All the folds have size trunc(n_samples / n_folds), the last one has the
    complementary.
    """

    def __init__(self, y, n_folds=3, shuffle=False,
                 random_state=None):
        super(StratifiedKFold, self).__init__(
            len(y), n_folds, shuffle, random_state)
        y = np.asarray(y)
        n_samples = y.shape[0]
        unique_labels, y_inversed = np.unique(y, return_inverse=True)
        label_counts = bincount(y_inversed)
        min_labels = np.min(label_counts)
        if self.n_folds > min_labels:
            warnings.warn(("The least populated class in y has only %d"
                           " members, which is too few. The minimum"
                           " number of labels for any class cannot"
                           " be less than n_folds=%d."
                           % (min_labels, self.n_folds)), Warning)

        # don't want to use the same seed in each label's shuffle
        if self.shuffle:
            rng = check_random_state(self.random_state)
        else:
            rng = self.random_state

        # pre-assign each sample to a test fold index using individual KFold
        # splitting strategies for each label so as to respect the
        # balance of labels
        per_label_cvs = [
            KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
                  random_state=rng) for c in label_counts]
        # ``np.int`` was an alias for the builtin ``int`` and was removed
        # in NumPy >= 1.24; using ``int`` gives the identical dtype.
        test_folds = np.zeros(n_samples, dtype=int)
        for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
            for label, (_, test_split) in zip(unique_labels, per_label_splits):
                label_test_folds = test_folds[y == label]
                # the test split can be too big because we used
                # KFold(max(c, self.n_folds), self.n_folds) instead of
                # KFold(c, self.n_folds) to make it possible to not crash even
                # if the data is not 100% stratifiable for all the labels
                # (we use a warning instead of raising an exception)
                # If this is the case, let's trim it:
                test_split = test_split[test_split < len(label_test_folds)]
                label_test_folds[test_split] = test_fold_idx
                test_folds[y == label] = label_test_folds

        self.test_folds = test_folds
        self.y = y

    def _iter_test_masks(self):
        for i in range(self.n_folds):
            yield self.test_folds == i

    def __repr__(self):
        return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.y,
            self.n_folds,
            self.shuffle,
            self.random_state,
        )

    def __len__(self):
        return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
    """Leave-One-Label_Out cross-validation iterator

    Splits the data so that, in turn, every distinct value of ``labels``
    forms the test set while all remaining samples form the training
    set.  The labels encode an arbitrary domain-specific stratification
    of the samples (for instance the year each sample was collected,
    which yields time-based splits).

    Parameters
    ----------
    labels : array-like of int with shape (n_samples,)
        Arbitrary domain-specific stratification of the data to be used
        to draw the splits.
    """

    def __init__(self, labels):
        super(LeaveOneLabelOut, self).__init__(len(labels))
        # Copy so that callers mutating `labels` cannot affect iteration.
        self.labels = np.array(labels, copy=True)
        self.unique_labels = np.unique(labels)
        self.n_unique_labels = len(self.unique_labels)

    def _iter_test_masks(self):
        # Each distinct label value defines one test mask.
        for label in self.unique_labels:
            yield self.labels == label

    def __repr__(self):
        return '{0}.{1}(labels={2})'.format(
            self.__class__.__module__,
            self.__class__.__name__,
            self.labels,
        )

    def __len__(self):
        return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
    """Leave-P-Label_Out cross-validation iterator

    Like :class:`LeaveOneLabelOut`, but the test set collects the
    samples of ``p`` distinct label values at a time rather than a
    single one.  The labels encode an arbitrary domain-specific
    stratification of the samples (e.g. year of collection, giving
    time-based splits).

    Parameters
    ----------
    labels : array-like of int with shape (n_samples,)
        Arbitrary domain-specific stratification of the data to be used
        to draw the splits.
    p : int
        Number of samples to leave out in the test split.
    """

    def __init__(self, labels, p):
        # Copy labels to guard against side-effects during iteration.
        super(LeavePLabelOut, self).__init__(len(labels))
        self.labels = np.array(labels, copy=True)
        self.unique_labels = np.unique(labels)
        self.n_unique_labels = len(self.unique_labels)
        self.p = p

    def _iter_test_masks(self):
        # Every size-p combination of distinct labels defines a test mask
        # covering all samples carrying any of those labels.
        for subset in combinations(range(self.n_unique_labels), self.p):
            test_mask = self._empty_mask()
            for label in self.unique_labels[np.array(subset)]:
                test_mask[self.labels == label] = True
            yield test_mask

    def __repr__(self):
        return '{0}.{1}(labels={2}, p={3})'.format(
            self.__class__.__module__,
            self.__class__.__name__,
            self.labels,
            self.p,
        )

    def __len__(self):
        # n_unique_labels choose p, same float expression as upstream.
        return int(factorial(self.n_unique_labels) /
                   factorial(self.n_unique_labels - self.p) /
                   factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
    """Base class for ShuffleSplit and StratifiedShuffleSplit"""

    def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
                 random_state=None):
        self.n = n
        self.n_iter = n_iter
        self.test_size = test_size
        self.train_size = train_size
        self.random_state = random_state
        # Resolve the requested sizes to concrete sample counts up front
        # so that invalid combinations fail at construction time.
        self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
                                                            train_size)

    def __iter__(self):
        for split in self._iter_indices():
            yield split

    @abstractmethod
    def _iter_indices(self):
        """Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
    """Random permutation cross-validation iterator.

    Yields indices to split data into training and test sets.

    Contrary to other cross-validation strategies, random splits do not
    guarantee that all folds will be different, although this remains
    very likely for sizeable datasets.

    Parameters
    ----------
    n : int
        Total number of elements in the dataset.

    n_iter : int (default 10)
        Number of re-shuffling & splitting iterations.

    test_size : float (default 0.1), int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.

    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
    """

    def _iter_indices(self):
        rng = check_random_state(self.random_state)
        for _ in range(self.n_iter):
            # Draw one fresh random partition per iteration.
            shuffled = rng.permutation(self.n)
            test_idx = shuffled[:self.n_test]
            train_idx = shuffled[self.n_test:self.n_test + self.n_train]
            yield train_idx, test_idx

    def __repr__(self):
        return ('{0}({1:d}, n_iter={2:d}, test_size={3}, '
                'random_state={4})'.format(
                    self.__class__.__name__,
                    self.n,
                    self.n_iter,
                    str(self.test_size),
                    self.random_state,
                ))

    def __len__(self):
        return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
class StratifiedShuffleSplit(BaseShuffleSplit):
    """Stratified ShuffleSplit cross validation iterator

    Provides train/test indices to split data in train test sets.

    This cross-validation object is a merge of StratifiedKFold and
    ShuffleSplit, which returns stratified randomized folds. The folds
    are made by preserving the percentage of samples for each class.

    Note: like the ShuffleSplit strategy, stratified random splits
    do not guarantee that all folds will be different, although this is
    still very likely for sizeable datasets.

    Parameters
    ----------
    y : array, [n_samples]
        Labels of samples.

    n_iter : int (default 10)
        Number of re-shuffling & splitting iterations.

    test_size : float (default 0.1), int, or None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.

    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
    """

    def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
                 random_state=None):
        super(StratifiedShuffleSplit, self).__init__(
            len(y), n_iter, test_size, train_size, random_state)
        self.y = np.array(y)
        self.classes, self.y_indices = np.unique(y, return_inverse=True)
        n_cls = self.classes.shape[0]

        # Stratification requires at least 2 members per class and at
        # least one train and one test slot per class.
        if np.min(bincount(self.y_indices)) < 2:
            raise ValueError("The least populated class in y has only 1"
                             " member, which is too few. The minimum"
                             " number of labels for any class cannot"
                             " be less than 2.")

        if self.n_train < n_cls:
            raise ValueError('The train_size = %d should be greater or '
                             'equal to the number of classes = %d' %
                             (self.n_train, n_cls))
        if self.n_test < n_cls:
            raise ValueError('The test_size = %d should be greater or '
                             'equal to the number of classes = %d' %
                             (self.n_test, n_cls))

    def _iter_indices(self):
        rng = check_random_state(self.random_state)
        cls_count = bincount(self.y_indices)
        # Per-class target sizes, proportional to the class frequencies.
        p_i = cls_count / float(self.n)
        n_i = np.round(self.n_train * p_i).astype(int)
        t_i = np.minimum(cls_count - n_i,
                         np.round(self.n_test * p_i).astype(int))

        for n in range(self.n_iter):
            train = []
            test = []

            for i, cls in enumerate(self.classes):
                permutation = rng.permutation(cls_count[i])
                cls_i = np.where((self.y == cls))[0][permutation]

                train.extend(cls_i[:n_i[i]])
                test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])

            # Because of rounding issues (as n_train and n_test are not
            # dividers of the number of elements per class), we may end
            # up here with less samples in train and test than asked for.
            if len(train) < self.n_train or len(test) < self.n_test:
                # We complete by affecting randomly the missing indexes
                missing_idx = np.where(bincount(train + test,
                                                minlength=len(self.y)) == 0)[0]
                missing_idx = rng.permutation(missing_idx)
                n_missing_train = self.n_train - len(train)
                n_missing_test = self.n_test - len(test)
                # BUG FIX: the previous code unconditionally took
                # ``missing_idx[-(n_missing_test):]``, which for a deficit
                # of 0 is ``[-0:]`` == the WHOLE array, flooding the test
                # set with extra indices.  Only extend when a side is
                # actually short; the front/back slices cannot overlap
                # because len(missing_idx) >= n_missing_train +
                # n_missing_test.
                if n_missing_train > 0:
                    train.extend(missing_idx[:n_missing_train])
                if n_missing_test > 0:
                    test.extend(missing_idx[-n_missing_test:])

            train = rng.permutation(train)
            test = rng.permutation(test)
            yield train, test

    def __repr__(self):
        return ('%s(labels=%s, n_iter=%d, test_size=%s, '
                'random_state=%s)' % (
                    self.__class__.__name__,
                    self.y,
                    self.n_iter,
                    str(self.test_size),
                    self.random_state,
                ))

    def __len__(self):
        return self.n_iter
class PredefinedSplit(_PartitionIterator):
    """Predefined split cross validation iterator

    Splits the data into training/test set folds according to a predefined
    scheme. Each sample can be assigned to at most one test set fold, as
    specified by the user through the ``test_fold`` parameter.

    Parameters
    ----------
    test_fold : array-like, shape (n_samples,)
        test_fold[i] gives the test set fold of sample i. A value of -1
        indicates that the corresponding sample is not part of any test set
        folds, but will instead always be put into the training fold.
    """

    def __init__(self, test_fold):
        super(PredefinedSplit, self).__init__(len(test_fold))
        # ``np.int`` was an alias for the builtin ``int`` and was removed
        # in NumPy >= 1.24; using ``int`` gives the identical dtype.
        self.test_fold = np.array(test_fold, dtype=int)
        self.test_fold = column_or_1d(self.test_fold)
        self.unique_folds = np.unique(self.test_fold)
        # -1 marks "always in the training fold"; it never defines a test set.
        self.unique_folds = self.unique_folds[self.unique_folds != -1]

    def _iter_test_indices(self):
        for f in self.unique_folds:
            yield np.where(self.test_fold == f)[0]

    def __repr__(self):
        return '%s.%s(test_fold=%s)' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.test_fold)

    def __len__(self):
        return len(self.unique_folds)
##############################################################################
def _index_param_value(X, v, indices):
    """Private helper function for parameter value indexing.

    Restricts ``v`` to ``indices`` when it is sample-aligned with ``X``;
    any other value is forwarded unchanged.
    """
    sample_aligned = _is_arraylike(v) and _num_samples(v) == _num_samples(X)
    if not sample_aligned:
        # Not per-sample data (e.g. a scalar hyper-parameter): pass through.
        return v
    if sp.issparse(v):
        # CSR supports efficient row indexing.
        v = v.tocsr()
    return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
                      verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
    """Generate cross-validated estimates for each input data point

    Each sample's prediction comes from an estimator fitted on the folds
    that do not contain it, so the supplied ``cv`` must include every
    sample in the test set exactly once; otherwise a ValueError is
    raised.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.

    X : array-like
        The data to fit. Can be, for example a list, or an array at least 2d.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    cv : cross-validation generator or int, optional, default: None
        A cross-validation generator to use. If int, determines
        the number of folds in StratifiedKFold if y is binary
        or multiclass and estimator is a classifier, or the number
        of folds in KFold otherwise. If None, it is equivalent to cv=3.

    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.

    verbose : integer, optional
        The verbosity level.

    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.

    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution: None (spawn everything immediately), an int (exact
        number of total jobs), or an expression in n_jobs such as
        '2*n_jobs'.

    Returns
    -------
    preds : ndarray
        This is the result of calling 'predict'
    """
    X, y = indexable(X, y)
    cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))

    # Clone the estimator so each fold is fitted independently and the
    # dispatched jobs stay picklable.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    prediction_blocks = parallel(
        delayed(_fit_and_predict)(clone(estimator), X, y, train, test,
                                  verbose, fit_params)
        for train, test in cv)

    block_preds = np.concatenate([pred for pred, _ in prediction_blocks])
    block_locs = np.concatenate([loc for _, loc in prediction_blocks])
    if not _check_is_partition(block_locs, X.shape[0]):
        raise ValueError('cross_val_predict only works for partitions')

    # Scatter the fold-ordered predictions back into original sample order.
    preds = block_preds.copy()
    preds[block_locs] = block_preds
    return preds
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
    """Fit estimator on the train split and predict on the test split.

    Parameters
    ----------
    estimator : estimator object implementing 'fit' and 'predict'
        The object to use to fit the data.

    X : array-like of shape at least 2D
        The data to fit.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    train : array-like, shape (n_train_samples,)
        Indices of training samples.

    test : array-like, shape (n_test_samples,)
        Indices of test samples.

    verbose : integer
        The verbosity level.

    fit_params : dict or None
        Parameters that will be passed to ``estimator.fit``.

    Returns
    -------
    preds : sequence
        Result of calling 'estimator.predict'

    test : array-like
        This is the value of the test parameter
    """
    # Restrict sample-aligned fit parameters (e.g. sample_weight) to the
    # training indices.
    if fit_params is None:
        fit_params = {}
    fit_params = dict((name, _index_param_value(X, value, train))
                      for name, value in fit_params.items())

    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, _ = _safe_split(estimator, X, y, test, train)

    if y_train is None:
        estimator.fit(X_train, **fit_params)
    else:
        estimator.fit(X_train, y_train, **fit_params)
    return estimator.predict(X_test), test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
                    verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
    """Evaluate a score by cross-validation

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : array-like
        The data to fit. Can be, for example a list, or an array at least 2d.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    cv : cross-validation generator or int, optional, default: None
        A cross-validation generator to use. If int, determines
        the number of folds in StratifiedKFold if y is binary
        or multiclass and estimator is a classifier, or the number
        of folds in KFold otherwise. If None, it is equivalent to cv=3.

    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.

    verbose : integer, optional
        The verbosity level.

    fit_params : dict, optional
        Parameters to pass to the fit method of the estimator.

    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution: None (spawn everything immediately), an int (exact
        number of total jobs), or an expression in n_jobs such as
        '2*n_jobs'.

    Returns
    -------
    scores : array of float, shape=(len(list(cv)),)
        Array of scores of the estimator for each run of the cross validation.
    """
    X, y = indexable(X, y)

    cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    # Clone the estimator so each fold is fitted independently and the
    # dispatched jobs stay picklable.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    fold_results = parallel(
        delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test,
                                verbose, None, fit_params)
        for train, test in cv)
    # _fit_and_score returns [test_score, n_test_samples, scoring_time];
    # keep only the score column.
    return np.array(fold_results)[:, 0]
class FitFailedWarning(RuntimeWarning):
    """Warning issued when an estimator's ``fit`` raises during a CV split.

    Emitted by ``_fit_and_score`` when ``error_score`` is set to a numeric
    value and fitting fails: the failing fold's score is replaced by
    ``error_score`` and this warning is raised instead of aborting the run.
    """
    pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
                   parameters, fit_params, return_train_score=False,
                   return_parameters=False, error_score='raise'):
    """Fit estimator on one train split and score it on the matching test split.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : array-like of shape at least 2D
        The data to fit.

    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.

    scorer : callable
        A scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    train : array-like, shape (n_train_samples,)
        Indices of training samples.

    test : array-like, shape (n_test_samples,)
        Indices of test samples.

    verbose : integer
        The verbosity level.

    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.

    parameters : dict or None
        Parameters to be set on the estimator.

    fit_params : dict or None
        Parameters that will be passed to ``estimator.fit``.

    return_train_score : boolean, optional, default: False
        Compute and return score on training set.

    return_parameters : boolean, optional, default: False
        Return parameters that has been used for the estimator.

    Returns
    -------
    ret : list
        ``[train_score?] + [test_score, n_test_samples, scoring_time]
        + [parameters?]`` where the optional entries are present only when
        ``return_train_score`` / ``return_parameters`` are True.
    """
    if verbose > 1:
        # Build a one-line description of the parameter setting. Note that
        # ``msg`` is reused below under ``verbose > 2`` / ``verbose > 1``,
        # which is safe because both imply this branch ran.
        if parameters is None:
            msg = "no parameters to be set"
        else:
            msg = '%s' % (', '.join('%s=%s' % (k, v)
                          for k, v in parameters.items()))
        print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))

    # Adjust length of sample weights: slice any per-sample fit parameter
    # (e.g. sample_weight) down to the training indices so it lines up
    # with X[train].
    fit_params = fit_params if fit_params is not None else {}
    fit_params = dict([(k, _index_param_value(X, v, train))
                      for k, v in fit_params.items()])

    if parameters is not None:
        estimator.set_params(**parameters)

    start_time = time.time()

    X_train, y_train = _safe_split(estimator, X, y, train)
    # The test split also receives the train indices — needed when X is a
    # precomputed kernel matrix and columns must come from the train set.
    X_test, y_test = _safe_split(estimator, X, y, test, train)

    try:
        if y_train is None:
            estimator.fit(X_train, **fit_params)
        else:
            estimator.fit(X_train, y_train, **fit_params)

    except Exception as e:
        if error_score == 'raise':
            raise
        elif isinstance(error_score, numbers.Number):
            # Substitute the fallback score and warn, instead of aborting
            # the whole cross-validation run on one bad fold.
            test_score = error_score
            if return_train_score:
                train_score = error_score
            warnings.warn("Classifier fit failed. The score on this train-test"
                          " partition for these parameters will be set to %f. "
                          "Details: \n%r" % (error_score, e), FitFailedWarning)
        else:
            raise ValueError("error_score must be the string 'raise' or a"
                             " numeric value. (Hint: if using 'raise', please"
                             " make sure that it has been spelled correctly.)"
                             )

    else:
        test_score = _score(estimator, X_test, y_test, scorer)
        if return_train_score:
            train_score = _score(estimator, X_train, y_train, scorer)

    # scoring_time deliberately covers both fitting and scoring.
    scoring_time = time.time() - start_time

    if verbose > 2:
        msg += ", score=%f" % test_score
    if verbose > 1:
        end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
        print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))

    ret = [train_score] if return_train_score else []
    ret.extend([test_score, _num_samples(X_test), scoring_time])
    if return_parameters:
        ret.append(parameters)
    return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
    """Build a cross-validation generator from user-friendly input.

    Parameters
    ----------
    cv : int, a cv generator instance, or None
        An integer selects the number of folds in a KFold; None selects
        3-fold; any other object is assumed to already be a cv generator
        and is returned as-is.

    X : array-like
        The data the cross-val object will be applied on.

    y : array-like
        The target variable for a supervised learning problem.

    classifier : boolean optional
        Whether the task is a classification task, in which case
        stratified KFold will be used.

    Returns
    -------
    checked_cv: a cross-validation generator instance.
        The return value is guaranteed to be a cv generator instance,
        whatever the input type.
    """
    # Public wrapper: all the work is delegated to the internal variant.
    checked_cv = _check_cv(cv, X=X, y=y, classifier=classifier)
    return checked_cv
def _check_cv(cv, X=None, y=None, classifier=False):
# This exists for internal use while indices is being deprecated.
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
def permutation_test_score(estimator, X, y, cv=None,
                           n_permutations=100, n_jobs=1, labels=None,
                           random_state=0, verbose=0, scoring=None):
    """Evaluate the significance of a cross-validated score with permutations

    Computes the cross-validated score of ``estimator`` on ``(X, y)``,
    then repeats the computation ``n_permutations`` times with the targets
    shuffled, and derives an empirical p-value from how often a permuted
    score matches or beats the true one.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : array-like of shape at least 2D
        The data to fit.

    y : array-like
        The target variable to try to predict in the case of
        supervised learning.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects.

    n_permutations : integer, optional
        Number of times to permute ``y``.

    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.

    labels : array-like of shape [n_samples] (optional)
        Labels constrain the permutation among groups of samples with
        a same label.

    random_state : RandomState or an int seed (0 by default)
        A random number generator instance to define the state of the
        random permutations generator.

    verbose : integer, optional
        The verbosity level.

    Returns
    -------
    score : float
        The true score without permuting targets.

    permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutations.

    pvalue : float
        The returned value equals p-value if `scoring` returns bigger
        numbers for better scores (e.g., accuracy_score). If `scoring` is
        rather a loss function (i.e. when lower is better such as with
        `mean_squared_error`) then this is actually the complement of the
        p-value: 1 - p-value.

    Notes
    -----
    This function implements Test 1 in:

        Ojala and Garriga. Permutation Tests for Studying Classifier
        Performance. The Journal of Machine Learning Research (2010)
        vol. 11
    """
    X, y = indexable(X, y)
    cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    random_state = check_random_state(random_state)

    # Clone so every fit works on an independent, picklable estimator.
    score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
    # Each _shuffle call below advances the shared random_state, giving an
    # independent permutation of y per job.
    permutation_scores = np.array(Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_permutation_test_score)(
            clone(estimator), X, _shuffle(y, labels, random_state), cv,
            scorer)
        for _ in range(n_permutations)))
    # +1 in numerator and denominator: the unpermuted score counts as one
    # draw, which also keeps the p-value strictly positive.
    pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
    return score, permutation_scores, pvalue


permutation_test_score.__test__ = False  # to avoid a pb with nosetests
def train_test_split(*arrays, **options):
    """Split arrays or matrices into random train and test subsets

    Convenience wrapper that validates its inputs, draws a single
    ``ShuffleSplit`` partition and applies it to every array passed in,
    so splitting (and optional subsampling) becomes a one-liner.

    Parameters
    ----------
    *arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
        Python lists or tuples occurring in arrays are converted to 1D numpy
        arrays.

    test_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.
        If train size is also None, test size is set to 0.25.

    train_size : float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    Returns
    -------
    splitting : list of arrays, length=2 * len(arrays)
        List containing train-test split of input array.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.cross_validation import train_test_split
    >>> X, y = np.arange(10).reshape((5, 2)), range(5)
    >>> X
    array([[0, 1],
           [2, 3],
           [4, 5],
           [6, 7],
           [8, 9]])
    >>> list(y)
    [0, 1, 2, 3, 4]

    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, test_size=0.33, random_state=42)
    ...
    >>> X_train
    array([[4, 5],
           [0, 1],
           [6, 7]])
    >>> y_train
    [2, 0, 3]
    >>> X_test
    array([[2, 3],
           [8, 9]])
    >>> y_test
    [1, 4]
    """
    if len(arrays) == 0:
        raise ValueError("At least one array required as input")

    # Pop every recognized option first; anything left over is an error.
    test_size = options.pop('test_size', None)
    train_size = options.pop('train_size', None)
    random_state = options.pop('random_state', None)
    dtype = options.pop('dtype', None)
    if dtype is not None:
        warnings.warn("dtype option is ignored and will be removed in 0.18.",
                      DeprecationWarning)

    allow_nd = options.pop('allow_nd', None)
    allow_lists = options.pop('allow_lists', None)
    if allow_lists is not None:
        warnings.warn("The allow_lists option is deprecated and will be "
                      "assumed True in 0.18 and removed.", DeprecationWarning)

    if options:
        raise TypeError("Invalid parameters passed: %s" % str(options))

    if allow_nd is not None:
        warnings.warn("The allow_nd option is deprecated and will be "
                      "assumed True in 0.18 and removed.", DeprecationWarning)

    if allow_lists is False or allow_nd is False:
        # Legacy strict mode: force array conversion/validation up front.
        arrays = [a if a is None
                  else check_array(a, 'csr', allow_nd=allow_nd,
                                   force_all_finite=False, ensure_2d=False)
                  for a in arrays]

    if test_size is None and train_size is None:
        test_size = 0.25

    arrays = indexable(*arrays)
    splitter = ShuffleSplit(_num_samples(arrays[0]), test_size=test_size,
                            train_size=train_size, random_state=random_state)
    train_idx, test_idx = next(iter(splitter))
    # Interleave (train, test) pairs per input array into one flat list.
    return list(chain.from_iterable(
        (safe_indexing(a, train_idx), safe_indexing(a, test_idx))
        for a in arrays))


train_test_split.__test__ = False  # to avoid a pb with nosetests
"""
Defines a form for providing validation of CourseEmail templates.
"""
import logging
from django import forms
from django.core.exceptions import ValidationError
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from six import text_type
from bulk_email.models import COURSE_EMAIL_MESSAGE_BODY_TAG, CourseAuthorization, CourseEmailTemplate
from openedx.core.lib.courses import clean_course_id
from xmodule.modulestore.django import modulestore
log = logging.getLogger(__name__)
class CourseEmailTemplateForm(forms.ModelForm):
"""Form providing validation of CourseEmail templates."""
name = forms.CharField(required=False)
class Meta(object):
model = CourseEmailTemplate
fields = ('html_template', 'plain_template', 'name')
def _validate_template(self, template):
"""Check the template for required tags."""
index = template.find(COURSE_EMAIL_MESSAGE_BODY_TAG)
if index < 0:
msg = 'Missing tag: "{}"'.format(COURSE_EMAIL_MESSAGE_BODY_TAG)
log.warning(msg)
raise ValidationError(msg)
if template.find(COURSE_EMAIL_MESSAGE_BODY_TAG, index + 1) >= 0:
msg = 'Multiple instances of tag: "{}"'.format(COURSE_EMAIL_MESSAGE_BODY_TAG)
log.warning(msg)
raise ValidationError(msg)
# TODO: add more validation here, including the set of known tags
# for which values will be supplied. (Email will fail if the template
# uses tags for which values are not supplied.)
def clean_html_template(self):
"""Validate the HTML template."""
template = self.cleaned_data["html_template"]
self._validate_template(template)
return template
def clean_plain_template(self):
"""Validate the plaintext template."""
template = self.cleaned_data["plain_template"]
self._validate_template(template)
return template
def clean_name(self):
"""Validate the name field. Enforce uniqueness constraint on 'name' field"""
# Note that we get back a blank string in the Form for an empty 'name' field
# we want those to be set to None in Python and NULL in the database
name = self.cleaned_data.get("name").strip() or None
# if we are creating a new CourseEmailTemplate, then we need to
# enforce the uniquess constraint as part of the Form validation
if not self.instance.pk:
try:
CourseEmailTemplate.get_template(name)
# already exists, this is no good
raise ValidationError('Name of "{}" already exists, this must be unique.'.format(name))
except CourseEmailTemplate.DoesNotExist:
# this is actually the successful validation
pass
return name
class CourseAuthorizationAdminForm(forms.ModelForm):
"""Input form for email enabling, allowing us to verify data."""
class Meta(object):
model = CourseAuthorization
fields = '__all__'
def clean_course_id(self):
"""
Validate the course id
"""
return clean_course_id(self) | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.platform
import com.intellij.openapi.components.service
import com.intellij.openapi.project.Project
import org.jetbrains.kotlin.analysis.api.KaPlatformInterface
import org.jetbrains.kotlin.analysis.api.projectStructure.KaLibraryModule
/**
* [KotlinPlatformSettings] allow the Analysis API platform to control the behavior of the Analysis API engine.
*/
@KaPlatformInterface
public interface KotlinPlatformSettings : KotlinPlatformComponent {
/**
* @see KotlinDeserializedDeclarationsOrigin
*/
public val deserializedDeclarationsOrigin: KotlinDeserializedDeclarationsOrigin
/**
* Whether analysis of use-site [KaLibraryModule]s is allowed by the platform. When this is `false`, attempts to analyze a
* [KaLibraryModule] will result in an exception.
*
* See KT-76042 for more information.
*/
public val allowUseSiteLibraryModuleAnalysis: Boolean
get() = true
@KaPlatformInterface
public companion object {
public fun getInstance(project: Project): KotlinPlatformSettings = project.service()
}
}
/**
* This [setting][KotlinPlatformSettings] controls where [declarations][org.jetbrains.kotlin.analysis.api.platform.declarations.KotlinDeclarationProvider]
* provided by the platform come from.
*
* The origin directly affects whether declaration providers have to provide library entities in addition to source entities, which is the
* case for the [STUBS] origin.
*
* Internally, the Analysis API engine has to use different implementations of symbol providers for [BINARIES] and [STUBS].
*/
@KaPlatformInterface
public enum class KotlinDeserializedDeclarationsOrigin {
/**
* Library content is deserialized from `.class` files, KLIBs, and metadata.
*
* Kotlin FIR declarations deserialized from binaries don't have associated PSI elements.
*/
BINARIES,
/**
* Library content is pre-indexed to [stubs](https://plugins.jetbrains.com/docs/intellij/stub-indexes.html), which are then provided by
* [declaration providers][org.jetbrains.kotlin.analysis.api.platform.declarations.KotlinDeclarationProvider].
*
* This mode is used by the IntelliJ K2 plugin because libraries are already indexed in a stub format, and we can avoid additionally
* loading binaries.
*/
STUBS,
} | kotlin | github | https://github.com/JetBrains/kotlin | analysis/analysis-api-platform-interface/src/org/jetbrains/kotlin/analysis/api/platform/KotlinPlatformSettings.kt |
# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script provides CLI access to run security tests on a Chrome OS images.
The entry point is available as image_lib.SecurityTest. Call that directly when
possible.
Note: You probably will need an internal checkout by default for these
tests to be useful. You can provide your own baselines, but you
can certainly provide your own set of configs.
Note: These tests will fail on dev images. They are designed to
check release recovery images only.
Note: The --image argument can be a path or a basename. When a basename is
provided, the --board argument is always used to build the path.
Consequently, `./image_name.bin` and `image_name.bin` are treated
very differently.
"""
from __future__ import print_function
import re
import sys
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import image_lib
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
def GetParser():
"""Build the Argument Parser."""
parser = commandline.ArgumentParser(description=__doc__)
parser.add_argument('--board', help='The board to test an image for.')
# Avoiding type='path' to allow the use of `./` to distinguish between a
# local image (e.g. `./image_name.bin`) and a basename (`image_name.bin`) in
# the board's build directory. The `./` would be normalized out of a
# type='path' argument, making it look like it's a basename.
parser.add_argument('--image',
help='Source release image to use (recovery_image.bin by '
'default). May be a path to an image or just the '
'basename of the image if a board is also provided.')
parser.add_argument('--baselines', type='path',
help='Directory to load security baselines from (default '
'from cros-signing).')
parser.add_argument('--vboot-hash',
help='The git rev of the vboot tree to checkout (default '
'to the signer hash).')
return parser
def _ParseArgs(argv):
"""Parse and validate arguments."""
parser = GetParser()
opts = parser.parse_args(argv)
# Need the board if no image provided or only the basename is provided so
# we can build out the full path to an image file.
opts.board = opts.board or cros_build_lib.GetDefaultBoard()
try:
opts.image = image_lib.BuildImagePath(opts.board, opts.image)
except image_lib.ImageDoesNotExistError as e:
# Replace |arg| with --arg, otherwise messages still relevant.
message = re.sub(r'\|(\w+)\|', r'--\1', str(e))
parser.error(message)
opts.Freeze()
return opts
def main(argv):
cros_build_lib.AssertInsideChroot()
opts = _ParseArgs(argv)
try:
success = image_lib.SecurityTest(board=opts.board, image=opts.image,
baselines=opts.baselines,
vboot_hash=opts.vboot_hash)
except image_lib.Error as e:
cros_build_lib.Die(e)
else:
return 0 if success else 1 | unknown | codeparrot/codeparrot-clean | ||
"""report.py - Utilities for reporting statistics about benchmark results
"""
import os
import re
import copy
class BenchmarkColor(object):
def __init__(self, name, code):
self.name = name
self.code = code
def __repr__(self):
return '%s%r' % (self.__class__.__name__,
(self.name, self.code))
def __format__(self, format):
return self.code
# Benchmark Colors Enumeration
BC_NONE = BenchmarkColor('NONE', '')
BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m')
BC_CYAN = BenchmarkColor('CYAN', '\033[96m')
BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m')
BC_HEADER = BenchmarkColor('HEADER', '\033[92m')
BC_WARNING = BenchmarkColor('WARNING', '\033[93m')
BC_WHITE = BenchmarkColor('WHITE', '\033[97m')
BC_FAIL = BenchmarkColor('FAIL', '\033[91m')
BC_ENDC = BenchmarkColor('ENDC', '\033[0m')
BC_BOLD = BenchmarkColor('BOLD', '\033[1m')
BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m')
def color_format(use_color, fmt_str, *args, **kwargs):
"""
Return the result of 'fmt_str.format(*args, **kwargs)' after transforming
'args' and 'kwargs' according to the value of 'use_color'. If 'use_color'
is False then all color codes in 'args' and 'kwargs' are replaced with
the empty string.
"""
assert use_color is True or use_color is False
if not use_color:
args = [arg if not isinstance(arg, BenchmarkColor) else BC_NONE
for arg in args]
kwargs = {key: arg if not isinstance(arg, BenchmarkColor) else BC_NONE
for key, arg in kwargs.items()}
return fmt_str.format(*args, **kwargs)
def find_longest_name(benchmark_list):
"""
Return the length of the longest benchmark name in a given list of
benchmark JSON objects
"""
longest_name = 1
for bc in benchmark_list:
if len(bc['name']) > longest_name:
longest_name = len(bc['name'])
return longest_name
def calculate_change(old_val, new_val):
"""
Return a float representing the decimal change between old_val and new_val.
"""
if old_val == 0 and new_val == 0:
return 0.0
if old_val == 0:
return float(new_val - old_val) / (float(old_val + new_val) / 2)
return float(new_val - old_val) / abs(old_val)
def filter_benchmark(json_orig, family, replacement=""):
"""
Apply a filter to the json, and only leave the 'family' of benchmarks.
"""
regex = re.compile(family)
filtered = {}
filtered['benchmarks'] = []
for be in json_orig['benchmarks']:
if not regex.search(be['name']):
continue
filteredbench = copy.deepcopy(be) # Do NOT modify the old name!
filteredbench['name'] = regex.sub(replacement, filteredbench['name'])
filtered['benchmarks'].append(filteredbench)
return filtered
def generate_difference_report(json1, json2, use_color=True):
"""
Calculate and report the difference between each test of two benchmarks
runs specified as 'json1' and 'json2'.
"""
first_col_width = find_longest_name(json1['benchmarks'])
def find_test(name):
for b in json2['benchmarks']:
if b['name'] == name:
return b
return None
first_col_width = max(first_col_width, len('Benchmark'))
first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format(
'Benchmark', 12 + first_col_width)
output_strs = [first_line, '-' * len(first_line)]
gen = (bn for bn in json1['benchmarks'] if 'real_time' in bn and 'cpu_time' in bn)
for bn in gen:
other_bench = find_test(bn['name'])
if not other_bench:
continue
if bn['time_unit'] != other_bench['time_unit']:
continue
def get_color(res):
if res > 0.05:
return BC_FAIL
elif res > -0.07:
return BC_WHITE
else:
return BC_CYAN
fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
tres = calculate_change(bn['real_time'], other_bench['real_time'])
cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
output_strs += [color_format(use_color, fmt_str,
BC_HEADER, bn['name'], first_col_width,
get_color(tres), tres, get_color(cpures), cpures,
bn['real_time'], other_bench['real_time'],
bn['cpu_time'], other_bench['cpu_time'],
endc=BC_ENDC)]
return output_strs
###############################################################################
# Unit tests
import unittest
class TestReportDifference(unittest.TestCase):
def load_results(self):
import json
testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs')
testOutput1 = os.path.join(testInputs, 'test1_run1.json')
testOutput2 = os.path.join(testInputs, 'test1_run2.json')
with open(testOutput1, 'r') as f:
json1 = json.load(f)
with open(testOutput2, 'r') as f:
json2 = json.load(f)
return json1, json2
def test_basic(self):
expect_lines = [
['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'],
['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'],
['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'],
['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'],
['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'],
['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'],
['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'],
['BM_100xSlower', '+99.0000', '+99.0000', '100', '10000', '100', '10000'],
['BM_100xFaster', '-0.9900', '-0.9900', '10000', '100', '10000', '100'],
['BM_10PercentCPUToTime', '+0.1000', '-0.1000', '100', '110', '100', '90'],
['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'],
['BM_BadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'],
]
json1, json2 = self.load_results()
output_lines_with_header = generate_difference_report(json1, json2, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(len(parts), 7)
self.assertEqual(parts, expect_lines[i])
class TestReportDifferenceBetweenFamilies(unittest.TestCase):
def load_result(self):
import json
testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs')
testOutput = os.path.join(testInputs, 'test2_run.json')
with open(testOutput, 'r') as f:
json = json.load(f)
return json
def test_basic(self):
expect_lines = [
['.', '-0.5000', '-0.5000', '10', '5', '10', '5'],
['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'],
['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'],
['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'],
]
json = self.load_result()
json1 = filter_benchmark(json, "BM_Z.ro", ".")
json2 = filter_benchmark(json, "BM_O.e", ".")
output_lines_with_header = generate_difference_report(json1, json2, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
self.assertEqual(len(output_lines), len(expect_lines))
for i in range(0, len(output_lines)):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(len(parts), 7)
self.assertEqual(parts, expect_lines[i])
if __name__ == '__main__':
unittest.main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified; | unknown | codeparrot/codeparrot-clean | ||
"""
The httplib2 algorithms ported for use with requests.
"""
import re
import calendar
import time
from email.utils import parsedate_tz
from pip._vendor.requests.structures import CaseInsensitiveDict
from .cache import DictCache
from .serialize import Serializer
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
"""Parses a URI using the regex given in Appendix B of RFC 3986.
(scheme, authority, path, query, fragment) = parse_uri(uri)
"""
groups = URI.match(uri).groups()
return (groups[1], groups[3], groups[4], groups[6], groups[8])
class CacheController(object):
"""An interface to see if request should cached or not.
"""
def __init__(self, cache=None, cache_etags=True, serializer=None):
self.cache = cache or DictCache()
self.cache_etags = cache_etags
self.serializer = serializer or Serializer()
def _urlnorm(self, uri):
"""Normalize the URL to create a safe key for the cache"""
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise Exception("Only absolute URIs are allowed. uri = %s" % uri)
scheme = scheme.lower()
authority = authority.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
defrag_uri = scheme + "://" + authority + request_uri
return defrag_uri
def cache_url(self, uri):
return self._urlnorm(uri)
def parse_cache_control(self, headers):
"""
Parse the cache control headers returning a dictionary with values
for the different directives.
"""
retval = {}
cc_header = 'cache-control'
if 'Cache-Control' in headers:
cc_header = 'Cache-Control'
if cc_header in headers:
parts = headers[cc_header].split(',')
parts_with_args = [
tuple([x.strip().lower() for x in part.split("=", 1)])
for part in parts if -1 != part.find("=")
]
parts_wo_args = [
(name.strip().lower(), 1)
for name in parts if -1 == name.find("=")
]
retval = dict(parts_with_args + parts_wo_args)
return retval
def cached_request(self, request):
"""
Return a cached response if it exists in the cache, otherwise
return False.
"""
cache_url = self.cache_url(request.url)
cc = self.parse_cache_control(request.headers)
# non-caching states
no_cache = True if 'no-cache' in cc else False
if 'max-age' in cc and cc['max-age'] == 0:
no_cache = True
# Bail out if no-cache was set
if no_cache:
return False
# It is in the cache, so lets see if it is going to be
# fresh enough
resp = self.serializer.loads(request, self.cache.get(cache_url))
# Check to see if we have a cached object
if not resp:
return False
headers = CaseInsensitiveDict(resp.headers)
now = time.time()
date = calendar.timegm(
parsedate_tz(headers['date'])
)
current_age = max(0, now - date)
# TODO: There is an assumption that the result will be a
# urllib3 response object. This may not be best since we
# could probably avoid instantiating or constructing the
# response until we know we need it.
resp_cc = self.parse_cache_control(headers)
# determine freshness
freshness_lifetime = 0
# Check the max-age pragma in the cache control header
if 'max-age' in resp_cc and resp_cc['max-age'].isdigit():
freshness_lifetime = int(resp_cc['max-age'])
# If there isn't a max-age, check for an expires header
elif 'expires' in headers:
expires = parsedate_tz(headers['expires'])
if expires is not None:
expire_time = calendar.timegm(expires) - date
freshness_lifetime = max(0, expire_time)
# determine if we are setting freshness limit in the req
if 'max-age' in cc:
try:
freshness_lifetime = int(cc['max-age'])
except ValueError:
freshness_lifetime = 0
if 'min-fresh' in cc:
try:
min_fresh = int(cc['min-fresh'])
except ValueError:
min_fresh = 0
# adjust our current age by our min fresh
current_age += min_fresh
# see how fresh we actually are
fresh = (freshness_lifetime > current_age)
if fresh:
return resp
# we're not fresh. If we don't have an Etag, clear it out
if 'etag' not in headers:
self.cache.delete(cache_url)
# return the original handler
return False
def conditional_headers(self, request):
    """
    Build validation headers (If-None-Match / If-Modified-Since) from
    a cached response for *request*. Returns an empty dict when no
    cached response exists or it carries no validators.
    """
    cached = self.serializer.loads(
        request, self.cache.get(self.cache_url(request.url))
    )
    if not cached:
        return {}
    headers = CaseInsensitiveDict(cached.headers)
    conditional = {}
    if 'etag' in headers:
        conditional['If-None-Match'] = headers['ETag']
    if 'last-modified' in headers:
        conditional['If-Modified-Since'] = headers['Last-Modified']
    return conditional
def cache_response(self, request, response, body=None):
    """
    Algorithm for caching requests.

    This assumes a requests Response object. Stores the response when
    it is cacheable (ETag present and etag-caching enabled, or a
    positive max-age / an Expires value together with a Date header),
    and honors no-store from either side.
    """
    # From httplib2: Don't cache 206's since we aren't going to
    # handle byte range requests
    # (only plain 200 and 203 responses are stored at all)
    if response.status not in [200, 203]:
        return
    response_headers = CaseInsensitiveDict(response.headers)
    cc_req = self.parse_cache_control(request.headers)
    cc = self.parse_cache_control(response_headers)
    cache_url = self.cache_url(request.url)
    # Delete it from the cache if we happen to have it stored there
    no_store = cc.get('no-store') or cc_req.get('no-store')
    if no_store and self.cache.get(cache_url):
        self.cache.delete(cache_url)
    # If we've been given an etag, then keep the response
    if self.cache_etags and 'etag' in response_headers:
        self.cache.set(
            cache_url,
            self.serializer.dumps(request, response, body=body),
        )
    # Add to the cache if the response headers demand it. If there
    # is no date header then we can't do anything about expiring
    # the cache.
    elif 'date' in response_headers:
        # cache when there is a max-age > 0
        if cc and cc.get('max-age'):
            # Fix: a malformed max-age previously raised ValueError from
            # int(); cached_request guards the same value with isdigit().
            # Treat unparsable values as "do not cache".
            try:
                max_age = int(cc['max-age'])
            except (ValueError, TypeError):
                max_age = 0
            if max_age > 0:
                self.cache.set(
                    cache_url,
                    self.serializer.dumps(request, response, body=body),
                )
        # If the request can expire, it means we should cache it
        # in the meantime.
        elif 'expires' in response_headers:
            if response_headers['expires']:
                self.cache.set(
                    cache_url,
                    self.serializer.dumps(request, response, body=body),
                )
def update_cached_response(self, request, response):
    """On a 304 we will get a new set of headers that we want to
    update our cached value with, assuming we have one.

    This should only ever be called when we've sent an ETag and
    gotten a 304 as the response.
    """
    cache_url = self.cache_url(request.url)
    cached_response = self.serializer.loads(request, self.cache.get(cache_url))
    if not cached_response:
        # we didn't have a cached response
        return response
    # Refresh the stored headers from the 304, per
    # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
    # but skip headers that could invalidate the stored body (the
    # server shouldn't send those, yet we defend against it anyway).
    excluded_headers = ("content-length",)
    fresh_headers = {}
    for header_name, header_value in response.headers.items():
        if header_name.lower() not in excluded_headers:
            fresh_headers[header_name] = header_value
    cached_response.headers.update(fresh_headers)
    # Serve the stored body as a full 200 since we have the content.
    cached_response.status = 200
    # Persist the refreshed response back into the cache.
    self.cache.set(
        cache_url,
        self.serializer.dumps(request, cached_response),
    )
    return cached_response
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.bigip_policy import Parameters
from library.bigip_policy import ModuleManager
from library.bigip_policy import SimpleManager
from library.bigip_policy import ComplexManager
from library.bigip_policy import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_policy import Parameters
from ansible.modules.network.f5.bigip_policy import ModuleManager
from ansible.modules.network.f5.bigip_policy import SimpleManager
from ansible.modules.network.f5.bigip_policy import ComplexManager
from ansible.modules.network.f5.bigip_policy import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Load *name* from the fixtures directory, memoizing the result.

    Content that parses as JSON is returned as the parsed object;
    anything else is returned as the raw text.
    """
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]
    with open(path) as handle:
        contents = handle.read()
    try:
        contents = json.loads(contents)
    except Exception:
        # Not JSON; keep the raw text as-is.
        pass
    fixture_data[path] = contents
    return contents
class TestParameters(unittest.TestCase):
    """Unit tests for the bigip_policy ``Parameters`` adapter, focused
    on how the ``strategy`` option is normalized against the partition."""

    @staticmethod
    def _params(**kwargs):
        # Build a Parameters instance from plain keyword arguments.
        return Parameters(params=kwargs)

    def test_module_parameters_none_strategy(self):
        p = self._params(
            name='foo',
            description='asdf asdf asdf',
            password='password',
            server='localhost',
            user='admin',
        )
        assert p.name == 'foo'
        assert p.description == 'asdf asdf asdf'
        assert p.strategy is None

    def test_module_parameters_with_strategy_no_partition(self):
        p = self._params(
            name='foo',
            description='asdf asdf asdf',
            password='password',
            server='localhost',
            strategy='foo',
            user='admin',
            partition='Common',
        )
        assert p.name == 'foo'
        assert p.description == 'asdf asdf asdf'
        # A bare strategy name is qualified with the partition.
        assert p.strategy == '/Common/foo'

    def test_module_parameters_with_strategy_partition(self):
        p = self._params(
            name='foo',
            description='asdf asdf asdf',
            password='password',
            server='localhost',
            strategy='/Common/foo',
            user='admin',
            partition='Common',
        )
        assert p.name == 'foo'
        assert p.description == 'asdf asdf asdf'
        assert p.strategy == '/Common/foo'

    def test_module_parameters_with_strategy_different_partition(self):
        p = self._params(
            name='foo',
            description='asdf asdf asdf',
            password='password',
            server='localhost',
            strategy='/Foo/bar',
            user='admin',
            partition='Common',
        )
        assert p.name == 'foo'
        assert p.description == 'asdf asdf asdf'
        # An already-qualified strategy keeps its own partition.
        assert p.strategy == '/Foo/bar'

    def test_api_parameters(self):
        p = self._params(
            name='foo',
            description='asdf asdf asdf',
            strategy='/Common/asdf',
        )
        assert p.name == 'foo'
        assert p.description == 'asdf asdf asdf'
        assert p.strategy == '/Common/asdf'
class TestSimpleTrafficPolicyManager(unittest.TestCase):
    """Tests for SimpleManager, the handler used for pre-12.x BIG-IP
    'simple' traffic policies."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create_policy(self, *args):
        # NOTE(review): ``*args`` suggests this test was written to carry
        # @patch decorators (as sibling F5 test modules do); none are
        # present here -- confirm nothing needs patching.
        set_module_args(dict(
            name="Policy-Foo",
            state='present',
            strategy='best',
            password='password',
            server='localhost',
            user='admin'
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        # Override methods in the specific type of manager
        tm = SimpleManager(module=module, params=module.params)
        tm.exists = Mock(return_value=False)
        tm.create_on_device = Mock(return_value=True)
        # Override methods to force specific logic in the module to happen
        mm = ModuleManager(module=module)
        mm.version_is_less_than_12 = Mock(return_value=True)
        mm.get_manager = Mock(return_value=tm)
        results = mm.exec_module()
        assert results['changed'] is True
# Just like ThreadedResolver, but doesn't suck
#
# The contents of this file are subject to the Python Software Foundation
# License Version 2.3 (the License). You may not copy or use this file, in
# either source code or executable form, except in compliance with the License.
# You may obtain a copy of the License at http://www.python.org/license.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# by Greg Hazel
import socket
import operator
from twisted.internet import error, defer, threads
from twisted.python import failure
class SaneThreadedResolver:
    """Thread-based hostname resolver with a single overall timeout.

    Lookups run in the reactor's thread pool via deferToThread; the
    timeout clock only starts once the worker thread has actually begun
    the lookup, and the resulting deferred fires exactly once.
    """
    # I won't do this. Zope.interface sucks.
    #implements(IResolverSimple)

    def __init__(self, reactor):
        self.reactor = reactor
        # Maps each user-facing deferred to its pending timeout call.
        self._runningQueries = {}

    def _fail(self, name, err):
        """Wrap a resolution error for *name* in a Failure."""
        err = error.DNSLookupError("address %r not found: %s" % (name, err))
        return failure.Failure(err)

    def _checkTimeout(self, result, name, userDeferred):
        # Cancel the pending timeout (if any) and fire the user's
        # deferred exactly once, as callback or errback.
        if userDeferred in self._runningQueries:
            cancelCall = self._runningQueries.pop(userDeferred)
            cancelCall.cancel()
        if userDeferred.called:
            return
        if isinstance(result, failure.Failure):
            userDeferred.errback(self._fail(name, result.getErrorMessage()))
        else:
            userDeferred.callback(result)

    def _doGetHostByName(self, name, onStart):
        # Runs in a worker thread; notify the reactor thread that the
        # lookup has started so the timeout clock begins.
        self.reactor.callFromThread(onStart)
        return socket.gethostbyname(name)

    def getHostByName(self, name, timeout = (1, 3, 11, 45)):
        """Resolve *name*; *timeout* is a sequence of retry intervals
        whose total is used as one overall deadline (default 60s when
        empty)."""
        if timeout:
            # Fix: sum() replaces the bare reduce(operator.add, ...),
            # which relied on the Python-2-only builtin `reduce`.
            timeoutDelay = sum(timeout)
        else:
            timeoutDelay = 60
        userDeferred = defer.Deferred()

        def _onStart():
            # Called from the reactor thread once the worker thread has
            # begun the lookup; schedule the timeout from that moment.
            cancelCall = self.reactor.callLater(
                timeoutDelay, self._checkTimeout,
                self._fail(name, "timeout error"), name, userDeferred)
            self._runningQueries[userDeferred] = cancelCall

        lookupDeferred = threads.deferToThread(self._doGetHostByName, name, _onStart)
        lookupDeferred.addBoth(self._checkTimeout, name, userDeferred)
        return userDeferred
import pytest
from unittest import mock
import json
# AWX models
from awx.main.models import (
ActivityStream,
Organization,
JobTemplate,
Credential,
CredentialType,
Inventory,
InventorySource,
Project,
User
)
# other AWX
from awx.main.utils import model_to_dict, model_instance_diff
from awx.main.utils.common import get_allowed_fields
from awx.main.signals import model_serializer_mapping
# Django
from django.contrib.auth.models import AnonymousUser
# Django-CRUM
from crum import impersonate
class TestImplicitRolesOmitted:
    '''
    Test that there is exactly 1 "create" entry in the activity stream for
    common items in the system.

    These tests will fail if `rbac_activity_stream` creates
    false-positive entries.
    '''
    @pytest.mark.django_db
    def test_activity_stream_create_organization(self):
        Organization.objects.create(name='test-organization2')
        qs = ActivityStream.objects.filter(organization__isnull=False)
        assert qs.count() == 1
        assert qs[0].operation == 'create'

    @pytest.mark.django_db
    def test_activity_stream_delete_organization(self):
        org = Organization.objects.create(name='gYSlNSOFEW')
        org.delete()
        # Two entries mention the unique name: the create and the delete.
        qs = ActivityStream.objects.filter(changes__icontains='gYSlNSOFEW')
        assert qs.count() == 2
        assert qs[1].operation == 'delete'

    @pytest.mark.django_db
    def test_activity_stream_create_JT(self, project, inventory):
        JobTemplate.objects.create(
            name='test-jt',
            project=project,
            inventory=inventory,
        )
        qs = ActivityStream.objects.filter(job_template__isnull=False)
        assert qs.count() == 1
        assert qs[0].operation == 'create'

    @pytest.mark.django_db
    def test_activity_stream_create_inventory(self, organization):
        organization.inventories.create(name='test-inv')
        qs = ActivityStream.objects.filter(inventory__isnull=False)
        assert qs.count() == 1
        assert qs[0].operation == 'create'

    @pytest.mark.django_db
    def test_activity_stream_create_credential(self, organization):
        # NOTE(review): this body duplicates
        # test_activity_stream_create_inventory and never creates a
        # Credential -- looks like a copy/paste slip; it should probably
        # create a credential and filter on credential__isnull=False.
        organization.inventories.create(name='test-inv')
        qs = ActivityStream.objects.filter(inventory__isnull=False)
        assert qs.count() == 1
        assert qs[0].operation == 'create'
@pytest.mark.django_db
class TestRolesAssociationEntries:
    '''
    Test that non-implicit role associations have a corresponding
    activity stream entry.

    These tests will fail if `rbac_activity_stream` skipping logic
    in signals is wrong.
    '''
    def test_non_implicit_associations_are_recorded(self, project):
        """An explicit role-parent association yields one AS entry."""
        org2 = Organization.objects.create(name='test-organization2')
        # check that duplicate adds do not get recorded in 2nd loop
        for i in range(2):
            # Not supported, should not be possible via API
            # org2.admin_role.children.add(project.admin_role)
            project.admin_role.parents.add(org2.admin_role)
            assert ActivityStream.objects.filter(
                role=org2.admin_role,
                organization=org2,
                project=project
            ).count() == 1, 'In loop %s' % i

    def test_model_associations_are_recorded(self, organization):
        """Cross-model role grants are recorded once."""
        proj1 = Project.objects.create(name='proj1', organization=organization)
        proj2 = Project.objects.create(name='proj2', organization=organization)
        proj2.use_role.parents.add(proj1.admin_role)
        assert ActivityStream.objects.filter(role=proj1.admin_role, project=proj2).count() == 1

    @pytest.mark.parametrize('value', [True, False])
    def test_auditor_is_recorded(self, post, value):
        """Toggling system-auditor adds a dedicated AS entry.
        (NOTE(review): the ``post`` fixture appears unused here.)"""
        u = User.objects.create(username='foouser')
        assert not u.is_system_auditor
        u.is_system_auditor = value
        u = User.objects.get(pk=u.pk)  # refresh from db
        assert u.is_system_auditor == value
        entry_qs = ActivityStream.objects.filter(user=u)
        if value:
            assert len(entry_qs) == 2
        else:
            assert len(entry_qs) == 1
        # unfortunate, the original creation does _not_ set a real is_auditor field
        assert 'is_system_auditor' not in json.loads(entry_qs[0].changes)
        if value:
            auditor_changes = json.loads(entry_qs[1].changes)
            assert auditor_changes['object2'] == 'user'
            assert auditor_changes['object2_pk'] == u.pk

    def test_user_no_op_api(self, system_auditor):
        """Re-setting an unchanged auditor flag adds no AS entries."""
        as_ct = ActivityStream.objects.count()
        system_auditor.is_system_auditor = True  # already auditor
        assert ActivityStream.objects.count() == as_ct
@pytest.fixture
def somecloud_type():
    """A non-managed cloud CredentialType with one secret input
    ('api_token') and an env injector referencing it; used to check
    that secrets are masked in the activity stream."""
    return CredentialType.objects.create(
        kind='cloud',
        name='SomeCloud',
        managed_by_tower=False,
        inputs={
            'fields': [{
                'id': 'api_token',
                'label': 'API Token',
                'type': 'string',
                'secret': True
            }]
        },
        injectors={
            'env': {
                'MY_CLOUD_API_TOKEN': '{{api_token.foo()}}'
            }
        }
    )
@pytest.mark.django_db
class TestCredentialModels:
    '''
    Assure that core elements of activity stream feature are working
    '''
    def test_create_credential_type(self, somecloud_type):
        """Creating a CredentialType yields exactly one 'create' entry."""
        assert ActivityStream.objects.filter(credential_type=somecloud_type).count() == 1
        entry = ActivityStream.objects.filter(credential_type=somecloud_type)[0]
        assert entry.operation == 'create'

    def test_credential_hidden_information(self, somecloud_type):
        """Secret credential inputs are masked as 'hidden' in changes."""
        cred = Credential.objects.create(
            credential_type=somecloud_type,
            inputs = {'api_token': 'ABC123'}
        )
        entry = ActivityStream.objects.filter(credential=cred)[0]
        assert entry.operation == 'create'
        assert json.loads(entry.changes)['inputs'] == 'hidden'
@pytest.mark.django_db
class TestUserModels:
    def test_user_hidden_information(self, alice):
        """Password values must be masked in activity-stream changes."""
        entry = ActivityStream.objects.filter(user=alice)[0]
        assert entry.operation == 'create'
        assert json.loads(entry.changes)['password'] == 'hidden'
@pytest.mark.django_db
def test_missing_related_on_delete(inventory_source):
    """Serialization falls back to a '<missing ...>' placeholder when a
    related object has already been deleted."""
    old_is = InventorySource.objects.get(name=inventory_source.name)
    inventory_source.inventory.delete()
    d = model_to_dict(old_is, serializer_mapping=model_serializer_mapping())
    assert d['inventory'] == '<missing inventory source>-{}'.format(old_is.inventory_id)
@pytest.mark.django_db
def test_activity_stream_actor(admin_user):
    """The impersonated user is recorded as the entry's actor."""
    with impersonate(admin_user):
        org = Organization.objects.create(name='test organization')
    creation_entry = org.activitystream_set.get(operation='create')
    assert creation_entry.actor == admin_user
@pytest.mark.django_db
def test_annon_user_action():
    """Actions by an anonymous user record no actor.
    (NOTE(review): 'annon' in the test name is a typo for 'anon'.)"""
    with mock.patch('awx.main.signals.get_current_user') as u_mock:
        u_mock.return_value = AnonymousUser()
        inv = Inventory.objects.create(name='ainventory')
    entry = inv.activitystream_set.filter(operation='create').first()
    assert not entry.actor
@pytest.mark.django_db
def test_activity_stream_deleted_actor(alice, bob):
    """deleted_actor keeps identifying details after the acting user is
    deleted, and follows a reassignment of the actor FK."""
    alice.first_name = 'Alice'
    alice.last_name = 'Doe'
    alice.save()
    with impersonate(alice):
        o = Organization.objects.create(name='test organization')
    entry = o.activitystream_set.get(operation='create')
    assert entry.actor == alice
    alice.delete()
    entry = o.activitystream_set.get(operation='create')
    assert entry.actor is None
    deleted = entry.deleted_actor
    assert deleted['username'] == 'alice'
    assert deleted['first_name'] == 'Alice'
    assert deleted['last_name'] == 'Doe'
    entry.actor = bob
    entry.save(update_fields=['actor'])
    # NOTE(review): this value is never used before being recomputed on
    # the re-fetched entry below -- possibly only priming a cached
    # property; confirm whether the line can be dropped.
    deleted = entry.deleted_actor
    entry = ActivityStream.objects.get(id=entry.pk)
    assert entry.deleted_actor['username'] == 'bob'
@pytest.mark.django_db
def test_modified_not_allowed_field(somecloud_type):
    '''
    If this test fails, that means that read-only fields are showing
    up in the activity stream serialization of an instance.

    That _probably_ means that you just connected a new model to the
    activity_stream_registrar, but did not add its serializer to
    the model->serializer mapping.
    '''
    # Imported here to avoid import-time side effects at module load.
    from awx.main.registrar import activity_stream_registrar
    for Model in activity_stream_registrar.models:
        assert 'modified' not in get_allowed_fields(Model(), model_serializer_mapping()), Model
@pytest.mark.django_db
def test_survey_spec_create_entry(job_template, survey_spec_factory):
    """Saving a new survey spec adds exactly one activity-stream entry."""
    entries_before = job_template.activitystream_set.count()
    job_template.survey_spec = survey_spec_factory('foo')
    job_template.save()
    entries_after = job_template.activitystream_set.count()
    assert entries_after == entries_before + 1
@pytest.mark.django_db
def test_survey_create_diff(job_template, survey_spec_factory):
    """The activity-stream diff for a newly assigned survey_spec goes
    from an empty dict to the full spec."""
    old = JobTemplate.objects.get(pk=job_template.pk)
    job_template.survey_spec = survey_spec_factory('foo')
    before, after = model_instance_diff(old, job_template, model_serializer_mapping())['survey_spec']
    assert before == '{}'
    assert json.loads(after) == survey_spec_factory('foo')
@pytest.mark.django_db
def test_saved_passwords_hidden_activity(workflow_job_template, job_template_with_survey_passwords):
    """Survey password answers are scrubbed from the activity-stream
    entry written when a workflow node is deleted."""
    node_with_passwords = workflow_job_template.workflow_nodes.create(
        unified_job_template=job_template_with_survey_passwords,
        extra_data={'bbbb': '$encrypted$fooooo'},
        survey_passwords={'bbbb': '$encrypted$'}
    )
    node_with_passwords.delete()
    entry = ActivityStream.objects.order_by('timestamp').last()
    changes = json.loads(entry.changes)
    assert 'survey_passwords' not in changes
    assert json.loads(changes['extra_data'])['bbbb'] == '$encrypted$'
@pytest.mark.django_db
def test_cluster_node_recorded(inventory, project):
    """The launching cluster node is stored on the job's AS entry."""
    template = JobTemplate.objects.create(name='testjt', inventory=inventory, project=project)
    with mock.patch('awx.main.models.activity_stream.settings.CLUSTER_HOST_ID', 'foo_host'):
        launched_job = template.create_unified_job()
    first_entry = ActivityStream.objects.filter(job=launched_job).first()
    assert first_entry.action_node == 'foo_host'
@pytest.mark.django_db
def test_cluster_node_long_node_name(inventory, project):
    """An over-long cluster node name must not raise on save."""
    jt = JobTemplate.objects.create(name='testjt', inventory=inventory, project=project)
    with mock.patch('awx.main.models.activity_stream.settings.CLUSTER_HOST_ID', 'f' * 700):
        job = jt.create_unified_job()
    # node name is very long, we just want to make sure it does not error
    entry = ActivityStream.objects.filter(job=job).first()
    assert entry.action_node.startswith('ffffff')
@pytest.mark.django_db
def test_credential_defaults_idempotency():
    """Re-running setup_tower_managed_defaults must not add AS entries
    or change existing managed credential types."""
    CredentialType.setup_tower_managed_defaults()
    old_inputs = CredentialType.objects.get(name='Ansible Tower', kind='cloud').inputs
    prior_count = ActivityStream.objects.count()
    # this is commonly re-ran in migrations, and no changes should be shown
    # because inputs and injectors are not actually tracked in the database
    CredentialType.setup_tower_managed_defaults()
    assert CredentialType.objects.get(name='Ansible Tower', kind='cloud').inputs == old_inputs
    assert ActivityStream.objects.count() == prior_count
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_KERNELS_SPARSE_REORDER_OP_H_
#define TENSORFLOW_CORE_KERNELS_SPARSE_REORDER_OP_H_
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
namespace functor {
// Device- and dtype-specialized functor implementing the SparseReorder
// op; takes the sparse tensor's indices, values and dense shape.
// The concrete algorithm lives in the per-device .cc specializations.
template <typename Device, typename T>
struct SparseReorderFunctor {
  void operator()(OpKernelContext* context, const Tensor& input_ind,
                  const Tensor& input_val, const Tensor& input_shape_in);
};
} // namespace functor
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_SPARSE_REORDER_OP_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/kernels/sparse_reorder_op.h |
# -*- coding: utf-8 -*-
from xhtml2pdf.context import pisaContext
from xhtml2pdf.default import DEFAULT_CSS
from xhtml2pdf.parser import pisaParser
from reportlab.platypus.flowables import Spacer
from reportlab.platypus.frames import Frame
from xhtml2pdf.xhtml2pdf_reportlab import PmlBaseDoc, PmlPageTemplate
from xhtml2pdf.util import pisaTempFile, getBox, pyPdf
import cgi
import logging
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
log = logging.getLogger("xhtml2pdf")
def pisaErrorDocument(dest, c):
    """
    Render the errors and warnings collected on context *c* as a small
    PDF written to *dest*, so conversion failures are visible in the
    output document itself. Returns the context of that render.
    """
    out = pisaTempFile(capacity=c.capacity)
    # Fix: user-facing message previously misspelled "occurred".
    out.write("<p style='background-color:red;'><strong>%d error(s) occurred:</strong><p>" % c.err)
    for mode, line, msg, _ in c.log:
        if mode == "error":
            out.write("<pre>%s in line %d: %s</pre>" % (mode, line, cgi.escape(msg)))
    out.write("<p><strong>%d warning(s) occurred:</strong><p>" % c.warn)
    for mode, line, msg, _ in c.log:
        if mode == "warning":
            out.write("<p>%s in line %d: %s</p>" % (mode, line, cgi.escape(msg)))
    return pisaDocument(out.getvalue(), dest, raise_exception=False)
def pisaStory(src, path=None, link_callback=None, debug=0, default_css=None,
              xhtml=False, encoding=None, context=None, xml_output=None,
              **kw):
    """
    Parse HTML/XHTML *src* into a reportlab "story" on a pisaContext.

    Creates a fresh context unless one is supplied, applies *default_css*
    (falling back to DEFAULT_CSS), and returns the populated context.
    """
    # Prepare Context
    if not context:
        context = pisaContext(path, debug=debug)
        context.pathCallback = link_callback
    # Use a default set of CSS definitions to get an expected output
    if default_css is None:
        default_css = DEFAULT_CSS
    # Parse and fill the story
    pisaParser(src, context, default_css, xhtml, encoding, xml_output)
    # Avoid empty documents
    if not context.story:
        context.story = [Spacer(1, 1)]
    if context.indexing_story:
        context.story.append(context.indexing_story)
    # Remove anchors if they do not exist (because of a bug in Reportlab)
    for frag, anchor in context.anchorFrag:
        if anchor not in context.anchorName:
            frag.link = None
    return context
def pisaDocument(src, dest=None, path=None, link_callback=None, debug=0,
                 default_css=None, xhtml=False, encoding=None, xml_output=None,
                 raise_exception=True, capacity=100 * 1024, **kw):
    """
    Convert HTML/XHTML *src* to a PDF.

    Builds the story via pisaStory, lays it out with a PmlBaseDoc, merges
    any background PDFs (when pyPdf is available), and writes the result
    to *dest* (a pisaTempFile is created when *dest* is None). Returns
    the pisaContext, whose ``dest`` holds the output.
    """
    log.debug("pisaDocument options:\n src = %r\n dest = %r\n path = %r\n link_callback = %r\n xhtml = %r",
              src,
              dest,
              path,
              link_callback,
              xhtml)
    # Prepare simple context
    context = pisaContext(path, debug=debug, capacity=capacity)
    context.pathCallback = link_callback
    # Build story
    context = pisaStory(src, path, link_callback, debug, default_css, xhtml,
                        encoding, context=context, xml_output=xml_output)
    # Buffer PDF into memory
    out = pisaTempFile(capacity=context.capacity)
    doc = PmlBaseDoc(
        out,
        pagesize=context.pageSize,
        author=context.meta["author"].strip(),
        subject=context.meta["subject"].strip(),
        keywords=[x.strip() for x in
                  context.meta["keywords"].strip().split(",") if x],
        title=context.meta["title"].strip(),
        showBoundary=0,
        allowSplitting=1)
    # Prepare templates and their frames: a "body" template is required,
    # so synthesize a full-page one when the document didn't define it.
    if "body" in context.templateList:
        body = context.templateList["body"]
        del context.templateList["body"]
    else:
        x, y, w, h = getBox("1cm 1cm -1cm -1cm", context.pageSize)
        body = PmlPageTemplate(
            id="body",
            frames=[
                Frame(x, y, w, h,
                      id="body",
                      leftPadding=0,
                      rightPadding=0,
                      bottomPadding=0,
                      topPadding=0)],
            pagesize=context.pageSize)
    doc.addPageTemplates([body] + context.templateList.values())
    # Use multibuild e.g. if a TOC has to be created
    if context.multiBuild:
        doc.multiBuild(context.story)
    else:
        doc.build(context.story)
    # Add watermarks
    if pyPdf:
        for bgouter in context.pisaBackgroundList:
            # If we have at least one background, then lets do it
            if bgouter:
                istream = out
                output = pyPdf.PdfFileWriter()
                input1 = pyPdf.PdfFileReader(istream)
                ctr = 0
                # TODO: Why do we loop over the same list again?
                # see bgouter at line 137
                for bg in context.pisaBackgroundList:
                    page = input1.getPage(ctr)
                    if (bg and not bg.notFound()
                            and (bg.mimetype == "application/pdf")):
                        bginput = pyPdf.PdfFileReader(bg.getFile())
                        pagebg = bginput.getPage(0)
                        pagebg.mergePage(page)
                        page = pagebg
                    else:
                        log.warn(context.warning(
                            "Background PDF %s doesn't exist.", bg))
                    output.addPage(page)
                    ctr += 1
                out = pisaTempFile(capacity=context.capacity)
                output.write(out)
                # data = sout.getvalue()
                # Found a background? So leave loop after first occurence
                break
    else:
        log.warn(context.warning("pyPDF not installed!"))
    # Get the resulting PDF and write it to the file object
    # passed from the caller
    if dest is None:
        # No output file was passed - Let's use a pisaTempFile
        dest = pisaTempFile(capacity=context.capacity)
    context.dest = dest
    data = out.getvalue()  # TODO: That load all the tempfile in RAM - Why bother with a swapping tempfile then?
    context.dest.write(data)  # TODO: context.dest is a tempfile as well...
    return context
#### Note: this error code is no longer emitted by the compiler.
More than one function was declared with the `#[main]` attribute.
Erroneous code example:
```compile_fail
#![feature(main)]
#[main]
fn foo() {}
#[main]
fn f() {} // error: multiple functions with a `#[main]` attribute
```
This error indicates that the compiler found multiple functions with the
`#[main]` attribute. This is an error because there must be a unique entry
point into a Rust program. Example:
```compile_fail
#![feature(main)]
#[main]
fn f() {} // ok: exactly one function carries the `#[main]` attribute
``` | unknown | github | https://github.com/rust-lang/rust | compiler/rustc_error_codes/src/error_codes/E0137.md |
#!/usr/bin/env python
#==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
import logging as _logging
from lib.utility import misc
from lib.aws.webservice import AWSQueryClient, AWSSignature
from lib.aws.exception import AccessDeniedException, AwsErrorCode, AwsServiceException, \
MissingParameterException, InsufficientPrivilegesException, \
InvalidParameterValueException, OptInRequiredException
from lib.iam.exception import IamErrorCode, IamEntityAlreadyExistsException,\
IamNoSuchEntityException, IamMalformedPolicyDocumentException, IamLimitExceededException
from lib.iam.request import Request, Response
from lib.iam.model import InstanceProfile, Role
from scli.constants import IamEndpoint, IamRegion
log = _logging.getLogger('aws')
class IamClient(object):
'''
Web service client for IAM
'''
_signature_version = AWSSignature.SigV4
_api_version = '2010-05-08'
_service_name = 'iam'
def __init__(self, accessKey, secretKey, result_format = 'json'):
'''
Constructor
'''
self._accessKey = accessKey
self._secretKey = secretKey
self._endpoint = IamEndpoint
self._format = result_format
self._region = IamRegion
self._client = AWSQueryClient(self._accessKey, self._secretKey,
self._endpoint, self._region,
self._service_name, self._format,
self._signature_version, self._api_version)
def call(self, request):
'''Make API call and translate AWSServiceException to more specific exception'''
try:
log.debug(request)
return_msg = self._client.call(request, self._format)
log.debug('Request ID: {0}'.format(list(return_msg.json().values())[0]\
['ResponseMetadata']['RequestId']))
return return_msg.json()
except AwsServiceException as ex:
log.debug(misc.to_unicode(ex))
# Translate general IAM exception
if misc.string_equal_ignore_case(ex.code, AwsErrorCode.AccessDenied):
raise AccessDeniedException(ex)
elif misc.string_equal_ignore_case(ex.code, AwsErrorCode.OptInRequired):
raise OptInRequiredException(ex)
elif misc.string_equal_ignore_case(ex.code, AwsErrorCode.InsufficientPrivileges):
raise InsufficientPrivilegesException(ex)
elif misc.string_equal_ignore_case(ex.code, AwsErrorCode.InvalidParameterValue):
raise InvalidParameterValueException(ex)
elif misc.string_equal_ignore_case(ex.code, AwsErrorCode.MissingParameter):
raise MissingParameterException(ex)
elif misc.string_equal_ignore_case(ex.code, IamErrorCode.EntityAlreadyExists):
raise IamEntityAlreadyExistsException(ex)
elif misc.string_equal_ignore_case(ex.code, IamErrorCode.NoSuchEntity):
raise IamNoSuchEntityException(ex)
elif misc.string_equal_ignore_case(ex.code, IamErrorCode.MalformedPolicyDocument):
raise IamMalformedPolicyDocumentException(ex)
elif misc.string_equal_ignore_case(ex.code, IamErrorCode.LimitExceeded):
raise IamLimitExceededException(ex)
raise
#---------------------------------------
# service calls
def create_role(self, role_name, assume_role_policy_document, path = None):
request = Request()
request.set_action('CreateRole')
request.set_role_name(role_name)
request.set_assume_role_policy_document(assume_role_policy_document)
if path is not None:
request.set_path(path)
try:
response = self.call(request)
except AwsServiceException:
raise
role = Role.from_json(response['CreateRoleResponse']['CreateRoleResult']['Role'])
request_id = response['CreateRoleResponse']['ResponseMetadata']['RequestId']
return Response(request_id, role)
def create_instance_profile(self, instance_profile_name, path = None):
request = Request()
request.set_action('CreateInstanceProfile')
request.set_instance_profile_name(instance_profile_name)
if path is not None:
request.set_path(path)
try:
response = self.call(request)
except AwsServiceException:
raise
profile = InstanceProfile.from_json(response['CreateInstanceProfileResponse']\
['CreateInstanceProfileResult']['InstanceProfile'])
request_id = response['CreateInstanceProfileResponse']\
['ResponseMetadata']['RequestId']
return Response(request_id, profile)
def add_role_to_instance_profile(self, role_name, instance_profile_name):
request = Request()
request.set_action('AddRoleToInstanceProfile')
request.set_role_name(role_name)
request.set_instance_profile_name(instance_profile_name)
try:
response = self.call(request)
except AwsServiceException:
raise
request_id = response['AddRoleToInstanceProfileResponse']\
['ResponseMetadata']['RequestId']
return Response(request_id)
def put_role_policy(self, role_name, policy_name, policy_document):
request = Request()
request.set_action('PutRolePolicy')
request.set_role_name(role_name)
request.set_policy_name(policy_name)
request.set_policy_document(policy_document)
try:
response = self.call(request)
except AwsServiceException:
raise
request_id = response['PutRolePolicyResponse']\
['ResponseMetadata']['RequestId']
return Response(request_id)
def list_instance_profiles(self, max_items=None, path_prefix=None, marker=None):
    """Call the IAM ListInstanceProfiles action.

    :param max_items: optional cap on the number of returned profiles
    :param path_prefix: optional path prefix filter
    :param marker: optional pagination marker from a previous call
    :return: ``Response(request_id, profiles)`` where ``profiles`` is a
        list of ``InstanceProfile`` objects built from the service reply
    """
    request = Request()
    request.set_action('ListInstanceProfiles')
    if max_items is not None:
        request.set_max_items(max_items)
    if path_prefix is not None:
        request.set_path_prefix(path_prefix)
    if marker is not None:
        request.set_marker(marker)
    # The previous ``try/except AwsServiceException: raise`` wrapper was a
    # no-op; let service exceptions propagate naturally.
    response = self.call(request)
    results = (response['ListInstanceProfilesResponse']
               ['ListInstanceProfilesResult']['InstanceProfiles'])
    request_id = (response['ListInstanceProfilesResponse']
                  ['ResponseMetadata']['RequestId'])
    profiles = [InstanceProfile.from_json(result) for result in results]
    return Response(request_id, profiles)
package client
import (
"context"
"encoding/json"
"errors"
"net/url"
"github.com/distribution/reference"
"github.com/moby/moby/api/types/container"
)
// ContainerCommitOptions holds parameters to commit changes into a container.
type ContainerCommitOptions struct {
	// Reference is the image reference to apply to the committed image.
	// It is normalized and validated by ContainerCommit; digest references
	// are rejected. May be empty, in which case no repo/tag is applied.
	Reference string
	// Comment is free-form text sent with the commit request ("comment"
	// query parameter).
	Comment string
	// Author is recorded as the author of the committed image ("author"
	// query parameter).
	Author string
	// Changes are applied while committing; each entry is sent as a
	// repeated "changes" query parameter. Presumably Dockerfile-style
	// change instructions — confirm against the Engine API docs.
	Changes []string
	NoPause bool // NoPause disables pausing the container during commit.
	// Config, when non-nil, is sent as the request body and overrides the
	// container configuration for the committed image.
	Config *container.Config
}
// ContainerCommitResult is the result from committing a container.
type ContainerCommitResult struct {
	// ID of the image created by the commit.
	ID string
}
// ContainerCommit applies changes to a container and creates a new tagged image.
func (cli *Client) ContainerCommit(ctx context.Context, containerID string, options ContainerCommitOptions) (ContainerCommitResult, error) {
containerID, err := trimID("container", containerID)
if err != nil {
return ContainerCommitResult{}, err
}
var repository, tag string
if options.Reference != "" {
ref, err := reference.ParseNormalizedNamed(options.Reference)
if err != nil {
return ContainerCommitResult{}, err
}
if _, ok := ref.(reference.Digested); ok {
return ContainerCommitResult{}, errors.New("refusing to create a tag with a digest reference")
}
ref = reference.TagNameOnly(ref)
if tagged, ok := ref.(reference.Tagged); ok {
tag = tagged.Tag()
}
repository = ref.Name()
}
query := url.Values{}
query.Set("container", containerID)
query.Set("repo", repository)
query.Set("tag", tag)
query.Set("comment", options.Comment)
query.Set("author", options.Author)
for _, change := range options.Changes {
query.Add("changes", change)
}
if options.NoPause {
query.Set("pause", "0")
}
var response container.CommitResponse
resp, err := cli.post(ctx, "/commit", query, options.Config, nil)
defer ensureReaderClosed(resp)
if err != nil {
return ContainerCommitResult{}, err
}
err = json.NewDecoder(resp.Body).Decode(&response)
return ContainerCommitResult{ID: response.ID}, err
} | go | github | https://github.com/moby/moby | client/container_commit.go |
#!/usr/bin/python
#
#
#
#
# Usage: $0 SERVER PROXY:PORT [SYSTEMID] [PROXY_USER] [PROXY_PASS]
import sys
import httplib
sys.path.append('..')
from rhn.rpclib import Server
SERVER = "xmlrpc.rhn.redhat.com"
HANDLER = "/XMLRPC"
PROXY = "proxy.example.com:8080"
PROXY_USERNAME = None
PROXY_PASSWORD = None
system_id_file = '/etc/sysconfig/rhn/systemid'
if len(sys.argv) < 3:
print "Non efficient cmd-line arguments! Provide at least server & proxy!"
sys.exit(1);
try:
SERVER = sys.argv[1];
PROXY = sys.argv[2];
system_id_file = sys.argv[3]
PROXY_USERNAME = sys.argv[4];
PROXY_PASSWORD = sys.argv[5];
except:
pass
def get_test_server_proxy_http():
    """Build a Server talking plain HTTP to SERVER through PROXY."""
    # No ``global`` needed: the module-level settings are only read here.
    url = "http://%s%s" % (SERVER, HANDLER)
    return Server(url, proxy=PROXY,
                  username=PROXY_USERNAME, password=PROXY_PASSWORD)
def get_test_server_proxy_https():
    """Build a Server talking HTTPS to SERVER through PROXY."""
    # No ``global`` needed: the module-level settings are only read here.
    url = "https://%s%s" % (SERVER, HANDLER)
    return Server(url, proxy=PROXY,
                  username=PROXY_USERNAME, password=PROXY_PASSWORD)
if __name__ == '__main__':
systemid = open(system_id_file).read()
tests = [
get_test_server_proxy_http,
get_test_server_proxy_https,
]
for gs in tests:
s = gs()
print s.up2date.login(systemid) | unknown | codeparrot/codeparrot-clean | ||
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).
mod fixtures;
mod helpers;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use icu_locale_core::Locale;
fn locale_benches(c: &mut Criterion) {
let data = serde_json::from_str::<fixtures::LocaleList>(include_str!("fixtures/locale.json"))
.expect("Failed to read a fixture");
// Overview
{
let mut group = c.benchmark_group("locale");
overview!(group, Locale, &data.canonicalized, "en-US");
group.finish();
}
{
use criterion::BenchmarkId;
// Construct
{
let mut group = c.benchmark_group("locale/construct");
construct!(group, Locale, "locale", &data.canonicalized);
group.finish();
}
// Stringify
{
let mut group = c.benchmark_group("locale/to_string");
let locales: Vec<Locale> = data
.canonicalized
.iter()
.map(|s| s.parse().unwrap())
.collect();
to_string!(group, Locale, "locale", &locales);
group.finish();
}
// Compare
{
let mut group = c.benchmark_group("locale/compare");
let locales: Vec<Locale> = data
.canonicalized
.iter()
.map(|s| s.parse().unwrap())
.collect();
let locales2: Vec<Locale> = data
.canonicalized
.iter()
.map(|s| s.parse().unwrap())
.collect();
compare_struct!(group, Locale, "locale", &locales, &locales2);
compare_str!(group, Locale, "locale", &locales, &data.canonicalized);
group.finish();
}
// Canonicalize
{
let mut group = c.benchmark_group("locale/canonicalize");
canonicalize!(group, Locale, "locale", &data.casing);
group.finish();
}
}
}
// Register the benchmark group and generate the `main` entry point.
criterion_group!(benches, locale_benches,);
criterion_main!(benches);
#! /usr/bin/env python
from __future__ import absolute_import
import json
import re
import os
import logging
from validator import Validator
from validator.utils import find_file
# Matches cargo's location marker lines, e.g. "  --> src/main.rs:4:20",
# capturing file name, line and column. A raw string is used so `\s`/`\d`
# are not treated as (invalid) string escapes on Python 3.6+.
PAT = re.compile(r'\s+-->\s(?P<fname>.*?):(?P<lnum>\d+):(?P<col>\d+)')
logger = logging.getLogger('validator')
class Cargo(Validator):
    """Validator backend that lints Rust buffers via ``cargo check``."""

    __filetype__ = 'rust'

    instant = False
    checker = 'cargo'
    args = 'check --color never'

    def parse_loclist(self, loclist, bufnr):
        """Convert raw ``cargo check`` output lines into a location list.

        Scans for "  --> file:line:col" markers (see PAT); the message
        text is taken from the line immediately preceding the marker.
        Only diagnostics whose path resolves to ``self.filename`` (an
        attribute presumably set by the Validator base class — confirm)
        are kept. Returns the entries serialized as a JSON string.
        """
        logger.info('parse input = %s', [self, loclist, bufnr])
        lists = []
        j = 0
        cwd = self.cwd
        for i, l in enumerate(loclist):
            r = PAT.match(l)
            if r:
                j += 1
                msg = r.groupdict()
                fname = msg.get('fname')
                if not fname:
                    continue
                # Cargo emits paths relative to the crate root.
                path = os.path.join(cwd, fname)
                if path != self.filename:
                    # Diagnostic belongs to another file in the crate.
                    continue
                # The line before the "-->" marker carries the message.
                text = loclist[i - 1] if i > 0 else ''
                ty = 'E' if text.startswith('error') else 'W'
                loc = self.compose_loc(j, bufnr, ty, text)
                loc.update(msg)
                lists.append(loc)
        logger.info(lists)
        return json.dumps(lists)

    def cmd(self, fname):
        # ``cargo check`` operates on the whole crate, so fname is unused.
        return '{} {}'.format(self.binary, self.args)

    @property
    def cwd(self):
        # Run from the crate root (the directory containing Cargo.toml)
        # so relative paths in cargo's output resolve correctly.
        path = find_file('Cargo.toml')
        return os.path.dirname(path) if path else os.getcwd()
# Copyright 2000-2004 Michael Hudson-Doyle <micahel@gmail.com>
#
# All Rights Reserved
#
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose is hereby granted without fee,
# provided that the above copyright notice appear in all copies and
# that both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import annotations
import _colorize
from abc import ABC, abstractmethod
import ast
import code
import linecache
from dataclasses import dataclass, field
import os.path
import re
import sys
TYPE_CHECKING = False
if TYPE_CHECKING:
from typing import IO
from typing import Callable
@dataclass
class Event:
    evt: str  # event kind tag (semantics defined by concrete consoles)
    data: str  # decoded payload associated with the event
    raw: bytes = b""  # raw input bytes the event came from, when applicable
@dataclass
class Console(ABC):
    """Abstract terminal/console backend for the REPL.

    Concrete subclasses implement the event loop and screen drawing.
    """

    # Cursor position as (x, y).
    posxy: tuple[int, int]
    # Last-known display contents, one string per row.
    screen: list[str] = field(default_factory=list)
    height: int = 25
    width: int = 80

    # NOTE: this explicit __init__ replaces the dataclass-generated one.
    # It only records the encoding and the input/output file descriptors;
    # `term` is accepted but unused here (subclasses may use it — confirm).
    def __init__(
        self,
        f_in: IO[bytes] | int = 0,
        f_out: IO[bytes] | int = 1,
        term: str = "",
        encoding: str = "",
    ):
        self.encoding = encoding or sys.getdefaultencoding()

        # Accept either raw file descriptors or file-like objects.
        if isinstance(f_in, int):
            self.input_fd = f_in
        else:
            self.input_fd = f_in.fileno()

        if isinstance(f_out, int):
            self.output_fd = f_out
        else:
            self.output_fd = f_out.fileno()

    # Redraw the display with `screen` and place the cursor at `xy`.
    @abstractmethod
    def refresh(self, screen: list[str], xy: tuple[int, int]) -> None: ...

    # Put the terminal into the mode required for interactive editing.
    @abstractmethod
    def prepare(self) -> None: ...

    # Undo whatever prepare() did to the terminal.
    @abstractmethod
    def restore(self) -> None: ...

    # Move the cursor to column x, row y.
    @abstractmethod
    def move_cursor(self, x: int, y: int) -> None: ...

    # Show or hide the cursor.
    @abstractmethod
    def set_cursor_vis(self, visible: bool) -> None: ...

    @abstractmethod
    def getheightwidth(self) -> tuple[int, int]:
        """Return (height, width) where height and width are the height
        and width of the terminal window in characters."""
        ...

    @abstractmethod
    def get_event(self, block: bool = True) -> Event | None:
        """Return an Event instance. Returns None if |block| is false
        and there is no event pending, otherwise waits for the
        completion of an event."""
        ...

    @abstractmethod
    def push_char(self, char: int | bytes) -> None:
        """
        Push a character to the console event queue.
        """
        ...

    # Emit an audible/visible bell.
    @abstractmethod
    def beep(self) -> None: ...

    @abstractmethod
    def clear(self) -> None:
        """Wipe the screen"""
        ...

    @abstractmethod
    def finish(self) -> None:
        """Move the cursor to the end of the display and otherwise get
        ready for end. XXX could be merged with restore? Hmm."""
        ...

    @abstractmethod
    def flushoutput(self) -> None:
        """Flush all output to the screen (assuming there's some
        buffering going on somewhere)."""
        ...

    @abstractmethod
    def forgetinput(self) -> None:
        """Forget all pending, but not yet processed input."""
        ...

    @abstractmethod
    def getpending(self) -> Event:
        """Return the characters that have been typed but not yet
        processed."""
        ...

    @abstractmethod
    def wait(self, timeout: float | None) -> bool:
        """Wait for an event. The return value is True if an event is
        available, False if the timeout has been reached. If timeout is
        None, wait forever. The timeout is in milliseconds."""
        ...

    # Deliberately NOT abstract: backends without an input hook inherit
    # this stub, which implicitly returns None.
    @property
    def input_hook(self) -> Callable[[], int] | None:
        """Returns the current input hook."""
        ...

    # Repaint the whole display from the stored screen contents.
    @abstractmethod
    def repaint(self) -> None: ...
class InteractiveColoredConsole(code.InteractiveConsole):
    """InteractiveConsole that colorizes tracebacks and runs multi-statement
    input one statement at a time."""

    # Sentinel returned by runcode() when execution raised, letting
    # runsource() stop before running the remaining statements.
    STATEMENT_FAILED = object()

    def __init__(
        self,
        locals: dict[str, object] | None = None,
        filename: str = "<console>",
        *,
        local_exit: bool = False,
    ) -> None:
        super().__init__(locals=locals, filename=filename, local_exit=local_exit)
        # Decide once whether tracebacks may use ANSI colors.
        self.can_colorize = _colorize.can_colorize()

    # NOTE(review): identical to the base implementation apart from
    # forwarding **kwargs (e.g. source=) — presumably kept for that reason.
    def showsyntaxerror(self, filename=None, **kwargs):
        super().showsyntaxerror(filename=filename, **kwargs)

    def _excepthook(self, typ, value, tb):
        # Format the traceback ourselves so it can be colorized and
        # trimmed to the builtin frame limit, then write it to the console.
        import traceback
        lines = traceback.format_exception(
                typ, value, tb,
                colorize=self.can_colorize,
                limit=traceback.BUILTIN_EXCEPTION_LIMIT)
        self.write(''.join(lines))

    def runcode(self, code):
        """Execute a code object; return STATEMENT_FAILED if it raised."""
        try:
            exec(code, self.locals)
        except SystemExit:
            raise
        except BaseException:
            self.showtraceback()
            return self.STATEMENT_FAILED
        return None

    def runsource(self, source, filename="<input>", symbol="single"):
        """Parse and run `source`; return True if more input is needed."""
        # Parse the whole input up front so syntax errors are reported
        # before anything executes.
        try:
            tree = self.compile.compiler(
                source,
                filename,
                "exec",
                ast.PyCF_ONLY_AST,
                incomplete_input=False,
            )
        except SyntaxError as e:
            # If it looks like pip install was entered (a common beginner
            # mistake), provide a hint to use the system command prompt.
            if re.match(r"^\s*(pip3?|py(thon3?)? -m pip) install.*", source):
                e.add_note(
                    "The Python package manager (pip) can only be used"
                    " outside of the Python REPL.\n"
                    "Try the 'pip' command in a separate terminal or"
                    " command prompt."
                )
            self.showsyntaxerror(filename, source=source)
            return False
        except (OverflowError, ValueError):
            self.showsyntaxerror(filename, source=source)
            return False
        if tree.body:
            *_, last_stmt = tree.body
        # Compile and run each statement separately; only the last one is
        # wrapped in ast.Interactive (compiled with `symbol`) so that its
        # value is echoed like in the normal REPL.
        for stmt in tree.body:
            wrapper = ast.Interactive if stmt is last_stmt else ast.Module
            the_symbol = symbol if stmt is last_stmt else "exec"
            item = wrapper([stmt])
            try:
                code = self.compile.compiler(item, filename, the_symbol)
                linecache._register_code(code, source, filename)
            except SyntaxError as e:
                if e.args[0] == "'await' outside function":
                    python = os.path.basename(sys.executable)
                    e.add_note(
                        f"Try the asyncio REPL ({python} -m asyncio) to use"
                        f" top-level 'await' and run background asyncio tasks."
                    )
                self.showsyntaxerror(filename, source=source)
                return False
            except (OverflowError, ValueError):
                self.showsyntaxerror(filename, source=source)
                return False

            if code is None:
                # Incomplete input (codeop contract): ask for more lines.
                return True

            result = self.runcode(code)
            if result is self.STATEMENT_FAILED:
                # Stop executing the remaining statements after a failure.
                break
        return False
# 20x20 grid from Project Euler problem 11, kept verbatim.
number_string = """08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48"""

# Parse rows first, then columns, so the result is indexed as
# int_matrix[y][x] (row first); get_cell() below compensates for this.
# BUG FIX: the inner comprehension variable used to shadow the outer
# `number_string` name, which was confusing (though harmless).
int_matrix = [[int(cell) for cell in row.split(" ")]
              for row in number_string.split("\n")]
def get_cell(x, y):
    """Return the grid value at (x, y), or 0 when out of bounds.

    Out-of-bounds cells yield 0 so that any line product reaching past
    the edge of the grid collapses to zero.
    """
    in_bounds = 0 <= x <= 19 and 0 <= y <= 19
    if not in_bounds:
        return 0
    # Coordinate axes are reversed (y, x) because of how the grid was parsed.
    return int_matrix[y][x]
def check_vertical(x, y):
    """Product of the four cells running straight down from (x, y)."""
    product = 1
    for offset in range(4):
        product *= get_cell(x, y + offset)
    return product
def check_horizontal(x, y):
    """Product of the four cells running right from (x, y)."""
    product = 1
    for offset in range(4):
        product *= get_cell(x + offset, y)
    return product
# north-west (nw) to south-east (se): steps down and to the right
# (the previous comment mislabelled the direction)
def check_nw_se_diagonal(x, y):
    """Product of the four cells on the NW-SE diagonal from (x, y)."""
    product = 1
    for offset in range(4):
        product *= get_cell(x + offset, y + offset)
    return product
# north-east (ne) to south-west (sw): steps down and to the left
def check_ne_sw_diagonal(x, y):
    """Product of the four cells on the NE-SW diagonal from (x, y)."""
    product = 1
    for offset in range(4):
        product *= get_cell(x - offset, y + offset)
    return product
def get_highest_cell_product(x, y):
    """Largest four-cell line product starting at (x, y), any direction."""
    candidates = (
        check_vertical(x, y),
        check_horizontal(x, y),
        check_nw_se_diagonal(x, y),
        check_ne_sw_diagonal(x, y),
    )
    return max(candidates)
for y in xrange(0,20):
for x in xrange(0,20):
print str(get_cell(x,y)).zfill(2),
print ""
greatest_cell_product = 0
for y in xrange(0,20):
for x in xrange(0,20):
cell_product = get_highest_cell_product(x, y)
if (cell_product > greatest_cell_product):
greatest_cell_product = cell_product
print "greatest_product==", greatest_cell_product | unknown | codeparrot/codeparrot-clean | ||
use crate::internals::ast::{Container, Data, Field, Style, Variant};
use crate::private;
use proc_macro2::TokenStream;
use quote::{format_ident, quote};
// Suppress dead_code warnings that would otherwise appear when using a remote
// derive. Other than this pretend code, a struct annotated with remote derive
// never has its fields referenced and an enum annotated with remote derive
// never has its variants constructed.
//
// warning: field is never used: `i`
// --> src/main.rs:4:20
// |
// 4 | struct StructDef { i: i32 }
// | ^^^^^^
//
// warning: variant is never constructed: `V`
// --> src/main.rs:8:16
// |
// 8 | enum EnumDef { V }
// | ^
//
pub fn pretend_used(cont: &Container, is_packed: bool) -> TokenStream {
    // Emit pretend uses of both the fields and the variants; either part
    // may expand to nothing depending on the container shape.
    let fields = pretend_fields_used(cont, is_packed);
    let variants = pretend_variants_used(cont);

    quote! {
        #fields
        #variants
    }
}
// For structs with named fields, expands to:
//
// match None::<&T> {
// Some(T { a: __v0, b: __v1 }) => {}
// _ => {}
// }
//
// For packed structs on sufficiently new rustc, expands to:
//
// match None::<&T> {
// Some(__v @ T { a: _, b: _ }) => {
// let _ = addr_of!(__v.a);
// let _ = addr_of!(__v.b);
// }
// _ => {}
// }
//
// For packed structs on older rustc, we assume Sized and !Drop, and expand to:
//
// match None::<T> {
// Some(T { a: __v0, b: __v1 }) => {}
// _ => {}
// }
//
// For enums, expands to the following but only including struct variants:
//
// match None::<&T> {
// Some(T::A { a: __v0 }) => {}
// Some(T::B { b: __v0 }) => {}
// _ => {}
// }
//
fn pretend_fields_used(cont: &Container, is_packed: bool) -> TokenStream {
    // Dispatch on the container shape; unit structs have no fields to
    // pretend-use, and packed structs need the addr_of! variant.
    match &cont.data {
        Data::Enum(variants) => pretend_fields_used_enum(cont, variants),
        Data::Struct(Style::Unit, _) => quote!(),
        Data::Struct(_, fields) if is_packed => {
            pretend_fields_used_struct_packed(cont, fields)
        }
        Data::Struct(_, fields) => pretend_fields_used_struct(cont, fields),
    }
}
fn pretend_fields_used_struct(cont: &Container, fields: &[Field]) -> TokenStream {
    let ty = &cont.ident;
    let (_, ty_generics, _) = cont.generics.split_for_impl();

    // Bind each field to a fresh placeholder (__v0, __v1, ...) inside a
    // pattern that can never match, so the fields count as "used".
    let members = fields.iter().map(|field| &field.member);
    let binds = (0usize..).map(|i| format_ident!("__v{}", i));

    quote! {
        match _serde::#private::None::<&#ty #ty_generics> {
            _serde::#private::Some(#ty { #(#members: #binds),* }) => {}
            _ => {}
        }
    }
}
fn pretend_fields_used_struct_packed(cont: &Container, fields: &[Field]) -> TokenStream {
    let type_ident = &cont.ident;
    let (_, ty_generics, _) = cont.generics.split_for_impl();
    let members = fields.iter().map(|field| &field.member).collect::<Vec<_>>();

    // Alias `private` so it can be interpolated both outside and inside the
    // #(...)* repetition below (quote treats variables inside a repetition
    // specially — confirm against quote's interpolation rules).
    let private2 = private;

    quote! {
        match _serde::#private::None::<&#type_ident #ty_generics> {
            _serde::#private::Some(__v @ #type_ident { #(#members: _),* }) => {
                #(
                    // addr_of! touches the field without taking a reference,
                    // which would be UB for fields of a packed struct.
                    let _ = _serde::#private2::ptr::addr_of!(__v.#members);
                )*
            }
            _ => {}
        }
    }
}
fn pretend_fields_used_enum(cont: &Container, variants: &[Variant]) -> TokenStream {
    let type_ident = &cont.ident;
    let (_, ty_generics, _) = cont.generics.split_for_impl();

    // Build one never-matching pattern per data-carrying variant; unit
    // variants have no fields to pretend-use and are skipped.
    let mut patterns = Vec::new();
    for variant in variants {
        match variant.style {
            Style::Struct | Style::Tuple | Style::Newtype => {
                let variant_ident = &variant.ident;
                let members = variant.fields.iter().map(|field| &field.member);
                // Bind each field to a fresh placeholder (__v0, __v1, ...).
                let placeholders = (0usize..).map(|i| format_ident!("__v{}", i));

                patterns.push(quote!(#type_ident::#variant_ident { #(#members: #placeholders),* }));
            }
            Style::Unit => {}
        }
    }

    // `private` is only interpolated inside the repetition here, so the
    // same aliasing trick as in the packed case is applied.
    let private2 = private;

    quote! {
        match _serde::#private::None::<&#type_ident #ty_generics> {
            #(
                _serde::#private2::Some(#patterns) => {}
            )*
            _ => {}
        }
    }
}
// Expands to one of these per enum variant:
//
// match None {
// Some((__v0, __v1,)) => {
// let _ = E::V { a: __v0, b: __v1 };
// }
// _ => {}
// }
//
fn pretend_variants_used(cont: &Container) -> TokenStream {
    // Only enums have variants to pretend-construct; structs expand to
    // nothing here.
    let variants = match &cont.data {
        Data::Enum(variants) => variants,
        Data::Struct(_, _) => {
            return quote!();
        }
    };

    let type_ident = &cont.ident;
    let (_, ty_generics, _) = cont.generics.split_for_impl();
    // Turbofish form is required because the generics appear in
    // expression (constructor) position below.
    let turbofish = ty_generics.as_turbofish();

    let cases = variants.iter().map(|variant| {
        let variant_ident = &variant.ident;
        let placeholders = &(0..variant.fields.len())
            .map(|i| format_ident!("__v{}", i))
            .collect::<Vec<_>>();

        // Shape the constructor arguments to match the variant style:
        // named fields, parenthesized fields, or nothing for unit.
        let pat = match variant.style {
            Style::Struct => {
                let members = variant.fields.iter().map(|field| &field.member);
                quote!({ #(#members: #placeholders),* })
            }
            Style::Tuple | Style::Newtype => quote!(( #(#placeholders),* )),
            Style::Unit => quote!(),
        };

        // The placeholders are bound from a pretend tuple that is never
        // actually Some, so the constructor is never evaluated at runtime.
        quote! {
            match _serde::#private::None {
                _serde::#private::Some((#(#placeholders,)*)) => {
                    let _ = #type_ident::#variant_ident #turbofish #pat;
                }
                _ => {}
            }
        }
    });

    quote!(#(#cases)*)
}
# SPDX-License-Identifier: GPL-2.0-only
%YAML 1.2
---
$id: http://devicetree.org/schemas/crypto/qcom,prng.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Pseudo Random Number Generator
maintainers:
- Vinod Koul <vkoul@kernel.org>
properties:
compatible:
oneOf:
- enum:
- qcom,prng # 8916 etc.
- qcom,prng-ee # 8996 and later using EE
- items:
- enum:
- qcom,ipq5332-trng
- qcom,ipq5424-trng
- qcom,ipq9574-trng
- qcom,kaanapali-trng
- qcom,milos-trng
- qcom,qcs615-trng
- qcom,qcs8300-trng
- qcom,sa8255p-trng
- qcom,sa8775p-trng
- qcom,sc7280-trng
- qcom,sm8450-trng
- qcom,sm8550-trng
- qcom,sm8650-trng
- qcom,sm8750-trng
- qcom,x1e80100-trng
- const: qcom,trng
reg:
maxItems: 1
clocks:
maxItems: 1
clock-names:
items:
- const: core
required:
- compatible
- reg
allOf:
- if:
not:
properties:
compatible:
contains:
const: qcom,trng
then:
required:
- clocks
- clock-names
additionalProperties: false
examples:
- |
rng@f9bff000 {
compatible = "qcom,prng";
reg = <0xf9bff000 0x200>;
clocks = <&clk 125>;
clock-names = "core";
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/crypto/qcom,prng.yaml |
from __future__ import division, absolute_import, print_function
from numpy.testing import *
import numpy as np
class TestFinancial(TestCase):
    """Tests for the numpy financial functions (rate, irr, pv, fv, ...)."""

    def test_rate(self):
        assert_almost_equal(np.rate(10, 0, -3500, 10000),
                            0.1107, 4)

    def test_irr(self):
        v = [-150000, 15000, 25000, 35000, 45000, 60000]
        assert_almost_equal(np.irr(v),
                            0.0524, 2)
        v = [-100, 0, 0, 74]
        assert_almost_equal(np.irr(v),
                            -0.0955, 2)
        v = [-100, 39, 59, 55, 20]
        assert_almost_equal(np.irr(v),
                            0.28095, 2)
        v = [-100, 100, 0, -7]
        assert_almost_equal(np.irr(v),
                            -0.0833, 2)
        v = [-100, 100, 0, 7]
        assert_almost_equal(np.irr(v),
                            0.06206, 2)
        v = [-5, 10.5, 1, -8, 1]
        assert_almost_equal(np.irr(v),
                            0.0886, 2)

    def test_pv(self):
        assert_almost_equal(np.pv(0.07, 20, 12000, 0),
                            -127128.17, 2)

    def test_fv(self):
        assert_almost_equal(np.fv(0.075, 20, -2000, 0, 0),
                            86609.36, 2)

    def test_pmt(self):
        assert_almost_equal(np.pmt(0.08/12, 5*12, 15000),
                            -304.146, 3)

    def test_ppmt(self):
        # BUG FIX: the original computed a comparison but never asserted
        # it (and compared against the wrong sign -- payments are
        # negative cash flows), so the test could never fail.
        assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000),
                            -710.25, 2)

    def test_ipmt(self):
        # BUG FIX: same as test_ppmt -- the comparison was never asserted.
        assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000),
                            -16.67, 2)

    def test_nper(self):
        assert_almost_equal(np.nper(0.075, -2000, 0, 100000.),
                            21.54, 2)

    def test_nper2(self):
        # Zero interest rate edge case.
        assert_almost_equal(np.nper(0.0, -2000, 0, 100000.),
                            50.0, 1)

    def test_npv(self):
        assert_almost_equal(
            np.npv(0.05, [-15000, 1500, 2500, 3500, 4500, 6000]),
            122.89, 2)

    def test_mirr(self):
        val = [-4500, -800, 800, 800, 600, 600, 800, 800, 700, 3000]
        assert_almost_equal(np.mirr(val, 0.08, 0.055), 0.0666, 4)

        val = [-120000, 39000, 30000, 21000, 37000, 46000]
        assert_almost_equal(np.mirr(val, 0.10, 0.12), 0.126094, 6)

        val = [100, 200, -50, 300, -200]
        assert_almost_equal(np.mirr(val, 0.05, 0.06), 0.3428, 4)

        # All-positive cash flows have no IRR; mirr should yield NaN.
        val = [39000, 30000, 21000, 37000, 46000]
        assert_(np.isnan(np.mirr(val, 0.10, 0.12)))

    def test_when(self):
        # Each function must accept 1/'begin' and 0/'end' interchangeably
        # for the `when` argument, with 'end' as the default.
        assert_almost_equal(np.rate(10, 20, -3500, 10000, 1),
                            np.rate(10, 20, -3500, 10000, 'begin'), 4)
        assert_almost_equal(np.rate(10, 20, -3500, 10000),
                            np.rate(10, 20, -3500, 10000, 'end'), 4)
        assert_almost_equal(np.rate(10, 20, -3500, 10000, 0),
                            np.rate(10, 20, -3500, 10000, 'end'), 4)

        assert_almost_equal(np.pv(0.07, 20, 12000, 0, 1),
                            np.pv(0.07, 20, 12000, 0, 'begin'), 2)
        assert_almost_equal(np.pv(0.07, 20, 12000, 0),
                            np.pv(0.07, 20, 12000, 0, 'end'), 2)
        assert_almost_equal(np.pv(0.07, 20, 12000, 0, 0),
                            np.pv(0.07, 20, 12000, 0, 'end'), 2)

        assert_almost_equal(np.fv(0.075, 20, -2000, 0, 1),
                            np.fv(0.075, 20, -2000, 0, 'begin'), 4)
        assert_almost_equal(np.fv(0.075, 20, -2000, 0),
                            np.fv(0.075, 20, -2000, 0, 'end'), 4)
        assert_almost_equal(np.fv(0.075, 20, -2000, 0, 0),
                            np.fv(0.075, 20, -2000, 0, 'end'), 4)

        assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0, 1),
                            np.pmt(0.08/12, 5*12, 15000., 0, 'begin'), 4)
        assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0),
                            np.pmt(0.08/12, 5*12, 15000., 0, 'end'), 4)
        assert_almost_equal(np.pmt(0.08/12, 5*12, 15000., 0, 0),
                            np.pmt(0.08/12, 5*12, 15000., 0, 'end'), 4)

        assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0, 1),
                            np.ppmt(0.1/12, 1, 60, 55000, 0, 'begin'), 4)
        assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0),
                            np.ppmt(0.1/12, 1, 60, 55000, 0, 'end'), 4)
        assert_almost_equal(np.ppmt(0.1/12, 1, 60, 55000, 0, 0),
                            np.ppmt(0.1/12, 1, 60, 55000, 0, 'end'), 4)

        assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0, 1),
                            np.ipmt(0.1/12, 1, 24, 2000, 0, 'begin'), 4)
        assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0),
                            np.ipmt(0.1/12, 1, 24, 2000, 0, 'end'), 4)
        assert_almost_equal(np.ipmt(0.1/12, 1, 24, 2000, 0, 0),
                            np.ipmt(0.1/12, 1, 24, 2000, 0, 'end'), 4)

        assert_almost_equal(np.nper(0.075, -2000, 0, 100000., 1),
                            np.nper(0.075, -2000, 0, 100000., 'begin'), 4)
        assert_almost_equal(np.nper(0.075, -2000, 0, 100000.),
                            np.nper(0.075, -2000, 0, 100000., 'end'), 4)
        assert_almost_equal(np.nper(0.075, -2000, 0, 100000., 0),
                            np.nper(0.075, -2000, 0, 100000., 'end'), 4)

    def test_broadcast(self):
        # Array-valued arguments must broadcast elementwise.
        assert_almost_equal(np.nper(0.075, -2000, 0, 100000., [0, 1]),
                            [21.5449442, 20.76156441], 4)

        assert_almost_equal(np.ipmt(0.1/12, list(range(5)), 24, 2000),
                            [-17.29165168, -16.66666667, -16.03647345,
                             -15.40102862, -14.76028842], 4)

        assert_almost_equal(np.ppmt(0.1/12, list(range(5)), 24, 2000),
                            [-74.998201, -75.62318601, -76.25337923,
                             -76.88882405, -77.52956425], 4)

        assert_almost_equal(np.ppmt(0.1/12, list(range(5)), 24, 2000, 0,
                                    [0, 0, 1, 'end', 'begin']),
                            [-74.998201, -75.62318601, -75.62318601,
                             -76.88882405, -76.88882405], 4)
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    run_module_suite()
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import hashlib
from mock import Mock
from libcloud.utils.py3 import StringIO
from libcloud.utils.py3 import PY3
from libcloud.utils.py3 import b
if PY3:
from io import FileIO as file
from libcloud.storage.base import StorageDriver
from libcloud.storage.base import DEFAULT_CONTENT_TYPE
from libcloud.test import unittest
from libcloud.test import StorageMockHttp
class BaseStorageTests(unittest.TestCase):
    """Tests for the shared StorageDriver upload/stream helpers."""

    def setUp(self):
        self.send_called = 0
        StorageDriver.connectionCls.conn_classes = (None, StorageMockHttp)

        # driver1 supports chunked transfer encoding, driver2 does not.
        self.driver1 = StorageDriver('username', 'key', host='localhost')
        self.driver1.supports_chunked_encoding = True

        self.driver2 = StorageDriver('username', 'key', host='localhost')
        self.driver2.supports_chunked_encoding = False

        self.driver1.strict_mode = False
        # BUG FIX: the second assignment previously re-set driver1's
        # strict_mode; it was clearly meant to initialise driver2.
        self.driver2.strict_mode = False

    def test__upload_object_iterator_must_have_next_method(self):
        class Iterator(object):

            def next(self):
                pass

        class Iterator2(file):

            def __init__(self):
                pass

        class SomeClass(object):
            pass

        valid_iterators = [Iterator(), Iterator2(), StringIO('bar')]
        invalid_iterators = ['foobar', '', False, True, 1, object()]

        def upload_func(*args, **kwargs):
            return True, 'barfoo', 100

        kwargs = {'object_name': 'foo', 'content_type': 'foo/bar',
                  'upload_func': upload_func, 'upload_func_kwargs': {},
                  'request_path': '/', 'headers': {}}

        # Objects with a next() method (or file-likes) must be accepted...
        for value in valid_iterators:
            kwargs['iterator'] = value
            self.driver1._upload_object(**kwargs)

        # ...anything else must raise AttributeError.
        for value in invalid_iterators:
            kwargs['iterator'] = value

            try:
                self.driver1._upload_object(**kwargs)
            except AttributeError:
                pass
            else:
                self.fail('Exception was not thrown')

    def test_upload_zero_bytes_long_object_via_stream(self):
        iterator = Mock()

        if PY3:
            iterator.__next__ = Mock()
            iterator.__next__.side_effect = StopIteration()
        else:
            iterator.next.side_effect = StopIteration()

        def mock_send(data):
            self.send_called += 1

        response = Mock()
        response.connection.connection.send = mock_send

        # Normal
        success, data_hash, bytes_transferred = \
            self.driver1._stream_data(response=response,
                                      iterator=iterator,
                                      chunked=False, calculate_hash=True)

        self.assertTrue(success)
        self.assertEqual(data_hash, hashlib.md5(b('')).hexdigest())
        self.assertEqual(bytes_transferred, 0)
        self.assertEqual(self.send_called, 1)

        # Chunked
        success, data_hash, bytes_transferred = \
            self.driver1._stream_data(response=response,
                                      iterator=iterator,
                                      chunked=True, calculate_hash=True)

        self.assertTrue(success)
        self.assertEqual(data_hash, hashlib.md5(b('')).hexdigest())
        self.assertEqual(bytes_transferred, 0)
        self.assertEqual(self.send_called, 5)

    def test__upload_data(self):
        def mock_send(data):
            self.send_called += 1

        response = Mock()
        response.connection.connection.send = mock_send

        data = '123456789901234567'
        success, data_hash, bytes_transferred = \
            self.driver1._upload_data(response=response, data=data,
                                      calculate_hash=True)

        self.assertTrue(success)
        self.assertEqual(data_hash, hashlib.md5(b(data)).hexdigest())
        self.assertEqual(bytes_transferred, (len(data)))
        self.assertEqual(self.send_called, 1)

    def test__get_hash_function(self):
        # Known hash types must resolve to a callable...
        self.driver1.hash_type = 'md5'
        func = self.driver1._get_hash_function()
        self.assertTrue(func)

        self.driver1.hash_type = 'sha1'
        func = self.driver1._get_hash_function()
        self.assertTrue(func)

        # ...and unknown ones must raise RuntimeError.
        try:
            self.driver1.hash_type = 'invalid-hash-function'
            func = self.driver1._get_hash_function()
        except RuntimeError:
            pass
        else:
            self.fail('Invalid hash type but exception was not thrown')

    def test_upload_no_content_type_supplied_or_detected(self):
        iterator = StringIO()

        upload_func = Mock()
        upload_func.return_value = True, '', 0

        # strict_mode is disabled, default content type should be used
        self.driver1.connection = Mock()
        self.driver1._upload_object(object_name='test',
                                    content_type=None,
                                    upload_func=upload_func,
                                    upload_func_kwargs={},
                                    request_path='/',
                                    iterator=iterator)

        headers = self.driver1.connection.request.call_args[-1]['headers']
        self.assertEqual(headers['Content-Type'], DEFAULT_CONTENT_TYPE)

        # strict_mode is enabled, exception should be thrown
        self.driver1.strict_mode = True
        expected_msg = ('File content-type could not be guessed and no'
                        ' content_type value is provided')
        self.assertRaisesRegexp(AttributeError, expected_msg,
                                self.driver1._upload_object,
                                object_name='test',
                                content_type=None,
                                upload_func=upload_func,
                                upload_func_kwargs={},
                                request_path='/',
                                iterator=iterator)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    sys.exit(unittest.main())
from . import AbstractIndicator
# this class is responsible for estimating distance to the next extremum (minimum or maximum)
# on the x axis
class ExtremumFinder(AbstractIndicator):
    """Estimates the x-axis distance to the next extremum (minimum or
    maximum) of the input series, using first and second discrete
    differences of consecutive samples."""

    def __init__(self):
        AbstractIndicator.__init__(self)
        self.reset()

    def reset(self):
        """Drop all accumulated samples and distance estimates."""
        self.__samples = []
        self.__distances = []

    @property
    def result(self):
        """Most recent distance estimate, or None when nothing is known."""
        if not self.__distances:
            return None
        return self.__distances[-1]

    @property
    def all_result(self):
        """Full list of per-sample distance estimates (None where unknown)."""
        return self.__distances

    def on_new_upstream_value(self, new_value):
        """Accept a single sample or a list of samples and recompute."""
        if new_value is None:
            raise ValueError("None is not allowed")
        if type(new_value) is list:
            # Drop the oldest len(new_value) samples before appending the
            # new batch (rolling-buffer behaviour — TODO confirm intent).
            del self.__samples[:len(new_value)]
            self.__samples.extend(new_value)
        else:
            self.__samples.append(new_value)
        self.__recompute()

    def __recompute(self):
        """Rebuild the distance series for every stored sample."""
        self.__distances = []
        prev_value = None
        prev_diff = None
        for value in self.__samples:
            distance = None
            if prev_value is not None:
                diff = value - prev_value
                if prev_diff is not None:
                    distance = self.__distance_to_extremum(prev_diff, diff)
                prev_diff = diff
            prev_value = value
            self.__distances.append(distance)

    def __distance_to_extremum(self, prev_diff, diff):
        """Estimate how many steps until the first difference hits zero,
        assuming the second difference stays constant. Returns None when
        no zero crossing is ahead."""
        accel = diff - prev_diff
        if diff == 0:
            return 0  # already at an extremum
        if accel == 0:
            return None  # constant slope never reaches zero
        if diff > 0 and accel > 0:
            return None  # rising and accelerating upward
        if diff < 0 and accel < 0:
            return None  # falling and accelerating downward
        return abs(diff * 1.0 / accel)
name: Performance Issue
description: Report slow performance or memory issues when running pandas code
title: "PERF: "
labels: [Performance, Needs Triage]
body:
- type: checkboxes
id: checks
attributes:
label: Pandas version checks
options:
- label: >
I have checked that this issue has not already been reported.
required: true
- label: >
I have confirmed this issue exists on the
[latest version](https://pandas.pydata.org/docs/whatsnew/index.html) of pandas.
required: true
- label: >
I have confirmed this issue exists on the main branch of pandas.
- type: textarea
id: example
attributes:
label: Reproducible Example
description: >
Please provide a minimal, copy-pastable example that quantifies
[slow runtime](https://docs.python.org/3/library/timeit.html) or
[memory](https://pypi.org/project/memory-profiler/) issues. Reports
without reproducible examples will generally be closed
until they are provided.
validations:
required: true
- type: textarea
id: version
attributes:
label: Installed Versions
description: >
Please paste the output of ``pd.show_versions()``
value: >
<details>
Replace this line with the output of pd.show_versions()
</details>
validations:
required: true
- type: textarea
id: prior-performance
attributes:
label: Prior Performance
description: >
If applicable, please provide the prior version of pandas and output
of the same reproducible example where the performance issue did not exist. | unknown | github | https://github.com/pandas-dev/pandas | .github/ISSUE_TEMPLATE/performance_issue.yaml |
/*-------------------------------------------------------------------------
*
* toasting.c
* This file contains routines to support creation of toast tables
*
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/backend/catalog/toasting.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/heapam.h"
#include "access/toast_compression.h"
#include "access/xact.h"
#include "catalog/binary_upgrade.h"
#include "catalog/catalog.h"
#include "catalog/dependency.h"
#include "catalog/heap.h"
#include "catalog/index.h"
#include "catalog/namespace.h"
#include "catalog/pg_am.h"
#include "catalog/pg_namespace.h"
#include "catalog/pg_opclass.h"
#include "catalog/toasting.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "utils/fmgroids.h"
#include "utils/rel.h"
#include "utils/syscache.h"
static void CheckAndCreateToastTable(Oid relOid, Datum reloptions,
LOCKMODE lockmode, bool check,
Oid OIDOldToast);
static bool create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid,
Datum reloptions, LOCKMODE lockmode, bool check,
Oid OIDOldToast);
static bool needs_toast_table(Relation rel);
/*
* CreateToastTable variants
* If the table needs a toast table, and doesn't already have one,
* then create a toast table for it.
*
* reloptions for the toast table can be passed, too. Pass (Datum) 0
* for default reloptions.
*
* We expect the caller to have verified that the relation is a table and have
* already done any necessary permission checks. Callers expect this function
* to end with CommandCounterIncrement if it makes any changes.
*/
void
AlterTableCreateToastTable(Oid relOid, Datum reloptions, LOCKMODE lockmode)
{
	/* check=true: cross-check that the caller holds AccessExclusiveLock */
	CheckAndCreateToastTable(relOid, reloptions, lockmode, true, InvalidOid);
}
void
NewHeapCreateToastTable(Oid relOid, Datum reloptions, LOCKMODE lockmode,
						Oid OIDOldToast)
{
	/* Used for heap rewrites; OIDOldToast lets the new toast reuse storage */
	CheckAndCreateToastTable(relOid, reloptions, lockmode, false, OIDOldToast);
}
void
NewRelationCreateToastTable(Oid relOid, Datum reloptions)
{
	/* Brand-new relation: creator necessarily holds AccessExclusiveLock */
	CheckAndCreateToastTable(relOid, reloptions, AccessExclusiveLock, false,
							 InvalidOid);
}
/*
 * Common implementation for the three public entry points above: open the
 * parent relation and delegate to create_toast_table.
 */
static void
CheckAndCreateToastTable(Oid relOid, Datum reloptions, LOCKMODE lockmode,
						 bool check, Oid OIDOldToast)
{
	Relation	rel;
	rel = table_open(relOid, lockmode);
	/* create_toast_table does all the work */
	(void) create_toast_table(rel, InvalidOid, InvalidOid, reloptions, lockmode,
							  check, OIDOldToast);
	/* NoLock: hold the acquired lock until end of transaction */
	table_close(rel, NoLock);
}
/*
* Create a toast table during bootstrap
*
* Here we need to prespecify the OIDs of the toast table and its index
*/
void
BootstrapToastTable(char *relName, Oid toastOid, Oid toastIndexOid)
{
	Relation	rel;
	rel = table_openrv(makeRangeVar(NULL, relName, -1), AccessExclusiveLock);
	if (rel->rd_rel->relkind != RELKIND_RELATION &&
		rel->rd_rel->relkind != RELKIND_MATVIEW)
		elog(ERROR, "\"%s\" is not a table or materialized view",
			 relName);
	/* create_toast_table does all the work */
	/* During bootstrap a false return is unexpected, hence the hard error. */
	if (!create_toast_table(rel, toastOid, toastIndexOid, (Datum) 0,
							AccessExclusiveLock, false, InvalidOid))
		elog(ERROR, "\"%s\" does not require a toast table",
			 relName);
	table_close(rel, NoLock);
}
/*
* create_toast_table --- internal workhorse
*
* rel is already opened and locked
* toastOid and toastIndexOid are normally InvalidOid, but during
* bootstrap they can be nonzero to specify hand-assigned OIDs
*/
static bool
create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid,
				   Datum reloptions, LOCKMODE lockmode, bool check,
				   Oid OIDOldToast)
{
	Oid			relOid = RelationGetRelid(rel);
	HeapTuple	reltup;
	TupleDesc	tupdesc;
	bool		shared_relation;
	bool		mapped_relation;
	Relation	toast_rel;
	Relation	class_rel;
	Oid			toast_relid;
	Oid			namespaceid;
	char		toast_relname[NAMEDATALEN];
	char		toast_idxname[NAMEDATALEN];
	IndexInfo  *indexInfo;
	Oid			collationIds[2];
	Oid			opclassIds[2];
	int16		coloptions[2];
	ObjectAddress baseobject,
				toastobject;

	/*
	 * Is it already toasted?
	 */
	if (rel->rd_rel->reltoastrelid != InvalidOid)
		return false;

	/*
	 * Check to see whether the table actually needs a TOAST table.
	 */
	if (!IsBinaryUpgrade)
	{
		/* Normal mode, normal check */
		if (!needs_toast_table(rel))
			return false;
	}
	else
	{
		/*
		 * In binary-upgrade mode, create a TOAST table if and only if
		 * pg_upgrade told us to (ie, a TOAST table OID has been provided).
		 *
		 * This indicates that the old cluster had a TOAST table for the
		 * current table.  We must create a TOAST table to receive the old
		 * TOAST file, even if the table seems not to need one.
		 *
		 * Contrariwise, if the old cluster did not have a TOAST table, we
		 * should be able to get along without one even if the new version's
		 * needs_toast_table rules suggest we should have one.  There is a lot
		 * of daylight between where we will create a TOAST table and where
		 * one is really necessary to avoid failures, so small cross-version
		 * differences in the when-to-create heuristic shouldn't be a problem.
		 * If we tried to create a TOAST table anyway, we would have the
		 * problem that it might take up an OID that will conflict with some
		 * old-cluster table we haven't seen yet.
		 */
		if (!OidIsValid(binary_upgrade_next_toast_pg_class_oid))
			return false;
	}

	/*
	 * If requested check lockmode is sufficient. This is a cross check in
	 * case of errors or conflicting decisions in earlier code.
	 */
	if (check && lockmode != AccessExclusiveLock)
		elog(ERROR, "AccessExclusiveLock required to add toast table.");

	/*
	 * Create the toast table and its index
	 */
	snprintf(toast_relname, sizeof(toast_relname),
			 "pg_toast_%u", relOid);
	snprintf(toast_idxname, sizeof(toast_idxname),
			 "pg_toast_%u_index", relOid);

	/* this is pretty painful... need a tuple descriptor */
	tupdesc = CreateTemplateTupleDesc(3);
	TupleDescInitEntry(tupdesc, (AttrNumber) 1,
					   "chunk_id",
					   OIDOID,
					   -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 2,
					   "chunk_seq",
					   INT4OID,
					   -1, 0);
	TupleDescInitEntry(tupdesc, (AttrNumber) 3,
					   "chunk_data",
					   BYTEAOID,
					   -1, 0);

	/*
	 * Ensure that the toast table doesn't itself get toasted, or we'll be
	 * toast :-(.  This is essential for chunk_data because type bytea is
	 * toastable; hit the other two just to be sure.
	 */
	TupleDescAttr(tupdesc, 0)->attstorage = TYPSTORAGE_PLAIN;
	TupleDescAttr(tupdesc, 1)->attstorage = TYPSTORAGE_PLAIN;
	TupleDescAttr(tupdesc, 2)->attstorage = TYPSTORAGE_PLAIN;

	/* Toast field should not be compressed */
	TupleDescAttr(tupdesc, 0)->attcompression = InvalidCompressionMethod;
	TupleDescAttr(tupdesc, 1)->attcompression = InvalidCompressionMethod;
	TupleDescAttr(tupdesc, 2)->attcompression = InvalidCompressionMethod;

	/*
	 * Toast tables for regular relations go in pg_toast; those for temp
	 * relations go into the per-backend temp-toast-table namespace.
	 */
	if (isTempOrTempToastNamespace(rel->rd_rel->relnamespace))
		namespaceid = GetTempToastNamespace();
	else
		namespaceid = PG_TOAST_NAMESPACE;

	/* Toast table is shared if and only if its parent is. */
	shared_relation = rel->rd_rel->relisshared;

	/* It's mapped if and only if its parent is, too */
	mapped_relation = RelationIsMapped(rel);

	/* The toast table inherits owner, tablespace and persistence from rel. */
	toast_relid = heap_create_with_catalog(toast_relname,
										   namespaceid,
										   rel->rd_rel->reltablespace,
										   toastOid,
										   InvalidOid,
										   InvalidOid,
										   rel->rd_rel->relowner,
										   table_relation_toast_am(rel),
										   tupdesc,
										   NIL,
										   RELKIND_TOASTVALUE,
										   rel->rd_rel->relpersistence,
										   shared_relation,
										   mapped_relation,
										   ONCOMMIT_NOOP,
										   reloptions,
										   false,
										   true,
										   true,
										   OIDOldToast,
										   NULL);
	Assert(toast_relid != InvalidOid);

	/* make the toast relation visible, else table_open will fail */
	CommandCounterIncrement();

	/* ShareLock is not really needed here, but take it anyway */
	toast_rel = table_open(toast_relid, ShareLock);

	/*
	 * Create unique index on chunk_id, chunk_seq.
	 *
	 * NOTE: the normal TOAST access routines could actually function with a
	 * single-column index on chunk_id only.  However, the slice access
	 * routines use both columns for faster access to an individual chunk. In
	 * addition, we want it to be unique as a check against the possibility of
	 * duplicate TOAST chunk OIDs.  The index might also be a little more
	 * efficient this way, since btree isn't all that happy with large numbers
	 * of equal keys.
	 */
	indexInfo = makeNode(IndexInfo);
	indexInfo->ii_NumIndexAttrs = 2;
	indexInfo->ii_NumIndexKeyAttrs = 2;
	indexInfo->ii_IndexAttrNumbers[0] = 1;
	indexInfo->ii_IndexAttrNumbers[1] = 2;
	indexInfo->ii_Expressions = NIL;
	indexInfo->ii_ExpressionsState = NIL;
	indexInfo->ii_Predicate = NIL;
	indexInfo->ii_PredicateState = NULL;
	indexInfo->ii_ExclusionOps = NULL;
	indexInfo->ii_ExclusionProcs = NULL;
	indexInfo->ii_ExclusionStrats = NULL;
	indexInfo->ii_Unique = true;
	indexInfo->ii_NullsNotDistinct = false;
	indexInfo->ii_ReadyForInserts = true;
	indexInfo->ii_CheckedUnchanged = false;
	indexInfo->ii_IndexUnchanged = false;
	indexInfo->ii_Concurrent = false;
	indexInfo->ii_BrokenHotChain = false;
	indexInfo->ii_ParallelWorkers = 0;
	indexInfo->ii_Am = BTREE_AM_OID;
	indexInfo->ii_AmCache = NULL;
	indexInfo->ii_Context = CurrentMemoryContext;

	/* btree opclasses for (oid, int4); default collation/options */
	collationIds[0] = InvalidOid;
	collationIds[1] = InvalidOid;

	opclassIds[0] = OID_BTREE_OPS_OID;
	opclassIds[1] = INT4_BTREE_OPS_OID;

	coloptions[0] = 0;
	coloptions[1] = 0;

	/* The toast index is flagged as the toast table's primary key. */
	index_create(toast_rel, toast_idxname, toastIndexOid, InvalidOid,
				 InvalidOid, InvalidOid,
				 indexInfo,
				 list_make2("chunk_id", "chunk_seq"),
				 BTREE_AM_OID,
				 rel->rd_rel->reltablespace,
				 collationIds, opclassIds, NULL, coloptions, NULL, (Datum) 0,
				 INDEX_CREATE_IS_PRIMARY, 0, true, true, NULL);

	table_close(toast_rel, NoLock);

	/*
	 * Store the toast table's OID in the parent relation's pg_class row
	 */
	class_rel = table_open(RelationRelationId, RowExclusiveLock);

	if (!IsBootstrapProcessingMode())
	{
		/* normal case, use a transactional update */
		reltup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relOid));
		if (!HeapTupleIsValid(reltup))
			elog(ERROR, "cache lookup failed for relation %u", relOid);
		((Form_pg_class) GETSTRUCT(reltup))->reltoastrelid = toast_relid;
		CatalogTupleUpdate(class_rel, &reltup->t_self, reltup);
	}
	else
	{
		/* While bootstrapping, we cannot UPDATE, so overwrite in-place */
		ScanKeyData key[1];
		void	   *state;

		ScanKeyInit(&key[0],
					Anum_pg_class_oid,
					BTEqualStrategyNumber, F_OIDEQ,
					ObjectIdGetDatum(relOid));
		systable_inplace_update_begin(class_rel, ClassOidIndexId, true,
									  NULL, 1, key, &reltup, &state);
		if (!HeapTupleIsValid(reltup))
			elog(ERROR, "cache lookup failed for relation %u", relOid);
		((Form_pg_class) GETSTRUCT(reltup))->reltoastrelid = toast_relid;
		systable_inplace_update_finish(state, reltup);
	}

	heap_freetuple(reltup);

	table_close(class_rel, RowExclusiveLock);

	/*
	 * Register dependency from the toast table to the main, so that the toast
	 * table will be deleted if the main is.  Skip this in bootstrap mode.
	 */
	if (!IsBootstrapProcessingMode())
	{
		baseobject.classId = RelationRelationId;
		baseobject.objectId = relOid;
		baseobject.objectSubId = 0;
		toastobject.classId = RelationRelationId;
		toastobject.objectId = toast_relid;
		toastobject.objectSubId = 0;

		recordDependencyOn(&toastobject, &baseobject, DEPENDENCY_INTERNAL);
	}

	/*
	 * Make changes visible
	 */
	CommandCounterIncrement();

	return true;
}
/*
* Check to see whether the table needs a TOAST table.
*/
/*
 * Returns true if the relation should get a TOAST table created for it.
 */
static bool
needs_toast_table(Relation rel)
{
	/*
	 * No need to create a TOAST table for partitioned tables.
	 */
	if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
		return false;

	/*
	 * We cannot allow toasting a shared relation after initdb (because
	 * there's no way to mark it toasted in other databases' pg_class).
	 */
	if (rel->rd_rel->relisshared && !IsBootstrapProcessingMode())
		return false;

	/*
	 * Ignore attempts to create toast tables on catalog tables after initdb.
	 * Which catalogs get toast tables is explicitly chosen in catalog/pg_*.h.
	 * (We could get here via some ALTER TABLE command if the catalog doesn't
	 * have a toast table.)
	 */
	if (IsCatalogRelation(rel) && !IsBootstrapProcessingMode())
		return false;

	/* Otherwise, let the AM decide. */
	return table_relation_needs_toast_table(rel);
}
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Copyright(c) 2015-2016 JmGo Company
# All rights reserved.
#
# 文件名 : yoke_command.py
# 作者 : YuanRong
# 电子邮箱 : ryuan@jmgo.com
# 日期 : 2017/5/5 17:47
#
# 描述 : 命令模式 —— 将一个请求封装成对象,从而可以使用不同的请求对用户进行参数化
# 对请求排队、记录请求日志和撤销操作;
#
import os
class MoveFileCommand(object):
    """Command-pattern wrapper around a file rename.

    Encapsulates a rename request as an object, so that invokers can
    queue commands, log them, and undo them in reverse order without
    knowing the receiver (the filesystem) directly.
    """

    def __init__(self, t_src, t_dest):
        self.src = t_src    # source file name, relative to the CWD
        self.dest = t_dest  # destination file name, relative to the CWD

    def excute(self):
        """Execute the rename (original, misspelled name kept for callers)."""
        self()

    # Correctly spelled alias so new callers need not repeat the typo.
    execute = excute

    def __call__(self):
        """Perform the rename src -> dest."""
        print("{} to rename {}".format(self.src, self.dest))
        path = os.getcwd()
        # Bug fix: os.getcwd() has no trailing separator, so the previous
        # `path + self.src` concatenation produced a wrong path like
        # "/home/userA.txt".  os.path.join inserts the separator correctly.
        os.rename(os.path.join(path, self.src), os.path.join(path, self.dest))

    def undo(self):
        """Reverse the rename (dest -> src)."""
        path = os.getcwd()
        os.rename(os.path.join(path, self.dest), os.path.join(path, self.src))
if __name__ == '__main__':
    # Build the command queue, execute it, then undo everything in reverse.
    commands = [
        MoveFileCommand("a2017.txt", "b2017.txt"),
        MoveFileCommand("b2017.txt", "c2017.txt"),
    ]
    for command in commands:
        command.excute()
    for command in reversed(commands):
        command.undo()
/*
* ginfuncs.c
* Functions to investigate the content of GIN indexes
*
* Copyright (c) 2014-2026, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/pageinspect/ginfuncs.c
*/
#include "postgres.h"
#include "access/gin_private.h"
#include "access/htup_details.h"
#include "catalog/pg_type.h"
#include "funcapi.h"
#include "miscadmin.h"
#include "pageinspect.h"
#include "utils/array.h"
#include "utils/builtins.h"
PG_FUNCTION_INFO_V1(gin_metapage_info);
PG_FUNCTION_INFO_V1(gin_page_opaque_info);
PG_FUNCTION_INFO_V1(gin_leafpage_items);
/*
 * gin_metapage_info
 *		Return the fields of a GIN metapage as a single composite row.
 */
Datum
gin_metapage_info(PG_FUNCTION_ARGS)
{
	bytea	   *raw_page = PG_GETARG_BYTEA_P(0);
	TupleDesc	tupdesc;
	Page		page;
	GinPageOpaque opaq;
	GinMetaPageData *metadata;
	HeapTuple	resultTuple;
	Datum		values[10];
	bool		nulls[10];

	/* Raw page access can expose arbitrary data, hence superuser-only. */
	if (!superuser())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 errmsg("must be superuser to use raw page functions")));

	page = get_page_from_raw(raw_page);

	if (PageIsNew(page))
		PG_RETURN_NULL();

	if (PageGetSpecialSize(page) != MAXALIGN(sizeof(GinPageOpaqueData)))
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("input page is not a valid GIN metapage"),
				 errdetail("Expected special size %d, got %d.",
						   (int) MAXALIGN(sizeof(GinPageOpaqueData)),
						   (int) PageGetSpecialSize(page))));
	opaq = GinPageGetOpaque(page);
	if (opaq->flags != GIN_META)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("input page is not a GIN metapage"),
				 errdetail("Flags %04X, expected %04X",
						   opaq->flags, GIN_META)));

	/* Build a tuple descriptor for our result type */
	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");

	metadata = GinPageGetMeta(page);

	memset(nulls, 0, sizeof(nulls));

	/* pending-list bookkeeping */
	values[0] = Int64GetDatum(metadata->head);
	values[1] = Int64GetDatum(metadata->tail);
	values[2] = UInt32GetDatum(metadata->tailFreeSize);
	values[3] = Int64GetDatum(metadata->nPendingPages);
	values[4] = Int64GetDatum(metadata->nPendingHeapTuples);

	/* statistics, updated by VACUUM */
	values[5] = Int64GetDatum(metadata->nTotalPages);
	values[6] = Int64GetDatum(metadata->nEntryPages);
	values[7] = Int64GetDatum(metadata->nDataPages);
	values[8] = Int64GetDatum(metadata->nEntries);

	values[9] = Int32GetDatum(metadata->ginVersion);

	/* Build and return the result tuple. */
	resultTuple = heap_form_tuple(tupdesc, values, nulls);

	return HeapTupleGetDatum(resultTuple);
}
/*
 * gin_page_opaque_info
 *		Decode a GIN page's special-space data (rightlink, maxoff, flags)
 *		into a composite row, with flags expanded to a text array.
 */
Datum
gin_page_opaque_info(PG_FUNCTION_ARGS)
{
	bytea	   *raw_page = PG_GETARG_BYTEA_P(0);
	TupleDesc	tupdesc;
	Page		page;
	GinPageOpaque opaq;
	HeapTuple	resultTuple;
	Datum		values[3];
	bool		nulls[3];
	Datum		flags[16];
	int			nflags = 0;
	uint16		flagbits;

	/* Raw page access can expose arbitrary data, hence superuser-only. */
	if (!superuser())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 errmsg("must be superuser to use raw page functions")));

	page = get_page_from_raw(raw_page);

	if (PageIsNew(page))
		PG_RETURN_NULL();

	if (PageGetSpecialSize(page) != MAXALIGN(sizeof(GinPageOpaqueData)))
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("input page is not a valid GIN data leaf page"),
				 errdetail("Expected special size %d, got %d.",
						   (int) MAXALIGN(sizeof(GinPageOpaqueData)),
						   (int) PageGetSpecialSize(page))));
	opaq = GinPageGetOpaque(page);

	/* Build a tuple descriptor for our result type */
	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");

	/* Convert the flags bitmask to an array of human-readable names */
	flagbits = opaq->flags;
	if (flagbits & GIN_DATA)
		flags[nflags++] = CStringGetTextDatum("data");
	if (flagbits & GIN_LEAF)
		flags[nflags++] = CStringGetTextDatum("leaf");
	if (flagbits & GIN_DELETED)
		flags[nflags++] = CStringGetTextDatum("deleted");
	if (flagbits & GIN_META)
		flags[nflags++] = CStringGetTextDatum("meta");
	if (flagbits & GIN_LIST)
		flags[nflags++] = CStringGetTextDatum("list");
	if (flagbits & GIN_LIST_FULLROW)
		flags[nflags++] = CStringGetTextDatum("list_fullrow");
	if (flagbits & GIN_INCOMPLETE_SPLIT)
		flags[nflags++] = CStringGetTextDatum("incomplete_split");
	if (flagbits & GIN_COMPRESSED)
		flags[nflags++] = CStringGetTextDatum("compressed");
	flagbits &= ~(GIN_DATA | GIN_LEAF | GIN_DELETED | GIN_META | GIN_LIST |
				  GIN_LIST_FULLROW | GIN_INCOMPLETE_SPLIT | GIN_COMPRESSED);
	if (flagbits)
	{
		/* any flags we don't recognize are printed in hex */
		flags[nflags++] = DirectFunctionCall1(to_hex32, Int32GetDatum(flagbits));
	}

	memset(nulls, 0, sizeof(nulls));

	values[0] = Int64GetDatum(opaq->rightlink);
	values[1] = Int32GetDatum(opaq->maxoff);
	values[2] = PointerGetDatum(construct_array_builtin(flags, nflags, TEXTOID));

	/* Build and return the result tuple. */
	resultTuple = heap_form_tuple(tupdesc, values, nulls);

	return HeapTupleGetDatum(resultTuple);
}
/* Per-call state carried across SRF invocations of gin_leafpage_items. */
typedef struct gin_leafpage_items_state
{
	TupleDesc	tupd;			/* result row descriptor */
	GinPostingList *seg;		/* next posting-list segment to emit */
	GinPostingList *lastseg;	/* end-of-list sentinel (one past last) */
} gin_leafpage_items_state;
/*
 * gin_leafpage_items
 *		Set-returning function: one row per posting-list segment on a
 *		compressed GIN data leaf page, with the decoded item pointers.
 */
Datum
gin_leafpage_items(PG_FUNCTION_ARGS)
{
	bytea	   *raw_page = PG_GETARG_BYTEA_P(0);
	FuncCallContext *fctx;
	gin_leafpage_items_state *inter_call_data;

	/* Raw page access can expose arbitrary data, hence superuser-only. */
	if (!superuser())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 errmsg("must be superuser to use raw page functions")));

	if (SRF_IS_FIRSTCALL())
	{
		TupleDesc	tupdesc;
		MemoryContext mctx;
		Page		page;
		GinPageOpaque opaq;

		fctx = SRF_FIRSTCALL_INIT();
		/* allocate cross-call state in the multi-call context */
		mctx = MemoryContextSwitchTo(fctx->multi_call_memory_ctx);

		page = get_page_from_raw(raw_page);

		if (PageIsNew(page))
		{
			MemoryContextSwitchTo(mctx);
			PG_RETURN_NULL();
		}

		if (PageGetSpecialSize(page) != MAXALIGN(sizeof(GinPageOpaqueData)))
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("input page is not a valid GIN data leaf page"),
					 errdetail("Expected special size %d, got %d.",
							   (int) MAXALIGN(sizeof(GinPageOpaqueData)),
							   (int) PageGetSpecialSize(page))));
		opaq = GinPageGetOpaque(page);
		if (opaq->flags != (GIN_DATA | GIN_LEAF | GIN_COMPRESSED))
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					 errmsg("input page is not a compressed GIN data leaf page"),
					 errdetail("Flags %04X, expected %04X",
							   opaq->flags,
							   (GIN_DATA | GIN_LEAF | GIN_COMPRESSED))));

		inter_call_data = palloc_object(gin_leafpage_items_state);

		/* Build a tuple descriptor for our result type */
		if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
			elog(ERROR, "return type must be a row type");

		inter_call_data->tupd = tupdesc;

		inter_call_data->seg = GinDataLeafPageGetPostingList(page);
		inter_call_data->lastseg = (GinPostingList *)
			(((char *) inter_call_data->seg) +
			 GinDataLeafPageGetPostingListSize(page));

		fctx->user_fctx = inter_call_data;

		MemoryContextSwitchTo(mctx);
	}

	fctx = SRF_PERCALL_SETUP();
	inter_call_data = fctx->user_fctx;

	if (inter_call_data->seg != inter_call_data->lastseg)
	{
		GinPostingList *cur = inter_call_data->seg;
		HeapTuple	resultTuple;
		Datum		result;
		Datum		values[3];
		bool		nulls[3];
		int			ndecoded,
					i;
		ItemPointer tids;
		Datum	   *tids_datum;

		memset(nulls, 0, sizeof(nulls));

		values[0] = ItemPointerGetDatum(&cur->first);
		values[1] = UInt16GetDatum(cur->nbytes);

		/* build an array of decoded item pointers */
		tids = ginPostingListDecode(cur, &ndecoded);
		tids_datum = (Datum *) palloc(ndecoded * sizeof(Datum));
		for (i = 0; i < ndecoded; i++)
			tids_datum[i] = ItemPointerGetDatum(&tids[i]);
		values[2] = PointerGetDatum(construct_array_builtin(tids_datum, ndecoded, TIDOID));
		pfree(tids_datum);
		pfree(tids);

		/* Build and return the result tuple. */
		resultTuple = heap_form_tuple(inter_call_data->tupd, values, nulls);
		result = HeapTupleGetDatum(resultTuple);

		/* advance to the next segment for the following call */
		inter_call_data->seg = GinNextPostingListSegment(cur);

		SRF_RETURN_NEXT(fctx, result);
	}

	SRF_RETURN_DONE(fctx);
}
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# Save Tower CSV data as NetCDF
# <markdowncell>
# ### Set local variables
# <codecell>
url='http://geoport.whoi.edu/thredds/fileServer/usgs/data2/notebook/data/CR3000_SN3557_Table1_MainTowerCR3000_ground_V6.CR3.txt'
input_data="data.txt"
output_dir="/data"
output_file="julia.nc"
fillvalue=-9999.9
# <markdowncell>
# ### Download the data
# <codecell>
import urllib
urllib.urlretrieve(url, input_data)
# <codecell>
import pandas as pd
df = pd.read_csv(input_data,skiprows=[0,2,3],
parse_dates=True,
index_col='TIMESTAMP',
low_memory=False,
na_values=['NAN',''],
tupleize_cols=True)
df = df.fillna(fillvalue)
df.head()
# <markdowncell>
# ### Simple plot
# <codecell>
import matplotlib.pyplot as plt
%matplotlib inline
df[['Tsoil10cmTree_Avg','Tsoil20cmTree_Avg']].plot(figsize=(12,4));
# <markdowncell>
# ### Create netCDF file
# <codecell>
import numpy as np
def pd_to_secs(df):
    """Convert a pandas datetime index to seconds since 1970 (Unix epoch)."""
    import calendar
    seconds = [calendar.timegm(ts.timetuple()) for ts in df.index]
    return np.asarray(seconds, dtype=np.int64)
def cf_safe_name(name):
    """Create a CF-safe name for a group/dimension/variable.

    Names starting with a digit or underscore get a "v_" prefix, and any
    character outside [_a-zA-Z0-9] is replaced with "_".  Non-string input
    is returned unchanged.
    """
    import re
    # Bug fix: `basestring` only exists on Python 2 and raised NameError on
    # Python 3; fall back to `str` there while keeping Python 2 behavior.
    try:
        string_types = basestring  # noqa: F821 (Python 2)
    except NameError:
        string_types = str  # Python 3
    if isinstance(name, string_types):
        if re.match('^[0-9_]', name):
            # Add a letter to the front
            name = "v_{}".format(name)
        return re.sub(r'[^_a-zA-Z0-9]', "_", name)
    return name
# <codecell>
import os
out_file = os.path.join(output_dir, output_file)
if os.path.isfile(out_file):
os.remove(out_file)
from pyaxiom.netcdf.sensors import TimeSeries
ts = TimeSeries(output_dir,
latitude=0.39,
longitude=36.7,
station_name='urn:ioos:station:edu.princeton.ecohydrolab:MainTower',
global_attributes={},
times=pd_to_secs(df),
verticals=[10],
output_filename=output_file)
# <codecell>
for c in df.columns[::-1]:
# Add units based on column name?
var_attributes = dict()
ts.add_variable(cf_safe_name(c), df[c].values, attributes=var_attributes, fillvalue=-9999.9)
# <codecell> | unknown | codeparrot/codeparrot-clean | ||
""" Test Codecs (used by test_charmapcodec)
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless codec backed by the module-level charmap tables."""
    def encode(self,input,errors='strict'):
        # Map via encoding_map; unmapped characters handled per `errors`.
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        # Map via decoding_map; unmapped bytes handled per `errors`.
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    # Combines the charmap encode above with codecs' stream buffering.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Combines the charmap decode above with codecs' stream buffering.
    pass
### encodings module API
def getregentry():
    """Return the codec registry entry:
    (encoder, decoder, stream reader class, stream writer class)."""
    codec = Codec()
    return (codec.encode, codec.decode, StreamReader, StreamWriter)
### Decoding Map
# Start from the identity mapping for all 256 byte values, then override a
# few entries to exercise the non-trivial charmap cases below.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x78: "abc", # 1-n decoding mapping
    b"abc": 0x0078,# 1-n encoding mapping
    0x01: None, # decoding mapping to <undefined>
    0x79: "", # decoding mapping to <remove character>
})
### Encoding Map
# The encoding map is the inverse of the decoding map (value -> key).
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package workdir
import (
"testing"
version "github.com/hashicorp/go-version"
tfaddr "github.com/hashicorp/terraform-registry-address"
svchost "github.com/hashicorp/terraform-svchost"
)
// getTestProviderState is a test helper that returns a ProviderConfigState
// for a provider used when managing state via pluggable state storage.
// Passing an empty semVer leaves Version nil; otherwise semVer must be a
// valid semantic version or the calling test fails immediately.
//
// NOTE(review): a previous comment claimed "the Hash is always hardcoded
// at 12345", but no hash is set anywhere in this helper — confirm against
// ProviderConfigState before relying on that claim.
func getTestProviderState(t *testing.T, semVer, hostname, namespace, typeName, config string) *ProviderConfigState {
	t.Helper()
	var ver *version.Version
	if semVer == "" {
		// Allow passing no version in; leave ver nil
		ver = nil
	} else {
		var err error
		ver, err = version.NewSemver(semVer)
		if err != nil {
			t.Fatalf("test setup failed when creating version.Version: %s", err)
		}
	}
	return &ProviderConfigState{
		Version: ver,
		Source: &tfaddr.Provider{
			Hostname:  svchost.Hostname(hostname),
			Namespace: namespace,
			Type:      typeName,
		},
		ConfigRaw: []byte(config),
	}
}
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'connect.ui'
#
# Created: Wed Jul 24 12:42:01 2013
# by: PyQt4 UI code generator 4.10
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shims generated by pyuic4: older PyQt4 builds expose
# QString.fromUtf8 / UnicodeUTF8; newer API-2 builds do not, so fall back
# to identity / the shorter translate() signature.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_connectDialog(object):
    """UI definition generated by pyuic4 from connect.ui.

    Do not hand-edit the widget setup below: it is regenerated from the
    .ui file and manual changes will be lost (see header warning).
    """
    def setupUi(self, connectDialog):
        """Build the dialog's widgets, layout and signal connections."""
        connectDialog.setObjectName(_fromUtf8("connectDialog"))
        connectDialog.resize(400, 124)
        self.gridLayout = QtGui.QGridLayout(connectDialog)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.label = QtGui.QLabel(connectDialog)
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout.addWidget(self.label, 0, 0, 1, 2)
        self.radioButtonConnectNow = QtGui.QRadioButton(connectDialog)
        self.radioButtonConnectNow.setChecked(True)
        self.radioButtonConnectNow.setObjectName(_fromUtf8("radioButtonConnectNow"))
        self.gridLayout.addWidget(self.radioButtonConnectNow, 1, 0, 1, 2)
        self.radioButtonConfigureNetwork = QtGui.QRadioButton(connectDialog)
        self.radioButtonConfigureNetwork.setObjectName(_fromUtf8("radioButtonConfigureNetwork"))
        self.gridLayout.addWidget(self.radioButtonConfigureNetwork, 2, 0, 1, 2)
        spacerItem = QtGui.QSpacerItem(185, 24, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem, 3, 0, 1, 1)
        self.buttonBox = QtGui.QDialogButtonBox(connectDialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.gridLayout.addWidget(self.buttonBox, 3, 1, 1, 1)
        self.retranslateUi(connectDialog)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), connectDialog.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), connectDialog.reject)
        QtCore.QMetaObject.connectSlotsByName(connectDialog)
    def retranslateUi(self, connectDialog):
        """Apply (re-)translated user-visible strings to the widgets."""
        connectDialog.setWindowTitle(_translate("connectDialog", "Bitmessage", None))
        self.label.setText(_translate("connectDialog", "Bitmessage won\'t connect to anyone until you let it. ", None))
        self.radioButtonConnectNow.setText(_translate("connectDialog", "Connect now", None))
        self.radioButtonConfigureNetwork.setText(_translate("connectDialog", "Let me configure special network settings first", None))
from __future__ import unicode_literals, division, absolute_import
import logging
import os
from flexget import plugin
from flexget.event import event
log = logging.getLogger('free_space')
def get_free_space(folder):
    """ Return folder/drive free space (in megabytes)"""
    if os.name != 'nt':
        # POSIX: available blocks * fragment size, scaled to MB.
        vfs = os.statvfs(folder)
        return (vfs.f_bavail * vfs.f_frsize) / (1024 * 1024)
    # Windows: query the Win32 API directly via ctypes.
    import ctypes
    free_bytes = ctypes.c_ulonglong(0)
    ctypes.windll.kernel32.GetDiskFreeSpaceExW(
        ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes))
    return free_bytes.value / (1024 * 1024)
class PluginFreeSpace(object):
    """Aborts a task if an entry is accepted and there is less than a certain amount of space free on a drive."""

    # Config is either a bare number (MB) or a dict with `space` and an
    # optional `path` to check.
    schema = {
        'oneOf': [
            {'type': 'number'},
            {
                'type': 'object',
                'properties': {
                    'space': {'type': 'number'},
                    'path': {'type': 'string', 'format': 'path'}
                },
                'required': ['space'],
                'additionalProperties': False
            }
        ]
    }

    def prepare_config(self, config, task):
        """Normalize config to dict form and default `path` to the config base.

        Bug fix: the previous version referenced `task` without receiving it
        as a parameter, raising NameError whenever `path` was omitted.
        """
        if isinstance(config, (float, int)):
            config = {'space': config}
        # Use config path if none is specified
        if not config.get('path'):
            config['path'] = task.manager.config_base
        return config

    @plugin.priority(255)
    def on_task_download(self, task, config):
        config = self.prepare_config(config, task)
        # Only bother aborting if there were accepted entries this run.
        if task.accepted:
            if get_free_space(config['path']) < config['space']:
                log.error('Less than %d MB of free space in %s aborting task.' % (config['space'], config['path']))
                # backlog plugin will save and restore the task content, if available
                task.abort('Less than %d MB of free space in %s' % (config['space'], config['path']))
@event('plugin.register')
def register_plugin():
    """Register the plugin with FlexGet under the config key 'free_space'."""
    plugin.register(PluginFreeSpace, 'free_space', api_ver=2)
/* SPDX-License-Identifier: MIT */
/* origin: musl src/math/fma.c. Ported to generic Rust algorithm in 2025, TG. */
use crate::support::{
CastFrom, CastInto, DInt, Float, FpResult, HInt, Int, IntTy, MinInt, Round, Status,
};
/// Fused multiply-add that works when there is not a larger float size available. Computes
/// `(x * y) + z`.
///
/// Overview of the algorithm: `x`, `y`, and `z` are each decomposed into a [`Norm`]
/// (normalized significand with a guard bit plus an unbiased exponent). The product
/// `x * y` is formed exactly as a double-wide integer `(rhi, rlo)`, `z` is shifted into
/// alignment (collecting shifted-out bits into a sticky bit) and added or subtracted,
/// and the result is converted back through an int->float cast followed by `scalbn`
/// scaling, with explicit handling of the subnormal/underflow cases.
///
/// The `_round` parameter is currently unused.
#[inline]
pub fn fma_round<F>(x: F, y: F, z: F, _round: Round) -> FpResult<F>
where
    F: Float,
    F: CastFrom<F::SignedInt>,
    F: CastFrom<i8>,
    F::Int: HInt,
    u32: CastInto<F::Int>,
{
    let one = IntTy::<F>::ONE;
    let zero = IntTy::<F>::ZERO;
    // Normalize such that the top of the mantissa is zero and we have a guard bit.
    let nx = Norm::from_float(x);
    let ny = Norm::from_float(y);
    let nz = Norm::from_float(z);
    if nx.is_zero_nan_inf() || ny.is_zero_nan_inf() {
        // Value will overflow, defer to non-fused operations.
        return FpResult::ok(x * y + z);
    }
    if nz.is_zero_nan_inf() {
        if nz.is_zero() {
            // Empty add component means we only need to multiply.
            return FpResult::ok(x * y);
        }
        // `z` is NaN or infinity, which sets the result.
        return FpResult::ok(z);
    }
    // multiply: r = x * y
    let zhi: F::Int;
    let zlo: F::Int;
    // Exact double-wide product of the two normalized significands.
    let (mut rlo, mut rhi) = nx.m.widen_mul(ny.m).lo_hi();
    // Exponent result of multiplication
    let mut e: i32 = nx.e + ny.e;
    // Needed shift to align `z` to the multiplication result
    let mut d: i32 = nz.e - e;
    let sbits = F::BITS as i32;
    // Scale `z`. Shift `z <<= kz`, `r >>= kr`, so `kz+kr == d`, set `e = e+kr` (== ez-kz)
    if d > 0 {
        // The magnitude of `z` is larger than `x * y`
        if d < sbits {
            // Maximum shift of one `F::BITS` means shifted `z` will fit into `2 * F::BITS`. Shift
            // it into `(zhi, zlo)`. No exponent adjustment necessary.
            zlo = nz.m << d;
            zhi = nz.m >> (sbits - d);
        } else {
            // Shift larger than `sbits`, `z` only needs the top half `zhi`. Place it there (acts
            // as a shift by `sbits`).
            zlo = zero;
            zhi = nz.m;
            d -= sbits;
            // `z`'s exponent is large enough that it now needs to be taken into account.
            e = nz.e - sbits;
            if d == 0 {
                // Exactly `sbits`, nothing to do
            } else if d < sbits {
                // Remaining shift fits within `sbits`. Leave `z` in place, shift `x * y`
                rlo = (rhi << (sbits - d)) | (rlo >> d);
                // Set the sticky bit
                rlo |= IntTy::<F>::from((rlo << (sbits - d)) != zero);
                rhi = rhi >> d;
            } else {
                // `z`'s magnitude is enough that `x * y` is irrelevant. It was nonzero, so set
                // the sticky bit.
                rlo = one;
                rhi = zero;
            }
        }
    } else {
        // `z`'s magnitude once shifted fits entirely within `zlo`
        zhi = zero;
        d = -d;
        if d == 0 {
            // No shift needed
            zlo = nz.m;
        } else if d < sbits {
            // Shift s.t. `nz.m` fits into `zlo`
            let sticky = IntTy::<F>::from((nz.m << (sbits - d)) != zero);
            zlo = (nz.m >> d) | sticky;
        } else {
            // Would be entirely shifted out, only set the sticky bit
            zlo = one;
        }
    }
    /* addition */
    let mut neg = nx.neg ^ ny.neg;
    let samesign: bool = !neg ^ nz.neg;
    let mut rhi_nonzero = true;
    if samesign {
        // r += z
        rlo = rlo.wrapping_add(zlo);
        // Carry out of the low word propagates into the high word.
        rhi += zhi + IntTy::<F>::from(rlo < zlo);
    } else {
        // r -= z
        let (res, borrow) = rlo.overflowing_sub(zlo);
        rlo = res;
        rhi = rhi.wrapping_sub(zhi.wrapping_add(IntTy::<F>::from(borrow)));
        // If the subtraction went negative (top bit set), negate the two-word
        // value and flip the result's sign instead.
        if (rhi >> (F::BITS - 1)) != zero {
            rlo = rlo.signed().wrapping_neg().unsigned();
            rhi = rhi.signed().wrapping_neg().unsigned() - IntTy::<F>::from(rlo != zero);
            neg = !neg;
        }
        rhi_nonzero = rhi != zero;
    }
    /* Construct result */
    // Shift result into `rhi`, left-aligned. Last bit is sticky
    if rhi_nonzero {
        // `d` > 0, need to shift both `rhi` and `rlo` into result
        e += sbits;
        d = rhi.leading_zeros() as i32 - 1;
        rhi = (rhi << d) | (rlo >> (sbits - d));
        // Update sticky
        rhi |= IntTy::<F>::from((rlo << d) != zero);
    } else if rlo != zero {
        // `rhi` is zero, `rlo` is the entire result and needs to be shifted
        d = rlo.leading_zeros() as i32 - 1;
        if d < 0 {
            // Shift and set sticky
            rhi = (rlo >> 1) | (rlo & one);
        } else {
            rhi = rlo << d;
        }
    } else {
        // exact +/- 0.0
        return FpResult::ok(x * y + z);
    }
    e -= d;
    // Use int->float conversion to populate the significand.
    // i is in [1 << (BITS - 2), (1 << (BITS - 1)) - 1]
    let mut i: F::SignedInt = rhi.signed();
    if neg {
        i = -i;
    }
    // `|r|` is in `[0x1p62,0x1p63]` for `f64`
    let mut r: F = F::cast_from_lossy(i);
    /* Account for subnormal and rounding */
    // Unbiased exponent for the maximum value of `r`
    let max_pow = F::BITS - 1 + F::EXP_BIAS;
    let mut status = Status::OK;
    if e < -(max_pow as i32 - 2) {
        // Result is subnormal before rounding
        if e == -(max_pow as i32 - 1) {
            let mut c = F::from_parts(false, max_pow, zero);
            if neg {
                c = -c;
            }
            if r == c {
                // Min normal after rounding,
                status.set_underflow(true);
                r = F::MIN_POSITIVE_NORMAL.copysign(r);
                return FpResult::new(r, status);
            }
            if (rhi << (F::SIG_BITS + 1)) != zero {
                // Account for truncated bits. One bit will be lost in the `scalbn` call, add
                // another top bit to avoid double rounding if inexact.
                let iu: F::Int = (rhi >> 1) | (rhi & one) | (one << (F::BITS - 2));
                i = iu.signed();
                if neg {
                    i = -i;
                }
                r = F::cast_from_lossy(i);
                // Remove the top bit
                r = F::cast_from(2i8) * r - c;
                status.set_underflow(true);
            }
        } else {
            // Only round once when scaled
            d = F::EXP_BITS as i32 - 1;
            let sticky = IntTy::<F>::from(rhi << (F::BITS as i32 - d) != zero);
            i = (((rhi >> d) | sticky) << d).signed();
            if neg {
                i = -i;
            }
            r = F::cast_from_lossy(i);
        }
    }
    // Use our exponent to scale the final value.
    FpResult::new(super::scalbn(r, e), status)
}
/// Representation of `F` that has handled subnormals.
#[derive(Clone, Copy, Debug)]
struct Norm<F: Float> {
    /// Normalized significand with one guard bit, unsigned.
    m: F::Int,
    /// Exponent of the mantissa such that `m * 2^e = x`. Accounts for the shift in the mantissa
    /// and the guard bit; that is, 1.0 will normalize as `m = 1 << 53` and `e = -53`.
    e: i32,
    /// Sign of the original value (`true` when negative).
    neg: bool,
}
impl<F: Float> Norm<F> {
    /// Unbias the exponent and account for the mantissa's precision, including the guard bit.
    const EXP_UNBIAS: u32 = F::EXP_BIAS + F::SIG_BITS + 1;
    /// Values greater than this had a saturated exponent (infinity or NaN), OR were zero and we
    /// adjusted the exponent such that it exceeds this threshold.
    const ZERO_INF_NAN: u32 = F::EXP_SAT - Self::EXP_UNBIAS;
    /// Decompose `x` into a `Norm`, scaling subnormal inputs into the normal range first.
    fn from_float(x: F) -> Self {
        let mut ix = x.to_bits();
        let mut e = x.ex() as i32;
        let neg = x.is_sign_negative();
        if e == 0 {
            // Normalize subnormals by multiplication
            let scale_i = F::BITS - 1;
            let scale_f = F::from_parts(false, scale_i + F::EXP_BIAS, F::Int::ZERO);
            let scaled = x * scale_f;
            ix = scaled.to_bits();
            e = scaled.ex() as i32;
            e = if e == 0 {
                // If the exponent is still zero, the input was zero. Artificially set this value
                // such that the final `e` will exceed `ZERO_INF_NAN`.
                1 << F::EXP_BITS
            } else {
                // Otherwise, account for the scaling we just did.
                e - scale_i as i32
            };
        }
        e -= Self::EXP_UNBIAS as i32;
        // Absolute value, set the implicit bit, and shift to create a guard bit
        ix &= F::SIG_MASK;
        ix |= F::IMPLICIT_BIT;
        ix <<= 1;
        Self { m: ix, e, neg }
    }
    /// True if the value was zero, infinity, or NaN.
    fn is_zero_nan_inf(self) -> bool {
        self.e >= Self::ZERO_INF_NAN as i32
    }
    /// True if the value was exactly zero.
    fn is_zero(self) -> bool {
        // The only exponent that strictly exceeds this value is our sentinel value for zero.
        self.e > Self::ZERO_INF_NAN as i32
    }
}
'''
https://leetcode.com/contest/weekly-contest-174/problems/maximum-product-of-splitted-binary-tree/
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """LeetCode 1339: Maximum Product of Splitted Binary Tree.

    Remove one edge so that the product of the sums of the two resulting
    trees is maximized; the answer is reported modulo 10**9 + 7.
    """

    def maxProduct(self, root: 'TreeNode') -> int:
        """Return the maximum split product, modulo 10**9 + 7.

        Fixes over the original:
        - 'TreeNode' is quoted: the class is supplied by the judge and is not
          defined at import time here, so a bare annotation raised NameError.
        - Subtree sums are collected in a side list instead of destructively
          rewriting each node's `val` (the input tree is left untouched).
        - Empty tree is handled explicitly; the unused `product` local is gone.
        """
        MOD = 10 ** 9 + 7
        if root is None:
            return 0
        subtree_sums = []

        def collect(node):
            # Post-order: record the total of every subtree rooted at `node`.
            if node is None:
                return 0
            total = node.val + collect(node.left) + collect(node.right)
            subtree_sums.append(total)
            return total

        total = collect(root)
        # Cutting the edge above a subtree of sum s yields parts s and
        # total - s; maximize the true product BEFORE taking the modulus.
        best = max((total - s) * s for s in subtree_sums)
        return best % MOD
# -*- coding: utf-8 -*-
import io
from setuptools import setup, find_packages
# Packaging configuration for django-pipeline. The long description shown on
# PyPI is assembled from the README and changelog shipped next to this file.
setup(
    name='django-pipeline',
    version='1.5.1',
    description='Pipeline is an asset packaging library for Django.',
    long_description=io.open('README.rst', encoding='utf-8').read() + '\n\n' +
                     io.open('HISTORY.rst', encoding='utf-8').read(),
    author='Timothée Peignier',
    author_email='timothee.peignier@tryphon.org',
    url='https://github.com/cyberdelia/django-pipeline',
    license='MIT',
    # Ship the library only; test packages are excluded from the distribution.
    packages=find_packages(exclude=['tests', 'tests.tests']),
    zip_safe=False,
    install_requires=[
        # NOTE(review): `futures` is the Python 2 backport of
        # concurrent.futures; confirm it is still needed for the Python
        # versions advertised in the classifiers below.
        'futures>=2.1.3',
    ],
    include_package_data=True,
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Utilities',
    ]
)
#!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
# Program Parameters
#
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
jar_file = 'PeptideShaker-1.14.6.jar'
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
    """Return the symlink-resolved, canonicalized directory portion of `path`."""
    resolved = os.path.realpath(path)
    return os.path.dirname(resolved)
def java_executable():
    """Locate the Java interpreter to invoke.

    Prefer $JAVA_HOME/bin/java when it is executable; otherwise fall back to
    plain `java` and let PATH resolution do the work.
    """
    home = getenv('JAVA_HOME')
    if home:
        candidate = os.path.join(home, 'bin', 'java')
        if access(candidate, X_OK):
            return candidate
    return 'java'
def jvm_opts(argv):
    """Split our argument list into JVM options and application arguments.

    `argv` must not include the script name. Returns a 4-tuple of lists/values:
    (memory_options, prop_options, passthrough_options, exec_dir).
    """
    mem_opts = []
    prop_opts = []
    pass_args = []
    exec_dir = None
    for arg in argv:
        if arg.startswith('-D') or arg.startswith('-XX'):
            prop_opts.append(arg)
        elif arg.startswith('-Xm'):
            mem_opts.append(arg)
        elif arg.startswith('--exec_dir='):
            exec_dir = arg.split('=')[1].strip('"').strip("'")
            if not os.path.exists(exec_dir):
                # Materialize a private copy of the distribution for this user.
                shutil.copytree(real_dirname(sys.argv[0]), exec_dir,
                                symlinks=False, ignore=None)
        else:
            pass_args.append(arg)
    # Mirror the original shell script: inject the default heap options only
    # when the caller supplied none AND _JAVA_OPTIONS is completely unset.
    # An empty-but-present value still counts as "set", hence the explicit
    # `is None` test rather than a truthiness check.
    if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts
    return (mem_opts, prop_opts, pass_args, exec_dir)
def main():
    """Entry point: locate java, assemble the JVM command line, run the jar.

    Exits the process with the return code of the spawned java process.
    """
    java = java_executable()
    # NOTE(review): the string below is a no-op expression statement, not a
    # docstring (it does not open the function body); kept verbatim.
    """
    PeptideShaker updates files relative to the path of the jar file.
    In a multiuser setting, the option --exec_dir="exec_dir"
    can be used as the location for the peptide-shaker distribution.
    If the exec_dir dies not exist,
    we copy the jar file, lib, and resources to the exec_dir directory.
    """
    (mem_opts, prop_opts, pass_args, exec_dir) = jvm_opts(sys.argv[1:])
    jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])
    # A first passthrough argument starting with 'eu' is treated as an explicit
    # main-class name: run with the jar on the classpath instead of `-jar`.
    if pass_args != [] and pass_args[0].startswith('eu'):
        jar_arg = '-cp'
    else:
        jar_arg = '-jar'
    jar_path = os.path.join(jar_dir, jar_file)
    java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
    sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
    main()
import copy
class equation():
    """An operand together with the list of operations applied to it.

    NOTE: this module is Python 2 code (print statements).
    """
    def __init__(self, operand, operations = None):
        # operand: name of the value being operated on.
        # operations: list of operation names; None when not yet known.
        self.operand = operand
        self.operations = operations
    def __str__(self):
        out = "Operand : " + self.operand +'\n'
        out += "Operations :" + ','.join(self.operations)
        return out
    def __eq__(self, other):
        # Side effect: prints a diagnostic for every mismatching field before
        # returning the comparison result (used as a diffing aid).
        if self.operand != other.operand:
            print " "*8,
            print "Operand mismatch :",self.operand, " != ",other.operand
        if self.operations != other.operations:
            print " "*8,
            print "Operations mismatch for operand:", self.operand, '(',\
                len(self.operations), ') != ', other.operand,' (',len(other.operations),')'
        return self.operand == other.operand and self.operations == other.operations
class ocamlOutputFile():
    """Parses '[EQLIST]' sections of an OCaml analysis output file.

    After parseFile(), self.eqList[i] holds the `equation` objects of the
    i-th '[EQLIST]' section, in file order.
    """
    def __init__(self, filename, verbose=False):
        self.filename = filename
        self.verbose = verbose
        # List of lists: one sub-list of equations per [EQLIST] marker.
        self.eqList = []
    def parseFile(self):
        # Read the whole file up front, then scan line by line.
        f = open(self.filename, 'r')
        contents = f.read()
        f.close()
        contents = contents.split('\n')
        for line in contents:
            if line.startswith('[EQLIST]'):
                # Start a new equation list; strip the marker token off `line`.
                self.eqList.append([])
                line = line.split()
                line = ' '.join(line[1:])
            if line.strip().startswith('EQL->'):
                line = line.split()
                if line[0] == 'EQL->':
                    # Token shape: "(operand:op1,op2,...,opN)" -- drop the
                    # leading '(' and the trailing ')' while splitting.
                    temp = line[1].split(':')
                    operand = temp[0][1:]
                    operations = temp[1].split(',')
                    operations[-1] = operations[-1][:-1]
                    eq = equation(operand=operand, operations=operations)
                    self.eqList[-1].append(eq)
class subSumptionClass():
    """Parses subsumption output: each '**[SSO]' line starts a new group and
    following '-> [Subsumes]' lines append the subsumed equations to it."""
    def __init__(self, filename, verbose=False):
        self.filename = filename
        self.verbose = verbose
        # List of lists of `equation`s, one sub-list per '**[SSO]' marker;
        # the first entry of each sub-list is the subsuming equation itself.
        self.eqList = []
    def parseFile(self):
        f = open(self.filename, 'r')
        contents = f.read()
        f.close()
        contents = contents.split('\n')
        for line in contents:
            if line.startswith('**[SSO]'):
                self.eqList.append([])
                line = line.split()
                line = ' '.join(line[1:])
                #print "SSO line: ", line
                # Remainder shape: "(operand:op1,...,opN)".
                temp = line.split(':')
                operand = temp[0][1:]
                operations = temp[1].split(',')
                operations[-1] = operations[-1][:-1]
                eq = equation(operand=operand, operations=operations)
                self.eqList[-1].append(eq)
            if line.strip().startswith('-> [Subsumes]'):
                line = line.split()
                line = ' '.join(line[1:])
                line = line.split()
                if line[0] == '[Subsumes]':
                    temp = line[1].split(':')
                    operand = temp[0][1:]
                    operations = temp[1].split(',')
                    operations[-1] = operations[-1][:-1]
                    eq = equation(operand=operand, operations=operations)
                    self.eqList[-1].append(eq)
class hook():
    """A security hook and the security-sensitive operations it mediates."""
    def __init__(self, hookName, secSensitiveOper=None):
        # BUG FIX: the previous default `secSensitiveOper=[]` was a mutable
        # default argument, so every instance created without an explicit
        # list shared (and mutated) the same one.
        self.hookName = hookName
        self.secSensitiveOper = [] if secSensitiveOper is None else secSensitiveOper
class function():
    """A source function and the hooks it contains."""
    def __init__(self, functionName, hook=None):
        # BUG FIX: the previous default `hook=[]` was a mutable default
        # argument shared across all instances created without one.
        self.functionName = functionName
        self.hook = [] if hook is None else hook
class ocamlFunctionFile():
    """Intended to parse per-function hook listings from an OCaml output file.

    NOTE(review): parseFile appears unfinished/broken -- see inline notes.
    """
    def __init__(self, filename, verbose = False):
        self.filename = filename
        self.verbose = verbose
        self.functions = []
    def parseFile(self):
        f = open(self.filename, 'r')
        contents = f.read()
        f.close()
        contents = contents.split('\n')
        for line in contents:
            if line.strip().startswith('Function'):
                name = line.split()
                # NOTE(review): `line[0]` indexes the first *character* of the
                # raw line; this was most likely meant to be `name[0]` (first
                # token) -- confirm against the expected file format.
                name = line[0].split(':')[1][1:-1]
                self.functions.append(function(functionName=name))
            if line.split()[1] == 'Hook':
                # NOTE(review): raises IndexError for lines with fewer than
                # two tokens (e.g. blank lines).
                line = line.split()
                if line[0] == 'EQL->':
                    temp = line[1].split(':')
                    operand = temp[0]
                    operations = temp[1].split(',')
                    eq = equation(operand=operand, operations=operations)
                    # NOTE(review): this class defines `self.functions`, not
                    # `self.eqList`; reaching this line raises AttributeError.
                    self.eqList[-1].append(eq)
class AutoHook():
    """An automatically placed hook site found in the analysis output.

    Records the hook call name, its source location, the optional statement
    text for statement-level hooks, and the analysis results attached later
    during parsing (SSO equations and dominating hooks).
    """
    def __init__(self, hook, fileName, line, stmt=None):
        self.hook = hook
        self.fileName = fileName
        self.line = line
        self.stmt = stmt
        # Set to True by the parser for unmediated statement-level hooks.
        self.ifStmt = False
        # Populated incrementally while the output file is parsed.
        self.SSOs = []
        self.domHooks = []
class ManualHook():
    """A hand-written hook call site, plus the auto hooks grouped under it."""
    def __init__(self, hook, fileName, line):
        self.hook = hook
        self.fileName = fileName
        self.line = line
        # AutoHook entries parsed under this manual hook's section.
        self.Autohooks = []
    def __eq__(self, other):
        # Identity is the (call, file) pair; the line number is deliberately
        # ignored, so the same hook reported at different lines compares equal.
        return (self.hook, self.fileName) == (other.hook, other.fileName)
class ocamlMaualHooks():
    """Parses manual-hook analysis output into a ManualHook/AutoHook tree.

    Recognized line markers: '[ManualHook]', '[AutoHook]', '[SSO]',
    '- [Dom]' and '[Unmediated Hook]'. Location tokens have the shape
    'file@line'.
    """
    def __init__(self, filename, verbose=False):
        self.filename = filename
        self.verbose = verbose
        self.ManHooks = []
        self.unmediatedHooks = []
        # fileName -> list of ManualHook, sorted by line number.
        self.fileMap ={}
    def createFilemap(self):
        # NOTE(review): `manHooks` is unused.
        manHooks = []
        for hook in self.ManHooks:
            # EAFP: create the bucket on first KeyError.
            try:
                self.fileMap[hook.fileName]
            except KeyError:
                self.fileMap[hook.fileName] = []
            self.fileMap[hook.fileName].append(hook)
            #self.fileMap[hook.fileName].append(hook)
        for fileHooks in self.fileMap:
            hooks = self.fileMap[fileHooks]
            # list.sort() sorts the mapped list in place and returns None;
            # rebinding `hooks` to None is harmless but dead.
            hooks = hooks.sort(key=lambda x: int(x.line), reverse=False)
            #self.fileMap[fileHooks] = hooks
    def parseFile(self):
        f = open(self.filename, 'r')
        contents = f.read()
        f.close()
        contents = contents.split('\n')
        # isDom tracks whether subsequent [SSO] lines belong to the latest
        # dominating hook ('- [Dom]') or to the latest [AutoHook].
        isDom = False
        Autohooklist = []
        for line in contents:
            #print len(Autohooklist)
            if line.startswith('[ManualHook]'):
                Autohooklist = []
                #self.ManHooks.append([])
                line = line.split()
                # Tokens: marker, ?, call@..., ?, file@line.
                hookCall = line[2].split('@')[0]
                filename = line[4].split('@')[0]
                lineNumber = line[4].split('@')[1]
                ManHook = ManualHook(hook=hookCall,fileName=filename,line=lineNumber)
                self.ManHooks.append(ManHook)
                #print line
                line = ' '.join(line[1:])
                #print "Manual Hook: ", line
            if line.strip().startswith('[AutoHook]'):
                isDom = False
                line = line.split()
                hookCall = line[1]
                filename = line[3].split('@')[0]
                lineNumber = line[3].split('@')[1]
                # Statement-level hooks carry the statement text as an
                # extra token.
                if hookCall.startswith('stm'):
                    stmt = line[5]
                    ahook = AutoHook(hookCall,filename,lineNumber,stmt)
                else:
                    ahook = AutoHook(hookCall, filename, lineNumber)
                self.ManHooks[len(self.ManHooks) - 1].Autohooks.append(ahook)
                #Autohooklist.append(ahook)
            # if line[0] == '[AutoHook]':
            # print "Auto hook: ", line
                line = ' '.join(line[1:])
            if line.strip().startswith('[SSO]'):
                line = line.split()
                temp = line[1].split(':')
                operand = temp[0]
                operations = temp[1].split(',')
                eq = equation(operand=operand, operations=operations)
                line = ' '.join(line[1:])
                # Attach to the latest dom hook or latest auto hook.
                manHooklength = len(self.ManHooks)
                hooklength = len(self.ManHooks[manHooklength - 1].Autohooks)
                domLength = len(self.ManHooks[manHooklength - 1].Autohooks[hooklength-1].domHooks)
                if(isDom):
                    self.ManHooks[manHooklength - 1].Autohooks[hooklength - 1].domHooks[domLength-1].SSOs.append(eq)
                else:
                    self.ManHooks[manHooklength - 1].Autohooks[hooklength-1].SSOs.append(eq)
            if line.strip().startswith('- [Dom]'):
                isDom = True
                line = line.split()
                manHooklength = len(self.ManHooks)
                hooklength = len(self.ManHooks[manHooklength - 1].Autohooks)
                hookCall = line[2]
                filename = line[4].split('@')[0]
                lineNumber = line[4].split('@')[1]
                if hookCall.startswith('stm'):
                    stmt = line[5]
                    ahook = AutoHook(hookCall, filename, lineNumber, stmt)
                else:
                    ahook = AutoHook(hookCall, filename, lineNumber)
                self.ManHooks[len(self.ManHooks) - 1].Autohooks[hooklength-1].domHooks.append(ahook)
                line = ' '.join(line[1:])
            if line.strip().startswith('[Unmediated Hook]'):
                line = line.split()
                #print line
                hookName = line[2]
                fileName = line[4].split('@')[0]
                lineNumber = line[4].split('@')[1]
                if hookName.startswith('Stm'):
                    stmt = line[6].split('@')[0]
                    ahook = AutoHook(hookName, fileName, lineNumber, stmt)
                    ahook.ifStmt = True
                else:
                    ahook = AutoHook(hookName, fileName, lineNumber)
                self.unmediatedHooks.append(ahook)
        self.createFilemap()
class ocamlAutoHooks():
    """Parses '[EQLIST]' sections into lists of `equation` objects.

    NOTE(review): byte-for-byte duplicate of ocamlOutputFile; consider
    consolidating the two classes.
    """
    def __init__(self, filename, verbose=False):
        self.filename = filename
        self.verbose = verbose
        # One sub-list of equations per [EQLIST] marker.
        self.eqList = []
    def parseFile(self):
        f = open(self.filename, 'r')
        contents = f.read()
        f.close()
        contents = contents.split('\n')
        for line in contents:
            if line.startswith('[EQLIST]'):
                self.eqList.append([])
                line = line.split()
                line = ' '.join(line[1:])
            if line.strip().startswith('EQL->'):
                line = line.split()
                if line[0] == 'EQL->':
                    # Token shape: "(operand:op1,...,opN)".
                    temp = line[1].split(':')
                    operand = temp[0][1:]
                    operations = temp[1].split(',')
                    operations[-1] = operations[-1][:-1]
                    eq = equation(operand=operand, operations=operations)
                    self.eqList[-1].append(eq)
if __name__ == '__main__':
    # Ad-hoc smoke test: parse a known analysis output file. The commented
    # blocks below dump the parsed structure for manual inspection.
    file1 = '../server13_ifiles/results/OUT-MHOOK-AHOOK1.txt'
    o = ocamlMaualHooks(filename=file1)
    o.parseFile()
    # print "Man Hooks : ", len(o.ManHooks)
    # for manHook in o.ManHooks:
    # #print manHook
    # print manHook.hook, manHook.fileName, manHook.line
    # print len(manHook.Autohooks)
    # for hook in manHook.Autohooks:
    # print hook.hook, hook.fileName, hook.line
    # print "SSO :",len(hook.SSOs)
    #
    # print "Unmediated Hooks : ", len(o.unmediatedHooks)
    #print manHook.hook, manHook.fileName, manHook.line
    #print "Dominated Autohooks :",len(manHook.Autohooks)
    #print manHook.Autohooks
    # print "Number of lists :", len(o.eqList)
    # for i in range(len(o.eqList)):
    # print "List Number ", i
    # print "Equations :", len(o.eqList[i])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.